Schema (one record per repository):

repo_name: string, length 6-130
hexsha: list
file_path: list
code: list
apis: list
possible_versions: list
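Judging from the rows below, the per-file fields appear to be parallel lists: hexsha[i], file_path[i], code[i], and apis[i] describe the same file, while possible_versions[i] maps each library to the release versions its extracted API calls are judged compatible with. A minimal sketch of iterating over such records; the storage format is not specified anywhere in this dump, so the JSON Lines layout and the file path here are assumptions:

```python
import json

# Hypothetical path; the dump's actual on-disk format is an assumption.
DATA_PATH = "code_api_dataset.jsonl"

with open(DATA_PATH, encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # The per-file lists are assumed parallel: entry i of each list
        # describes the same file snapshot within the repository.
        print(record["repo_name"])
        for sha, path, apis in zip(record["hexsha"],
                                   record["file_path"],
                                   record["apis"]):
            print(f"  {path} @ {sha[:8]}: {len(apis)} extracted API call(s)")
```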
brunasenra/Store_Sales_Prediction
[ "6f7371188e37ebb905e171bd8afaae9e41f5cdf5" ]
[ "API/rossmann/Rossmann.py" ]
[ "import pickle\nimport inflection\nimport pandas as pd\nimport numpy as np\nimport math\nimport datetime\n\nclass Rossmann(object):\n def __init__(self):\n self.home_path = 'C:/Users/bruna/OneDrive/Favoritos compartilhados\\DATA SCIENCE\\BRUNA\\DATA SCIENCE\\PROJECTS\\Store_Sales_Prediction'\n \n # loads the rescaling\n self.competition_distance_scaler = pickle.load(open(self.home_path + 'parameter/competition_distance_scaler.pkl', 'rb'))\n self.competition_time_month_scaler = pickle.load(open(self.home_path + 'parameter/competition_time_month_scaler.pkl', 'rb'))\n self.promo_time_week_scaler = pickle.load(open(self.home_path + 'parameter/promo_time_week_scaler.pkl', 'rb'))\n self.year_scaler = pickle.load(open(self.home_path + 'parameter/year_scaler.pkl', 'rb'))\n \n # loads the encoder\n self.store_type_scaler = pickle.load(open(self.home_path + 'parameter/store_type_scaler.pkl', 'rb'))\n\n \n def data_cleaning(self, df1):\n\n ## 1.2. Renaming columns\n cols_old = ['Store', 'DayOfWeek', 'Date', 'Open', 'Promo', 'StateHoliday', \n 'SchoolHoliday', 'StoreType', 'Assortment','CompetitionDistance', 'CompetitionOpenSinceMonth', \n 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval']\n\n # snake_case\n snakecase = lambda x: inflection.underscore(x)\n\n # creates new columns from old columns in snakecase \n cols_new = list(map(snakecase, cols_old))\n\n # renames the old columns\n df1.columns = cols_new\n\n\n ## 1.4. Checking data types\n # transforms 'date' column to datetime type\n df1['date'] = pd.to_datetime(df1['date'])\n\n\n ## 1.6. Filling out the NaN values\n # competition_distance\n df1['competition_distance'] = df1['competition_distance'].apply(lambda x: 200000.0 if math.isnan(x) else x)\n\n # competition_open_since_month\n df1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month if math.isnan(x['competition_open_since_month']) else x['competition_open_since_month'], axis=1)\n\n # competition_open_since_year \n df1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['competition_open_since_year']) else x['competition_open_since_year'], axis=1)\n\n # promo2_since_week\n df1['promo2_since_week'] = df1.apply(lambda x: x['date'].week if math.isnan(x['promo2_since_week']) else x['promo2_since_week'], axis=1)\n\n # promo2_since_year\n df1['promo2_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['promo2_since_year']) else x['promo2_since_year'], axis=1)\n\n # promo_interval\n month_map = {1: 'Jan', 2: 'Fev', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',\n 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}\n\n df1['promo_interval'].fillna(0, inplace=True)\n df1['month_map'] = df1['date'].dt.month.map(month_map)\n df1['is_promo'] = df1[['promo_interval','month_map']].apply(lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split(',') else 0, axis=1)\n\n\n ## 1.7. 
Changing data types\n df1['competition_open_since_month'] = df1['competition_open_since_month'].astype(int)\n df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int)\n\n # transforms promotion data to int\n df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)\n df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)\n \n return df1\n\n \n def feature_engineering(self, df2):\n # 2.0 FEATURE ENGINEERING\n # year\n df2['year'] = df2['date'].dt.year\n\n # month\n df2['month'] = df2['date'].dt.month\n\n # day\n df2['day'] = df2['date'].dt.day\n\n # week of year\n df2['week_of_year'] = df2['date'].dt.weekofyear\n\n # year week\n df2['year_week'] = df2['date'].dt.strftime('%Y-%W')\n\n # Competition since\n df2['competition_since'] = df2.apply( lambda x: datetime.datetime(year = x['competition_open_since_year'], \n month = x['competition_open_since_month'], day = 1), axis = 1)\n df2['competition_time_month'] = ((df2['date'] - df2['competition_since'])/30).apply(lambda x: x.days).astype(int)\n\n # Promo since\n df2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str)\n df2['promo_since'] = df2['promo_since'].apply( lambda x: datetime.datetime.strptime\n (x + '-1', '%Y-%W-%w') - datetime.timedelta(days = 7))\n df2['promo_time_week'] = ((df2['date'] - df2['promo_since'])/7).apply(lambda x: x.days).astype(int)\n\n # assortment\n df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended' )\n\n # state holiday\n df2['state_holiday'] = df2['state_holiday'].apply( lambda x: 'public_holiday' if x == 'a' else 'easter_holiday' \n if x == 'b' else 'christmas' if x =='c' else 'regular_day')\n\n # 3.0 VARIABLE FILTERING\n ## 3.1 Row filtering\n df2 = df2[df2['open'] != 0]\n \n ## 3.2 Column filtering\n cols_drop = ['open', 'promo_interval', 'month_map']\n df2 = df2.drop(cols_drop, axis=1)\n \n return df2\n\n\n def data_preparation(self, df5):\n \n ## 5.2 Rescaling\n ### 5.2.1 Rescaling competition_distance\n # competition_distance\n df5['competition_distance'] = self.competition_distance_scaler.fit_transform(df5[['competition_distance']])\n\n\n ### 5.2.2 Rescaling competition_time_month\n # competition_time_month\n df5['competition_time_month'] = self.competition_time_month_scaler.fit_transform(df5[['competition_time_month']])\n\n \n ### 5.2.3 Rescaling promo_time_week\n # promo_time_week\n df5['promo_time_week'] = self.promo_time_week_scaler.fit_transform(df5[['promo_time_week']])\n\n # year\n df5['year'] = self.year_scaler.fit_transform(df5[['year']])\n \n \n ### 5.3.1 Encoding\n\n # state_holiday - One Hot Encoding\n df5 = pd.get_dummies(df5, prefix=['state_holiday'], columns=['state_holiday'])\n\n # store_type\n df5['store_type'] = self.store_type_scaler.fit_transform(df5['store_type'])\n\n # assortment\n assortment_dict = {'basic':1, 'extra': 2, 'extended': 3}\n df5['assortment'] = df5['assortment'].map(assortment_dict)\n\n ### 5.3.2 Nature Transformation\n # day_of_week\n df5['day_of_week_sin'] = df5['day_of_week'].apply(lambda x: np.sin(x *(2. * np.pi / 7)))\n df5['day_of_week_cos'] = df5['day_of_week'].apply(lambda x: np.cos(x *(2. * np.pi / 7)))\n\n # month\n df5['month_sin'] = df5['month'].apply(lambda x: np.sin(x *(2. * np.pi / 12)))\n df5['month_cos'] = df5['month'].apply(lambda x: np.cos(x *(2. * np.pi / 12)))\n\n # day\n df5['day_sin'] = df5['day'].apply(lambda x: np.sin(x *(2. 
* np.pi / 30)))\n df5['day_cos'] = df5['day'].apply(lambda x: np.cos(x *(2. * np.pi / 30)))\n\n # week_of_year\n df5['week_of_year_sin'] = df5['week_of_year'].apply(lambda x: np.sin(x *(2. * np.pi / 52)))\n df5['week_of_year_cos'] = df5['week_of_year'].apply(lambda x: np.cos(x *(2. * np.pi / 52)))\n \n cols_selected = ['store', 'promo', 'store_type',\n 'assortment','competition_distance', 'competition_open_since_month',\n 'competition_open_since_year','promo2', 'promo2_since_week',\n 'promo2_since_year','competition_time_month', 'promo_time_week',\n 'day_of_week_sin','day_of_week_cos', 'month_sin','month_cos',\n 'day_sin','day_cos', 'week_of_year_sin', 'week_of_year_cos']\n \n return df5[cols_selected]\n \n \n def get_prediction(self, model, original_data, test_data):\n #predicts\n pred = model.predict(test_data)\n \n # joins pred into the original data\n original_data['prediction'] = np.expm1(pred)\n \n return original_data.to_json(orient='records', date_format='iso')\n" ]
[ [ "pandas.to_datetime", "numpy.cos", "numpy.expm1", "numpy.sin", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Huangheyl/Paddle
[ "c560a7d57aad990f374ebadd330351f18e2ca65f", "a1b640bc66a5cc9583de503e7406aeba67565e8d", "c560a7d57aad990f374ebadd330351f18e2ca65f", "c560a7d57aad990f374ebadd330351f18e2ca65f", "c560a7d57aad990f374ebadd330351f18e2ca65f" ]
[ "python/paddle/fluid/tests/unittests/test_jit_save_load.py", "python/paddle/fluid/framework.py", "python/paddle/fluid/tests/unittests/dist_fleet_ctr_ps_gpu.py", "python/paddle/vision/datasets/flowers.py", "python/paddle/metric/metrics.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport pickle\nimport unittest\nimport numpy as np\nimport paddle\nfrom paddle.static import InputSpec\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph import Linear\nfrom paddle.fluid.dygraph import declarative, ProgramTranslator\nfrom paddle.fluid.dygraph.io import EXTRA_VAR_INFO_FILENAME\n\nBATCH_SIZE = 32\nBATCH_NUM = 10\nSEED = 10\n\n\ndef random_batch_reader(input_size, label_size):\n def _get_random_inputs_and_labels(input_size, label_size):\n np.random.seed(SEED)\n input = np.random.random(size=input_size).astype('float32')\n label = np.random.random(size=label_size).astype('int64')\n return input, label\n\n def __reader__():\n for _ in range(BATCH_NUM):\n batch_input, batch_label = _get_random_inputs_and_labels(\n [BATCH_SIZE, input_size], [BATCH_SIZE, label_size])\n yield batch_input, batch_label\n\n return __reader__\n\n\nclass LinearNet(fluid.dygraph.Layer):\n def __init__(self, in_size, out_size):\n super(LinearNet, self).__init__()\n self._linear = Linear(in_size, out_size)\n\n @declarative\n def forward(self, x):\n return self._linear(x)\n\n\nclass LinearNetNotDeclarative(fluid.dygraph.Layer):\n def __init__(self, in_size, out_size):\n super(LinearNetNotDeclarative, self).__init__()\n self._linear = Linear(in_size, out_size)\n\n def forward(self, x):\n return self._linear(x)\n\n\nclass LinearNetReturnLoss(fluid.dygraph.Layer):\n def __init__(self, in_size, out_size):\n super(LinearNetReturnLoss, self).__init__()\n self._linear = Linear(in_size, out_size)\n\n @declarative\n def forward(self, x):\n y = self._linear(x)\n z = self._linear(y)\n loss = fluid.layers.mean(z)\n return z, loss\n\n\ndef train(layer, input_size=784, label_size=1):\n # create optimizer\n sgd = fluid.optimizer.SGDOptimizer(\n learning_rate=0.01, parameter_list=layer.parameters())\n # create data loader\n train_loader = fluid.io.DataLoader.from_generator(capacity=5)\n train_loader.set_batch_generator(\n random_batch_reader(input_size, label_size))\n # train\n for data in train_loader():\n img, label = data\n label.stop_gradient = True\n\n cost = layer(img)\n\n loss = fluid.layers.cross_entropy(cost, label)\n avg_loss = fluid.layers.mean(loss)\n\n avg_loss.backward()\n sgd.minimize(avg_loss)\n layer.clear_gradients()\n return [img], layer, avg_loss\n\n\nclass TestJitSaveLoad(unittest.TestCase):\n def setUp(self):\n self.model_path = \"model.test_jit_save_load\"\n # enable dygraph mode\n fluid.enable_dygraph()\n # config seed\n paddle.manual_seed(SEED)\n paddle.framework.random._manual_program_seed(SEED)\n\n def train_and_save_model(self, model_path=None, configs=None):\n layer = LinearNet(784, 1)\n example_inputs, layer, _ = train(layer)\n final_model_path = model_path if model_path else self.model_path\n orig_input_types = [type(x) for x in example_inputs]\n fluid.dygraph.jit.save(\n layer=layer,\n model_path=final_model_path,\n 
input_spec=example_inputs,\n configs=configs)\n new_input_types = [type(x) for x in example_inputs]\n self.assertEqual(orig_input_types, new_input_types)\n return layer\n\n def test_save_load(self):\n # train and save model\n train_layer = self.train_and_save_model()\n # load model\n program_translator = ProgramTranslator()\n program_translator.enable(False)\n loaded_layer = fluid.dygraph.jit.load(self.model_path)\n self.load_and_inference(train_layer, loaded_layer)\n self.load_dygraph_state_dict(train_layer)\n self.load_and_finetune(train_layer, loaded_layer)\n program_translator.enable(True)\n\n def load_and_inference(self, train_layer, infer_layer):\n train_layer.eval()\n infer_layer.eval()\n # inference & compare\n x = fluid.dygraph.to_variable(\n np.random.random((1, 784)).astype('float32'))\n self.assertTrue(\n np.array_equal(train_layer(x).numpy(), infer_layer(x).numpy()))\n\n def load_and_finetune(self, train_layer, load_train_layer):\n train_layer.train()\n load_train_layer.train()\n # train & compare\n img0, _, train_loss = train(train_layer)\n img1, _, load_train_loss = train(load_train_layer)\n self.assertTrue(\n np.array_equal(train_loss.numpy(), load_train_loss.numpy()))\n\n def load_dygraph_state_dict(self, train_layer):\n train_layer.eval()\n # construct new model\n new_layer = LinearNet(784, 1)\n model_dict, _ = fluid.dygraph.load_dygraph(self.model_path)\n new_layer.set_dict(model_dict)\n new_layer.eval()\n # inference & compare\n x = fluid.dygraph.to_variable(\n np.random.random((1, 784)).astype('float32'))\n self.assertTrue(\n np.array_equal(train_layer(x).numpy(), new_layer(x).numpy()))\n\n def test_save_get_program_failed(self):\n layer = LinearNetNotDeclarative(784, 1)\n example_inputs, layer, _ = train(layer)\n with self.assertRaises(RuntimeError):\n fluid.dygraph.jit.save(\n layer=layer,\n model_path=self.model_path,\n input_spec=example_inputs)\n\n def test_load_dygraph_no_path(self):\n model_path = \"model.test_jit_save_load.no_path\"\n new_layer = LinearNet(784, 1)\n with self.assertRaises(ValueError):\n model_dict, _ = fluid.dygraph.load_dygraph(model_path)\n\n\nclass LinearNetMultiInput(fluid.dygraph.Layer):\n def __init__(self, in_size, out_size):\n super(LinearNetMultiInput, self).__init__()\n self._linear1 = Linear(in_size, out_size)\n # self._linear2 = Linear(in_size, out_size)\n\n @declarative(input_spec=[\n InputSpec(\n [None, 8], dtype='float32'), InputSpec(\n [None, 8], dtype='float32')\n ])\n def forward(self, x, y):\n x_out = self._linear1(x)\n y_out = self._linear1(y)\n loss = fluid.layers.mean(x_out + y_out)\n return x_out, y_out, loss\n\n\nclass TestSaveLoadWithInputSpec(unittest.TestCase):\n def setUp(self):\n # enable dygraph mode\n fluid.enable_dygraph()\n\n def test_with_input_spec(self):\n net = LinearNetReturnLoss(8, 8)\n # set x.shape = [None, 8]\n net.forward = declarative(\n net.forward, input_spec=[InputSpec(\n [None, 8], name='x')])\n\n model_path = \"model.input_spec.output_spec\"\n configs = fluid.dygraph.jit.SaveLoadConfig()\n # check inputs and outputs\n self.assertTrue(len(net.forward.inputs) == 1)\n input_x = net.forward.inputs[0]\n self.assertTrue(input_x.shape == (-1, 8))\n self.assertTrue(input_x.name == 'x')\n\n # 1. prune loss\n configs.output_spec = net.forward.outputs[:1]\n fluid.dygraph.jit.save(net, model_path, configs=configs)\n\n # 2. 
load to infer\n infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)\n x = fluid.dygraph.to_variable(\n np.random.random((4, 8)).astype('float32'))\n pred = infer_layer(x)\n\n def test_multi_in_out(self):\n net = LinearNetMultiInput(8, 8)\n\n model_path = \"model.multi_inout.output_spec1\"\n configs = fluid.dygraph.jit.SaveLoadConfig()\n # 1. check inputs and outputs\n self.assertTrue(len(net.forward.inputs) == 2)\n input_x = net.forward.inputs[0]\n input_y = net.forward.inputs[1]\n self.assertTrue(input_x.shape == (-1, 8))\n self.assertTrue(input_y.shape == (-1, 8))\n\n # 2. prune loss\n configs.output_spec = net.forward.outputs[:2]\n fluid.dygraph.jit.save(net, model_path, configs=configs)\n\n # 3. load to infer\n infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)\n x = fluid.dygraph.to_variable(\n np.random.random((4, 8)).astype('float32'))\n y = fluid.dygraph.to_variable(\n np.random.random((4, 8)).astype('float32'))\n # 4. predict\n pred_x, pred_y = infer_layer(x, y)\n\n # 1. prune y and loss\n model_path = \"model.multi_inout.output_spec2\"\n configs.output_spec = net.forward.outputs[:1]\n fluid.dygraph.jit.save(net, model_path, [input_x], configs)\n # 2. load again\n infer_layer2 = fluid.dygraph.jit.load(model_path, configs=configs)\n # 3. predict\n pred_xx = infer_layer2(x)\n\n # 4. assert pred_x == pred_xx\n self.assertTrue(np.allclose(pred_x.numpy(), pred_xx.numpy()))\n\n\nclass TestJitSaveLoadConfig(unittest.TestCase):\n def setUp(self):\n # enable dygraph mode\n fluid.enable_dygraph()\n # config seed\n paddle.manual_seed(SEED)\n paddle.framework.random._manual_program_seed(SEED)\n\n def basic_save_load(self, layer, model_path, configs):\n # 1. train & save\n example_inputs, train_layer, _ = train(layer)\n fluid.dygraph.jit.save(\n layer=train_layer,\n model_path=model_path,\n input_spec=example_inputs,\n configs=configs)\n # 2. load \n infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)\n train_layer.eval()\n # 3. 
inference & compare\n x = fluid.dygraph.to_variable(\n np.random.random((1, 784)).astype('float32'))\n self.assertTrue(\n np.array_equal(train_layer(x).numpy(), infer_layer(x).numpy()))\n\n def test_model_filename(self):\n layer = LinearNet(784, 1)\n model_path = \"model.save_load_config.output_spec\"\n configs = fluid.dygraph.jit.SaveLoadConfig()\n configs.model_filename = \"__simplenet__\"\n self.basic_save_load(layer, model_path, configs)\n\n def test_params_filename(self):\n layer = LinearNet(784, 1)\n model_path = \"model.save_load_config.params_filename\"\n configs = fluid.dygraph.jit.SaveLoadConfig()\n configs.params_filename = \"__params__\"\n self.basic_save_load(layer, model_path, configs)\n\n def test_separate_params(self):\n layer = LinearNet(784, 1)\n model_path = \"model.save_load_config.separate_params\"\n configs = fluid.dygraph.jit.SaveLoadConfig()\n configs.separate_params = True\n self.basic_save_load(layer, model_path, configs)\n\n def test_output_spec(self):\n train_layer = LinearNetReturnLoss(8, 8)\n adam = fluid.optimizer.AdamOptimizer(\n learning_rate=0.1, parameter_list=train_layer.parameters())\n x = fluid.dygraph.to_variable(\n np.random.random((4, 8)).astype('float32'))\n for i in range(10):\n out, loss = train_layer(x)\n loss.backward()\n adam.minimize(loss)\n train_layer.clear_gradients()\n\n model_path = \"model.save_load_config.output_spec\"\n configs = fluid.dygraph.jit.SaveLoadConfig()\n configs.output_spec = [out]\n fluid.dygraph.jit.save(\n layer=train_layer,\n model_path=model_path,\n input_spec=[x],\n configs=configs)\n\n train_layer.eval()\n infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)\n x = fluid.dygraph.to_variable(\n np.random.random((4, 8)).astype('float32'))\n self.assertTrue(\n np.array_equal(train_layer(x)[0].numpy(), infer_layer(x).numpy()))\n\n\nclass MultiLoadingLinearNet(fluid.dygraph.Layer):\n def __init__(self, size, model_path):\n super(MultiLoadingLinearNet, self).__init__()\n self._linear = Linear(size, size)\n self._load_linear1 = fluid.dygraph.jit.load(model_path)\n self._load_linear2 = fluid.dygraph.jit.load(model_path)\n\n @declarative\n def forward(self, x):\n tmp1 = self._linear(x)\n tmp2 = self._load_linear1(tmp1)\n tmp3 = self._load_linear2(tmp2)\n y = self._linear(tmp3)\n return y\n\n\nclass TestJitMultipleLoading(unittest.TestCase):\n def setUp(self):\n self.linear_size = 4\n self.model_path = \"model.jit_multi_load\"\n # enable dygraph mode\n fluid.enable_dygraph()\n # config seed\n paddle.manual_seed(SEED)\n paddle.framework.random._manual_program_seed(SEED)\n # train and save base model\n self.train_and_save_orig_model()\n\n def train_and_save_orig_model(self):\n layer = LinearNet(self.linear_size, self.linear_size)\n example_inputs, layer, _ = train(layer, self.linear_size, 1)\n fluid.dygraph.jit.save(\n layer=layer, model_path=self.model_path, input_spec=example_inputs)\n\n def test_load_model_retransform_inference(self):\n multi_loaded_layer = MultiLoadingLinearNet(self.linear_size,\n self.model_path)\n state_dict = multi_loaded_layer.state_dict()\n name_set = set()\n for _, var in state_dict.items():\n self.assertTrue(var.name not in name_set)\n name_set.add(var.name)\n\n\nclass LinearNetReturnHidden(fluid.dygraph.Layer):\n def __init__(self, in_size, out_size):\n super(LinearNetReturnHidden, self).__init__()\n self._linear_1 = Linear(in_size, out_size)\n self._linear_2 = Linear(in_size, out_size)\n\n @declarative\n def forward(self, x):\n y = self._linear_1(x)\n z = self._linear_2(y)\n loss = 
fluid.layers.mean(z)\n return y, loss\n\n\nclass TestJitPruneModelAndLoad(unittest.TestCase):\n def setUp(self):\n self.linear_size = 4\n self.model_path = \"model.jit_prune_model_and_load\"\n # enable dygraph mode\n fluid.enable_dygraph()\n # config seed\n paddle.manual_seed(SEED)\n paddle.framework.random._manual_program_seed(SEED)\n\n def train_and_save(self):\n train_layer = LinearNetReturnHidden(8, 8)\n adam = fluid.optimizer.AdamOptimizer(\n learning_rate=0.1, parameter_list=train_layer.parameters())\n x = fluid.dygraph.to_variable(\n np.random.random((4, 8)).astype('float32'))\n for i in range(10):\n hidden, loss = train_layer(x)\n loss.backward()\n adam.minimize(loss)\n train_layer.clear_gradients()\n\n configs = fluid.dygraph.jit.SaveLoadConfig()\n configs.output_spec = [hidden]\n fluid.dygraph.jit.save(\n layer=train_layer,\n model_path=self.model_path,\n input_spec=[x],\n configs=configs)\n\n return train_layer\n\n def test_load_pruned_model(self):\n train_layer = self.train_and_save()\n train_layer.eval()\n\n infer_layer = fluid.dygraph.jit.load(self.model_path)\n\n x = fluid.dygraph.to_variable(\n np.random.random((4, 8)).astype('float32'))\n self.assertTrue(\n np.array_equal(train_layer(x)[0].numpy(), infer_layer(x).numpy()))\n\n def test_load_var_not_in_extra_var_info(self):\n self.train_and_save()\n\n # chage extra var info\n var_info_path = os.path.join(self.model_path, EXTRA_VAR_INFO_FILENAME)\n with open(var_info_path, 'rb') as f:\n extra_var_info = pickle.load(f)\n extra_var_info.clear()\n with open(var_info_path, 'wb') as f:\n pickle.dump(extra_var_info, f, protocol=2)\n\n with self.assertRaises(RuntimeError):\n fluid.dygraph.jit.load(self.model_path)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport collections\nfrom collections import defaultdict\nfrom collections import Iterable\nimport contextlib\nfrom .wrapped_decorator import signature_safe_contextmanager, wrap_decorator\nimport os\nimport re\nimport traceback\nimport six\n\nimport numpy as np\nimport subprocess\nimport multiprocessing\nimport sys\nimport logging\nfrom .. import compat as cpt\nfrom .proto import framework_pb2\n\nfrom . import core\nfrom . 
import unique_name\nimport paddle.version as fluid_version\nimport warnings\nimport functools\n\n__all__ = [\n 'Program',\n 'default_startup_program',\n 'default_main_program',\n 'program_guard',\n 'name_scope',\n 'cuda_places',\n 'cpu_places',\n 'cuda_pinned_places',\n 'in_dygraph_mode',\n 'is_compiled_with_cuda',\n 'is_compiled_with_xpu',\n 'Variable',\n 'ComplexVariable',\n 'load_op_library',\n 'require_version',\n 'device_guard',\n 'set_flags',\n 'get_flags',\n]\n\nEMPTY_VAR_NAME = core.kEmptyVarName()\nTEMP_VAR_NAME = core.kTempVarName()\nGRAD_VAR_SUFFIX = core.kGradVarSuffix()\nZERO_VAR_SUFFIX = core.kZeroVarSuffix()\nCONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()\n\n_dygraph_tracer_ = None\n_global_expected_place_ = None\n_current_device = None\nglobal_prog_seed = 0\n\n\ndef require_version(min_version, max_version=None):\n \"\"\"\n Check if the installed version of PaddlePaddle is in [min_version, max_version],\n if the installed version is lower than ``min_version`` or higher than ``max_version``,\n an exception will be thrown, NO returns if the installed version is satisfied.\n\n Args:\n min_version (str): the minimum version required (like '1.4.0').\n max_version (str, optional): the max version required (like '1.6.0'), default is None,\n meaning any version equal or higher than ``min_version`` is acceptable.\n\n Returns:\n None.\n\n Raises:\n TypeError: if the type of ``min_version`` is not str.\n TypeError: if the type of ``max_version`` is not str or type(None).\n ValueError: if the value of ``min_version`` is not in version format.\n ValueError: if the value of ``max_version`` is not in version format or None.\n Exception: if the installed version is lower than ``min_version`` or higher than ``max_version``.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n # any version >= 0.1.0 is acceptable.\n fluid.require_version('0.1.0')\n\n # if 0.1.0 <= version <= 10.0.0, it is acceptable.\n fluid.require_version(min_version='0.1.0', max_version='10.0.0')\n \"\"\"\n if not isinstance(min_version, str):\n raise TypeError(\n \"The type of 'min_version' in require_version must be str, but received %s.\"\n % (type(min_version)))\n\n if not isinstance(max_version, (str, type(None))):\n raise TypeError(\n \"The type of 'max_version' in require_version must be str or type(None), but received %s.\"\n % (type(max_version)))\n\n check_format = re.match(r'\\d+(\\.\\d+){0,3}', min_version)\n if check_format is None or check_format.group() != min_version:\n raise ValueError(\n \"The value of 'min_version' in require_version must be in format '\\\\d+(\\\\.\\\\d+){0,3}', \"\n \"like '1.5.2.0', but received %s\" % min_version)\n\n if max_version is not None:\n check_format = re.match(r'\\d+(\\.\\d+){0,3}', max_version)\n if check_format is None or check_format.group() != max_version:\n raise ValueError(\n \"The value of 'max_version' in require_version must be in format '\\\\d+(\\\\.\\\\d+){0,3}', \"\n \"like '1.5.2.0', but received %s\" % max_version)\n\n version_installed = [\n fluid_version.major, fluid_version.minor, fluid_version.patch,\n fluid_version.rc\n ]\n zero_version = ['0', '0', '0', '0']\n\n def version_cmp(ver_a, ver_b):\n for i in six.moves.range(len(ver_a)):\n if int(ver_a[i]) > int(ver_b[i]):\n return 1\n elif int(ver_a[i]) < int(ver_b[i]):\n return -1\n return 0\n\n if version_cmp(version_installed, zero_version) == 0:\n if max_version is not None:\n warnings.warn(\n \"PaddlePaddle version in [%s, %s] required, but %s installed. 
\"\n \"Maybe you are using a develop version, \"\n \"please make sure the version is good with your code.\" %\n (min_version, max_version, fluid_version.full_version))\n else:\n warnings.warn(\n \"PaddlePaddle version %s or higher is required, but %s installed, \"\n \"Maybe you are using a develop version, \"\n \"please make sure the version is good with your code.\" %\n (min_version, fluid_version.full_version))\n return\n\n min_version_split = min_version.split('.')\n min_version_to_check = min_version_split + zero_version[len(\n min_version_split):]\n\n if max_version is not None:\n max_version_split = max_version.split('.')\n max_version_to_check = max_version_split + zero_version[len(\n max_version_split):]\n\n if version_cmp(version_installed,\n max_version_to_check) > 0 or version_cmp(\n version_installed, min_version_to_check) < 0:\n raise Exception(\n \"VersionError: PaddlePaddle version in [%s, %s] required, but %s installed.\"\n % (min_version, max_version, fluid_version.full_version))\n else:\n if version_cmp(version_installed, min_version_to_check) < 0:\n raise Exception(\n \"VersionError: PaddlePaddle version %s or higher is required, but %s installed, \"\n \"please upgrade your PaddlePaddle to %s or other higher version.\"\n % (min_version, fluid_version.full_version, min_version))\n\n\ndef in_dygraph_mode():\n \"\"\"\n :alias_main: paddle.in_dygraph_mode\n\t:alias: paddle.in_dygraph_mode\n\t:old_api: paddle.fluid.framework.in_dygraph_mode\n\n This function checks whether the program runs in dynamic graph mode or not.\n You can enter dynamic graph mode with :ref:`api_fluid_dygraph_guard` api,\n or enable and disable dynamic graph mode with :ref:`api_fluid_dygraph_enable`\n and :ref:`api_fluid_dygraph_disable` api .\n\n Returns:\n bool: Whether the program is running in dynamic graph mode.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n fluid.enable_dygraph() # Now we are in dygragh mode\n print(fluid.in_dygraph_mode()) # True\n fluid.disable_dygraph()\n print(fluid.in_dygraph_mode()) # False\n \"\"\"\n return _dygraph_tracer_ is not None\n\n\ndef _dygraph_not_support_(func):\n def __impl__(*args, **kwargs):\n assert not in_dygraph_mode(\n ), \"We don't support %s in imperative mode\" % func.__name__\n return func(*args, **kwargs)\n\n return __impl__\n\n\ndef _dygraph_only_(func):\n def __impl__(*args, **kwargs):\n assert in_dygraph_mode(\n ), \"We Only support %s in dynamic mode, please call 'paddle.disable_static()' to enter dynamic mode.\" % func.__name__\n return func(*args, **kwargs)\n\n return __impl__\n\n\n# NOTE(zhiqiu): This decorator is used for the APIs of Variable which is only\n# used to make Variable and VarBase has same interfaces, like numpy. Since VarBase is not exposed in our\n# official docments, logically, we want to keep VarBase and logically consistent. While, actually,\n# in our implementation, there some APIs not supported, like numpy, because Variable contains the desc.\n# So, those APIs are listed under class Variable to generate docs only.\n# TODO(zhiqiu): We should make VarBase consistent with Variable in future, for example, by inheritting\n# same base class. 
\ndef _fake_interface_only_(func):\n def __impl__(*args, **kwargs):\n raise AssertionError(\n \"'%s' should be called by imperative Varible in imperative mode, please use fluid.dygraph.guard() as context to run it in imperative mode\"\n % func.__name__)\n\n return __impl__\n\n\n# NOTE(chenweihang): There is argument name typo (stat_dict, correct name is state_dict) \n# in fluid api Layer.set_dict, Optimizer.load, in order to correct the argument without \n# introducing compatibility issues, add this decorator\n# NOTE(chenweihang): not using `wrap_decorator` here is because `wrap_decorator` will\n# move kwargs to args, which doesn't work in this decorate case\ndef deprecate_stat_dict(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if 'stat_dict' in kwargs:\n warnings.warn(\n \"The argument `stat_dict` has deprecated, please change it to `state_dict`.\",\n DeprecationWarning)\n kwargs['state_dict'] = kwargs['stat_dict']\n kwargs.pop('stat_dict')\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndygraph_not_support = wrap_decorator(_dygraph_not_support_)\ndygraph_only = wrap_decorator(_dygraph_only_)\nfake_interface_only = wrap_decorator(_fake_interface_only_)\n\n\ndef _dygraph_tracer():\n return _dygraph_tracer_\n\n\ndef _current_expected_place():\n global _global_expected_place_\n if _global_expected_place_ is None:\n if core.is_compiled_with_cuda():\n _global_expected_place_ = core.CUDAPlace(0)\n else:\n _global_expected_place_ = core.CPUPlace()\n\n return _global_expected_place_\n\n\ndef _set_dygraph_tracer_expected_place(place):\n global _dygraph_tracer_\n if _dygraph_tracer_ is not None:\n _dygraph_tracer_._expected_place = place\n\n\ndef _set_expected_place(place):\n global _global_expected_place_\n _global_expected_place_ = place\n _set_dygraph_tracer_expected_place(place)\n\n\n# TODO(zhiqiu): remove this function.\ndef _var_base_to_np(var_base):\n \"\"\"\t\n convert VarBase tp numpy\t\n \t\n Args:\t\n var_base(VarBase) : the VarBase to convert\t\n Returns (np.ndarray): the np.ndarray contain the value of VarBase\t\n \"\"\"\n\n warnings.warn(\n \"paddle.fluid.framework._var_base_to_np is deprecated, please use var_base.numpy() instead of _var_base_to_np(var_base).\"\n )\n\n return var_base.numpy()\n\n\ndef _cpu_num():\n if \"CPU_NUM\" not in os.environ.keys():\n if multiprocessing.cpu_count() > 1:\n sys.stderr.write(\n '!!! The CPU_NUM is not specified, you should set CPU_NUM in the environment variable list.\\n'\n 'CPU_NUM indicates that how many CPUPlace are used in the current task.\\n'\n 'And if this parameter are set as N (equal to the number of physical CPU core) the program may be faster.\\n\\n'\n 'export CPU_NUM={} # for example, set CPU_NUM as number of physical CPU core which is {}.\\n\\n'\n '!!! The default number of CPU_NUM=1.\\n'.format(\n multiprocessing.cpu_count(), multiprocessing.cpu_count()))\n os.environ['CPU_NUM'] = str(1)\n cpu_num = os.environ.get('CPU_NUM')\n return int(cpu_num)\n\n\ndef _cuda_ids():\n gpus_env = os.getenv(\"FLAGS_selected_gpus\")\n if gpus_env:\n device_ids = [int(s) for s in gpus_env.split(\",\")]\n else:\n device_ids = six.moves.range(core.get_cuda_device_count())\n return device_ids\n\n\ndef is_compiled_with_xpu():\n \"\"\"\n Whether this whl package can be used to run the model on XPU.\n\n Returns (bool): support xpu or not.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n support_xpu = fluid.is_compiled_with_xpu()\n \"\"\"\n return core.is_compiled_with_xpu()\n\n\ndef is_compiled_with_cuda():\n \"\"\"\n Whether this whl package can be used to run the model on GPU.\n\n Returns (bool): support gpu or not.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n support_gpu = fluid.is_compiled_with_cuda()\n \"\"\"\n return core.is_compiled_with_cuda()\n\n\ndef cuda_places(device_ids=None):\n \"\"\"\n **Note**:\n For multi-card tasks, please use `FLAGS_selected_gpus` environment variable to set the visible GPU device.\n The next version will fix the problem with `CUDA_VISIBLE_DEVICES` environment variable.\n\n This function creates a list of :code:`fluid.CUDAPlace` objects.\n\n If :code:`device_ids` is None, environment variable of\n :code:`FLAGS_selected_gpus` would be checked first. For example, if\n :code:`FLAGS_selected_gpus=0,1,2`, the returned list would\n be [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].\n If :code:`FLAGS_selected_gpus` is not set, all visible\n gpu places would be returned according to the :code:`CUDA_VISIBLE_DEVICES` environment variable.\n\n If :code:`device_ids` is not None, it should be the device\n ids of GPUs. For example, if :code:`device_ids=[0,1,2]`,\n the returned list would be \n [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].\n \n Parameters:\n device_ids (list or tuple of int, optional): list of GPU device ids.\n\n Returns:\n list of fluid.CUDAPlace: Created GPU place list.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cuda_places = fluid.cuda_places()\n\n \"\"\"\n assert core.is_compiled_with_cuda(), \\\n \"Not compiled with CUDA\"\n if device_ids is None:\n device_ids = _cuda_ids()\n elif not isinstance(device_ids, (list, tuple)):\n device_ids = [device_ids]\n return [core.CUDAPlace(dev_id) for dev_id in device_ids]\n\n\ndef cpu_places(device_count=None):\n \"\"\"\n This function creates a list of :code:`fluid.CPUPlace` objects, and returns the created list.\n \n If :code:`device_count` is None, the device count would\n be determined by environment variable :code:`CPU_NUM`. \n If :code:`CPU_NUM` is not set, the default value is 1,\n i.e. CPU_NUM=1.\n :code:`CPU_NUM` indicates the number of devices used in the current task.\n The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.\n\n Parameters:\n device_count (int, optional): device number. Default: None.\n\n Returns:\n list of fluid.CPUPlace: Created list of CPU places.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cpu_places = fluid.cpu_places()\n \"\"\"\n\n if device_count is None:\n device_count = _cpu_num()\n return [core.CPUPlace()] * device_count\n\n\ndef cuda_pinned_places(device_count=None):\n \"\"\"\n This function creates a list of :code:`fluid.CUDAPinnedPlace` objects.\n\n If :code:`device_count` is None, the device count would\n be determined by environment variable :code:`CPU_NUM`. \n If :code:`CPU_NUM` is not set, the default value is 1,\n i.e. CPU_NUM=1.\n :code:`CPU_NUM` indicates the number of devices used in the current task.\n The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.\n\n Parameters:\n device_count (int, optional): device number. Default: None.\n\n Returns:\n list of fluid.CUDAPinnedPlace: Created list of CUDA pinned places.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n cuda_pinned_places_cpu_num = fluid.cuda_pinned_places()\n # or\n cuda_pinned_places = fluid.cuda_pinned_places(1)\n\n \"\"\"\n assert core.is_compiled_with_cuda(), \\\n \"Not compiled with CUDA\"\n if device_count is None:\n device_count = len(_cuda_ids())\n return [core.CUDAPinnedPlace()] * device_count\n\n\nclass NameScope(object):\n def __init__(self, name=\"\", parent=None):\n self._children = dict()\n self._name = name\n self._parent = parent\n\n def child(self, prefix):\n if prefix not in self._children:\n new_child = NameScope(prefix, self)\n self._children[prefix] = [new_child]\n else:\n new_child = NameScope(prefix + \"_%d\" % len(self._children[prefix]),\n self)\n self._children[prefix].append(new_child)\n return new_child\n\n def parent(self):\n return self._parent\n\n def name(self):\n return self._name\n\n\n_name_scope = NameScope()\n\n\n@signature_safe_contextmanager\ndef name_scope(prefix=None):\n \"\"\"\n :api_attr: Static Graph\n\n Generate hierarchical name prefix for the operators.\n\n Note: \n This should only used for debugging and visualization purpose.\n Don't use it for serious analysis such as graph/program transformations.\n\n Args:\n prefix(str, optional): prefix. Default is none.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n with fluid.name_scope(\"s1\"):\n a = fluid.data(name='data', shape=[None, 1], dtype='int32')\n b = a + 1\n with fluid.name_scope(\"s2\"):\n c = b * 1\n with fluid.name_scope(\"s3\"):\n d = c / 1\n with fluid.name_scope(\"s1\"):\n f = fluid.layers.pow(d, 2.0)\n with fluid.name_scope(\"s4\"):\n g = f - 1\n\n # Op are created in the default main program. \n for op in fluid.default_main_program().block(0).ops:\n # elementwise_add is created in /s1/\n if op.type == 'elementwise_add':\n assert op.desc.attr(\"op_namescope\") == '/s1/'\n # elementwise_mul is created in '/s1/s2'\n elif op.type == 'elementwise_mul':\n assert op.desc.attr(\"op_namescope\") == '/s1/s2/'\n # elementwise_div is created in '/s1/s3'\n elif op.type == 'elementwise_div':\n assert op.desc.attr(\"op_namescope\") == '/s1/s3/'\n # elementwise_sum is created in '/s4'\n elif op.type == 'elementwise_sub':\n assert op.desc.attr(\"op_namescope\") == '/s4/'\n # pow is created in /s1_1/\n elif op.type == 'pow':\n assert op.desc.attr(\"op_namescope\") == '/s1_1/'\n \"\"\"\n # TODO(panyx0718): Only [0-9a-z].\n # in dygraph we don't need namescope since it will cause mem leak\n if in_dygraph_mode():\n yield\n else:\n assert prefix, \"namescope prefix can not be empty.\"\n global _name_scope\n _name_scope = _name_scope.child(prefix)\n try:\n yield\n finally:\n _name_scope = _name_scope.parent()\n\n\ndef _full_name_scope():\n global _name_scope\n scope = _name_scope\n name = \"\"\n while scope:\n name = scope.name() + \"/\" + name\n scope = scope.parent()\n return name\n\n\ndef generate_control_dev_var_name():\n import random\n return CONTROL_DEP_VAR_PREFIX + \"@\" + str(random.random())\n\n\ndef grad_var_name(var_name):\n \"\"\"\n Returns:\n str: gradient name for a certain var name\n \"\"\"\n return var_name + GRAD_VAR_SUFFIX\n\n\ndef convert_np_dtype_to_dtype_(np_dtype):\n \"\"\"\n Convert the data type in numpy to the data type in Paddle\n\n Args:\n np_dtype(np.dtype): the data type in numpy.\n\n Returns:\n core.VarDesc.VarType: the data type in Paddle.\n\n \"\"\"\n dtype = np.dtype(np_dtype)\n if dtype == np.float32:\n return core.VarDesc.VarType.FP32\n elif dtype == np.float64:\n return 
core.VarDesc.VarType.FP64\n elif dtype == np.float16:\n return core.VarDesc.VarType.FP16\n elif dtype == np.int32:\n return core.VarDesc.VarType.INT32\n elif dtype == np.int16:\n return core.VarDesc.VarType.INT16\n elif dtype == np.int64:\n return core.VarDesc.VarType.INT64\n elif dtype == np.bool:\n return core.VarDesc.VarType.BOOL\n elif dtype == np.uint16:\n return core.VarDesc.VarType.INT16\n elif dtype == np.uint8:\n return core.VarDesc.VarType.UINT8\n elif dtype == np.int8:\n return core.VarDesc.VarType.INT8\n else:\n raise ValueError(\"Not supported numpy dtype %s\" % dtype)\n\n\ndef dtype_is_floating(dtype):\n \"\"\"\n Check the data type is floating or not.\n Args:\n dtype(np.dtype|core.VarDesc.VarType): data type.\n Could be numpy format or Paddle format\n\n Returns(bool): True if data type is a float value\n\n \"\"\"\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n return dtype in [\n core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.FP64\n ]\n\n\ndef _debug_string_(proto, throw_on_error=True):\n \"\"\"\n Get the debug string of a protobuf message. The message could be not\n initialized.\n Args:\n proto(google.protobuf.message.Message): The protobuf message\n throw_on_error(bool): True if raise an error when the protobuf message\n is not initialized.\n\n Returns(str): The debug string of the protobuf message\n\n \"\"\"\n error_fields = list()\n if not proto.IsInitialized(error_fields) and throw_on_error:\n raise ValueError(\"{0} are not initialized.\\nThe message is {1}:\\n\".\n format(error_fields, proto))\n return proto.__str__()\n\n\ndef _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR,\n name=None,\n shape=None,\n dtype=None,\n persistable=None,\n **kwargs):\n if dtype is not None:\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n return core.VarBase(dtype if dtype else core.VarDesc.VarType.FP32,\n list(shape) if shape else [], name, type\n if type else core.VarDesc.VarType.LOD_TENSOR, True\n if persistable else False)\n\n\nclass VariableMetaClass(type):\n @classmethod\n def __instancecheck__(cls, instance):\n t = type(instance)\n if in_dygraph_mode():\n return issubclass(t, core.VarBase)\n else:\n return issubclass(t, Variable)\n\n\nclass ParameterMetaClass(VariableMetaClass):\n @classmethod\n def __instancecheck__(cls, instance):\n t = type(instance)\n if in_dygraph_mode():\n return issubclass(t, ParamBase)\n else:\n return issubclass(t, Parameter)\n\n\ndef _getitem_impl_(var, item):\n \"\"\"\n Slice the variable.\n\n Args:\n item(int/slice/tuple) : the index.\n\n Returns:\n Sliced variable\n \"\"\"\n\n if not isinstance(item, tuple):\n item = [item]\n\n decrease_axis = []\n slice_axis = []\n slice_start = []\n slice_end = []\n slice_step = []\n use_strided_slice = False\n reverse_axis = []\n target_block = default_main_program().current_block()\n\n def fill_constant(shape, value, force_cpu=False, out=None):\n var.block.append_op(\n type='fill_constant',\n inputs={},\n outputs={'Out': [out]},\n attrs={\n 'shape': shape,\n 'dtype': out.dtype,\n 'value': float(value),\n 'force_cpu': force_cpu\n })\n out.stop_gradient = True\n return out\n\n for dim, slice_item in enumerate(item):\n if isinstance(slice_item, slice):\n start = slice_item.start\n end = slice_item.stop\n step = slice_item.step\n\n if start is None and end is None and step is None:\n continue\n\n if step is None:\n step = 1\n\n if start is None and end is None:\n assert (step == -1)\n 
reverse_axis.append(dim)\n continue\n\n if start is None:\n start = 0\n\n if end is None:\n end = 10000000\n\n if step != 1:\n use_strided_slice = True\n\n slice_axis.append(dim)\n slice_start.append(start)\n slice_end.append(end)\n slice_step.append(step)\n else:\n decrease_axis.append(dim)\n slice_axis.append(dim)\n slice_start.append(slice_item)\n slice_step.append(1)\n if isinstance(slice_item, Variable):\n temp_1 = var.block.create_var(dtype=slice_item.dtype)\n fill_constant([1], 1, force_cpu=True, out=temp_1)\n temp_end = target_block.create_var(dtype=slice_item.dtype)\n target_block.append_op(\n type='elementwise_add',\n inputs={'X': slice_item,\n 'Y': temp_1},\n outputs={'Out': temp_end},\n attrs={'axis': -1})\n slice_end.append(temp_end)\n else:\n slice_end.append(slice_item + 1\n if slice_item != -1 else 10000000)\n\n def contain_var(one_list):\n for ele in one_list:\n if isinstance(ele, Variable):\n return True\n return False\n\n def get_new_list_tensor(old_list):\n new_list_tensor = []\n for dim in old_list:\n if isinstance(dim, Variable):\n dim.stop_gradient = True\n new_list_tensor.append(dim)\n else:\n assert (isinstance(dim, int))\n temp_out = var.block.create_var(dtype='int32')\n fill_constant([1], dim, force_cpu=True, out=temp_out)\n new_list_tensor.append(temp_out)\n return new_list_tensor\n\n inputs = {'Input': [var]}\n attrs = {\n 'axes': slice_axis,\n 'starts': [],\n 'ends': [],\n 'decrease_axis': decrease_axis\n }\n if (use_strided_slice == True):\n attrs['strides'] = []\n infer_flags = list(1 for i in range(len(slice_axis)))\n\n # starts\n if contain_var(slice_start):\n inputs['StartsTensorList'] = get_new_list_tensor(slice_start)\n for i, dim in enumerate(slice_start):\n if isinstance(dim, Variable):\n attrs['starts'].append(-1)\n infer_flags[i] = -1\n else:\n attrs['starts'].append(dim)\n else:\n attrs['starts'] = slice_start\n\n # ends\n if contain_var(slice_end):\n inputs['EndsTensorList'] = get_new_list_tensor(slice_end)\n for i, dim in enumerate(slice_end):\n if isinstance(dim, Variable):\n attrs['ends'].append(-1)\n infer_flags[i] = -1\n else:\n attrs['ends'].append(dim)\n else:\n attrs['ends'] = slice_end\n\n # strides\n if use_strided_slice == True:\n if contain_var(slice_step):\n inputs['StridesTensorList'] = get_new_list_tensor(slice_step)\n for i, dim in enumerate(slice_step):\n if isinstance(dim, Variable):\n attrs['strides'].append(-1)\n infer_flags[i] = -1\n else:\n attrs['strides'].append(dim)\n else:\n attrs['strides'] = slice_step\n # infer_flags\n attrs['infer_flags'] = infer_flags\n\n out = var\n if use_strided_slice == False and len(slice_axis) > 0:\n # append slice_op here\n slice_out_var = target_block.create_var(\n name=unique_name.generate_with_ignorable_key(var.name + \"_slice\"),\n dtype=var.dtype)\n\n target_block.append_op(\n type=\"slice\",\n inputs=inputs,\n outputs={'Out': [slice_out_var]},\n attrs=attrs)\n\n out = slice_out_var\n elif use_strided_slice == True and len(slice_axis) > 0:\n strided_slice_out_var = target_block.create_var(\n name=unique_name.generate_with_ignorable_key(var.name +\n \"_strided_slice\"),\n dtype=var.dtype)\n target_block.append_op(\n type=\"strided_slice\",\n inputs=inputs,\n outputs={'Out': [strided_slice_out_var]},\n attrs=attrs)\n\n out = strided_slice_out_var\n\n if len(reverse_axis) > 0:\n reverse_out_var = target_block.create_var(\n name=unique_name.generate_with_ignorable_key(var.name +\n \"_slice_reverse\"),\n dtype=var.dtype)\n target_block.append_op(\n type=\"reverse\",\n inputs={'X': out},\n 
outputs={'Out': [reverse_out_var]},\n attrs={'axis': reverse_axis})\n\n out = reverse_out_var\n\n return out\n\n\[email protected]_metaclass(VariableMetaClass)\nclass Variable(object):\n \"\"\"\n **Notes**:\n **The constructor of Variable should not be invoked directly.**\n\n **In Static Graph Mode: Please use** `Block.create_var` **to create a Static variable which has no data until being feed.**\n\n **In Dygraph Mode: Please use** :ref:`api_fluid_dygraph_to_variable` **to create a dygraph variable with real data**\n\n In Fluid, every input and output of an OP is a variable. In most\n cases, variables are used for holding different kinds of data or training\n labels. A variable belongs to a :ref:`api_guide_Block_en` . All variable has its own name and\n two variables in different :ref:`api_guide_Block_en` could have the same name.\n\n There are many kinds of variables. Each kind of them has its own attributes\n and usages. Please refer to the `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_ for details.\n\n Most of a Variable's member variables can be set to be None. It mean\n it is not available or will be specified later.\n\n Examples:\n In Static Graph Mode:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n In `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ Mode:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n with fluid.dygraph.guard():\n new_variable = fluid.dygraph.to_variable(np.arange(10))\n\n \"\"\"\n\n def __init__(self,\n block,\n type=core.VarDesc.VarType.LOD_TENSOR,\n name=None,\n shape=None,\n dtype=None,\n lod_level=None,\n capacity=None,\n persistable=None,\n error_clip=None,\n stop_gradient=False,\n is_data=False,\n need_check_feed=False,\n belong_to_optimizer=False,\n **kwargs):\n self.block = block\n if name is None:\n name = unique_name.generate('_generated_var')\n\n if dtype is not None:\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n self.belong_to_optimizer = belong_to_optimizer\n\n self.error_clip = error_clip\n\n is_new_var = False\n name = cpt.to_text(name)\n self.desc = self.block.desc.find_var(cpt.to_bytes(name))\n\n if self.desc is None:\n self.desc = self.block.desc.var(cpt.to_bytes(name))\n is_new_var = True\n\n if is_new_var:\n self.desc.set_type(type)\n elif self.desc.type() != type:\n raise ValueError(\"Variable {0} has been created before. The \"\n \"previous type is {1}; the new type is {2}. They\"\n \" are not matched\".format(self.name,\n self.desc.type(), type))\n\n if shape is not None:\n if is_new_var:\n self.desc.set_shape(shape)\n else:\n old_shape = self.shape\n shape = tuple(shape)\n if shape != old_shape:\n raise ValueError(\n \"Variable {0} has been created before. the previous \"\n \"shape is {1}; the new shape is {2}. They are not \"\n \"matched.\".format(self.name, old_shape, shape))\n if dtype is not None:\n if is_new_var:\n self.desc.set_dtype(dtype)\n else:\n old_dtype = self.dtype\n if dtype != old_dtype:\n raise ValueError(\"Variable {0} has been created before. \"\n \"The previous data type is {1}; the new \"\n \"data type is {2}. 
They are not \"\n \"matched.\".format(self.name, old_dtype,\n dtype))\n\n if lod_level is not None:\n if is_new_var:\n self.desc.set_lod_level(lod_level)\n else:\n if lod_level != self.lod_level:\n raise ValueError(\"Variable {0} has been created before. \"\n \"The previous lod_level is {1}; the new \"\n \"lod_level is {2}. They are not \"\n \"matched\".format(self.name, self.lod_level,\n lod_level))\n if persistable is not None:\n if is_new_var:\n self.desc.set_persistable(persistable)\n else:\n if persistable != self.persistable:\n raise ValueError(\n \"Variable {0} has been created before.\"\n \"The previous persistable is {1}; the new \"\n \"persistable is {2}. They are not matched\".format(\n self.name, self.persistable, persistable))\n\n if need_check_feed and is_new_var:\n self.desc.set_need_check_feed(need_check_feed)\n\n if capacity is not None:\n if is_new_var:\n self.desc.set_capacity(capacity)\n else:\n # TODO(abhinavarora) : Compare with set capacity once,\n # get_capacity is implemented\n pass\n\n self.block.vars[name] = self\n self.op = None\n self._stop_gradient = stop_gradient\n self.is_data = is_data\n\n @fake_interface_only\n def detach(self):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Returns a new Variable, detached from the current graph.\n\n Returns:\n ( :ref:`api_guide_Variable_en` | dtype is same as current Variable): The detached Variable.\n\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n from paddle.fluid.dygraph import Linear\n import numpy as np\n\n data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')\n with fluid.dygraph.guard():\n linear = Linear(32, 64)\n data = to_variable(data)\n x = linear(data)\n y = x.detach()\n\n \"\"\"\n pass\n\n @fake_interface_only\n def numpy(self):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Returns a numpy array shows the value of current :ref:`api_guide_Variable_en`\n\n Returns:\n ndarray: The numpy value of current Variable.\n\n Returns type:\n ndarray: dtype is same as current Variable\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n from paddle.fluid.dygraph import Linear\n import numpy as np\n\n data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')\n with fluid.dygraph.guard():\n linear = Linear(32, 64)\n data = to_variable(data)\n x = linear(data)\n print(x.numpy())\n\n \"\"\"\n pass\n\n @fake_interface_only\n def set_value(self, value):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Set a new value for this Variable.\n\n Args:\n value (Variable|np.ndarray): the new value.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n from paddle.fluid.dygraph import Linear\n import numpy as np\n\n data = np.ones([3, 1024], dtype='float32')\n with fluid.dygraph.guard():\n linear = fluid.dygraph.Linear(1024, 4)\n t = to_variable(data)\n linear(t) # call with default weight\n custom_weight = np.random.randn(1024, 4).astype(\"float32\")\n linear.weight.set_value(custom_weight) # change existing weight\n out = linear(t) # call with different weight\n\n \"\"\"\n pass\n\n @fake_interface_only\n def backward(self, retain_graph=False):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Run backward of current Graph which starts from current Tensor.\n\n Args:\n retain_graph(bool, optional): If False, the graph used to compute grads will be freed. If you would\n like to add more ops to the built graph after calling this method( :code:`backward` ), set the parameter\n :code:`retain_graph` to True, then the grads will be retained. Thus, seting it to False is much more memory-efficient.\n Defaults to False.\n\n Returns:\n NoneType: None\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle\n paddle.disable_static()\n\n x = np.ones([2, 2], np.float32)\n inputs = []\n for _ in range(10):\n tmp = paddle.to_tensor(x)\n # if we don't set tmp's stop_gradient as False then, all path to loss will has no gradient since\n # there is no one need gradient on it.\n tmp.stop_gradient=False\n inputs.append(tmp)\n ret = paddle.sums(inputs)\n loss = paddle.reduce_sum(ret)\n loss.backward()\n\n \"\"\"\n pass\n\n @fake_interface_only\n def gradient(self):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Get the Gradient of Current Variable\n\n Returns:\n ndarray or tuple of ndarray: if Variable's type is LoDTensor, return numpy value of the gradient of current Variable, if Variable's type is SelectedRows, return tuple of ndarray, first element of tuple is numpy value of the gradient of current Variable, second element of tuple is numpy value of the rows of current Variable.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n # example1: return ndarray\n x = np.ones([2, 2], np.float32)\n with fluid.dygraph.guard():\n inputs2 = []\n for _ in range(10):\n tmp = fluid.dygraph.base.to_variable(x)\n tmp.stop_gradient=False\n inputs2.append(tmp)\n ret2 = fluid.layers.sums(inputs2)\n loss2 = fluid.layers.reduce_sum(ret2)\n loss2.backward()\n print(loss2.gradient())\n\n # example2: return tuple of ndarray\n with fluid.dygraph.guard():\n embedding = fluid.dygraph.Embedding(\n size=[20, 32],\n param_attr='emb.w',\n is_sparse=True)\n x_data = np.arange(12).reshape(4, 3).astype('int64')\n x_data = x_data.reshape((-1, 3, 1))\n x = fluid.dygraph.base.to_variable(x_data)\n out = embedding(x)\n out.backward()\n print(embedding.weight.gradient())\n\n \"\"\"\n pass\n\n @fake_interface_only\n def clear_gradient(self):\n \"\"\"\n **Notes**:\n **1. This API is ONLY available in Dygraph mode**\n\n **2. Use it only Variable has gradient, normally we use this for Parameters since other temporal Variable will be deleted by Python's GC**\n\n Clear (set to ``0`` ) the Gradient of Current Variable\n\n Returns: None\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n x = np.ones([2, 2], np.float32)\n with fluid.dygraph.guard():\n inputs2 = []\n for _ in range(10):\n tmp = fluid.dygraph.base.to_variable(x)\n tmp.stop_gradient=False\n inputs2.append(tmp)\n ret2 = fluid.layers.sums(inputs2)\n loss2 = fluid.layers.reduce_sum(ret2)\n loss2.backward()\n print(loss2.gradient())\n loss2.clear_gradient()\n print(\"After clear {}\".format(loss2.gradient()))\n\n \"\"\"\n pass\n\n def __str__(self):\n return self._to_readable_code()\n\n def _to_readable_code(self):\n \"\"\"\n Get readable debug string of Variable.\n\n .. note::\n If you want to get the debug string in protobuf format,\n please use :code:`to_string` method.\n\n Returns:\n string: The formatted Variable string.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(new_variable._to_readable_code())\n \"\"\"\n if self.type == core.VarDesc.VarType.SELECTED_ROWS or self.type == core.VarDesc.VarType.LOD_TENSOR:\n var_str = \"{name} : fluid.{type}.shape{shape}.astype({dtype})\".\\\n format(i=\"{\", e=\"}\", name=self.name, type=self.type, shape=self.shape, dtype=self.dtype)\n else:\n var_str = \"{name} : fluid.{type})\".\\\n format(i=\"{\", e=\"}\", name=self.name, type=self.type)\n\n if type(self) == Parameter:\n if self.trainable:\n var_str = \"trainable param \" + var_str\n else:\n var_str = \"param \" + var_str\n else:\n var_str = \"var \" + var_str\n\n if self.persistable:\n var_str = \"persist \" + var_str\n\n return var_str\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n Get debug string.\n\n Args:\n\n throw_on_error (bool): True if raise an exception when self is not initialized.\n\n with_details (bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True. Default value is False;\n\n Returns:\n str: The debug string.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(new_variable.to_string(True))\n print(\"=============with detail===============\")\n print(new_variable.to_string(True, True))\n \"\"\"\n assert isinstance(throw_on_error, bool) and isinstance(with_details,\n bool)\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))\n res_str = _debug_string_(proto, throw_on_error)\n if with_details:\n additional_attr = (\"error_clip\", \"stop_gradient\")\n for attr_name in additional_attr:\n res_str += \"%s: %s\\n\" % (attr_name,\n cpt.to_text(getattr(self, attr_name)))\n\n return res_str\n\n __repr__ = __str__\n\n @property\n def stop_gradient(self):\n \"\"\"\n Indicating if we stop gradient from current Variable\n\n **Notes: This Property has default value as** ``True`` **in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, while Parameter's default value is False. However, in Static Graph Mode all Variable's default stop_gradient value is** ``False``\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n with fluid.dygraph.guard():\n value0 = np.arange(26).reshape(2, 13).astype(\"float32\")\n value1 = np.arange(6).reshape(2, 3).astype(\"float32\")\n value2 = np.arange(10).reshape(2, 5).astype(\"float32\")\n linear = fluid.Linear(13, 5, dtype=\"float32\")\n linear2 = fluid.Linear(3, 3, dtype=\"float32\")\n a = fluid.dygraph.to_variable(value0)\n b = fluid.dygraph.to_variable(value1)\n c = fluid.dygraph.to_variable(value2)\n out1 = linear(a)\n out2 = linear2(b)\n out1.stop_gradient = True\n out = fluid.layers.concat(input=[out1, out2, c], axis=1)\n out.backward()\n\n assert linear.weight.gradient() is None\n assert (out1.gradient() == 0).all()\n \"\"\"\n return self._stop_gradient\n\n @stop_gradient.setter\n def stop_gradient(self, s):\n self._stop_gradient = s\n\n @property\n def persistable(self):\n \"\"\"\n Indicating if the current Variable should be long-term alive\n\n\n **Notes: This Property will be deprecated and this API is just to help users understand the concept**\n\n **1. All Variable's persistable is** ``False`` **except Parameters.**\n\n **2. In** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, this property should not be changed**\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"persistable of current Var is: {}\".format(new_variable.persistable))\n \"\"\"\n return self.desc.persistable()\n\n @persistable.setter\n def persistable(self, p):\n self.desc.set_persistable(p)\n\n @property\n def name(self):\n \"\"\"\n Indicating name of current Variable\n\n **Notes: If two or more Variables share the same name in the same** :ref:`api_guide_Block_en` **, these Variables will share content in non-** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode. This is how we achieve Parameter sharing**\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"name of current Var is: {}\".format(new_variable.name))\n \"\"\"\n return cpt.to_text(self.desc.name())\n\n @property\n def grad_name(self):\n \"\"\"\n Indicating name of the gradient Variable of current Variable.\n\n **Notes: This is a read-only property. It simply returns the name of\n the gradient Variable following a naming convention, but doesn't guarantee\n that the gradient exists.**\n \n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n x = fluid.data(name=\"x\", shape=[-1, 23, 48], dtype='float32')\n print(x.grad_name) # output is \"x@GRAD\"\n\n \"\"\"\n return self.name + \"@GRAD\"\n\n @name.setter\n def name(self, new_name):\n self.desc.set_name(new_name)\n\n @property\n def shape(self):\n \"\"\"\n Indicating shape of current Variable\n\n **Notes: This is a read-only property**\n
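\n The shape is returned as a tuple; a ``-1`` in a dimension marks a size\n that is only determined at runtime, such as the batch size.\n\n Examples:\n .. 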
code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"shape of current Var is: {}\".format(new_variable.shape))\n\n \"\"\"\n # convert to tuple, to make it the same as the numpy API.\n return tuple(self.desc.shape())\n\n @property\n def dtype(self):\n \"\"\"\n Indicating data type of current Variable\n\n **Notes: This is a read-only property**\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"Dtype of current Var is: {}\".format(new_variable.dtype))\n \"\"\"\n return self.desc.dtype()\n\n @property\n def lod_level(self):\n \"\"\"\n Indicating ``LoD`` info of current Variable, please refer to :ref:`api_fluid_LoDTensor_en` to check the meaning\n of ``LoD``\n\n **Notes**:\n\n **1. This is a read-only property**\n\n **2. This property is not supported in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode; its value should be** ``0(int)``\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"LoD Level of current Var is: {}\".format(new_variable.lod_level))\n \"\"\"\n if self.type == core.VarDesc.VarType.SELECTED_ROWS:\n raise Exception(\"SelectedRows DO NOT support lod\")\n\n return self.desc.lod_level()\n\n @property\n def type(self):\n \"\"\"\n Indicating Type of current Variable\n\n **Notes: This is a read-only property**\n
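\n The value is one of ``core.VarDesc.VarType``, e.g. ``LOD_TENSOR`` for a\n dense Tensor or ``SELECTED_ROWS`` for sparse rows.\n\n Examples:\n .. 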
code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"Type of current Var is: {}\".format(new_variable.type))\n \"\"\"\n return self.desc.type()\n\n def _set_error_clip(self, error_clip):\n \"\"\"\n Set the error_clip.\n\n Args:\n error_clip(BaseErrorClipAttr) : The new error_clip.\n\n Returns:\n None\n \"\"\"\n self.error_clip = error_clip\n\n def _set_info(self, key, value):\n \"\"\"\n Set key-value information for this variable.\n\n Args:\n key(str): Key for this information.\n value(object): The value associated with the key.\n\n Returns: \n None\n \"\"\"\n if not hasattr(self, \"_info\"):\n self._info = {}\n self._info[key] = value\n\n def _get_info(self, key):\n \"\"\"\n Get the information of this variable corresponding to key.\n\n Args:\n key(str): Key for this information.\n\n Returns: \n object\n \"\"\"\n if hasattr(self, \"_info\") and key in self._info:\n return self._info[key]\n return None\n\n def _slice_indices(self, slice, length):\n \"\"\"\n Reference implementation for the slice.indices method.\n \"\"\"\n # Compute step and length as integers.\n step = 1 if slice.step is None else slice.step\n\n # Raise ValueError for negative length or zero step.\n if length < 0:\n raise ValueError(\"length should not be negative\")\n if step == 0:\n raise ValueError(\"slice step cannot be zero\")\n\n # Find lower and upper bounds for start and stop.\n lower = -1 if step < 0 else 0\n upper = length - 1 if step < 0 else length\n\n # Compute start.\n if slice.start is None:\n start = upper if step < 0 else lower\n else:\n start = slice.start\n start = max(start + length, lower) if start < 0 else min(start,\n upper)\n\n # Compute stop.\n if slice.stop is None:\n stop = lower if step < 0 else upper\n else:\n stop = slice.stop\n stop = max(stop + length, lower) if stop < 0 else min(stop, upper)\n\n return start, stop, step\n\n def _detectEllipsis(self, item):\n has_ellipsis = False\n start = 0\n end = len(self.shape)\n for index, o in enumerate(item):\n if o is Ellipsis:\n if has_ellipsis:\n raise ValueError(\"Index can have one ellipsis only.\")\n has_ellipsis = True\n start = index\n else:\n if has_ellipsis:\n end = index\n return has_ellipsis, start, end\n\n def _reconstructSliceinfo(self, item):\n has_ellipsis, start, end = self._detectEllipsis(item)\n if has_ellipsis:\n newitem = []\n for i in range(start):\n newitem.append(item[i])\n for i in range(start, end):\n newitem.append(slice(None, None, None))\n for i in range(end, len(item)):\n newitem.append(item[i])\n return newitem\n else:\n return None\n\n def _detectContinuesSlice(self, item):\n starts = []\n ends = []\n for index, o in enumerate(item):\n if isinstance(o, int):\n start = int(o)\n # bounds-check the integer index value against this dimension's size\n if (start > 0 and start >= self.shape[index]) \\\n or (start < 0 and (start + self.shape[index]) < 0):\n raise IndexError(\"invalid index\")\n start = max(start + self.shape[index], 0) if start < 0 else min(\n start, self.shape[index])\n starts.append(start)\n ends.append(start + 1)\n elif isinstance(o, slice):\n start, stop, step = self._slice_indices(o, self.shape[index])\n if step == 1 or step == -1:\n starts.append(start)\n ends.append(stop)\n else:\n return False, None\n else:\n raise IndexError(\"A valid index accepts an int, a slice, or an ellipsis\")\n return True, [starts, ends]\n\n def _cloneVar(self, copy=False):\n if not copy:\n return self.block.create_var(\n
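# the clone shares this Variable's dtype but gets a fresh unique name;\n # other attributes (shape, lod, persistable) are not copied here.\n 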
name=unique_name.generate_with_ignorable_key(self.name),\n dtype=self.dtype)\n else:\n return self\n\n def _sliceVar(self, axes, starts, ends):\n new_var = self._cloneVar()\n self.block.append_op(\n type=\"slice\",\n inputs={'Input': [self]},\n outputs={'Out': [new_var]},\n attrs={'axes': axes,\n 'starts': starts,\n 'ends': ends})\n return new_var\n\n def _concatVar(self, inputs, axis):\n new_var = self._cloneVar()\n self.block.append_op(\n type=\"concat\",\n inputs={'X': inputs},\n outputs={'Out': [new_var]},\n attrs={'axis': axis, })\n return new_var\n\n def _sliceAndConcatVar(self, item, axis):\n if isinstance(item, slice):\n if self.shape[axis] < 0:\n return self._cloneVar(True)\n start, stop, step = self._slice_indices(item, self.shape[axis])\n if step == 1:\n return self._sliceVar([axis], [start], [stop])\n else:\n vars = []\n if step > 0:\n while start < stop:\n vars.append(\n self._sliceVar([axis], [start], [start + 1]))\n start += step\n else:\n while start > stop:\n vars.append(\n self._sliceVar([axis], [start], [start + 1]))\n start += step\n return self._concatVar(vars, axis)\n elif isinstance(item, int):\n if self.shape[axis] < 0:\n return self._cloneVar(True)\n index = int(item)\n if (index > 0 and index >= self.shape[axis]) \\\n or (index < 0 and (index + self.shape[axis]) < 0):\n raise IndexError(\"invalid index\")\n return self._sliceVar([axis], [index], [index + 1])\n else:\n raise IndexError(\"Valid index accept int or slice or tuple\")\n\n def __getitem__(self, item):\n return _getitem_impl_(self, item)\n\n\ndef get_all_op_protos():\n \"\"\"\n Get all registered op proto from PaddlePaddle C++ end.\n\n Returns:\n list: list of OpProto.\n \"\"\"\n protostrs = core.get_all_op_protos()\n ret_values = []\n for pbstr in protostrs:\n op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))\n ret_values.append(op_proto)\n return ret_values\n\n\nclass ComplexVariable(object):\n \"\"\"\n The ComplexTensor defined on the complex number domain. It contains two common \n real number Tensor as its members, :attr:`real` and :attr:`imag` \n holding the real part and imaginary part of complex numbers respectively.\n \n **Notes**:\n **The constructor of ComplexTensor should not be invoked directly.**\n\n **Only support dygraph mode at present. Please use** :ref:`api_fluid_dygraph_to_variable` **to create a dygraph ComplexTensor with complex number data.**\n\n Args:\n real (Tensor): The Tensor holding real-part data.\n imag (Tensor): The Tensor holding imaginery-part data.\n \n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n paddle.enable_imperative()\n x = paddle.to_tensor([1.0+2.0j, 0.2])\n print(x.name, x.dtype, x.shape)\n # ({'real': 'generated_tensor_0.real', 'imag': 'generated_tensor_0.imag'}, 'complex128', [2L])\n print(x.numpy())\n # [1. 
+2.j 0.2+0.j]\n print(type(x))\n # <class 'paddle.ComplexTensor'>\n \"\"\"\n\n def __new__(cls, *arg, **kwargs):\n cls.__module__ = \"paddle\"\n cls.__name__ = \"ComplexTensor\"\n return super(ComplexVariable, cls).__new__(cls)\n\n def __init__(self, real, imag):\n assert real.shape == imag.shape, \"The real part and imaginary part \" \\\n \"of a ComplexVariable should have the same shape!\"\n assert real.dtype == imag.dtype, \"The real part and imaginary part \" \\\n \"of a ComplexVariable should have the same data type!\"\n\n self.real = real\n self.imag = imag\n if self.real.dtype in [\n core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32\n ]:\n self._dtype = \"complex64\"\n else:\n self._dtype = \"complex128\"\n self._shape = self.real.shape\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def shape(self):\n return self._shape\n\n @property\n def name(self):\n return {\"real\": self.real.name, \"imag\": self.imag.name}\n\n @name.setter\n def name(self, name):\n # rename\n if isinstance(name, str):\n self.real.name = name + \".real\"\n self.imag.name = name + \".imag\"\n elif (isinstance(name, tuple) or isinstance(name,\n list)) and len(name) == 2:\n self.real.name, self.imag.name = name[0], name[1]\n else:\n raise ValueError(\n \"An invalid name assigned to the ComplexVariable, \"\n \"which must be a string, or a tuple or a list with length 2!\")\n\n def numpy(self):\n return self.real.numpy() + 1j * self.imag.numpy()\n\n def __str__(self):\n return \"ComplexTensor[real]: %s\\n%s\\nComplexTensor[imag]: %s\\n%s\" % (\n self.real.name, str(self.real.value().get_tensor()), self.imag.name,\n str(self.imag.value().get_tensor()))\n\n __repr__ = __str__\n\n\nclass OpProtoHolder(object):\n \"\"\"\n A global variable to hold all OpProtos from C++ as a map\n \"\"\"\n\n @classmethod\n def instance(cls):\n if not hasattr(cls, '_instance'):\n cls._instance = cls()\n return cls._instance\n\n def __init__(self):\n assert not hasattr(\n self.__class__,\n '_instance'), 'Please use `instance()` to get OpProtoHolder object!'\n op_protos = get_all_op_protos()\n self.op_proto_map = {}\n for proto in op_protos:\n self.op_proto_map[proto.type] = proto\n\n def get_op_proto(self, type):\n \"\"\"\n Get OpProto by a type string.\n Args:\n type(str): The type with which the operator is registered on the C++ side.\n\n Returns(framework_pb2.OpProto): The OpProto\n\n \"\"\"\n if type not in self.op_proto_map:\n raise ValueError(\"Operator \\\"%s\\\" has not been registered.\" % type)\n return self.op_proto_map[type]\n\n def update_op_proto(self):\n op_protos = get_all_op_protos()\n for proto in op_protos:\n if proto.type not in self.op_proto_map:\n self.op_proto_map[proto.type] = proto\n\n @staticmethod\n def generated_op_attr_names():\n return {\n core.op_proto_and_checker_maker.kOpRoleAttrName(),\n core.op_proto_and_checker_maker.kOpRoleVarAttrName(),\n core.op_proto_and_checker_maker.kOpNameScopeAttrName(),\n core.op_proto_and_checker_maker.kOpCreationCallstackAttrName(),\n core.op_proto_and_checker_maker.kOpDeviceAttrName()\n }\n\n\nclass Operator(object):\n \"\"\"\n In Fluid, all operations are represented by Operators, and an Operator\n is regarded as a built-in instruction of a Block. Users can use these\n built-in instructions to describe their neural network.\n\n Args:\n block(Block): The block that holds the current operator.\n desc(core.OpDesc): The protobuf description of Operator.\n type(str): The type of operator. Default None.\n inputs(dict): The input of this Operator; 
it is a dictionary, for every\n element, key is the input parameter name, and value is a list of\n variables. Default None.\n outputs(dict): The output of this Operator. it is a dictionary, for\n every element, key is the input parameter name, and value is a list\n of variables. Default None.\n attrs(dict): The attributes of this Operator. it is a dictionary, for\n every element, key is attribute name, and value is the attribute value.\n The attribute type should be as same as the type registered in C++ side.\n Default None.\n\n Returns:\n Operator: The initialized Operator.\n\n Raises:\n ValueError: If the passed input, output and attrs doesn't match the\n initializing Operator's that registered in C++ side.\n\n Notes:\n The constructor of operator should not be invoked directly. Use\n Block.append_op or Block._prepend_op instead.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n # var1 += var2 + var3\n cur_block.append_op(type=\"sum\",\n inputs={\"X\": [var1, var2, var3]},\n outputs={\"Out\": [var1]})\n \"\"\"\n OP_WITHOUT_KERNEL_SET = {\n 'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',\n 'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',\n 'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',\n 'gen_nccl_id', 'c_gen_nccl_id', 'c_comm_init', 'c_sync_calc_stream',\n 'c_sync_comm_stream', 'queue_generator', 'dequeue', 'enqueue'\n }\n\n def __init__(self,\n block,\n desc,\n type=None,\n inputs=None,\n outputs=None,\n attrs=None):\n if in_dygraph_mode():\n if type is None:\n raise ValueError(\n \"`type` to initialized an Operator can not be None.\")\n self._type = type\n self.attrs = attrs if attrs else {}\n else:\n self.block = block\n self.desc = desc\n # note: not add self.attrs here:\n # https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173\n op_attrs = attrs\n if op_attrs is None:\n op_attrs = dict()\n del attrs\n\n op_maker = core.op_proto_and_checker_maker\n\n if op_maker.kOpRoleAttrName() not in op_attrs:\n op_attrs[op_maker.kOpRoleAttrName(\n )] = self.block.program._op_role\n\n role_var_name = op_maker.kOpRoleVarAttrName()\n if len(self.block.program.\n _op_role_var) != 0 and role_var_name not in op_attrs:\n op_attrs[role_var_name] = self.block.program._op_role_var\n\n if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0:\n del op_attrs[role_var_name]\n\n if len(self.desc.type()) != 0:\n return\n if type is None:\n raise ValueError(\n \"`type` to initialized an Operator can not be None.\")\n else:\n callstack_var_name = op_maker.kOpCreationCallstackAttrName()\n op_attrs[callstack_var_name] = []\n for frame in traceback.extract_stack():\n op_attrs[callstack_var_name].append(\n ' File \"{}\", line {}, in {}'.format(frame[0], frame[1],\n frame[2]))\n op_attrs[callstack_var_name].append(' {}'.format(frame[\n 3]))\n\n self.desc.set_type(type)\n proto = OpProtoHolder.instance().get_op_proto(type)\n\n namescope_var_name = op_maker.kOpNameScopeAttrName()\n op_attrs[namescope_var_name] = _full_name_scope()\n\n # set device for op with kernels, give warning for op without kernels\n # when force_cpu and device_guard are used at the same time, a warning will be given.\n # TODO(zhangting2020): when force_cpu is removed, clear warning below.\n if _current_device is not None:\n if self._has_kernel(type):\n op_device = op_maker.kOpDeviceAttrName()\n op_attrs[op_device] = _current_device\n else:\n warnings.warn(\"The Op(%s) is 
not able to set a device.\" %\n type)\n if 'force_cpu' in op_attrs:\n if (type == 'less_than' and op_attrs['force_cpu'] is not None\n ) or op_attrs['force_cpu'] != False:\n warnings.warn(\n \"The Attr(force_cpu) of Op(%s) will be deprecated in the future, \"\n \"please use 'device_guard' instead. 'device_guard' has higher priority when they are \"\n \"used at the same time.\" % type)\n\n def find_name(var_list, name):\n for var_name in var_list:\n if var_list[var_name] is not None and var_name == name:\n return True\n return False\n\n if inputs is not None:\n for in_proto in proto.inputs:\n found = find_name(inputs, in_proto.name)\n assert found or in_proto.dispensable, \"Input {} not found\".format(\n in_proto.name)\n if found:\n in_args = inputs[in_proto.name]\n if not isinstance(in_args, (list, tuple)):\n in_args = [in_args]\n if not in_proto.duplicable and len(in_args) > 1:\n raise ValueError(\n \"Input %s expects only one input, but %d are given.\"\n % (in_proto.name, len(in_args)))\n in_arg_names = []\n for index, arg in enumerate(in_args):\n if isinstance(arg, six.string_types):\n in_arg_names.append(arg)\n elif isinstance(arg, six.binary_type):\n in_arg_names.append(arg.decode())\n elif isinstance(arg, (Variable, core.VarBase)):\n in_arg_names.append(cpt.to_text(arg.name))\n else:\n raise TypeError(\n \"The type of '%s' in operator %s should be \"\n \"one of [basestring(), str, Variable] in python2, \"\n \"or one of [str, bytes, Variable] in python3, \"\n \"but received: %s\" %\n (in_proto.name, type, arg))\n self.desc.set_input(in_proto.name, in_arg_names)\n else:\n self.desc.set_input(in_proto.name, [])\n\n if outputs is not None:\n for m in proto.outputs:\n if (m.name not in outputs) and m.dispensable:\n continue\n if not ((m.name in outputs) or m.dispensable):\n raise ValueError((\"Incorrect setting for output(s) of \"\n \"operator \\\"%s\\\", should set: [%s].\")\n % (type, m.name))\n for out_proto in proto.outputs:\n if out_proto.name not in outputs:\n continue\n out_args = outputs[out_proto.name]\n if not isinstance(out_args, list):\n out_args = [out_args]\n if not out_proto.duplicable and len(out_args) > 1:\n raise ValueError(\n \"Output %s expects only one output, but %d are given.\"\n % (out_proto.name, len(out_args)))\n out_arg_names = []\n for arg in out_args:\n out_arg_names.append(cpt.to_text(arg.name))\n # TODO(minqiyang): could we remove variable's op in static mode?\n if not in_dygraph_mode():\n arg.op = self\n self.desc.set_output(out_proto.name, out_arg_names)\n\n if op_attrs is not None:\n if not isinstance(op_attrs, dict):\n raise TypeError(\"'attrs' should be a dict.\")\n for attr in proto.attrs:\n attr_name = attr.name\n if (attr_name not in op_attrs) or (\n op_attrs[attr_name] is None):\n continue\n attr_val = op_attrs[attr_name]\n self._update_desc_attr(attr_name, attr_val)\n\n self.desc.check_attrs()\n if self._has_kernel(type):\n self.desc.infer_var_type(self.block.desc)\n self.desc.infer_shape(self.block.desc)\n\n def _has_kernel(self, op_type):\n return op_type not in self.OP_WITHOUT_KERNEL_SET\n\n def to_string(self, throw_on_error):\n \"\"\"\n Get debug string.\n\n Args:\n throw_on_error(bool): Whether to raise exception if self is not\n initialized.\n\n Returns:\n str: The debug string.\n\n \"\"\"\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))\n return _debug_string_(proto, throw_on_error)\n\n def _to_readable_code(self, skip_op_callstack=True):\n \"\"\"\n Get readable debug string of 
Operator.\n\n .. note::\n If you want to get the debug string in protobuf format,\n please use :code:`to_string` method.\n\n Args:\n skip_op_callstack(bool): whether to skip parsing Operator's attribute\n op_callstack, default value is True\n\n Returns:\n string: The formatted Operator string.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n var = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n new_op = cur_block.append_op(type=\"abs\",\n inputs={\"X\": [var]},\n outputs={\"Out\": [var]})\n print(new_op._to_readable_code())\n \"\"\"\n assert isinstance(\n skip_op_callstack, bool\n ), \"skip_op_callstack parameter's type is error, expect bool, received %s\".format(\n type(skip_op_callstack))\n outputs_str = \"{\"\n for i in range(0, len(self.output_names)):\n outputs_str += \"{name}=\".format(name=self.output_names[i])\n o = self.output(self.output_names[i])\n outputs_str += \"{value}\".format(value=o)\n if i != len(self.output_names) - 1:\n outputs_str += \", \"\n outputs_str += \"}\"\n\n inputs_str = \"{\"\n for i in range(0, len(self.input_names)):\n inputs_str += \"{name}=\".format(name=self.input_names[i])\n o = self.input(self.input_names[i])\n inputs_str += \"{value}\".format(value=o)\n\n if i != len(self.input_names) - 1:\n inputs_str += \", \"\n inputs_str += \"}\"\n\n attr_names = sorted(self.attr_names)\n attrs_str = \"\"\n for i in range(0, len(attr_names)):\n name = attr_names[i]\n if skip_op_callstack and name == \"op_callstack\":\n continue\n\n attr_type = self.desc.attr_type(name)\n if attr_type == core.AttrType.BLOCK:\n a = \"{name} = block[{value}]\".format(\n name=name, type=attr_type, value=self._block_attr_id(name))\n attrs_str += a\n if i != len(attr_names) - 1:\n attrs_str += \", \"\n continue\n\n if attr_type == core.AttrType.BLOCKS:\n a = \"{name} = blocks{value}\".format(\n name=name,\n type=attr_type,\n value=self._blocks_attr_ids(name))\n attrs_str += a\n if i != len(attr_names) - 1:\n attrs_str += \", \"\n continue\n\n a = \"{name} = {value}\".format(\n name=name, type=attr_type, value=self.desc.attr(name))\n attrs_str += a\n if i != len(attr_names) - 1:\n attrs_str += \", \"\n\n if outputs_str != \"{}\":\n op_str = \"{outputs} = {op_type}(inputs={inputs}, {attrs})\".\\\n format(outputs = outputs_str, op_type=self.type, inputs=inputs_str, attrs=attrs_str)\n else:\n op_str = \"{op_type}(inputs={inputs}, {attrs})\".\\\n format(op_type=self.type, inputs=inputs_str, attrs=attrs_str)\n return op_str\n\n def __str__(self):\n return self._to_readable_code()\n\n __repr__ = __str__\n\n @property\n def type(self):\n return self.desc.type()\n\n def input(self, name):\n \"\"\"\n Get the input arguments according to the input parameter name.\n\n Args:\n name(str): The input parameter name.\n\n Returns:\n list: return the list of argument names that associated with \\\n the specific parameter name.\n \"\"\"\n return self.desc.input(name)\n\n def _rename_input(self, old_name, new_name):\n \"\"\"\n Rename the `old_name` to `new_name`.\n\n Args:\n old_name(str): The old name of the Operator's input.\n new_name(str): The new name of the Operator's input.\n\n Returns:\n None\n \"\"\"\n self.desc._rename_input(old_name, new_name)\n\n def _rename_output(self, old_name, new_name):\n \"\"\"\n Rename the `old_name` to `new_name`.\n\n Args:\n old_name(str): The old name of the Operator's output.\n new_name(str): The new name of the Operator's output.\n\n 
Returns:\n None\n \"\"\"\n self.desc._rename_output(old_name, new_name)\n\n @property\n def input_names(self):\n return self.desc.input_names()\n\n @property\n def input_arg_names(self):\n return self.desc.input_arg_names()\n\n @property\n def output_arg_names(self):\n return self.desc.output_arg_names()\n\n def output(self, name):\n \"\"\"\n Get output arguments by the output parameter name.\n\n Args:\n name(str): The output parameter name.\n\n Returns:\n list: return the list of argument names associated with \\\n the specific parameter name.\n \"\"\"\n return self.desc.output(name)\n\n @property\n def output_names(self):\n return self.desc.output_names()\n\n @property\n def idx(self):\n for i, op in enumerate(self.block.ops):\n if op == self:\n return i\n raise ValueError(\n \"Can't find op itself in it's block. It could be a bug of Paddle.\")\n\n def has_attr(self, name):\n \"\"\"\n Whether this Operator has the attribute with name or not.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n bool: True if has this attribute.\n\n \"\"\"\n return self.desc.has_attr(name)\n\n def attr_type(self, name):\n \"\"\"\n Get the type of attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n core.AttrType: the attribute type.\n \"\"\"\n return self.desc.attr_type(name)\n\n def _set_attr(self, name, val):\n \"\"\"\n Set the value of attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n val(bool|int|str|float|list): the value of the attribute.\n\n Raises:\n ValueError: If the type of value doesn't match with desc.attr_type(name).\n \"\"\"\n self._update_desc_attr(name, val)\n\n def _remove_attr(self, name):\n self.desc.remove_attr(name)\n\n def _update_desc_attr(self, name, val):\n \"\"\"\n Update the value of desc's attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n val(bool|int|str|float|list): the value of the attribute.\n\n Raises:\n ValueError: If the type of value doesn't match with desc.attr_type(name).\n \"\"\"\n if isinstance(val, Block):\n self.desc.set_block_attr(name, val.desc)\n elif isinstance(val, list) and val and all(\n isinstance(v, Block) for v in val):\n self.desc.set_blocks_attr(name, [v.desc for v in val])\n elif isinstance(val, core.BlockDesc) or \\\n isinstance(val, core.ProgramDesc):\n self.desc.set_serialized_attr(name, val.serialize_to_string())\n else:\n self.desc._set_attr(name, val)\n\n @property\n def attr_names(self):\n return self.desc.attr_names()\n\n def attr(self, name):\n \"\"\"\n Get the attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n bool|int|str|float|list: The attribute value. 
The return value\n can be any valid attribute type.\n \"\"\"\n return self.desc.attr(name)\n\n def _block_attr_id(self, name):\n \"\"\"\n Get the block attribute's id by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n int: the block index.\n \"\"\"\n return self.desc._block_attr_id(name)\n\n def _block_attr(self, name):\n \"\"\"\n Get the block attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n block: the block attribute.\n \"\"\"\n\n id = self._block_attr_id(name)\n assert (id >= 0 and id < len(self.block.program.blocks))\n return self.block.program.blocks[id]\n\n def _blocks_attr(self, name):\n \"\"\"\n Get the blocks attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n list: list of the blocks attribute.\n \"\"\"\n attrs = []\n for i in self._blocks_attr_ids(name):\n assert (i >= 0 and i < len(self.block.program.blocks))\n attrs.append(self.block.program.blocks[i])\n\n return attrs\n\n def _blocks_attr_ids(self, name):\n \"\"\"\n Get the blocks attribute's ids by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n list: list of the blocks ids.\n \"\"\"\n\n return self.desc._blocks_attr_ids(name)\n\n def all_attrs(self):\n \"\"\"\n Get the attribute dict.\n\n Returns:\n dict: The Operator's attribute dict, name->attr.\n \"\"\"\n attr_names = self.attr_names\n attr_map = {}\n for n in attr_names:\n attr_type = self.desc.attr_type(n)\n if attr_type == core.AttrType.BLOCK:\n attr_map[n] = self._block_attr(n)\n continue\n\n if attr_type == core.AttrType.BLOCKS:\n attr_map[n] = self._blocks_attr(n)\n continue\n\n attr_map[n] = self.attr(n)\n\n return attr_map\n\n def _is_optimize_op(self):\n op_maker = core.op_proto_and_checker_maker\n OPTIMIZE = core.op_proto_and_checker_maker.OpRole.Optimize\n\n if not self.desc.has_attr(op_maker.kOpRoleAttrName()):\n return False\n\n op_role = self.desc.attr(op_maker.kOpRoleAttrName())\n if op_role & int(OPTIMIZE):\n return True\n\n return False\n\n def _is_backward_op(self):\n op_maker = core.op_proto_and_checker_maker\n BACKWARD = core.op_proto_and_checker_maker.OpRole.Backward\n\n if not self.desc.has_attr(op_maker.kOpRoleAttrName()):\n return False\n\n op_role = self.desc.attr(op_maker.kOpRoleAttrName())\n if op_role & int(BACKWARD):\n return True\n\n return False\n\n\nclass Block(object):\n \"\"\"\n In Fluid, a Program is consistence of multi-Block, and Block stores\n VarDesc and OpDesc. In a specific Block, a VarDesc have a unique name.\n One block could have some child blocks, and child block's name scopes\n should inherit the parent's so that OpDesc in child block can reference\n a VarDesc that is stored in the parent block.\n Please reference the framework.proto for details.\n\n Args:\n program(Program): The Program that the Block belongs to.\n idx(int): The block's id in the Program.\n\n Notes:\n The constructor of Block should not be invoked directly. Please\n use `Program._create_block()` to create a block.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n var = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n cur_block.append_op(type=\"abs\",\n inputs={\"X\": [var]},\n outputs={\"Out\": [var]})\n \"\"\"\n\n def __init__(self, program, idx):\n self.desc = program.desc.block(idx)\n self.vars = collections.OrderedDict() # var_name --> var\n self.ops = list() # operator list\n self.program = program\n self.removed_vars = collections.OrderedDict()\n\n def __str__(self):\n return self._to_readable_code()\n\n def _to_readable_code(self, skip_op_callstack=True):\n \"\"\"\n Get readable debug string of Block.\n\n .. note::\n If you want to get the debug string in protobuf format,\n please use :code:`to_string` method.\n\n Args:\n skip_op_callstack(bool): whether to skip parsing Operator's attribute\n op_callstack, default value is True\n\n Returns:\n string: The formatted Block string.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_var = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n new_op = cur_block.append_op(type=\"abs\",\n inputs={\"X\": [new_var]},\n outputs={\"Out\": [new_var]})\n print(cur_block._to_readable_code())\n \"\"\"\n assert isinstance(\n skip_op_callstack, bool\n ), \"skip_op_callstack parameter's type is error, expect bool, received %s\".format(\n type(skip_op_callstack))\n block_str = \"{ // block \"\n block_str += \"{}\\n\".format(self.idx)\n for var in list(self.vars.values()):\n block_str += \" {}\\n\".format(var._to_readable_code())\n block_str += \"\\n\"\n for op in self.ops:\n block_str += \" {}\\n\".format(\n op._to_readable_code(skip_op_callstack))\n block_str += \"}\"\n return block_str\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n Get debug string.\n\n Args:\n throw_on_error(bool): raise exception when self is not initialized\n when throw_on_error is True.\n with_details(bool): more details about variables and parameters\n (e.g. trainable, optimize_attr, ...) will be printed when\n with_details is True. 
Default False.\n\n Returns:\n str: The debug string.\n \"\"\"\n assert isinstance(throw_on_error, bool) and isinstance(with_details,\n bool)\n if with_details:\n re_add_indent = re.compile(r\"\\n(.)\")\n res_str = \"blocks {\\n idx: %d\\n parent_idx: %d\" % (\n self.idx, self.parent_idx)\n for var in list(self.vars.values()):\n res_str += \"\\n vars {\\n %s }\" % re_add_indent.sub(\n r\"\\n \\1\", var.to_string(throw_on_error, with_details))\n for op in self.ops:\n res_str += \"\\n ops {\\n %s }\" % re_add_indent.sub(\n r\"\\n \\1\", op.to_string(throw_on_error))\n res_str += \"\\n}\"\n else:\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.BlockDesc.FromString(\n six.binary_type(protostr))\n res_str = _debug_string_(proto, throw_on_error)\n return res_str\n\n __repr__ = __str__\n\n @property\n def parent_idx(self):\n return self.desc.parent\n\n @property\n def forward_block_idx(self):\n return self.desc.get_forward_block_idx()\n\n def _set_forward_block_idx(self, idx):\n \"\"\"\n Set the forward block Idx.\n\n Args:\n idx(int): the block index.\n\n Returns:\n None\n \"\"\"\n self.desc._set_forward_block_idx(idx)\n\n @property\n def backward_block_idx(self):\n cur_block_idx = self.idx\n for block in self.program.blocks:\n if block.forward_block_idx == cur_block_idx:\n return block.idx\n return -1\n\n @property\n def idx(self):\n return self.desc.id\n\n def var(self, name):\n \"\"\"\n Get a Variable by name from this block.\n\n Args:\n name(str): the Variable's name.\n\n Raises:\n ValueError: The If input's type is not str, or this block\n doesn't have a Variable with the giving name.\n\n Returns:\n Variable: the Variable with the giving name.\n \"\"\"\n if not isinstance(name, six.string_types):\n raise TypeError(\n \"var require string as parameter, but get %s instead.\" %\n (type(name)))\n v = self.vars.get(name, None)\n if v is None:\n raise ValueError(\"var %s not in this block\" % name)\n return v\n\n def _find_var_recursive(self, name):\n \"\"\"\n Get a Variable by name from this block recursively.\n\n Args:\n name(str): the Variable's name.\n\n Returns:\n Variable: the Variable with the giving name. 
Or None if not found.\n \"\"\"\n frontier = list()\n visited = set()\n\n frontier.append(self)\n\n prog = self.program\n\n while len(frontier) != 0: # BFS\n cur = frontier[0]\n frontier = frontier[1:]\n\n if id(cur) in visited:\n continue\n\n if cur.has_var(name):\n return cur.var(name)\n\n if cur.parent_idx != -1:\n frontier.append(prog.block(cur.parent_idx))\n\n if cur.forward_block_idx != -1:\n frontier.append(prog.block(cur.forward_block_idx))\n\n visited.add(id(cur))\n return None\n\n def _var_recursive(self, name):\n \"\"\"\n Get a Variable by name from this block recursively.\n\n Args:\n name(str): the Variable's name.\n\n Raises:\n ValueError: this block and this parent block doesn't\n have a Variable with the giving name.\n\n Returns:\n Variable: the Variable with the giving name.\n \"\"\"\n var = self._find_var_recursive(name)\n if var:\n return var\n else:\n raise ValueError(\"Var {0} is not found recursively\".format(name))\n\n def all_parameters(self):\n return list(self.iter_parameters())\n\n def iter_parameters(self):\n return (item[1] for item in six.iteritems(self.vars)\n if isinstance(item[1], Parameter))\n\n def create_var(self, *args, **kwargs):\n if in_dygraph_mode():\n var = _varbase_creator(*args, **kwargs)\n else:\n var = Variable(block=self, *args, **kwargs)\n if 'initializer' in kwargs:\n kwargs['initializer'](var, self)\n return var\n\n def has_var(self, name):\n return name in self.vars\n\n def _rename_var(self, name, new_name):\n \"\"\"\n Rename variable in vars and ops' inputs and outputs\n\n Args:\n name(str): the name that need to be renamed.\n new_name(str): the name that need to rename to.\n\n Raises:\n ValueError: If this block doesn't have this the giving name,\n or the type of the var with the giving name is not Parameter\n or Variable.\n\n Returns:\n Variable: the Variable with the giving name.\n \"\"\"\n name = cpt.to_text(name)\n new_name = cpt.to_text(new_name)\n\n if not self.has_var(name):\n raise ValueError(\"var %s is not in current block\" % name)\n v = self.var(name)\n if type(v) == Parameter:\n var_type = \"Parameter\"\n stop_gradient = v.stop_gradient\n trainable = v.trainable\n optimize_attr = v.optimize_attr\n regularizer = v.regularizer\n error_clip = v.error_clip\n elif type(v) == Variable:\n var_type = \"Variable\"\n error_clip = v.error_clip\n stop_gradient = v.stop_gradient\n else:\n raise ValueError(\"unsupported var type: %s\", type(v))\n orig_var_type = v.type\n self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))\n # NOTE: v is destroyed by C++ after calling _rename_var.\n d = self.desc.find_var(cpt.to_bytes(new_name))\n if var_type == \"Parameter\":\n if in_dygraph_mode():\n var = ParamBase(\n d.shape(),\n d.dtype(),\n type=orig_var_type,\n name=new_name,\n stop_gradient=stop_gradient,\n trainable=trainable,\n optimize_attr=optimize_attr,\n regularizer=regularizer,\n error_clip=error_clip)\n else:\n var = Parameter(\n self,\n d.shape(),\n d.dtype(),\n type=orig_var_type,\n name=new_name,\n stop_gradient=stop_gradient,\n trainable=trainable,\n optimize_attr=optimize_attr,\n regularizer=regularizer,\n error_clip=error_clip)\n elif var_type == \"Variable\":\n var = Variable(\n self,\n type=orig_var_type,\n name=new_name,\n error_clip=error_clip,\n stop_gradient=stop_gradient)\n\n # rename the python side, _sync_with_cpp will only add\n # new vars/ops to python side.\n self.vars[new_name] = var\n del self.vars[name]\n self._sync_with_cpp()\n return var\n\n def _remove_var(self, name):\n self._sync_with_cpp()\n 
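# remove the variable from the C++ block desc first, then drop it from\n # the Python-side vars map.\n 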
self.desc._remove_var(cpt.to_bytes(name))\n del self.vars[name]\n\n def create_parameter(self, *args, **kwargs):\n global_block = self.program.global_block()\n param = None\n if in_dygraph_mode():\n param = ParamBase(*args, **kwargs)\n else:\n param = Parameter(global_block, *args, **kwargs)\n if 'initializer' in kwargs:\n\n def _is_inited_by(block, var):\n init_ops = []\n for op in block.ops:\n if var.name in op.output_arg_names:\n # In startup_program, \"c_broadcast\" and \"c_sync_comm_stream\"\n # are treated as initialization ops that cause error. \n # Think of \"c_broadcast\" and \"c_sync_comm_stream\" as a special case here.\n if op.type in [\"c_broadcast\", \"c_sync_comm_stream\"]:\n continue\n init_ops.append(op)\n return init_ops\n\n initializer = kwargs['initializer']\n init_ops = _is_inited_by(global_block, param)\n init_ops_len = len(init_ops)\n if init_ops_len > 1:\n raise RuntimeError(\"param \" + param.name +\n \" is inited by multiple init ops \" + str(\n init_ops))\n elif init_ops_len == 1:\n # TODO already inited, do nothing, should log a warning\n pass\n else:\n initializer(param, self)\n param.stop_gradient = False\n return param\n\n def append_op(self, *args, **kwargs):\n \"\"\"\n Appends a new Operator according to the giving arguments.\n\n Returns:\n Operator: the append Operator.\n \"\"\"\n if in_dygraph_mode():\n attrs = kwargs.get(\"attrs\", {})\n type = kwargs.get(\"type\", None)\n op = Operator(\n block=self,\n desc=None,\n type=type,\n inputs=None,\n outputs=None,\n attrs=attrs)\n\n # record ops in tracer rather than blocks\n #\n # TODO(minqiyang): add op stop_gradient support in static mode too.\n # currently, we only support stop_gradient in dygraph mode.\n\n _dygraph_tracer().trace_op(type,\n kwargs.get(\"inputs\", {}),\n kwargs.get(\"outputs\", {}), attrs\n if attrs else {},\n kwargs.get(\"stop_gradient\", False))\n else:\n op_desc = self.desc.append_op()\n op = Operator(\n block=self,\n desc=op_desc,\n type=kwargs.get(\"type\", None),\n inputs=kwargs.get(\"inputs\", None),\n outputs=kwargs.get(\"outputs\", None),\n attrs=kwargs.get(\"attrs\", None))\n\n self.ops.append(op)\n\n return op\n\n def _insert_op(self, index, *args, **kwargs):\n \"\"\"\n Insert a Operator according to the giving arguments.\n\n Args:\n index(int): the place that the operator to insert.\n\n Returns:\n Operator: the insert Operator.\n \"\"\"\n self._sync_with_cpp()\n op_desc = self.desc._insert_op(index)\n op = Operator(block=self, desc=op_desc, *args, **kwargs)\n self.ops.insert(index, op)\n return op\n\n def _remove_op(self, index):\n \"\"\"\n Remove the specific position operator.\n\n Args:\n index(int): the position that the operator to insert.\n\n Returns:\n None\n \"\"\"\n self._sync_with_cpp()\n self.desc._remove_op(index, index + 1)\n del self.ops[index]\n\n def _slice_ops(self, start, end):\n \"\"\"\n Return the Operator between start and end.\n\n Args:\n start(int): the start position.\n end(int): the end position.\n\n Returns:\n list: the Operators between start and end.\n \"\"\"\n return self.ops[start:end]\n\n def _prepend_op(self, *args, **kwargs):\n if in_dygraph_mode():\n type = kwargs.get(\"type\", None)\n attrs = kwargs.get(\"attrs\", {})\n op = Operator(\n self, None, type=type, inputs=None, outputs=None, attrs=attrs)\n\n _dygraph_tracer().trace_op(type,\n kwargs.get(\"inputs\", {}),\n kwargs.get(\"outputs\", {}), attrs\n if attrs else {},\n kwargs.get(\"stop_gradient\", False))\n else:\n op_desc = self.desc._prepend_op()\n op = Operator(\n self,\n op_desc,\n 
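# static-graph path: wrap the newly prepended C++ OpDesc in a Python\n # Operator mirroring the kwargs below.\n 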
type=kwargs.get(\"type\", None),\n inputs=kwargs.get(\"inputs\", None),\n outputs=kwargs.get(\"outputs\", None),\n attrs=kwargs.get(\"attrs\", None))\n self.ops.insert(0, op)\n\n return op\n\n def _sync_with_cpp(self):\n \"\"\"\n Sync from the desc on the c++ end. This method is used to synchronize\n the c++ desc instance generated by backward.\n \"\"\"\n # sync variables from cpp\n for var in self.desc.all_vars():\n if not self.has_var(var.name()):\n self.create_var(name=var.name(), desc=var, type=var.type())\n\n # sync variables removed from c++ end\n for var in list(self.vars.keys()):\n if not self.desc.find_var(cpt.to_bytes(var)):\n self.vars.pop(var)\n\n # sync operators from cpp\n ops_in_cpp = []\n for op_idx in range(0, self.desc.op_size()):\n ops_in_cpp.append(self.desc.op(op_idx))\n\n if len(self.ops) != 0:\n first_op_in_python = self.ops[0].desc\n last_op_in_python = self.ops[len(self.ops) - 1].desc\n start_index = None\n end_index = None\n for index in range(len(ops_in_cpp)):\n if first_op_in_python == ops_in_cpp[index]:\n start_index = index\n if last_op_in_python == ops_in_cpp[index]:\n end_index = index\n assert start_index is not None\n assert end_index is not None\n assert start_index <= end_index\n else:\n start_index = 0\n end_index = -1\n\n # sync ops append to the head of cpp_ops\n for index in range((start_index - 1 - 1), -1, -1):\n op_desc = ops_in_cpp[index]\n op = Operator(self, op_desc)\n self.ops.insert(0, op)\n\n # sync ops append to the end of cpp_ops\n for index in range((end_index + 1), len(ops_in_cpp)):\n op_desc = ops_in_cpp[index]\n op = Operator(self, op_desc)\n self.ops.append(op)\n\n # sync ops removed from c++ end\n if end_index != -1 and end_index < len(self.ops):\n ops_in_cpp_index = 0\n ops_in_python_index = 0\n while ops_in_python_index < len(\n self.ops) and ops_in_cpp_index < len(ops_in_cpp):\n if self.ops[ops_in_python_index].desc != ops_in_cpp[\n ops_in_cpp_index]:\n del self.ops[ops_in_python_index]\n else:\n ops_in_cpp_index += 1\n ops_in_python_index += 1\n\n assert len(self.ops) == len(ops_in_cpp)\n for index in range(len(self.ops)):\n assert self.ops[index].desc == ops_in_cpp[index]\n\n def _copy_param_info_from(self, other):\n \"\"\"\n Copy the information of parameters from the other block.\n\n Args:\n other(Block): the other block.\n\n Raises:\n ValueError: If type of input is not Block, or the `other` and this\n block is not in the same topology.\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Block):\n raise TypeError(\n \"_copy_param_info_from should be invoked with Block\")\n for p in other.iter_parameters():\n assert isinstance(p, Parameter)\n v = self.vars.get(p.name, None)\n if v is None:\n # if the Parameter is pruned, v may be None\n continue\n assert isinstance(v, Variable)\n new_p = None\n if in_dygraph_mode():\n new_p = ParamBase(\n shape=v.shape,\n dtype=v.dtype,\n type=v.type,\n lod_level=v.lod_level,\n stop_gradient=p.stop_gradient,\n trainable=p.trainable,\n optimize_attr=p.optimize_attr,\n regularizer=p.regularizer,\n error_clip=p.error_clip,\n name=v.name)\n else:\n new_p = Parameter(\n block=self,\n shape=v.shape,\n dtype=v.dtype,\n type=v.type,\n lod_level=v.lod_level\n if v.type == core.VarDesc.VarType.LOD_TENSOR else None,\n stop_gradient=p.stop_gradient,\n trainable=p.trainable,\n optimize_attr=p.optimize_attr,\n regularizer=p.regularizer,\n error_clip=p.error_clip,\n name=v.name)\n self.vars[new_p.name] = new_p\n\n def _clone_variable(self, var, force_persistable=True):\n \"\"\"\n Clone a variable into 
current block.\n\n Args:\n var: the variable to be cloned.\n force_persistable(bool): True means setting the result variable to being persistable.\n False means setting the persistable the same with that of input var.\n default: True.\n\n Returns:\n Variable: the new variable cloned from 'var' in current block.\n \"\"\"\n assert isinstance(var, Variable)\n ret_var = None\n # make STEP_SCOPES var can be safely cloned.\n if var.type == core.VarDesc.VarType.STEP_SCOPES:\n ret_var = self.create_var(\n name=var.name, persistable=var.persistable, type=var.type)\n elif var.type == core.VarDesc.VarType.RAW:\n ret_var = self.create_var(\n name=var.name, persistable=var.persistable, type=var.type)\n elif var.type == core.VarDesc.VarType.SELECTED_ROWS:\n ret_var = self.create_var(\n name=var.name,\n shape=var.shape,\n dtype=var.dtype,\n type=var.type,\n persistable=True if force_persistable else var.persistable,\n is_data=var.is_data,\n need_check_feed=var.desc.need_check_feed())\n else:\n ret_var = self.create_var(\n name=var.name,\n shape=var.shape,\n dtype=var.dtype,\n type=var.type,\n lod_level=var.lod_level,\n persistable=True if force_persistable else var.persistable,\n is_data=var.is_data,\n need_check_feed=var.desc.need_check_feed())\n return ret_var\n\n\nclass IrNode(object):\n \"\"\"\n Python IrNode. Beneath it is a core.Node, which is used for Ir Pass.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Construct an IrNode using core.Node.\n\n Args:\n node(core.Node): C++ Node.\n \"\"\"\n assert isinstance(node,\n core.Node), 'node must be the instance of core.Node.'\n self.node = node\n\n def name(self):\n \"\"\"\n Return the node name.\n\n Returns:\n str: node name.\n \"\"\"\n return self.node.name()\n\n def node_type(self):\n \"\"\"\n Return the node type.\n\n Returns:\n core.Node.Type: node type(core.Node.Type.Operation or core.Node.Type.Variable).\n \"\"\"\n return self.node.node_type()\n\n def var(self):\n \"\"\"\n Return the node variable description.\n\n Returns:\n core.VarDesc: node variable description.\n \"\"\"\n return self.node.var()\n\n def op(self):\n \"\"\"\n Return the node operator description.\n\n Returns:\n core.OpDesc: node operator description.\n \"\"\"\n return self.node.op()\n\n def id(self):\n \"\"\"\n Return the node id.\n\n Returns:\n int: node id.\n \"\"\"\n return self.node.id()\n\n def is_op(self):\n \"\"\"\n If the node is an operator, then return true.\n\n Returns:\n bool: indicate whether the node is an operator.\n \"\"\"\n return self.node.is_op()\n\n def is_var(self):\n \"\"\"\n If the node is a variable, then return true.\n\n Returns:\n bool: indicate whether the node is a variable.\n \"\"\"\n return self.node.is_var()\n\n def is_ctrl_var(self):\n \"\"\"\n If the node is a control dependence variable, then return true.\n\n Returns:\n bool: indicate whether the node is a control dependence variable.\n \"\"\"\n return self.node.is_ctrl_var()\n\n def clear_inputs(self):\n \"\"\"\n Clear the node inputs. 
After executing the `clear_inputs` function,\n the node inputs will be empty.\n \"\"\"\n self.node.clear_inputs()\n\n def remove_input_by_id(self, node_id):\n \"\"\"\n Remove a node from inputs by the given node id.\n\n Args:\n node_id(int): the given node id.\n \"\"\"\n self.node.remove_input(node_id)\n\n def remove_input(self, node):\n \"\"\"\n Remove a node from inputs.\n\n Args:\n node(IrNode): the node being removed.\n \"\"\"\n self.node.remove_input(node.node)\n\n def append_input(self, node):\n \"\"\"\n Append a node in inputs.\n\n Args:\n node(IrNode): the node being appended.\n \"\"\"\n self.node.append_input(node.node)\n\n def clear_outputs(self):\n \"\"\"\n Clear the node outputs. After executing the `clear_outputs` function,\n the node outputs will be empty.\n \"\"\"\n self.node.clear_outputs()\n\n def remove_output_by_id(self, node_id):\n \"\"\"\n Remove a node from outputs by the given node id.\n\n Args:\n node_id(int): the given node id.\n \"\"\"\n self.node.remove_output(node_id)\n\n def remove_output(self, node):\n \"\"\"\n Remove a node from outputs.\n\n Args:\n node(IrNode): the node being removed.\n \"\"\"\n self.node.remove_output(node.node)\n\n def append_output(self, node):\n \"\"\"\n Append a node in outputs.\n\n Args:\n node(IrNode): the node being appended.\n \"\"\"\n self.node.append_output(node.node)\n\n @property\n def inputs(self):\n \"\"\"\n Return the node inputs.\n\n Returns:\n list(IrNode): node inputs wrapped by IrNode.\n \"\"\"\n return [IrNode(n) for n in self.node.inputs]\n\n @property\n def outputs(self):\n \"\"\"\n Return the node outputs.\n\n Returns:\n list(IrNode): node outputs wrapped by IrNode.\n \"\"\"\n return [IrNode(n) for n in self.node.outputs]\n\n\nclass IrVarNode(IrNode):\n \"\"\"\n Python IrVarNode. 
Beneath it is a core.Node, it inherits from IrNode.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Construct an IrVarNode using core.Node.\n\n Args:\n node(core.Node): C++ Node.\n \"\"\"\n assert isinstance(node, core.Node) and node.is_var(), \\\n 'node must be the instance of core.Node and it must be a variable node.'\n super(IrVarNode, self).__init__(node)\n self.node = node\n\n def set_shape(self, shape):\n \"\"\"\n Set the node variable shape.\n\n Args:\n shape(list): shape to be set.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n self.node.var().set_shape(shape)\n\n def persistable(self):\n \"\"\"\n If the variable node is a persistable variable, then return true.\n\n Returns:\n bool: indicate whether the variable is persistable.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n return self.node.var().persistable()\n\n def type(self):\n \"\"\"\n Return the variable type.\n\n Returns:\n core.VarDesc.VarType: the variable type.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n return self.node.var().type()\n\n def dtype(self):\n \"\"\"\n Return the variable data type.\n\n Returns:\n core.VarDesc.VarType: the variable data type.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n return self.node.var().dtype()\n\n def shape(self):\n \"\"\"\n Return the variable shape.\n\n Returns:\n list: the variable shape.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n return self.node.var().shape()\n\n @property\n def inputs(self):\n \"\"\"\n Return the node inputs.\n\n Returns:\n list(IrOpNode): node inputs wrapped by IrOpNode.\n \"\"\"\n return [IrOpNode(n) for n in self.node.inputs]\n\n @property\n def outputs(self):\n \"\"\"\n Return the node outputs.\n\n Returns:\n list(IrOpNode): node outputs wrapped by IrOpNode.\n \"\"\"\n return [IrOpNode(n) for n in self.node.outputs]\n\n\nclass IrOpNode(IrNode):\n \"\"\"\n Python IrOpNode. 
Beneath it is a core.Node, it inherits from IrNode.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Construct an IrOpNode using core.Node.\n\n Args:\n node(core.Node): C++ Node.\n \"\"\"\n assert isinstance(node, core.Node) and node.is_op(), \\\n 'node must be the instance of core.Node and it must be a operator node.'\n super(IrOpNode, self).__init__(node)\n self.node = node\n\n def rename_input(self, old_input_name, new_input_name):\n \"\"\"\n Rename the input of this node.\n\n Args:\n old_input_name(str): the old input name.\n new_input_name(str): the new input name.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n self.node.op()._rename_input(old_input_name, new_input_name)\n\n def rename_output(self, old_output_name, new_output_name):\n \"\"\"\n Rename the output of this node.\n\n Args:\n old_output_name(str): the old output name.\n new_output_name(str): the new output name.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n self.node.op()._rename_output(old_output_name, new_output_name)\n\n def input(self, name):\n \"\"\"\n Get the argument name list by the parameter name for input.\n\n Args:\n name(str): the parameter name.\n\n Returns:\n list(str): the argument name list.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().input(name)\n\n def output(self, name):\n \"\"\"\n Get the argument name list by the parameter name for output.\n\n Args:\n name(str): the parameter name.\n\n Returns:\n list(str): the argument name list.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().output(name)\n\n def set_type(self, new_type):\n \"\"\"\n Change the operator type into new type.\n\n Args:\n new_type(str): new operator type to be set.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().set_type(new_type)\n\n def set_attr(self, name, val):\n \"\"\"\n Set the value of attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n val(bool|int|str|float|list): the value of the attribute.\n \"\"\"\n self._update_desc_attr(name, val)\n\n def _update_desc_attr(self, name, val):\n \"\"\"\n Update the value of the op desc's attribute by attribute's name.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n desc = self.node.op()\n if isinstance(val, Block):\n desc.set_block_attr(name, val.desc)\n elif isinstance(val, list) and val and \\\n all(isinstance(v, Block) for v in val):\n desc.set_blocks_attr(name, [v.desc for v in val])\n elif isinstance(val, core.BlockDesc) or \\\n isinstance(val, core.ProgramDesc):\n desc.set_serialized_attr(name, val.serialize_to_string())\n else:\n desc._set_attr(name, val)\n\n def input_arg_names(self):\n \"\"\"\n Return input arguments' names of this op node.\n\n Returns:\n list(str): input arguments' names of this op node.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().input_arg_names()\n\n def output_arg_names(self):\n \"\"\"\n Return output arguments' names of this op node.\n\n Returns:\n list(str): output arguments' names of this op node.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().output_arg_names()\n\n @property\n 
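# an op node's graph neighbours are variable nodes, so they are wrapped\n # as IrVarNode below.\n 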
def inputs(self):\n \"\"\"\n Return the node inputs.\n\n Returns:\n list(IrVarNode): node inputs wrapped by IrVarNode.\n \"\"\"\n return [IrVarNode(n) for n in self.node.inputs]\n\n @property\n def outputs(self):\n \"\"\"\n Return the node outputs.\n\n Returns:\n list(IrVarNode): node outputs wrapped by IrVarNode.\n \"\"\"\n return [IrVarNode(n) for n in self.node.outputs]\n\n\nclass IrGraph(object):\n \"\"\"\n Python IrGraph. Beneath it is a core.Graph, which is used for\n creating a c++ Ir Pass Graph. An IrGraph is just a graph view of\n a Program. In an IrGraph, both Variables and Operators are graph\n nodes.\n \"\"\"\n\n def __init__(self, graph, for_test=False):\n \"\"\"\n Construct an IrGraph using core.Graph.\n\n Args:\n graph(core.Graph): C++ Graph.\n for_test(bool): True for the test graph and false for the train graph.\n \"\"\"\n assert isinstance(\n graph, core.Graph), 'graph must be the instance of core.Graph.'\n self.graph = graph\n self._for_test = for_test\n\n def clone(self):\n \"\"\"\n Create a new and duplicated IrGraph.\n\n Warns:\n The method only clones the graph structure, not its attributes.\n\n Returns:\n IrGraph: A new and duplicated graph.\n \"\"\"\n g = self.graph.clone()\n return IrGraph(g, self._for_test)\n\n def is_test(self):\n \"\"\"\n If the graph is used for testing, the function returns true. Otherwise, returns false.\n \"\"\"\n return self._for_test\n\n def all_nodes(self):\n \"\"\"\n Return all nodes included in the graph as a set.\n \"\"\"\n return {IrNode(node) for node in self.graph.nodes()}\n\n def all_var_nodes(self):\n \"\"\"\n Return all variable nodes included in the graph as a set.\n \"\"\"\n return {IrVarNode(node) for node in self.graph.nodes() if node.is_var()}\n\n def all_persistable_nodes(self):\n \"\"\"\n Return all persistable variable nodes included in the graph as a set.\n \"\"\"\n persistable_nodes = set()\n for node in self.graph.nodes():\n if node.is_var() and node.var() is not None and node.var(\n ).persistable():\n persistable_nodes.add(node)\n return {IrVarNode(p) for p in persistable_nodes}\n\n def all_op_nodes(self):\n \"\"\"\n Return all operator nodes included in the graph as a set.\n \"\"\"\n return {IrOpNode(node) for node in self.graph.nodes() if node.is_op()}\n\n def create_persistable_node(self, name, var_type, shape, var_dtype):\n \"\"\"\n Create a persistable variable node in the graph. In IrGraph,\n it can not distinguish between persistable variables and parameters.\n\n Args:\n name(str): the name of the persistable variable node.\n vart_type(core.VarDesc.VarType): the type of the persistable variable node.\n shape(list): the shape of the persistable variable node.\n var_dtype(core.VarDesc.VarType): the data type of the persistable variable node.\n\n Returns:\n IrVarNode: the created persistable variable node.\n \"\"\"\n var_desc = core.VarDesc(name)\n var_desc.set_type(var_type)\n var_desc.set_shape(shape)\n var_desc.set_dtype(var_dtype)\n var_desc.set_persistable(True)\n return IrVarNode(self.graph.create_var_node(var_desc))\n\n def create_var_node(self, name, var_type, shape, var_dtype):\n \"\"\"\n Create a variable node in the graph. 
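A minimal sketch, assuming ``graph`` is an existing IrGraph and using the usual ``core.VarDesc.VarType`` constants:\n\n .. code-block:: python\n\n from paddle.fluid import core\n\n out = graph.create_var_node(\n name='illustrative_out', # hypothetical name\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n shape=[-1, 10],\n var_dtype=core.VarDesc.VarType.FP32)\n\n 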
The created variable node is\n not persistable.\n\n Args:\n name(str): the name of the variable node.\n vart_type(core.VarDesc.VarType): the type of the variable node.\n shape(list): the shape of the variable node.\n var_dtype(core.VarDesc.VarType): the data type of the variable node.\n\n Returns:\n IrVarNode: the created variable node.\n \"\"\"\n\n var_desc = core.VarDesc(name)\n var_desc.set_type(var_type)\n var_desc.set_shape(shape)\n var_desc.set_dtype(var_dtype)\n return IrVarNode(self.graph.create_var_node(var_desc))\n\n def create_control_dep_var(self):\n \"\"\"\n create a control var\n \"\"\"\n return IrVarNode(self.graph.create_control_dep_var())\n\n def create_var_node_from_desc(self, var_desc):\n \"\"\"\n Create a variable node by using an existing VarDesc in the graph.\n Depend on the giving VarDesc, the created variable node may be persistable.\n\n Args:\n var_desc(core.VarDesc): the giving variable description.\n\n Returns:\n IrVarNode: the created variable node.\n \"\"\"\n return IrVarNode(self.graph.create_var_node(var_desc))\n\n def create_op_node(self, op_type, attrs, inputs, outputs):\n \"\"\"\n Create a operator node in the graph.\n\n Args:\n op_type(str): the type of the operator node.\n attrs(dict): the attributes of the operator node.\n inputs(dict): the inputs of the operator node.\n outputs(dict): the outputs of the operator node.\n\n Returns:\n IrOpNode: the created operator node.\n \"\"\"\n op_desc = core.OpDesc()\n op_desc.set_type(op_type)\n for attr, value in six.iteritems(attrs):\n self._update_desc_attr(op_desc, attr, value)\n for input_name, var_nodes in six.iteritems(inputs):\n if not isinstance(var_nodes, list):\n var_nodes = [var_nodes]\n op_desc.set_input(input_name,\n [var_node.name() for var_node in var_nodes])\n for output_name, var_nodes in six.iteritems(outputs):\n if not isinstance(var_nodes, list):\n var_nodes = [var_nodes]\n op_desc.set_output(output_name,\n [var_node.name() for var_node in var_nodes])\n return IrOpNode(self.graph.create_op_node(op_desc))\n\n def create_op_node_from_desc(self, op_desc):\n \"\"\"\n Create a operator node by using an existing OpDesc in the graph.\n\n Args:\n op_desc(core.VarDesc): the giving operator description.\n\n Returns:\n IrOpNode: the created operator node.\n \"\"\"\n return IrOpNode(self.graph.create_op_node(op_desc))\n\n def update_input_link(self, old_input_node, new_input_node, op_node):\n \"\"\"\n Update the input's link of a operator node.\n\n Args:\n old_input_node(IrNode): the old input node of the giving op_node.\n new_input_node(IrNode): the new input node of the giving op_node.\n op_node(IrOpNode): the operator node that is needed to update input's link.\n \"\"\"\n assert old_input_node.node in self.graph.nodes() and new_input_node.node in \\\n self.graph.nodes() and op_node.node in self.graph.nodes(), \\\n 'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.'\n old_input_node.remove_output(op_node)\n op_node.remove_input(old_input_node)\n new_input_node.append_output(op_node)\n op_node.append_input(new_input_node)\n op_node.rename_input(old_input_node.name(), new_input_node.name())\n\n def update_output_link(self, old_output_node, new_output_node, op_node):\n \"\"\"\n Update the output's link of an operator node.\n\n Args:\n old_output_node(IrNode): the old output node of the giving op_node.\n new_output_node(IrNode): the new output node of the giving op_node.\n op_node(IrOpNode): the operator node that is needed to update input's link.\n \"\"\"\n assert 
old_output_node.node in self.graph.nodes() and new_output_node.node in \\\n self.graph.nodes() and op_node.node in self.graph.nodes(), \\\n 'The three arguments(old_output_node &new_output_node &op_node) must be in the graph nodes.'\n old_output_node.remove_input(op_node)\n op_node.remove_output(old_output_node)\n new_output_node.append_input(op_node)\n op_node.append_output(new_output_node)\n op_node.rename_output(old_output_node.name(), new_output_node.name())\n\n def link_to(self, node_in, node_out):\n \"\"\"\n Connect two nodes.\n\n Args:\n node_in(IrNode): the input node.\n node_out(IrNode): the output node.\n \"\"\"\n assert node_in.node in self.graph.nodes() and node_out.node in self.graph.nodes(), \\\n 'The two arguments(node_in&node_out) must be in the graph nodes.'\n node_in.append_output(node_out)\n node_out.append_input(node_in)\n\n def safe_remove_nodes(self, remove_nodes):\n \"\"\"\n Remove nodes safely since links connected to these removed nodes are\n also removed.\n\n Args:\n remove_nodes(set): the nodes prepared to be removed.\n \"\"\"\n if not isinstance(remove_nodes, set):\n if isinstance(remove_nodes, Iterable):\n remove_nodes = set(remove_nodes)\n else:\n remove_nodes = {remove_nodes}\n original_nodes = {n.node for n in remove_nodes}\n core.graph_safe_remove_nodes(self.graph, original_nodes)\n\n def resolve_hazard(self):\n ordered_nodes = core.topology_sort(self.graph)\n var_nodes = dict()\n for node in ordered_nodes:\n if node.is_op() and node.op() is not None:\n for each_var_name in node.op().input_arg_names():\n if each_var_name not in var_nodes:\n var_nodes[each_var_name] = [\n self._find_node_by_name(node.inputs, each_var_name)\n ]\n for each_var_name in node.op().output_arg_names():\n if each_var_name not in var_nodes:\n var_nodes[each_var_name] = [\n self._find_node_by_name(node.outputs, each_var_name)\n ]\n else:\n var_nodes[each_var_name].append(\n self._find_node_by_name(node.outputs,\n each_var_name))\n self.graph.resolve_hazard(var_nodes)\n\n def has_circle(self):\n \"\"\"\n Check if the graph has a circle.\n\n Returns:\n bool: True if the graph has a circle else False.\n \"\"\"\n return core.has_circle(self.graph)\n\n def graph_num(self):\n \"\"\"\n Count the number of unconnected graphs in this graph.\n\n Returns:\n int: the number of unconnected graphs.\n \"\"\"\n return core.graph_num(self.graph)\n\n def topology_sort(self):\n \"\"\"\n Perform the topology sort operation on the graph.\n\n Notes: the `graph` can not contain a circle.\n\n Returns:\n list(IrNode): nodes in topology order.\n \"\"\"\n ordered_nodes = core.topology_sort(self.graph)\n return [IrNode(n) for n in ordered_nodes]\n\n def build_adjacency_list(self):\n \"\"\"\n Build an adjacency list of operations for the `graph`.\n\n Returns:\n dict{IrNode: set(IrNode)}: the adjacency list.\n \"\"\"\n adj_list = core.build_adjacency_list(self.graph)\n wrapped_adj_list = dict()\n for k, v in six.iteritems(adj_list):\n wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}\n return wrapped_adj_list\n\n def draw(self, save_path, name, marked_nodes=None, remove_ctr_var=True):\n \"\"\"\n Draw the graph. If `dot` command is installed, the drawn graph\n will be saved as pdf file type, otherwise dot file type is used.\n\n Args:\n save_path(str): the save path of drawn graph.\n name(str): the name of drawn graph.\n marked_nodes(set(IrNode)): nodes that are needed to be marked.\n Default value is None.\n remove_ctr_var(bool): If it is set True, all control variable nodes\n in the graph will be removed. 
Default value is True.\n \"\"\"\n\n def _convert_to_pdf(dot_file_path):\n pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf'\n exited_code = subprocess.call('dot -Tpdf ' + dot_file_path \\\n + ' -o ' + pdf_save_path, shell=True)\n if exited_code != 0:\n print('The dot command is needed for creating pdf files.')\n print('The {} is saved as the dot filetype.'.format(\n dot_file_path))\n\n remove_ctr_vars = set()\n if remove_ctr_var:\n for node in self.all_var_nodes():\n if node.is_ctrl_var():\n remove_ctr_vars.add(node)\n self.safe_remove_nodes(remove_ctr_vars)\n print('Total ops num = {}.'.format(len(self.all_op_nodes())))\n\n if marked_nodes is not None:\n if not isinstance(marked_nodes, set):\n if isinstance(marked_nodes, Iterable):\n marked_nodes = set(marked_nodes)\n else:\n marked_nodes = {marked_nodes}\n marked_nodes = {n.node for n in marked_nodes}\n remove_ctr_vars = {n.node for n in remove_ctr_vars}\n marked_nodes = marked_nodes - remove_ctr_vars\n if self.graph.has('__graphviz__marked_node__'):\n self.graph.erase('__graphviz__marked_node__')\n self.graph.set('__graphviz__marked_node__', marked_nodes)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n viz_dot_path = os.path.join(save_path, name) + '.dot'\n viz_pass = core.get_pass('graph_viz_pass')\n viz_pass.set('graph_viz_path', viz_dot_path)\n viz_pass.apply(self.graph)\n _convert_to_pdf(viz_dot_path)\n\n def to_program(self):\n \"\"\"\n Convert the graph into a Program.\n\n WARN: When the graph includes backward operator nodes, the\n conversion process may be failed. Usually, this function is\n only used to convert a test graph.\n\n Returns:\n Program: a program converted from the graph.\n \"\"\"\n convert_pass = core.get_pass('graph_to_program_pass')\n desc = core.ProgramDesc()\n convert_pass.set_not_owned('program', desc)\n convert_pass.apply(self.graph)\n program = Program._construct_from_desc(desc)\n return program\n\n def _find_node_by_name(self, nodes, node_name):\n \"\"\"\n Find a node in the giving nodes set by the name.\n \"\"\"\n target_node = None\n for n in nodes:\n if n.name() == node_name:\n target_node = n\n assert target_node is not None, \"Cannot find the target node in the giving set.\"\n return target_node\n\n def _update_desc_attr(self, desc, name, val):\n \"\"\"\n Update the value of desc's attribute by attribute's name.\n \"\"\"\n if isinstance(val, Block):\n desc.set_block_attr(name, val.desc)\n elif isinstance(val, list) and val and all(\n isinstance(v, Block) for v in val):\n desc.set_blocks_attr(name, [v.desc for v in val])\n elif isinstance(val, core.BlockDesc) or \\\n isinstance(val, core.ProgramDesc):\n desc.set_serialized_attr(name, val.serialize_to_string())\n else:\n desc._set_attr(name, val)\n\n\nclass Program(object):\n \"\"\"\n Create Python Program. It has at least one :ref:`api_guide_Block_en`, when the\n control flow op like conditional_block, while :ref:`api_fluid_layers_While` is included,\n it will contain nested block.\n\n Please reference the\n `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_\n for details.\n\n A set of Program usually contains startup program and main program.\n A startup program is set to contain some initial work, eg. 
initialize the ``Parameter``, and the main\n program will contain the network structure and vars for train.\n\n A set of Program can be used for test or train, in train program ,\n Paddle will contain all content to build a train network, in test\n program Paddle will prune some content which is irrelevant to test, eg.\n backward ops and vars.\n\n **Notes**:\n **we have** :ref:`api_fluid_default_startup_program` **and** :ref:`api_fluid_default_main_program`\n **by default, a pair of them will shared the parameters. The** :ref:`api_fluid_default_startup_program` **only run once to initialize parameters,**\n :ref:`api_fluid_default_main_program` **run in every mini batch and adjust the weights.**\n\n Returns:\n Program: An empty Program.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n main_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(main_program=main_program, startup_program=startup_program):\n x = fluid.layers.data(name=\"x\", shape=[-1, 784], dtype='float32')\n y = fluid.layers.data(name=\"y\", shape=[-1, 1], dtype='int32')\n z = fluid.layers.fc(name=\"fc\", input=x, size=10, act=\"relu\")\n\n print(\"main program is: {}\".format(main_program))\n print(\"start up program is: {}\".format(startup_program))\n\n \"\"\"\n\n def __init__(self):\n self.desc = core.ProgramDesc()\n self.blocks = [Block(self, 0)]\n self.current_block_idx = 0\n global global_prog_seed\n self._seed = global_prog_seed\n self._current_role = core.op_proto_and_checker_maker.OpRole.Forward\n self.__op_role_var = []\n\n # for distribute training\n # _is_distributed = True if under distributed training\n self._is_distributed = False\n # _is_chief = True if the trainer is the first one, usually No.0\n self._is_chief = False\n # _parameters_on_pservers records all the parameters distributed on parameter servers.\n self._parameters_on_pservers = None\n # _endpoints is a list about parameter servers ip:port, such as [\"ip:port\",\"ip:port\"]\n self._endpoints = []\n # if current role is parameter server, the _ps_endpoint is its \"ip:port\"\n self._ps_endpoint = None\n # trainers_endpoints, it is used for distribution.\n self._trainers_endpoints = []\n # the distributed lookup table names\n self._distributed_lookup_table = None\n\n # use Deep gradient comrepssion or not\n self._enable_dgc = False\n self._use_lamb = False\n\n self._nccl_comm_num = 1\n self._use_hierarchical_allreduce = False\n self._hierarchical_allreduce_inter_nranks = 0\n\n # if this program has been optimized by distributed optimizer\n # fleet_opt will be given a value\n self._fleet_opt = None\n self._program_config = None\n\n # assigned if this program has been parsed by a pipeline optimizer\n self._pipeline_opt = None\n\n # appending gradients times\n self._appending_grad_times = 0\n\n # identifier for auto checkpoint\n self._auto_checkpoint_name = unique_name.generate(\n \"__auto_checkpoint_program__\")\n\n # compiled program, i.e. Graph\n self._graph = None\n\n def global_seed(self, seed=0):\n \"\"\"\n Set global seed for Program\n\n Returns:\n None.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n print(prog.random_seed)\n ## 0\n ## the default random seed is 0\n\n prog.global_seed(102)\n prog1 = fluid.default_main_program()\n print(prog1.random_seed)\n ## 102\n ## the random seed is 102\n \"\"\"\n global global_prog_seed\n global_prog_seed = seed\n self._seed = global_prog_seed\n\n @property\n def _op_role(self):\n \"\"\"\n The operator role. In a enum {Forward, Backward, Optimize}.\n\n Notes: this is a low level API. It is used only for ParallelExecutor to\n duplicate or schedule operator to devices.\n\n For example, the forward operator should be executed on every device.\n The backward operator should be executed on every device and the\n parameter gradient of backward (use :code:`_op_role_var` to get this\n variable) operator should be merged to one device. The optimization\n operators should be executed on only one device and broadcast the\n optimization result, i.e., the new parameter, to every other device.\n \"\"\"\n return self._current_role\n\n @_op_role.setter\n def _op_role(self, role):\n self._current_role = role\n\n @property\n def _op_role_var(self):\n \"\"\"\n The auxiliary variables for :code:`_op_role` property.\n\n See Also: :code:`Program._op_role`'s documentation for details.\n\n Notes: This is a very low-level API. Users should not use it directly.\n \"\"\"\n return self.__op_role_var\n\n @signature_safe_contextmanager\n def _backward_role_guard(self):\n tmp_role = self._current_role\n\n OpRole = core.op_proto_and_checker_maker.OpRole\n self._current_role = OpRole.Backward\n try:\n yield\n finally:\n self._current_role = tmp_role\n\n @signature_safe_contextmanager\n def _optimized_guard(self, param_and_grads):\n \"\"\"\n A with guard to set :code:`Optimization` :code:`OpRole` and\n :code:`OpRoleVar` automatically.\n\n Notes: This is a very low level API. Users should not use it directly.\n\n Args:\n param_and_grads(list): The variables (names) to be optimized.\n\n Examples:\n\n >>> import paddle.fluid as fluid\n >>> p, g = backward(...)\n >>> with program._optimized_guard([p,g]):\n >>> p = p - 0.001 * g\n \"\"\"\n tmp_role = self._current_role\n tmp_var = self.__op_role_var\n\n OpRole = core.op_proto_and_checker_maker.OpRole\n self._current_role = OpRole.Optimize\n self.__op_role_var = [\n var.name if isinstance(var, Variable) else var\n for var in param_and_grads\n ]\n try:\n yield\n finally:\n self.__op_role_var = tmp_var\n self._current_role = tmp_role\n\n @signature_safe_contextmanager\n def _lr_schedule_guard(self, is_with_opt=False):\n \"\"\"\n A with guard to set :code:`LRSched` :code:`OpRole` and\n :code:`OpRoleVar` automatically. The :code:`OpRoleVar` is\n set to the target learning rate.\n\n Notes: This is a very low level API. Users should not use it directly.\n\n Args:\n is_with_opt: Only set to true if these ops a in the middle\n of a bunch of optimize ops so that it can be treated\n correctly. 
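(In that case the guard sets the role to the bitwise union ``int(OpRole.LRSched) | int(OpRole.Optimize)``, as the body below does, so interleaved optimize ops keep both roles.) 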
For example, sgd->lr_op->sgd->lr_op->sgd.\n\n Examples:\n\n >>> import paddle.fluid as fluid\n >>> p, g = backward(...)\n >>> with program.lr_schedule_guard():\n >>> lr = lr * decay\n \"\"\"\n\n tmp_role = self._current_role\n tmp_var = self.__op_role_var\n\n OpRole = core.op_proto_and_checker_maker.OpRole\n self._current_role = OpRole.LRSched\n if is_with_opt:\n self._current_role = int(OpRole.LRSched) | int(OpRole.Optimize)\n # TODO(typhoonzero): how to set target learning rate var\n self.__op_role_var = []\n try:\n yield\n finally:\n self.__op_role_var = tmp_var\n self._current_role = tmp_role\n\n def __str__(self):\n \"\"\"\n Get the protobuf debug string of this Program.\n\n Returns:\n (str): The protobuf debug string.\n\n Raises:\n ValueError: If any of the required fields is not set.\n \"\"\"\n return self._to_readable_code()\n\n def _to_readable_code(self, skip_op_callstack=True):\n \"\"\"\n Get the readable debug string of this Program.\n\n .. note::\n If you want to get the debug string in protobuf format,\n please use the :code:`to_string` method.\n\n Args:\n skip_op_callstack(bool): whether to skip parsing Operator's attribute\n op_callstack, default value is True\n\n Returns:\n string: The formatted Program string.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_var = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n new_op = cur_block.append_op(type=\"abs\",\n inputs={\"X\": [new_var]},\n outputs={\"Out\": [new_var]})\n print(cur_program._to_readable_code())\n \"\"\"\n assert isinstance(\n skip_op_callstack, bool\n ), \"The type of skip_op_callstack parameter is wrong, expected bool, but received {}.\".format(\n type(skip_op_callstack))\n program_str = \"\"\n for block in self.blocks:\n program_str += block._to_readable_code(skip_op_callstack)\n program_str += '\\n'\n return program_str\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n To debug string.\n\n Args:\n\n throw_on_error (bool): raise ValueError when any of the required fields is not set.\n\n with_details (bool): True if more details about variables and parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need to be printed.\n\n Returns:\n str: The debug string describing the current Program.\n\n Raises:\n ValueError: If any of the required fields is not set and throw_on_error is True.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n x = fluid.layers.data(name=\"X\", shape=[2,3], dtype=\"float32\", append_batch_size=False)\n pred = fluid.layers.fc(x, size=3)\n prog_string = prog.to_string(throw_on_error=True, with_details=False)\n prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True)\n print(\"program string without detail: {}\".format(prog_string))\n print(\"program string with detail: {}\".format(prog_string_with_details))\n \"\"\"\n assert isinstance(\n throw_on_error, bool\n ), \"The type of throw_on_error parameter is wrong, expected bool, but received {}.\".format(\n type(throw_on_error))\n assert isinstance(\n with_details, bool\n ), \"The type of with_details parameter is wrong, expected bool, but received {}.\".format(\n type(with_details))\n\n if with_details:\n res_str = \"\"\n for block in self.blocks:\n res_str += block.to_string(throw_on_error, with_details)\n else:\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.ProgramDesc.FromString(\n six.binary_type(protostr))\n res_str = _debug_string_(proto, throw_on_error)\n return res_str\n\n def _get_desc(self):\n \"\"\"\n Get the C++ side of the `ProgramDesc` object pointer. The C++ object is\n exposed by :code:`pybind`.\n\n Notes: This is a very low level API. Users should not use this API\n directly.\n \"\"\"\n return self.desc\n\n def _version(self):\n return self.desc._version()\n\n def clone(self, for_test=False):\n \"\"\"\n **Notes**:\n **1.** :code:`Program.clone()` **method DOES NOT clone** :ref:`api_fluid_io_DataLoader` .\n\n **2. We recommend you to use** :code:`clone` **before using** :code:`Optimizer.minimize`.\n\n **3. This API has no effect in Dygraph Mode**\n\n Create a new Program with the forward content of the original one when ``for_test=True``.\n Create a new Program that is identical to the original one when ``for_test=False``.\n\n Some operators, e.g., :ref:`api_fluid_layers_batch_norm` , behave differently between\n training and testing. They have an attribute, :code:`is_test`, to\n control this behaviour. This method will change the :code:`is_test`\n attribute of them to :code:`True` when :code:`for_test=True`.\n\n * Set for_test to False when you want to clone the program for training.\n * Set for_test to True when you want to clone the program for testing.\n We will prune the backward and optimize part of the program when you\n use :code:`clone` after :code:`Optimizer.minimize`, but we still\n recommend you to use :code:`clone` before using :code:`Optimizer.minimize`.\n\n For Example:\n ::\n\n import paddle.fluid as fluid\n img = fluid.layers.data(name='image', shape=[784])\n pred = fluid.layers.fc(input=img, size=10, act='relu')\n loss = fluid.layers.mean(pred)\n # Here we use clone before Momentum\n test_program = fluid.default_main_program().clone(for_test=True)\n optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)\n optimizer.minimize(loss)\n\n Args:\n\n for_test (bool): True if the :code:`is_test` attribute of operators should be changed to :code:`True`\n and the backward and optimize parts of the program should be pruned. The default value is :code:`False` .\n\n Returns:\n Program: A new Program with the forward content of the original one when ``for_test=True``. A new Program identical to the original one when ``for_test=False``.\n\n\n Examples:\n\n **Notes: The Program's order may be different after** :code:`clone` **and\n this will not affect your training or testing progress. 
In the following\n example we give you an simple method** :code:`print_prog(program)` **to\n print Program Descs inorder to make sure you have same print result\n after** :code:`clone`:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import six\n\n def print_prog(prog):\n for name, value in sorted(six.iteritems(prog.block(0).vars)):\n print(value)\n for op in prog.block(0).ops:\n print(\"op type is {}\".format(op.type))\n print(\"op inputs are {}\".format(op.input_arg_names))\n print(\"op outputs are {}\".format(op.output_arg_names))\n for key, value in sorted(six.iteritems(op.all_attrs())):\n if key not in ['op_callstack', 'op_role_var']:\n print(\" [ attrs: {}: {} ]\".format(key, value))\n\n\n 1. To clone a test program, the sample code is:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import six\n\n def print_prog(prog):\n for name, value in sorted(six.iteritems(prog.block(0).vars)):\n print(value)\n for op in prog.block(0).ops:\n print(\"op type is {}\".format(op.type))\n print(\"op inputs are {}\".format(op.input_arg_names))\n print(\"op outputs are {}\".format(op.output_arg_names))\n for key, value in sorted(six.iteritems(op.all_attrs())):\n if key not in ['op_callstack', 'op_role_var']:\n print(\" [ attrs: {}: {} ]\".format(key, value))\n\n train_program = fluid.Program()\n startup_program = fluid.Program()\n\n # startup_program is used to do some parameter init work,\n # and main program is used to hold the network\n with fluid.program_guard(train_program, startup_program):\n with fluid.unique_name.guard():\n img = fluid.layers.data(name='image', shape=[784])\n hidden = fluid.layers.fc(input=img, size=200, act='relu')\n hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)\n loss = fluid.layers.cross_entropy(\n input=fluid.layers.fc(hidden, size=10, act='softmax'),\n label=fluid.layers.data(name='label', shape=[1], dtype='int64'))\n avg_loss = fluid.layers.mean(loss)\n test_program = train_program.clone(for_test=True)\n print_prog(test_program)\n\n # Due to parameter sharing usage for train and test, so we need to use startup program of train\n # instead of using test startup program, while nothing is in test's startup program\n\n # In Paddle Fluid we will share weights by using the same Variable name. In train and test program\n # all parameters will have the same name and this can make train and test program sharing parameters,\n # that's why we need to use startup program of train. And for startup program of test, it has nothing,\n # since it is a new program.\n\n with fluid.program_guard(train_program, startup_program):\n with fluid.unique_name.guard():\n sgd = fluid.optimizer.SGD(learning_rate=1e-3)\n sgd.minimize(avg_loss)\n\n\n 2. The clone method can be avoid if you create program for training and program for testing individually.\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import six\n\n def print_prog(prog):\n for name, value in sorted(six.iteritems(prog.block(0).vars)):\n print(value)\n for op in prog.block(0).ops:\n print(\"op type is {}\".format(op.type))\n print(\"op inputs are {}\".format(op.input_arg_names))\n print(\"op outputs are {}\".format(op.output_arg_names))\n for key, value in sorted(six.iteritems(op.all_attrs())):\n if key not in ['op_callstack', 'op_role_var']:\n print(\" [ attrs: {}: {} ]\".format(key, value))\n \n def network():\n img = fluid.layers.data(name='image', shape=[784])\n hidden = fluid.layers.fc(input=img, size=200, act='relu')\n hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)\n loss = fluid.layers.cross_entropy(\n input=fluid.layers.fc(hidden, size=10, act='softmax'),\n label=fluid.layers.data(name='label', shape=[1], dtype='int64'))\n avg_loss = fluid.layers.mean(loss)\n return avg_loss\n\n train_program_2 = fluid.Program()\n startup_program_2 = fluid.Program()\n test_program_2 = fluid.Program()\n with fluid.program_guard(train_program_2, startup_program_2):\n with fluid.unique_name.guard():\n avg_loss = network()\n sgd = fluid.optimizer.SGD(learning_rate=1e-3)\n sgd.minimize(avg_loss)\n # the test startup program is not used.\n with fluid.program_guard(test_program_2, startup_program_2):\n with fluid.unique_name.guard():\n avg_loss = network()\n print_prog(test_program_2)\n\n The two code snippets above will generate and print same programs.\n \"\"\"\n\n #NOTE(zhiqiu): we sync the original program first, since its program may diff with\n # its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.\n self._sync_with_cpp()\n\n pruned_origin_block_id_map = None\n if for_test:\n forward_prog = Program()\n forward_prog.desc, pruned_origin_block_id_map = core.prune_backward(\n self.desc)\n forward_prog.blocks = [\n Block(forward_prog, i)\n for i in six.moves.range(forward_prog.desc.num_blocks())\n ]\n forward_prog._sync_with_cpp()\n p = forward_prog._inference_optimize(prune_read_op=False)\n else:\n p = Program()\n p.current_block_idx = self.current_block_idx\n p._seed = self._seed\n p.desc = core.ProgramDesc(self.desc)\n p.blocks = [\n Block(p, i) for i in six.moves.range(self.desc.num_blocks())\n ]\n\n p._current_role = self._current_role\n p.__op_role_var = self.__op_role_var\n p._appending_grad_times = self._appending_grad_times\n if hasattr(self, 'lr_sheduler'):\n p.lr_sheduler = self.lr_sheduler\n\n #NOTE(zhiqiu): we sync the cloned program, to update its program by\n # its desc.\n p._sync_with_cpp()\n\n p._copy_param_info_from(self)\n p._copy_data_info_from(self, pruned_origin_block_id_map)\n p._copy_dist_param_info_from(self)\n return p\n\n def _prune(self, targets):\n \"\"\"\n Prune operators and variables which are not needed to generate\n :code:`targets`.\n\n Notes: This is a very low level API. Users should not use this API\n directly. This API is in flux and not stable.\n\n Args:\n targets(list|Variable|Operator): A list of variables, operators, or variable names\n need to be pruned\n\n Returns:\n Program: A new, pruned program.\n \"\"\"\n return self._prune_with_input([], targets)\n\n def _prune_with_input(self, feeded_var_names, targets):\n \"\"\"\n Prune operators and variables which are not needed to generate\n :code:`targets`. Prune operators and variables which are needed \n to generate feeded_var \n\n Notes: This is a very low level API. Users should not use this API\n directly. 
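(As an aside, the supported entry point into this pruning machinery is the public ``clone`` above; a minimal sketch:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n main = fluid.default_main_program()\n # for_test=True prunes backward ops via core.prune_backward\n test_prog = main.clone(for_test=True)\n\n whereas ``_prune_with_input`` itself is internal.) 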
This API is in flux and not stable.\n\n Args:\n feeded_var_names(list|str): A list of variable names from where\n pruning start. If it is set as [], this API works just like _prune()\n targets(list|Variable|Operator): A list of variables, operators, or variable names\n need to be pruned\n\n Returns:\n Program: A new, pruned program.\n \"\"\"\n\n #NOTE(zhiqiu): we sync the original program first, since its program may diff with\n # its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.\n self._sync_with_cpp()\n\n if not isinstance(feeded_var_names, list):\n feeded_var_names = [feeded_var_names]\n if not isinstance(targets, list):\n targets = [targets]\n\n for var in feeded_var_names:\n if not isinstance(var, six.string_types):\n raise ValueError(\n \"All feeded_var_names of Program._prune_with_input() can only be \"\n \"str, but received %s.\" % type(var))\n\n targets_idx = []\n for t in targets:\n if not isinstance(t, Operator):\n if isinstance(t, Variable):\n name = t.name\n elif isinstance(t, six.string_types):\n name = str(t)\n else:\n raise ValueError(\n \"All targets of Program._prune_with_input() can only be \"\n \"Variable or Operator, but received %s.\" % type(t))\n\n # NOTEZ(zhiqiu): For variable to be fed in fetch_list, there two cases:\n # (1) the variable is leaf, it has no op that generates it;\n # (2) the variable is not leaf, and we need to prune the op that generates it.\n # In both cases, wo can just skip target_op of that it.\n if name in feeded_var_names:\n continue\n\n # After transpiler processing, the op that output this\n # variable maybe has been changed, so t.op is not reliable\n # and we need to find the current op that generate this\n # variable here.\n target_op = None\n global_block = self.global_block()\n for idx, op in enumerate(global_block.ops):\n if name in op.output_arg_names:\n # NOTE(zhiqiu): Find op that generate target name.\n # Skip optimize op except for optimize op in targets, \n # since optimize op generates parameters.\n if op._is_optimize_op() and op not in targets:\n continue\n else:\n target_op = op\n break\n if target_op is None:\n raise ValueError(\n \"The target variable used for pruning should have an \"\n \"associated operator that generates it.\")\n else:\n targets_idx.append([target_op.block.idx, target_op.idx])\n else:\n targets_idx.append([t.block.idx, t.idx])\n\n res = Program()\n res.desc, pruned_origin_block_id_map = core.prune(self.desc,\n set(feeded_var_names),\n targets_idx)\n res.blocks = [\n Block(res, i) for i in six.moves.range(res.desc.num_blocks())\n ]\n res._sync_with_cpp()\n\n res._copy_param_info_from(self)\n res._copy_data_info_from(self, pruned_origin_block_id_map)\n res._copy_dist_param_info_from(self)\n\n return res\n\n def _inference_optimize(self, prune_read_op=True):\n \"\"\"\n This method will create a new program and do following adjustments on it:\n 1. Remove all reader variables and their creator ops if exist.\n\n 2. Remove the :code:`read_op` if exists.\n\n 3. change the :code:`is_test`\n attribute of operators to :code:`True`. All the :code:`Parameter`\n information will be lost.\n\n Args:\n prune_read_op(bool): remove the read ops that are added by py_reader\n for cpp inference library\n\n Notes: This API is a very low level API. 
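A minimal sketch of the public path, which reaches this method internally when ``for_test=True``:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n inference_prog = fluid.default_main_program().clone(for_test=True)\n\n 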
Use\n :code:`Program.clone(for_test=True)` instead.\n\n Returns:\n Program: The new program.\n \"\"\"\n res = Program()\n res.desc = core.ProgramDesc(self.desc)\n\n # remove all readers and the read_op if exist\n read_op_idx = 0\n root_block = res.desc.block(0)\n if prune_read_op:\n while True:\n if read_op_idx >= root_block.op_size() or root_block.op(\n read_op_idx).type() == 'read':\n break\n read_op_idx += 1\n if read_op_idx < root_block.op_size():\n root_block._remove_op(0, read_op_idx + 1)\n for var in root_block.all_vars():\n if var.type() == core.VarDesc.VarType.READER:\n root_block._remove_var(cpt.to_bytes(var.name()))\n\n # change all `is_test` attributes to True\n for i in six.moves.range(res.desc.num_blocks()):\n block = res.desc.block(i)\n for j in six.moves.range(block.op_size()):\n op = block.op(j)\n if op.has_attr('is_test'):\n op._set_attr('is_test', True)\n res.blocks = [\n Block(res, i) for i in six.moves.range(res.desc.num_blocks())\n ]\n res._sync_with_cpp()\n return res\n\n @staticmethod\n def parse_from_string(binary_str):\n \"\"\"\n **Notes**:\n **1. All information about parameters will be lost after serialization**\n\n **2. This API has no effect in Dygraph mode**\n\n Deserialize a Program from `protobuf <https://en.wikipedia.org/wiki/Protocol_Buffers>`_ binary string.\n This method always use to save and load model\n\n Args:\n\n binary_str_type (str): the binary prootbuf string.\n\n Returns:\n Program: A deserialized Program.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n startup_prog = fluid.Program()\n main_prog = fluid.Program()\n with fluid.program_guard(startup_prog, main_prog):\n x = fluid.layers.data(\n name='X', shape=[1000, 784], dtype='float32', append_batch_size=False)\n\n y = fluid.layers.data(\n name='Y', shape=[784, 100], dtype='float32', append_batch_size=False)\n\n z = fluid.layers.mul(x=x, y=y)\n\n binary_str = fluid.default_main_program().desc.serialize_to_string()\n prog_restored = fluid.default_main_program().parse_from_string(binary_str)\n\n print(fluid.default_main_program())\n print(prog_restored)\n \"\"\"\n p = Program()\n p.desc = core.ProgramDesc(binary_str)\n p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]\n p._sync_with_cpp()\n return p\n\n @staticmethod\n def _construct_from_desc(desc):\n \"\"\"\n Construct a program from program desc.\n\n Args:\n desc(core.ProgramDesc): The program desc for constructing.\n\n Returns:\n Program: A program.\n \"\"\"\n p = Program()\n p.desc = desc\n p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]\n p._sync_with_cpp()\n return p\n\n @property\n def random_seed(self):\n \"\"\"\n The default random seed for random operators in Program. ``0`` means get\n the random seed from random device.\n\n **Notes: It must be set before the operators have been added.**\n\n Returns:\n int64: Random seed in current Program\n\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n random_seed = prog.random_seed\n x_var = fluid.layers.data(name=\"X\", shape=[3,3], dtype=\"float32\", append_batch_size=False)\n print(random_seed)\n ## 0\n ## the default random seed is 0\n\n # Here we need to set random seed before we use fluid.layers.dropout\n prog.random_seed = 1\n z_var = fluid.layers.dropout(x_var, 0.7)\n\n print(prog.random_seed)\n ## 1\n ## the random seed is change to 1\n \"\"\"\n return self._seed\n\n @property\n def num_blocks(self):\n \"\"\"\n The number of :ref:`api_guide_Block_en` in this Program.\n\n **Notes: This API has no effect in Dygraph mode**\n\n Returns:\n int(Platform-dependent size): num of :ref:`api_guide_Block_en` in current Program\n\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n num_blocks = prog.num_blocks\n print(num_blocks)\n\n\n \"\"\"\n return self.desc.num_blocks()\n\n @random_seed.setter\n def random_seed(self, seed):\n if not isinstance(seed, int):\n raise ValueError(\n \"Program.random_seed's input seed must be an integer, but received %s.\"\n % type(seed))\n self._seed = seed\n\n def __repr__(self):\n return self.__str__()\n\n def global_block(self):\n \"\"\"\n **Notes**:\n **This API has no effect in Dygraph mode**\n\n Get the first :ref:`api_guide_Block_en` of this Program.\n\n Returns:\n :ref:`api_guide_Block_en`: The first :ref:`api_guide_Block_en` of this Program.\n\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n gb_block = prog.global_block()\n print(gb_block)\n\n \"\"\"\n return self.blocks[0]\n\n def block(self, index):\n \"\"\"\n **Notes**:\n **This API has no effect in Dygraph mode**\n\n Get the :code:`index` :ref:`api_guide_Block_en` of this Program\n\n Args:\n index (int) - The index of :ref:`api_guide_Block_en` to get\n\n Returns:\n :ref:`api_guide_Block_en`: The :code:`index` block\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n block_0 = prog.block(0)\n print(block_0)\n \"\"\"\n return self.blocks[index]\n\n def current_block(self):\n \"\"\"\n **Notes**:\n **This API has no effect in Dygraph mode**\n\n Get the current :ref:`api_guide_Block_en` . The :code:`current` :ref:`api_guide_Block_en`\n is the :ref:`api_guide_Block_en` to append operators.\n\n Returns:\n :ref:`api_guide_Block_en`: The :code:`index` :ref:`api_guide_Block_en`\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n current_blk = prog.current_block()\n print(current_blk)\n \"\"\"\n return self.blocks[self.current_block_idx]\n\n def _create_block(self, parent_idx=None):\n \"\"\"\n Create a new block with the :code:`parent_idx` and change the current block\n to new block.\n\n Args:\n\n parent_idx(int): The parent block index.\n\n Returns:\n Block: The new block.\n \"\"\"\n new_block_idx = len(self.blocks)\n parent = self.current_block() if parent_idx is None else self.block(\n parent_idx)\n self.desc.append_block(parent.desc)\n self.current_block_idx = new_block_idx\n self.blocks.append(Block(self, self.current_block_idx))\n return self.current_block()\n\n def _rollback(self):\n \"\"\"\n Exit a code block, i.e., roll back to the parent block.\n Returns:\n None\n \"\"\"\n self.current_block_idx = self.current_block().parent_idx\n\n def _sync_with_cpp(self):\n \"\"\"\n Synchronize Python instance to its binding C++ object instance.\n If the program is modified in C++ space, this method should be invoked.\n\n Notes: This is a very low level API. Users should not invoke it\n directly.\n\n Returns:\n None\n \"\"\"\n for block_idx in range(len(self.blocks), self.desc.num_blocks()):\n self.blocks.append(Block(self, block_idx))\n for block in self.blocks:\n block._sync_with_cpp()\n\n def _copy_param_info_from(self, other):\n \"\"\"\n Copy the information of parameters from other program.\n\n Notes: This is a very low level API. Users should not invoke it\n directly.\n\n Args:\n other(Program): Other program\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Program):\n raise TypeError(\n \"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s\"\n % type(other))\n\n self.global_block()._copy_param_info_from(other.global_block())\n\n def _copy_dist_param_info_from(self, other):\n \"\"\"\n Copy the information of distributed information from other program.\n\n Args:\n other(Program): Other program\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Program):\n raise TypeError(\n \"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s\"\n % type(other))\n self._is_distributed = other._is_distributed\n self._is_chief = other._is_chief\n self._parameters_on_pservers = other._parameters_on_pservers\n self._endpoints = other._endpoints\n self._ps_endpoint = other._ps_endpoint\n self._distributed_lookup_table = other._distributed_lookup_table\n\n def _copy_data_info_from(self, other, pruned_origin_block_id_map=None):\n \"\"\"\n Copy the information of data variables from other program.\n\n Notes: This is a very low level API. Users should not invoke it\n directly.\n\n Args:\n other(Program): Other program\n pruned_origin_block_id_map(dict{int:int}): A dict which maps the block id in program\n self to the block id in program other. For example, {0:0, 1:1, 2:3} means block 0 in self is \n cloned from block 0 in other, etc. 
Default is None, which means default mapped, \n {0:0, 1:1,..., n:n}.\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Program):\n raise TypeError(\n \"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s\"\n % type(other))\n\n if not pruned_origin_block_id_map:\n pruned_origin_block_id_map = {\n i: i\n for i in six.moves.range(self.desc.num_blocks())\n }\n\n # NOTE(zhiqiu): All vars in cloned program exist in original program.\n # The reverse is not true, due to backward pruning.\n for i, block in enumerate(self.blocks):\n other_block = other.blocks[pruned_origin_block_id_map[i]]\n for var in list(block.vars.values()):\n other_var = other_block.var(var.name)\n if other_var.is_data:\n var.is_data = True\n if other_var.desc.need_check_feed():\n var.desc.set_need_check_feed(True)\n if other_var.stop_gradient:\n var.stop_gradient = True\n\n def list_vars(self):\n \"\"\"\n Get all :ref:`api_guide_Variable_en` from this Program. A iterable object is returned.\n\n Returns:\n iterable :ref:`api_guide_Variable_en`: The Generator will yield every variable in this program.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n img = fluid.layers.data(name='img', shape=[1,28,28], dtype='float32')\n label = fluid.layers.data(name='label', shape=[128,1], dtype='int64')\n for var in prog.list_vars():\n print(var)\n \"\"\"\n for each_block in self.blocks:\n for each_var in list(each_block.vars.values()):\n yield each_var\n\n def all_parameters(self):\n \"\"\"\n Get all :ref:`api_guide_parameter_en` from this Program. A list object is returned.\n\n Returns:\n list[ :ref:`api_guide_parameter_en` ]: The list contians all parameters in this program.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n program = fluid.default_main_program()\n data = fluid.data(name='x', shape=[None, 13], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n loss = fluid.layers.mean(hidden)\n fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)\n\n for param in program.all_parameters():\n print(param)\n\n # Here will print all parameters in current program, in this example,\n # the result is like:\n #\n # name: \"fc_0.w_0\"\n # type {\n # type: LOD_TENSOR\n # lod_tensor {\n # tensor {\n # data_type: FP32\n # dims: 13\n # dims: 10\n # }\n # }\n # }\n # persistable: true\n #\n # name: \"fc_0.b_0\"\n # type {\n # type: LOD_TENSOR\n # lod_tensor {\n # tensor {\n # data_type: FP32\n # dims: 10\n # }\n # }\n # }\n # persistable: true\n #\n # Here print(param) will print out all the properties of a parameter,\n # including name, type and persistable, you can access to specific\n # property of a parameter, such as param.name, param.type\n \"\"\"\n parameters = []\n for each_block in self.blocks:\n parameters.extend(each_block.all_parameters())\n return parameters\n\n\[email protected]_metaclass(ParameterMetaClass)\nclass Parameter(Variable):\n \"\"\"\n Parameter is derived from Variable. 
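(A minimal sketch of where parameters come from; layer functions create them for you rather than requiring direct construction:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n x = fluid.data(name='x', shape=[None, 13], dtype='float32')\n y = fluid.layers.fc(input=x, size=10) # creates fc_0.w_0 and fc_0.b_0\n for p in fluid.default_main_program().all_parameters():\n print(p.name, p.trainable)\n\n ) 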
A parameter is a persistable\n Variable, and will be updated by optimizers after each iteration.\n The training of a neural network is essentially the updating of\n its parameters.\n\n Relative to a general Variable, a Parameter has several member\n variables of its own:\n\n Args:\n trainable(bool): True if the parameter needs to be updated after\n each iteration.\n optimize_attr(map): Parameter attributes related to optimizing.\n Currently, it only contains 'learning_rate'.\n Default: {'learning_rate': 1.0}\n regularizer(WeightDecayRegularizer): The Regularizer which will\n be applied to the parameter. Default: None\n do_model_average(bool): True if the model average strategy will\n be applied to this parameter.\n \"\"\"\n\n def __init__(self,\n block,\n shape,\n dtype,\n type=core.VarDesc.VarType.LOD_TENSOR,\n **kwargs):\n if shape is None:\n raise ValueError(\"The shape of Parameter should not be None\")\n if dtype is None:\n raise ValueError(\"The dtype of Parameter should not be None\")\n\n if len(shape) == 0:\n raise ValueError(\n \"The dimensions of shape for Parameter must be greater than 0\")\n\n for each in shape:\n if each < 0:\n raise ValueError(\n \"Each dimension of shape for Parameter must be greater than 0, but received %s\"\n % list(shape))\n\n Variable.__init__(\n self,\n block,\n persistable=True,\n shape=shape,\n dtype=dtype,\n type=type,\n **kwargs)\n self.trainable = kwargs.get('trainable', True)\n\n self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})\n\n self.regularizer = kwargs.get('regularizer', None)\n\n self.do_model_average = kwargs.get('do_model_average', None)\n\n self.is_distributed = False\n\n def __str__(self):\n return self._to_readable_code()\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n To debug string.\n\n Args:\n throw_on_error(bool): raise an exception if self is not initialized\n and throw_on_error is True\n with_details(bool): more details about variables and parameters\n (e.g. trainable, optimize_attr, ...) will be printed when with_details is True\n\n Returns(str): The debug string.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n prog = fluid.default_main_program()\n rlt = fluid.layers.data(\"fake_data\", shape=[1,1], dtype='float32')\n debug_str = prog.to_string(throw_on_error=True, with_details=False)\n print(debug_str)\n \"\"\"\n assert isinstance(throw_on_error, bool) and isinstance(with_details,\n bool)\n if with_details:\n res_str = Variable.to_string(self, throw_on_error, True)\n additional_attr = (\"trainable\", \"optimize_attr\", \"regularizer\",\n \"do_model_average\")\n for attr_name in additional_attr:\n res_str += \"%s: %s\\n\" % (attr_name,\n cpt.to_text(getattr(self, attr_name)))\n else:\n res_str = Variable.to_string(self, throw_on_error, False)\n return res_str\n\n __repr__ = __str__\n\n\nclass ParamBase(core.VarBase):\n \"\"\"\n ParamBase is derived from Tensor (which is the concept in Dygraph Mode). 
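(A minimal dygraph sketch, reusing the ``paddle.nn.Conv2D`` example style that appears later in this class:\n\n .. code-block:: python\n\n import paddle\n\n paddle.disable_static()\n conv = paddle.nn.Conv2D(3, 3, 5)\n print(conv.weight.trainable) # True by default\n conv.weight.trainable = False # flips stop_gradient via the setter below\n\n )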
\n A ParamBase is a persistable Tensor, and will be updated by optimizers \n after each iteration.\n The training of a neural network is essentially the updating of\n its ParamBase.\n\n Relative to a general Tensor, a ParamBase has several its own\n member variables:\n\n Args:\n trainable(bool): True if the ParamBase need to be updated after\n iterations.\n optimize_attr(map): ParamBase attributes related with optimizing.\n Currently, it only contains 'learning_rate'.\n Default: {'learning_rate': 1.0}\n regularizer(WeightDecayRegularizer): The Regularizer which will\n be applied on the ParamBase. Default: None\n do_model_average(bool): True if the model average strategy will\n be applied on this ParamBase.\n \"\"\"\n\n @dygraph_only\n def __init__(self, shape, dtype, **kwargs):\n if shape is None:\n raise ValueError(\"The shape of Parameter should not be None\")\n if dtype is None:\n raise ValueError(\"The dtype of Parameter should not be None\")\n\n if len(shape) == 0:\n raise ValueError(\n \"The dimensions of shape for Parameter must be greater than 0\")\n\n for each in shape:\n if each < 0:\n raise ValueError(\n \"Each dimension of shape for Parameter must be greater than 0, but received %s\"\n % list(shape))\n\n if dtype is not None:\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n name = kwargs.get('name', unique_name.generate('_param_base'))\n\n super(ParamBase, self).__init__(dtype\n if dtype else core.VarDesc.VarType.FP32,\n list(shape) if shape else [], name,\n core.VarDesc.VarType.LOD_TENSOR, True)\n\n trainable = kwargs.get('trainable', True)\n self.stop_gradient = not trainable\n\n self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})\n\n self.regularizer = kwargs.get('regularizer', None)\n\n self.do_model_average = kwargs.get('do_model_average', None)\n\n self.is_distributed = False\n # self.block = default_main_program().global_block()\n\n @property\n def trainable(self):\n return not self.stop_gradient\n\n @trainable.setter\n def trainable(self, trainable):\n if isinstance(trainable, bool):\n self.stop_gradient = not trainable\n else:\n raise ValueError(\n \"The type of trainable MUST be bool, but the type is \",\n type(trainable))\n\n def __str__(self):\n \"\"\"\n Convert a ParamBase object to a readable string.\n\n Returns(str): A readable string.\n\n Examples:\n .. code-block:: python\n\n import paddle\n paddle.disable_static()\n conv = paddle.nn.Conv2D(3, 3, 5)\n print(conv.weight)\n # Parameter: conv2d_0.w_0\n # - place: CUDAPlace(0)\n # - shape: [3, 3, 5, 5]\n # - layout: NCHW\n # - dtype: float\n # - data: [...] \n paddle.enable_static()\n \"\"\"\n return \"Parameter containing:\\n {}\\n - stop_gradient: {}\".format(\n super(ParamBase, self).__str__(), self.stop_gradient)\n\n __repr__ = __str__\n\n\n# program is a global instance.\n_main_program_ = Program()\n_startup_program_ = Program()\n\n\ndef default_startup_program():\n \"\"\"\n Get default/global startup program.\n\n The layer function in :ref:`api_fluid_layers` will create parameters, :ref:`api_paddle_data_reader_reader` ,\n `NCCL <https://developer.nvidia.com/nccl>`_ handles as global variables. The :code:`startup_program` will\n initialize them by the OPs in startup :ref:`api_fluid_Program` . The :ref:`api_fluid_layers` function will\n append these initialization operators into startup program.\n\n This method will return the :code:`default` or the :code:`current` startup\n program. 
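(A minimal sketch of the split: layer calls append initialization ops to the startup program and compute ops to the main program.\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n x = fluid.data(name='x', shape=[None, 4], dtype='float32')\n y = fluid.layers.fc(input=x, size=2)\n # the fc parameters' initializer ops live here\n print(fluid.default_startup_program())\n\n ) 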
Users can use :ref:`api_fluid_program_guard` to switch :ref:`api_fluid_Program` .\n\n Returns: current default startup :ref:`api_fluid_Program`\n\n Returns type: :ref:`api_fluid_Program`\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n main_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(main_program=main_program, startup_program=startup_program):\n x = fluid.layers.data(name=\"x\", shape=[-1, 784], dtype='float32')\n y = fluid.layers.data(name=\"y\", shape=[-1, 1], dtype='int32')\n z = fluid.layers.fc(name=\"fc\", input=x, size=10, act=\"relu\")\n\n print(\"main program is: {}\".format(fluid.default_main_program()))\n print(\"start up program is: {}\".format(fluid.default_startup_program()))\n \"\"\"\n return _startup_program_\n\n\ndef default_main_program():\n \"\"\"\n This API can be used to get ``default main program`` which store the \n descriptions of ``op`` and ``variable``.\n \n For example ``z = fluid.layers.elementwise_add(x, y)`` will create a new ``elementwise_add`` \n ``op`` and a new ``z`` ``variable``, and they will be recorded in ``default main program`` \n\n The ``default_main_program`` is the default value for ``Program`` parameter in \n a lot of ``fluid`` APIs. For example, the :code:`Executor.run()` will execute the\n :code:`default_main_program` when the program is not specified.\n\n If you want to replace the ``default main program``, you can use :ref:`api_fluid_program_guard`\n \n Returns:\n :ref:`api_fluid_Program`: a ``Program`` which holding the descriptions of ops and variables in the network.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n # Sample Network:\n data = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n \n conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None)\n bn1 = fluid.layers.batch_norm(conv1, act='relu')\n pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2)\n conv2 = fluid.layers.conv2d(pool1, 16, 5, 1, act=None)\n bn2 = fluid.layers.batch_norm(conv2, act='relu')\n pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2)\n \n fc1 = fluid.layers.fc(pool2, size=50, act='relu')\n fc2 = fluid.layers.fc(fc1, size=102, act='softmax')\n \n loss = fluid.layers.cross_entropy(input=fc2, label=label)\n loss = fluid.layers.mean(loss)\n opt = fluid.optimizer.Momentum(\n learning_rate=0.1,\n momentum=0.9,\n regularization=fluid.regularizer.L2Decay(1e-4))\n opt.minimize(loss)\n \n #print the number of blocks in the program, 1 in this case\n print(fluid.default_main_program().num_blocks)\n\n #print the description of variable 'image'\n print(fluid.default_main_program().blocks[0].var('image'))\n\n \"\"\"\n return _main_program_\n\n\ndef switch_main_program(program):\n \"\"\"\n Switch the main program to a new program.\n\n Args:\n program(Program): The new main program\n\n Returns:\n Program: The previous main program\n \"\"\"\n global _main_program_\n prev_program = _main_program_\n _main_program_ = program\n return prev_program\n\n\ndef switch_startup_program(program):\n \"\"\"\n Switch the startup program to a new program\n Args:\n program(Program): The new startup program\n\n Returns:\n Program: The previous startup program\n \"\"\"\n global _startup_program_\n prev_program = _startup_program_\n _startup_program_ = program\n return prev_program\n\n\n@signature_safe_contextmanager\ndef program_guard(main_program, startup_program=None):\n \"\"\"\n :api_attr: Static Graph\n\n Change the global main 
program and startup program with `\"with\"` statement.\n Layer functions in the Python `\"with\"` block will append operators and\n variables to the new main programs.\n\n Args:\n main_program(Program): New main program inside `\"with\"` statement.\n startup_program(Program, optional): New startup program inside `\"with\"` \n statement. :code:`None` means not changing startup program, \n default_startup_program is still used.\n Default: None.\n\n Examples:\n .. code-block:: python\n \n import paddle.fluid as fluid\n\n main_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(main_program, startup_program):\n data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10, act='relu')\n\n Notes: The temporary :code:`Program` can be used if the user does not need\n to construct either of startup program or main program.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n main_program = fluid.Program()\n # does not care about startup program. Just pass a temporary value.\n with fluid.program_guard(main_program, fluid.Program()):\n data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n \n \"\"\"\n from .data_feeder import check_type\n check_type(main_program, 'main_program', Program, 'fluid.program_guard')\n main_program = switch_main_program(main_program)\n if startup_program is not None:\n check_type(startup_program, 'startup_program', Program,\n 'fluid.program_guard')\n startup_program = switch_startup_program(startup_program)\n try:\n yield\n finally:\n switch_main_program(main_program)\n if startup_program is not None:\n switch_startup_program(startup_program)\n\n\ndef _get_var(name, program=None):\n \"\"\"\n Get a variable by name from the global block of a program.\n\n Args:\n name(str): name of the variable\n program(Program|None): program object.\n If None, default_global_program() will be used.\n\n Returns:\n Variable\n \"\"\"\n if program is None:\n program = default_main_program()\n assert isinstance(name, str)\n assert isinstance(program, Program)\n\n return program.global_block().var(name)\n\n\n@signature_safe_contextmanager\ndef _dygraph_guard(tracer):\n global _dygraph_tracer_\n tmp_trace = _dygraph_tracer_\n _dygraph_tracer_ = tracer\n core._switch_tracer(tracer)\n\n try:\n yield\n finally:\n core._switch_tracer(tmp_trace)\n _dygraph_tracer_ = tmp_trace\n\n\n@signature_safe_contextmanager\ndef _dygraph_place_guard(place):\n global _global_expected_place_\n tmp_place = _global_expected_place_\n _global_expected_place_ = place\n\n try:\n yield\n finally:\n _global_expected_place_ = tmp_place\n\n\ndef load_op_library(lib_filename):\n \"\"\"\n :api_attr: Static Graph\n \n Load a dynamic library, including custom operators and kernels.\n When library is loaded, ops and kernels registered in the library\n will be available in PaddlePaddle main process.\n Please note, the type of custom operators can't have the same type\n with the existing operators in the framework.\n\n Args:\n lib_filename (str): name of dynamic library.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n #fluid.load_op_library('custom_op.so')\n\n \"\"\"\n core.load_op_library(lib_filename)\n OpProtoHolder.instance().update_op_proto()\n\n\ndef switch_device(device):\n global _current_device\n pre_device = _current_device\n _current_device = device\n return pre_device\n\n\n@signature_safe_contextmanager\ndef device_guard(device=None):\n \"\"\"\n **Notes**:\n **The API only supports static mode.**\n\n A context manager that specifies the device on which the OP will be placed.\n\n Args:\n device(str|None): Specify the device to use in the context. It should be 'cpu' or 'gpu'.\n When it is set to 'cpu' or 'gpu', all OPs created in the context will be\n placed on CPUPlace or CUDAPlace. When 'gpu' is set and the program runs on\n a single card, the device index will be the same as the device on which the\n executor runs. Default: None; OPs in this context will be automatically\n assigned devices.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n support_gpu = fluid.is_compiled_with_cuda()\n place = fluid.CPUPlace()\n if support_gpu:\n place = fluid.CUDAPlace(0)\n\n # if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0)\n data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32')\n data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32')\n shape = fluid.layers.shape(data2)\n\n with fluid.device_guard(\"cpu\"):\n # Ops created here will be placed on CPUPlace\n shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4])\n with fluid.device_guard('gpu'):\n # if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace\n out = fluid.layers.crop_tensor(data1, shape=shape)\n\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n result = exe.run(fetch_list=[out])\n \"\"\"\n\n index = None\n if device and ':' in device:\n device, index = device.split(':')\n if device == 'cpu':\n raise ValueError(\"Should not set device id for cpu.\")\n if device not in ['cpu', 'gpu', '', None]:\n raise ValueError(\n \"The Attr(device) should be 'cpu' or 'gpu', and it can also be empty string or None \"\n \"when there is no need to specify device. But received %s\" % device)\n if index:\n device = \":\".join([device, index])\n pre_device = switch_device(device)\n try:\n yield\n finally:\n switch_device(pre_device)\n\n\ndef set_flags(flags):\n \"\"\"\n This function sets the GFlags value in Paddle.\n\n Args:\n flags (dict): A dict containing flags and their values.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})\n \"\"\"\n if not isinstance(flags, dict):\n raise TypeError('flags in set_flags should be a dict')\n for key, value in flags.items():\n if core.globals().is_public(key):\n core.globals()[key] = value\n else:\n raise ValueError(\n \"Flag %s cannot set its value through this function.\" % (key))\n\n\ndef get_flags(flags):\n \"\"\"\n This function gets the GFlags value in Paddle.\n\n Args:\n flags(list|tuple|str): A list/tuple of strings or a string which is the flag's name.\n\n Returns:\n flag's value in Paddle.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n flags = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']\n res = fluid.get_flags(flags)\n print(res)\n # {'FLAGS_eager_delete_tensor_gb': 0.0, 'FLAGS_check_nan_inf': False}\n \"\"\"\n flags_value = {}\n if isinstance(flags, (list, tuple)):\n for key in flags:\n if (core.globals().is_public(key)):\n value = core.globals()[key]\n temp = {key: value}\n flags_value.update(temp)\n else:\n raise ValueError(\n 'Flag %s cannot get its value through this function.' %\n (key))\n elif isinstance(flags, str):\n if (core.globals().is_public(flags)):\n value = core.globals()[flags]\n temp = {flags: value}\n flags_value.update(temp)\n else:\n raise ValueError(\n 'Flag %s cannot get its value through this function.' % (flags))\n else:\n raise TypeError('Flags in get_flags should be a list, tuple or string.')\n return flags_value\n", "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nDistributed CTR model for testing the fleet api\n\"\"\"\n\nfrom __future__ import print_function\n\nimport shutil\nimport tempfile\nimport time\n\nimport paddle\nimport paddle.fluid as fluid\nimport os\nimport numpy as np\n\nimport ctr_dataset_reader\nfrom test_dist_fleet_base import runtime_main, FleetDistRunnerBase\nfrom dist_fleet_ctr import TestDistCTR2x2, fake_ctr_reader\nfrom paddle.distributed.fleet.base.util_factory import fleet_util\n\n# Fix seed for test\nfluid.default_startup_program().random_seed = 1\nfluid.default_main_program().random_seed = 1\n\n\nclass TestDistGpuPsCTR2x2(TestDistCTR2x2):\n \"\"\"\n For testing the CTR model, using the Fleet api & PS-GPU\n \"\"\"\n\n def check_model_right(self, dirname):\n model_filename = os.path.join(dirname, \"__model__\")\n\n with open(model_filename, \"rb\") as f:\n program_desc_str = f.read()\n\n program = fluid.Program.parse_from_string(program_desc_str)\n with open(os.path.join(dirname, \"__model__.proto\"), \"w\") as wn:\n wn.write(str(program))\n\n def do_pyreader_training(self, fleet):\n \"\"\"\n do training using a py_reader, using a fetch handler to catch variables\n Args:\n fleet(Fleet api): the fleet object of Parameter Server, which defines the distributed training role\n \"\"\"\n device_id = int(os.getenv(\"FLAGS_selected_gpus\", \"0\"))\n place = fluid.CUDAPlace(device_id)\n exe = fluid.Executor(place)\n fleet.init_worker()\n exe.run(fleet.startup_program)\n\n batch_size = 4\n train_reader = paddle.batch(fake_ctr_reader(), batch_size=batch_size)\n self.reader.decorate_sample_list_generator(train_reader)\n\n for epoch_id in range(1):\n self.reader.start()\n try:\n pass_start = time.time()\n while True:\n loss_val = exe.run(program=fleet.main_program,\n fetch_list=[self.avg_cost.name])\n loss_val = np.mean(loss_val)\n reduce_output = fleet_util.all_reduce(\n np.array(loss_val), mode=\"sum\")\n loss_all_trainer = fleet_util.all_gather(float(loss_val))\n loss_val = float(reduce_output) / len(loss_all_trainer)\n message = \"TRAIN ---> pass: {} loss: 
{}\\n\".format(epoch_id,\n loss_val)\n fleet_util.print_on_rank(message, 0)\n\n pass_time = time.time() - pass_start\n except fluid.core.EOFException:\n self.reader.reset()\n\n model_dir = tempfile.mkdtemp()\n fleet.save_inference_model(\n exe, model_dir, [feed.name for feed in self.feeds], self.avg_cost)\n self.check_model_right(model_dir)\n if fleet.is_first_worker():\n fleet.save_persistables(executor=exe, dirname=model_dir)\n shutil.rmtree(model_dir)\n fleet.stop_worker()\n\n def do_dataset_training(self, fleet):\n dnn_input_dim, lr_input_dim, train_file_path = ctr_dataset_reader.prepare_data(\n )\n\n device_id = int(os.getenv(\"FLAGS_selected_gpus\", \"0\"))\n place = fluid.CUDAPlace(device_id)\n exe = fluid.Executor(place)\n\n fleet.init_worker()\n exe.run(fleet.startup_program)\n\n thread_num = 2\n batch_size = 128\n filelist = []\n for _ in range(thread_num):\n filelist.append(train_file_path)\n\n # config dataset\n dataset = paddle.fleet.DatasetFactory().create_dataset()\n dataset.set_batch_size(batch_size)\n dataset.set_use_var(self.feeds)\n pipe_command = 'python ctr_dataset_reader.py'\n dataset.set_pipe_command(pipe_command)\n\n dataset.set_filelist(filelist)\n dataset.set_thread(thread_num)\n\n for epoch_id in range(1):\n pass_start = time.time()\n dataset.set_filelist(filelist)\n exe.train_from_dataset(\n program=fleet.main_program,\n dataset=dataset,\n fetch_list=[self.avg_cost],\n fetch_info=[\"cost\"],\n print_period=2,\n debug=int(os.getenv(\"Debug\", \"0\")))\n pass_time = time.time() - pass_start\n\n if os.getenv(\"SAVE_MODEL\") == \"1\":\n model_dir = tempfile.mkdtemp()\n fleet.save_inference_model(exe, model_dir,\n [feed.name for feed in self.feeds],\n self.avg_cost)\n self.check_model_right(model_dir)\n if fleet.is_first_worker():\n fleet.save_persistables(executor=exe, dirname=model_dir)\n shutil.rmtree(model_dir)\n\n fleet.stop_worker()\n\n\nif __name__ == \"__main__\":\n runtime_main(TestDistGpuPsCTR2x2)\n", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport io\nimport tarfile\nimport numpy as np\nimport scipy.io as scio\nfrom PIL import Image\n\nfrom paddle.io import Dataset\nfrom paddle.dataset.common import _check_exists_and_download\n\n__all__ = [\"Flowers\"]\n\nDATA_URL = 'http://paddlemodels.bj.bcebos.com/flowers/102flowers.tgz'\nLABEL_URL = 'http://paddlemodels.bj.bcebos.com/flowers/imagelabels.mat'\nSETID_URL = 'http://paddlemodels.bj.bcebos.com/flowers/setid.mat'\nDATA_MD5 = '52808999861908f626f3c1f4e79d11fa'\nLABEL_MD5 = 'e0620be6f572b9609742df49c70aed4d'\nSETID_MD5 = 'a5357ecc9cb78c4bef273ce3793fc85c'\n\n# In official 'readme', tstid is the flag of test data\n# and trnid is the flag of train data. 
However, the test split is larger than the train split,\n# so we swap the train and test data.\nMODE_FLAG_MAP = {'train': 'tstid', 'test': 'trnid', 'valid': 'valid'}\n\n\nclass Flowers(Dataset):\n \"\"\"\n Implementation of the `Flowers <https://www.robots.ox.ac.uk/~vgg/data/flowers/>`_\n dataset\n\n Args:\n data_file(str): path to data file, can be set None if\n :attr:`download` is True. Default None\n label_file(str): path to label file, can be set None if\n :attr:`download` is True. Default None\n setid_file(str): path to subset index file, can be set\n None if :attr:`download` is True. Default None\n mode(str): 'train', 'valid' or 'test' mode. Default 'train'.\n transform(callable): transform to perform on image, None for no transform.\n download(bool): whether to download dataset automatically if\n :attr:`data_file` is not set. Default True\n\n Examples:\n \n .. code-block:: python\n\n from paddle.vision.datasets import Flowers\n\n flowers = Flowers(mode='test')\n\n for i in range(len(flowers)):\n sample = flowers[i]\n print(sample[0].shape, sample[1])\n\n \"\"\"\n\n def __init__(self,\n data_file=None,\n label_file=None,\n setid_file=None,\n mode='train',\n transform=None,\n download=True):\n assert mode.lower() in ['train', 'valid', 'test'], \\\n \"mode should be 'train', 'valid' or 'test', but got {}\".format(mode)\n self.flag = MODE_FLAG_MAP[mode.lower()]\n\n self.data_file = data_file\n if self.data_file is None:\n assert download, \"data_file is not set and downloading automatically is disabled\"\n self.data_file = _check_exists_and_download(\n data_file, DATA_URL, DATA_MD5, 'flowers', download)\n\n self.label_file = label_file\n if self.label_file is None:\n assert download, \"label_file is not set and downloading automatically is disabled\"\n self.label_file = _check_exists_and_download(\n label_file, LABEL_URL, LABEL_MD5, 'flowers', download)\n\n self.setid_file = setid_file\n if self.setid_file is None:\n assert download, \"setid_file is not set and downloading automatically is disabled\"\n self.setid_file = _check_exists_and_download(\n setid_file, SETID_URL, SETID_MD5, 'flowers', download)\n\n self.transform = transform\n\n # read dataset into memory\n self._load_anno()\n\n def _load_anno(self):\n self.name2mem = {}\n self.data_tar = tarfile.open(self.data_file)\n for ele in self.data_tar.getmembers():\n self.name2mem[ele.name] = ele\n\n self.labels = scio.loadmat(self.label_file)['labels'][0]\n self.indexes = scio.loadmat(self.setid_file)[self.flag][0]\n\n def __getitem__(self, idx):\n index = self.indexes[idx]\n label = np.array([self.labels[index - 1]])\n img_name = \"jpg/image_%05d.jpg\" % index\n img_ele = self.name2mem[img_name]\n image = self.data_tar.extractfile(img_ele).read()\n image = np.array(Image.open(io.BytesIO(image)))\n\n if self.transform is not None:\n image = self.transform(image)\n\n return image, label.astype('int64')\n\n def __len__(self):\n return len(self.indexes)\n", "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\nimport abc\nimport numpy as np\n\nimport paddle\n\n__all__ = ['Metric', 'Accuracy', 'Precision', 'Recall', 'Auc']\n\n\ndef _is_numpy_(var):\n return isinstance(var, (np.ndarray, np.generic))\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Metric(object):\n \"\"\"\n Base class for metrics; encapsulates metric logic and APIs\n Usage:\n \n m = SomeMetric()\n for prediction, label in ...:\n m.update(prediction, label)\n m.accumulate()\n \n Advanced usage for :code:`compute`:\n\n Metric calculation can be accelerated by calculating metric states\n from model outputs and labels by built-in operators, rather than by Python/NumPy,\n in :code:`compute`; metric states will then be fetched as NumPy arrays and\n passed to :code:`update` in NumPy format.\n The metric is calculated as follows (operations in Model and Metric are\n indicated with curly brackets, while data nodes are not):\n inputs & labels || ------------------\n | ||\n {model} ||\n | ||\n outputs & labels ||\n | || tensor data\n {Metric.compute} ||\n | ||\n metric states(tensor) ||\n | ||\n {fetch as numpy} || ------------------\n | ||\n metric states(numpy) || numpy data\n | ||\n {Metric.update} \\/ ------------------\n Examples:\n \n For the :code:`Accuracy` metric, which takes :code:`pred` and :code:`label`\n as inputs, we can calculate the correct prediction matrix between\n :code:`pred` and :code:`label` in :code:`compute`.\n For example, if the prediction results contain 10 classes, :code:`pred`\n shape is [N, 10] and :code:`label` shape is [N, 1], where N is the mini-batch size,\n and we only need to calculate the accuracy of top-1 and top-5, we could\n calculate the correct prediction matrix of the top-5 scores of the\n prediction of each sample as follows, where the correct prediction\n matrix shape is [N, 5].\n\n .. code-block:: python\n def compute(pred, label):\n # sort prediction and slice the top-5 scores\n pred = paddle.argsort(pred, descending=True)[:, :5]\n # calculate whether the predictions are correct\n correct = pred == label\n return paddle.cast(correct, dtype='float32')\n\n With the :code:`compute`, we split some calculations to OPs (which\n may run on GPU devices, and will thus be faster), and only fetch 1 tensor with\n shape as [N, 5] instead of 2 tensors with shapes as [N, 10] and [N, 1].\n :code:`update` can be defined as follows:\n\n .. 
code-block:: python\n def update(self, correct):\n accs = []\n for i, k in enumerate(self.topk):\n num_corrects = correct[:, :k].sum()\n num_samples = len(correct)\n accs.append(float(num_corrects) / num_samples)\n self.total[i] += num_corrects\n self.count[i] += num_samples\n return accs\n \"\"\"\n\n def __init__(self):\n pass\n\n @abc.abstractmethod\n def reset(self):\n \"\"\"\n Reset states and result\n \"\"\"\n raise NotImplementedError(\"function 'reset' not implemented in {}.\".\n format(self.__class__.__name__))\n\n @abc.abstractmethod\n def update(self, *args):\n \"\"\"\n Update states for metric\n\n Inputs of :code:`update` are the outputs of :code:`Metric.compute`;\n if :code:`compute` is not defined, the inputs of :code:`update`\n will be the flattened arguments of the **output** of the model and the **label** from data:\n :code:`update(output1, output2, ..., label1, label2,...)`\n\n see :code:`Metric.compute`\n \"\"\"\n raise NotImplementedError(\"function 'update' not implemented in {}.\".\n format(self.__class__.__name__))\n\n @abc.abstractmethod\n def accumulate(self):\n \"\"\"\n Accumulates statistics, computes and returns the metric value\n \"\"\"\n raise NotImplementedError(\n \"function 'accumulate' not implemented in {}.\".format(\n self.__class__.__name__))\n\n @abc.abstractmethod\n def name(self):\n \"\"\"\n Returns metric name\n \"\"\"\n raise NotImplementedError(\"function 'name' not implemented in {}.\".\n format(self.__class__.__name__))\n\n def compute(self, *args):\n \"\"\"\n This API is an advanced usage to accelerate metric calculation; calculations\n from the outputs of the model to the states which should be updated by Metric can\n be defined here, where Paddle OPs are also supported. Outputs of this API\n will be the inputs of \"Metric.update\".\n\n If :code:`compute` is defined, it will be called with the **outputs**\n of the model and the **labels** from data as arguments; all outputs and labels\n will be concatenated and flattened, with each field passed as a separate argument\n as follows:\n :code:`compute(output1, output2, ..., label1, label2,...)`\n\n If :code:`compute` is not defined, the default behaviour is to pass\n input to output, so the output format will be:\n :code:`return output1, output2, ..., label1, label2,...`\n\n see :code:`Metric.update`\n \"\"\"\n return args\n\n\nclass Accuracy(Metric):\n \"\"\"\n Encapsulates accuracy metric logic.\n\n Args:\n topk (int|tuple(int)): Number of top elements to look at\n for computing accuracy. Default is (1,).\n name (str, optional): String name of the metric instance. Default\n is `acc`.\n\n Example by standalone:\n \n .. code-block:: python\n\n import numpy as np\n import paddle\n\n paddle.disable_static()\n x = paddle.to_tensor(np.array([\n [0.1, 0.2, 0.3, 0.4],\n [0.1, 0.4, 0.3, 0.2],\n [0.1, 0.2, 0.4, 0.3],\n [0.1, 0.2, 0.3, 0.4]]))\n y = paddle.to_tensor(np.array([[0], [1], [2], [3]]))\n\n m = paddle.metric.Accuracy()\n correct = m.compute(x, y)\n m.update(correct)\n res = m.accumulate()\n print(res) # 0.75\n\n\n Example with Model API:\n \n .. 
code-block:: python\n\n import paddle\n\n paddle.disable_static()\n train_dataset = paddle.vision.datasets.MNIST(mode='train')\n\n model = paddle.Model(paddle.vision.LeNet(classifier_activation=None))\n optim = paddle.optimizer.Adam(\n learning_rate=0.001, parameters=model.parameters())\n model.prepare(\n optim,\n loss=paddle.nn.CrossEntropyLoss(),\n metrics=paddle.metric.Accuracy())\n\n model.fit(train_dataset, batch_size=64)\n\n \"\"\"\n\n def __init__(self, topk=(1, ), name=None, *args, **kwargs):\n super(Accuracy, self).__init__(*args, **kwargs)\n self.topk = topk\n self.maxk = max(topk)\n self._init_name(name)\n self.reset()\n\n def compute(self, pred, label, *args):\n \"\"\"\n Compute the top-k (maximum value in `topk`) indices.\n\n Args:\n pred (Tensor): The predicted value is a Tensor with type\n float32 or float64.\n label (Tensor): The ground truth value is a 2D Tensor, its\n shape is [batch_size, 1] and type is int64.\n\n Return:\n Tensor: Correct mask, a tensor with shape [batch_size, topk].\n \"\"\"\n pred = paddle.argsort(pred, descending=True)[:, :self.maxk]\n correct = pred == label\n return paddle.cast(correct, dtype='float32')\n\n def update(self, correct, *args):\n \"\"\"\n Update the metrics states (correct count and total count), in order to\n calculate the cumulative accuracy of all instances. This function also\n returns the accuracy of the current step.\n \n Args:\n correct: Correct mask, a tensor with shape [batch_size, topk].\n\n Return:\n Tensor: the accuracy of the current step.\n \"\"\"\n if isinstance(correct, paddle.Tensor):\n correct = correct.numpy()\n accs = []\n for i, k in enumerate(self.topk):\n num_corrects = correct[:, :k].sum()\n num_samples = len(correct)\n accs.append(float(num_corrects) / num_samples)\n self.total[i] += num_corrects\n self.count[i] += num_samples\n accs = accs[0] if len(self.topk) == 1 else accs\n return accs\n\n def reset(self):\n \"\"\"\n Resets all of the metric state.\n \"\"\"\n self.total = [0.] * len(self.topk)\n self.count = [0] * len(self.topk)\n\n def accumulate(self):\n \"\"\"\n Computes and returns the accumulated metric.\n \"\"\"\n res = []\n for t, c in zip(self.total, self.count):\n r = float(t) / c if c > 0 else 0.\n res.append(r)\n res = res[0] if len(self.topk) == 1 else res\n return res\n\n def _init_name(self, name):\n name = name or 'acc'\n if self.maxk != 1:\n self._name = ['{}_top{}'.format(name, k) for k in self.topk]\n else:\n self._name = [name]\n\n def name(self):\n \"\"\"\n Return name of metric instance.\n \"\"\"\n return self._name\n\n\nclass Precision(Metric):\n \"\"\"\n Precision (also called positive predictive value) is the fraction of\n relevant instances among the retrieved instances. Refer to\n https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers\n\n Note that this class manages the precision score only for the binary\n classification task.\n\n Args:\n name (str, optional): String name of the metric instance.\n Default is `precision`.\n\n Example by standalone:\n \n .. code-block:: python\n\n import numpy as np\n import paddle\n\n x = np.array([0.1, 0.5, 0.6, 0.7])\n y = np.array([0, 1, 1, 1])\n\n m = paddle.metric.Precision()\n m.update(x, y)\n res = m.accumulate()\n print(res) # 1.0\n\n\n Example with Model API:\n \n .. 
code-block:: python\n\n import numpy as np\n \n import paddle\n import paddle.nn as nn\n \n class Data(paddle.io.Dataset):\n def __init__(self):\n super(Data, self).__init__()\n self.n = 1024\n self.x = np.random.randn(self.n, 10).astype('float32')\n self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')\n \n def __getitem__(self, idx):\n return self.x[idx], self.y[idx]\n \n def __len__(self):\n return self.n\n \n paddle.disable_static()\n model = paddle.Model(nn.Sequential(\n nn.Linear(10, 1),\n nn.Sigmoid()\n ))\n optim = paddle.optimizer.Adam(\n learning_rate=0.001, parameters=model.parameters())\n model.prepare(\n optim,\n loss=nn.BCELoss(),\n metrics=paddle.metric.Precision())\n \n data = Data()\n model.fit(data, batch_size=16)\n \"\"\"\n\n def __init__(self, name='precision', *args, **kwargs):\n super(Precision, self).__init__(*args, **kwargs)\n self.tp = 0 # true positive\n self.fp = 0 # false positive\n self._name = name\n\n def update(self, preds, labels):\n \"\"\"\n Update the states based on the current mini-batch prediction results.\n\n Args:\n preds (numpy.ndarray): The prediction result, usually the output\n of a two-class sigmoid function. It should be a vector (column\n vector or row vector) with data type: 'float64' or 'float32'.\n labels (numpy.ndarray): The ground truth (labels),\n the shape should keep the same as preds.\n The data type is 'int32' or 'int64'.\n \"\"\"\n if isinstance(preds, paddle.Tensor):\n preds = preds.numpy()\n elif not _is_numpy_(preds):\n raise ValueError(\"The 'preds' must be a numpy ndarray or Tensor.\")\n\n if isinstance(labels, paddle.Tensor):\n labels = labels.numpy()\n elif not _is_numpy_(labels):\n raise ValueError(\"The 'labels' must be a numpy ndarray or Tensor.\")\n\n sample_num = labels.shape[0]\n preds = np.floor(preds + 0.5).astype(\"int32\")\n\n for i in range(sample_num):\n pred = preds[i]\n label = labels[i]\n if pred == 1:\n if pred == label:\n self.tp += 1\n else:\n self.fp += 1\n\n def reset(self):\n \"\"\"\n Resets all of the metric state.\n \"\"\"\n self.tp = 0\n self.fp = 0\n\n def accumulate(self):\n \"\"\"\n Calculate the final precision.\n\n Returns:\n A scalar float: result of the calculated precision.\n \"\"\"\n ap = self.tp + self.fp\n return float(self.tp) / ap if ap != 0 else .0\n\n def name(self):\n \"\"\"\n Returns metric name\n \"\"\"\n return self._name\n\n\nclass Recall(Metric):\n \"\"\"\n Recall (also known as sensitivity) is the fraction of\n relevant instances that have been retrieved over the\n total amount of relevant instances.\n\n Refer to:\n https://en.wikipedia.org/wiki/Precision_and_recall\n\n Note that this class manages the recall score only for the\n binary classification task.\n\n Args:\n name (str, optional): String name of the metric instance.\n Default is `recall`.\n\n Example by standalone:\n \n .. code-block:: python\n\n import numpy as np\n import paddle\n\n x = np.array([0.1, 0.5, 0.6, 0.7])\n y = np.array([1, 0, 1, 1])\n\n m = paddle.metric.Recall()\n m.update(x, y)\n res = m.accumulate()\n print(res) # 2.0 / 3.0\n\n\n Example with Model API:\n \n .. 
code-block:: python\n\n import numpy as np\n \n import paddle\n import paddle.nn as nn\n \n class Data(paddle.io.Dataset):\n def __init__(self):\n super(Data, self).__init__()\n self.n = 1024\n self.x = np.random.randn(self.n, 10).astype('float32')\n self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')\n \n def __getitem__(self, idx):\n return self.x[idx], self.y[idx]\n \n def __len__(self):\n return self.n\n \n paddle.disable_static()\n model = paddle.Model(nn.Sequential(\n nn.Linear(10, 1),\n nn.Sigmoid()\n ))\n optim = paddle.optimizer.Adam(\n learning_rate=0.001, parameters=model.parameters())\n model.prepare(\n optim,\n loss=nn.BCELoss(),\n metrics=[paddle.metric.Precision(), paddle.metric.Recall()])\n \n data = Data()\n model.fit(data, batch_size=16)\n \"\"\"\n\n def __init__(self, name='recall', *args, **kwargs):\n super(Recall, self).__init__(*args, **kwargs)\n self.tp = 0 # true positive\n self.fn = 0 # false negative\n self._name = name\n\n def update(self, preds, labels):\n \"\"\"\n Update the states based on the current mini-batch prediction results.\n\n Args:\n preds(numpy.array): prediction results of the current mini-batch,\n the output of a two-class sigmoid function.\n Shape: [batch_size, 1]. Dtype: 'float64' or 'float32'.\n labels(numpy.array): ground truth (labels) of the current mini-batch,\n the shape should keep the same as preds.\n Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.\n \"\"\"\n if isinstance(preds, paddle.Tensor):\n preds = preds.numpy()\n elif not _is_numpy_(preds):\n raise ValueError(\"The 'preds' must be a numpy ndarray or Tensor.\")\n\n if isinstance(labels, paddle.Tensor):\n labels = labels.numpy()\n elif not _is_numpy_(labels):\n raise ValueError(\"The 'labels' must be a numpy ndarray or Tensor.\")\n\n sample_num = labels.shape[0]\n preds = np.rint(preds).astype(\"int32\")\n\n for i in range(sample_num):\n pred = preds[i]\n label = labels[i]\n if label == 1:\n if pred == label:\n self.tp += 1\n else:\n self.fn += 1\n\n def accumulate(self):\n \"\"\"\n Calculate the final recall.\n\n Returns:\n A scalar float: result of the calculated Recall.\n \"\"\"\n recall = self.tp + self.fn\n return float(self.tp) / recall if recall != 0 else .0\n\n def reset(self):\n \"\"\"\n Resets all of the metric state.\n \"\"\"\n self.tp = 0\n self.fn = 0\n\n def name(self):\n \"\"\"\n Returns metric name\n \"\"\"\n return self._name\n\n\nclass Auc(Metric):\n \"\"\"\n The auc metric is for binary classification.\n Refer to https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve.\n Please note that the auc metric is implemented in Python, which may be a little slow.\n\n The `auc` function creates four local variables, `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` that are used to\n compute the AUC. To discretize the AUC curve, a linearly spaced set of\n thresholds is used to compute pairs of recall and precision values. The area\n under the ROC-curve is therefore computed using the height of the recall\n values by the false positive rate, while the area under the PR-curve is\n computed using the height of the precision values by the recall.\n\n Args:\n curve (str): Specifies the mode of the curve to be computed,\n 'ROC' or 'PR' for the Precision-Recall-curve. Default is 'ROC'.\n num_thresholds (int): The number of thresholds to use when\n discretizing the roc curve. Default is 4095.\n name (str, optional): String name of the metric instance. Default\n is `auc`.\n\n \"NOTE: only the ROC curve type is implemented via Python now.\"\n\n Example by standalone:\n .. code-block:: python\n\n import numpy as np\n import paddle\n\n m = paddle.metric.Auc()\n \n n = 8\n class0_preds = np.random.random(size = (n, 1))\n class1_preds = 1 - class0_preds\n \n preds = np.concatenate((class0_preds, class1_preds), axis=1)\n labels = np.random.randint(2, size = (n, 1))\n \n m.update(preds=preds, labels=labels)\n res = m.accumulate()\n\n\n Example with Model API:\n \n .. code-block:: python\n\n import numpy as np\n import paddle\n import paddle.nn as nn\n \n class Data(paddle.io.Dataset):\n def __init__(self):\n super(Data, self).__init__()\n self.n = 1024\n self.x = np.random.randn(self.n, 10).astype('float32')\n self.y = np.random.randint(2, size=(self.n, 1)).astype('int64')\n \n def __getitem__(self, idx):\n return self.x[idx], self.y[idx]\n \n def __len__(self):\n return self.n\n \n paddle.disable_static()\n model = paddle.Model(nn.Sequential(\n nn.Linear(10, 2), nn.Softmax())\n )\n optim = paddle.optimizer.Adam(\n learning_rate=0.001, parameters=model.parameters())\n \n def loss(x, y):\n return nn.functional.nll_loss(paddle.log(x), y)\n \n model.prepare(\n optim,\n loss=loss,\n metrics=paddle.metric.Auc())\n data = Data()\n model.fit(data, batch_size=16)\n \"\"\"\n\n def __init__(self,\n curve='ROC',\n num_thresholds=4095,\n name='auc',\n *args,\n **kwargs):\n super(Auc, self).__init__(*args, **kwargs)\n self._curve = curve\n self._num_thresholds = num_thresholds\n\n _num_pred_buckets = num_thresholds + 1\n self._stat_pos = np.zeros(_num_pred_buckets)\n self._stat_neg = np.zeros(_num_pred_buckets)\n self._name = name\n\n def update(self, preds, labels):\n \"\"\"\n Update the auc curve with the given predictions and labels.\n\n Args:\n preds (numpy.array): A numpy array in the shape of\n (batch_size, 2), preds[i][j] denotes the probability of\n classifying the instance i into the class j.\n labels (numpy.array): a numpy array in the shape of\n (batch_size, 1), labels[i] is either 0 or 1,\n representing the label of the instance i.\n \"\"\"\n if isinstance(labels, paddle.Tensor):\n labels = labels.numpy()\n elif not _is_numpy_(labels):\n raise ValueError(\"The 'labels' must be a numpy ndarray or Tensor.\")\n\n if isinstance(preds, paddle.Tensor):\n preds = preds.numpy()\n elif not _is_numpy_(preds):\n raise ValueError(\"The 'preds' must be a numpy ndarray or Tensor.\")\n\n for i, lbl in enumerate(labels):\n value = preds[i, 1]\n bin_idx = int(value * self._num_thresholds)\n assert bin_idx <= self._num_thresholds\n if lbl:\n self._stat_pos[bin_idx] += 1.0\n else:\n self._stat_neg[bin_idx] += 1.0\n\n @staticmethod\n def trapezoid_area(x1, x2, y1, y2):\n return abs(x1 - x2) * (y1 + y2) / 2.0\n\n def accumulate(self):\n \"\"\"\n Return the area (a float score) under the auc curve\n\n Return:\n float: the area under the auc curve\n \"\"\"\n tot_pos = 0.0\n tot_neg = 0.0\n auc = 0.0\n\n idx = self._num_thresholds\n while idx >= 0:\n tot_pos_prev = tot_pos\n tot_neg_prev = tot_neg\n tot_pos += self._stat_pos[idx]\n tot_neg += self._stat_neg[idx]\n auc += self.trapezoid_area(tot_neg, tot_neg_prev, tot_pos,\n tot_pos_prev)\n idx -= 1\n\n return auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0\n\n def reset(self):\n \"\"\"\n Reset states and result\n \"\"\"\n _num_pred_buckets = self._num_thresholds + 1\n self._stat_pos = np.zeros(_num_pred_buckets)\n self._stat_neg = 
np.zeros(_num_pred_buckets)\n\n def name(self):\n \"\"\"\n Returns metric name\n \"\"\"\n return self._name\n" ]
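The bucket-and-trapezoid sweep in ``Auc.accumulate()`` above is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea in plain NumPy; the helper name ``bucket_auc`` and the toy scores/labels are illustrative only and not part of the Paddle source.

.. code-block:: python

    import numpy as np

    def bucket_auc(stat_pos, stat_neg):
        # Sweep the buckets from the highest threshold down, as in
        # Auc.accumulate(): the x-axis accumulates negatives (false
        # positives), the y-axis accumulates positives (true positives).
        tot_pos = tot_neg = auc = 0.0
        for idx in range(len(stat_pos) - 1, -1, -1):
            prev_pos, prev_neg = tot_pos, tot_neg
            tot_pos += stat_pos[idx]
            tot_neg += stat_neg[idx]
            # trapezoid: width is the newly added negative count, height
            # is the average positive count before/after this bucket
            auc += abs(tot_neg - prev_neg) * (tot_pos + prev_pos) / 2.0
        return auc / tot_pos / tot_neg if tot_pos and tot_neg else 0.0

    # Fill the buckets the same way Auc.update() does.
    num_thresholds = 4095
    stat_pos = np.zeros(num_thresholds + 1)
    stat_neg = np.zeros(num_thresholds + 1)
    scores = [0.1, 0.4, 0.35, 0.8]   # P(class == 1) for each sample
    labels = [0, 0, 1, 1]
    for score, label in zip(scores, labels):
        bin_idx = int(score * num_thresholds)
        if label:
            stat_pos[bin_idx] += 1.0
        else:
            stat_neg[bin_idx] += 1.0

    # 0.75: three of the four positive/negative pairs are ordered correctly
    print(bucket_auc(stat_pos, stat_neg))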
[ [ "numpy.random.random", "numpy.random.seed" ], [ "numpy.dtype" ], [ "numpy.array", "numpy.mean" ], [ "numpy.array", "scipy.io.loadmat" ], [ "numpy.rint", "numpy.zeros", "numpy.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlohaBazinga/Surgery-Robot-Detection-Segmentation
[ "f42a3562cbb6a77ba195f5aa4828876afaf02500" ]
[ "surgery.py" ]
[ "\"\"\"\nMask R-CNN\nTrain on the surgery robot dataset.\n\nCopyright (c) 2018 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\n------------------------------------------------------------\n\nUsage: import the module (see Jupyter notebooks for examples), or run from\n the command line as such:\n\n #Train a new model starting from pre-trained COCO weights\n python surgery.py train --dataset=/home/.../mask_rcnn/data/surgery/ --weights=coco\n\n #Train a new model starting from pre-trained ImageNet weights\n python surgery.py train --dataset=/home/.../mask_rcnn/data/surgery/ --weights=imagenet\n\n # Continue training the last model you trained. This will find\n # the last trained weights in the model directory.\n python surgery.py train --dataset=/home/.../mask_rcnn/data/surgery/ --weights=last\n\n #Detect and color splash on a image with the last model you trained.\n #This will find the last trained weights in the model directory.\n python surgery.py splash --weights=last --image=/home/...../*.jpg\n\n #Detect and color splash on a video with a specific pre-trained weights of yours.\n python sugery.py splash --weights=/home/.../logs/mask_rcnn_surgery_0030.h5 --video=/home/simon/Videos/Center.wmv\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.draw\nfrom matplotlib import pyplot as plt\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\nfrom mrcnn import visualize\n# Path to trained weights file\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n# Directory to save logs and model checkpoints, if not provided\n# through the command line argument --logs\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n############################################################\n# Configurations\n############################################################\n\n\nclass SurgeryConfig(Config):\n \"\"\"Configuration for training on the toy dataset.\n Derives from the base Config class and overrides some values.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"surgery\"\n\n # We use a GPU with 12GB memory, which can fit two images.\n # Adjust down if you use a smaller GPU.\n IMAGES_PER_GPU = 2\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 2 # Background + objects\n\n # Number of training steps per epoch\n STEPS_PER_EPOCH = 100\n\n # Skip detections with < 90% confidence\n DETECTION_MIN_CONFIDENCE = 0.9\n\n\n############################################################\n# Dataset\n############################################################\n\nclass SurgeryDataset(utils.Dataset):\n def load_VIA(self, dataset_dir, subset, hc=False):\n \"\"\"Load the surgery dataset from VIA.\n dataset_dir: Root directory of the dataset.\n subset: Subset to load: train or val or predict\n \"\"\"\n # Add classes. 
We add two classes.\n self.add_class(\"surgery\", 1, \"adidas\")\n self.add_class(\"surgery\", 2, \"apple\")\n if hc is True:\n for i in range(1,14):\n self.add_class(\"surgery\", i, \"{}\".format(i))\n self.add_class(\"surgery\", 14, \"arm\")\n\n # Train or validation dataset?\n assert subset in [\"train\", \"val\", \"predict\"]\n dataset_dir = os.path.join(dataset_dir, subset)\n\n # Load annotations\n # VGG Image Annotator saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {name:'a'},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n annotations = json.load(open(os.path.join(dataset_dir, \"via_region_data.json\")))\n\n annotations = list(annotations.values()) # don't need the dict keys\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. Skip unannotated images.\n annotations = [a for a in annotations if a['regions']]\n\n # Add images\n for a in annotations:\n # Get the x, y coordinates of points of the polygons that make up\n # the outline of each object instance. These are stored in the\n # shape_attributes (see json format above)\n polygons = [r['shape_attributes'] for r in a['regions'].values()]\n names = [r['region_attributes'] for r in a['regions'].values()]\n # load_mask() needs the image size to convert polygons to masks.\n # Unfortunately, VIA doesn't include it in JSON, so we must read\n # the image. This is only manageable since the dataset is tiny.\n image_path = os.path.join(dataset_dir, a['filename'])\n image = skimage.io.imread(image_path)\n height, width = image.shape[:2]\n\n self.add_image(\n \"surgery\",\n image_id=a['filename'], # use file name as a unique image id\n path=image_path,\n width=width, height=height,\n polygons=polygons,\n names=names)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # If not a surgery dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"surgery\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n class_names = info[\"names\"]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n # Assign class_ids by reading class_names\n class_ids = np.zeros([len(info[\"polygons\"])])\n # In this dataset, regions are labeled with the class names 'adidas' and 'apple'.\n for i, p in enumerate(class_names):\n #\"name\" is the attribute name chosen when labeling, e.g. 'region_attributes': {name:'a'}\n if p['name'] == 'adidas':\n class_ids[i] = 1\n elif p['name'] == 'apple':\n class_ids[i] = 2\n #assert code here to extend to other labels\n class_ids = class_ids.astype(int)\n # Return mask, and array of class IDs of each instance. 
The class\n # IDs were assigned from the region attribute names above.\n return mask.astype(np.bool), class_ids\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"surgery\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n\n def load_mask_hc(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # If not a surgery dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"surgery\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n #\"name\" is the attribute name chosen when labeling, e.g. 'region_attributes': {name:'a'}\n class_names = info[\"names\"]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n # Assign class_ids by reading class_names\n class_ids = np.zeros([len(info[\"polygons\"])])\n # In the hc variant, numeric label names map to their own IDs and 'adidas' maps to 14.\n for i, p in enumerate(class_names):\n if p['name'] == 'adidas':\n class_ids[i] = 14\n elif p['name'] == 'error':\n pass\n else:\n class_ids[i] = int(p['name'])\n #assert code here to extend to other labels\n class_ids = class_ids.astype(int)\n # Return mask, and array of class IDs of each instance. The class\n # IDs were assigned from the region attribute names above.\n return mask.astype(np.bool), class_ids\n\ndef train(model, *dic):\n \"\"\"Train the model.\"\"\"\n # Training dataset.\n dataset_train = SurgeryDataset()\n dataset_train.load_VIA(args.dataset, \"train\")\n dataset_train.prepare()\n\n # Validation dataset\n dataset_val = SurgeryDataset()\n dataset_val.load_VIA(args.dataset, \"val\")\n dataset_val.prepare()\n\n # *** This training schedule is an example. Update to your needs ***\n # Since we're using a very small dataset, and starting from\n # COCO trained weights, we don't need to train too long. Also,\n # no need to train all layers, just the heads should do it.\n print(\"Training network heads\")\n model.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=60,\n layers='heads')\n\n\ndef color_splash(image, mask):\n \"\"\"Apply color splash effect.\n image: RGB image [height, width, 3]\n mask: instance segmentation mask [height, width, instance count]\n\n Returns result image.\n \"\"\"\n # Make a grayscale copy of the image. 
The grayscale copy still\n # has 3 RGB channels, though.\n gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n # We're treating all instances as one, so collapse the mask into one layer\n mask = (np.sum(mask, -1, keepdims=True) >= 1)\n # Copy color pixels from the original color image where mask is set\n if mask.shape[0] > 0:\n splash = np.where(mask, image, gray).astype(np.uint8)\n else:\n splash = gray\n return splash\n\n\ndef detect_and_color_splash(model, image_path=None, video_path=None, out_dir=''):\n assert image_path or video_path\n\n class_names = ['BG', 'adidas', 'apple']\n\n # Image or video?\n if image_path:\n # Run model detection and generate the color splash effect\n print(\"Running on {}\".format(args.image))\n # Read image\n image = skimage.io.imread(args.image)\n # Detect objects\n r = model.detect([image], verbose=1)[0]\n # Color splash\n # splash = color_splash(image, r['masks'])\n visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'], making_image=True)\n file_name = 'splash.png'\n # Save output\n # file_name = \"splash_{:%Y%m%dT%H%M%S}.png\".format(datetime.datetime.now())\n # save_file_name = os.path.join(out_dir, file_name)\n # skimage.io.imsave(save_file_name, splash)\n elif video_path:\n import cv2\n # Video capture\n vcapture = cv2.VideoCapture(video_path)\n # width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n # height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n width = 1600\n height = 1600\n fps = vcapture.get(cv2.CAP_PROP_FPS)\n # Define codec and create video writer\n file_name = \"splash_{:%Y%m%dT%H%M%S}.wmv\".format(datetime.datetime.now())\n vwriter = cv2.VideoWriter(file_name,\n cv2.VideoWriter_fourcc(*'MJPG'),\n fps, (width, height))\n\n count = 0\n success = True\n # For video, we want each class to keep the same mask color across frames, so pre-generate the colors\n colors = visualize.random_colors(len(class_names))\n while success:\n print(\"frame: \", count)\n # Read next image\n plt.clf()\n plt.close()\n success, image = vcapture.read()\n if success:\n # OpenCV returns images as BGR, convert to RGB\n image = image[..., ::-1]\n # Detect objects\n r = model.detect([image], verbose=0)[0]\n # Color splash\n # splash = color_splash(image, r['masks'])\n\n splash = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'], colors=colors, making_video=True)\n # Add image to video writer\n vwriter.write(splash)\n count += 1\n vwriter.release()\n print(\"Saved to \", file_name)\n\n############################################################\n# RLE Encoding\n############################################################\n\ndef rle_encode(mask):\n \"\"\"Encodes a mask in Run Length Encoding (RLE).\n Returns a string of space-separated values.\n \"\"\"\n assert mask.ndim == 2, \"Mask must be of shape [Height, Width]\"\n # Flatten it column-wise\n m = mask.T.flatten()\n # Compute gradient. 
Equals 1 or -1 at transition points\n g = np.diff(np.concatenate([[0], m, [0]]), n=1)\n # 1-based indices of transition points (where gradient != 0)\n rle = np.where(g != 0)[0].reshape([-1, 2]) + 1\n # Convert second index in each pair to length\n rle[:, 1] = rle[:, 1] - rle[:, 0]\n return \" \".join(map(str, rle.flatten()))\n\n\ndef rle_decode(rle, shape):\n \"\"\"Decodes an RLE encoded list of space separated\n numbers and returns a binary mask.\"\"\"\n rle = list(map(int, rle.split()))\n rle = np.array(rle, dtype=np.int32).reshape([-1, 2])\n rle[:, 1] += rle[:, 0]\n rle -= 1\n mask = np.zeros([shape[0] * shape[1]], np.bool)\n for s, e in rle:\n assert 0 <= s < mask.shape[0]\n assert 1 <= e <= mask.shape[0], \"shape: {} s {} e {}\".format(shape, s, e)\n mask[s:e] = 1\n # Reshape and transpose\n mask = mask.reshape([shape[1], shape[0]]).T\n return mask\n\n\ndef mask_to_rle(image_id, mask, scores):\n \"Encodes instance masks to submission format.\"\n assert mask.ndim == 3, \"Mask must be [H, W, count]\"\n # If mask is empty, return line with image ID only\n if mask.shape[-1] == 0:\n return \"{},\".format(image_id)\n # Remove mask overlaps\n # Multiply each instance mask by its score order\n # then take the maximum across the last dimension\n order = np.argsort(scores)[::-1] + 1 # 1-based descending\n mask = np.max(mask * np.reshape(order, [1, 1, -1]), -1)\n # Loop over instance masks\n lines = []\n for o in order:\n m = np.where(mask == o, 1, 0)\n # Skip if empty\n if m.sum() == 0.0:\n continue\n rle = rle_encode(m)\n lines.append(\"{}, {}\".format(image_id, rle))\n return \"\\n\".join(lines)\n\ndef detect(model, dataset_dir, subset):\n \"\"\"Run detection on images in the given directory.\"\"\"\n print(\"Running on {}\".format(dataset_dir))\n\n os.makedirs('RESULTS')\n submit_dir = os.path.join(os.getcwd(), \"RESULTS/\")\n # Read dataset\n dataset = SurgeryDataset()\n dataset.load_VIA(dataset_dir, subset)\n dataset.prepare()\n # Loop over images\n submission = []\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # Detect objects\n r = model.detect([image], verbose=0)[0]\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = dataset.image_info[image_id][\"id\"]\n rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n submission.append(rle)\n # Save image with masks\n canvas = visualize.display_instances(\n image, r['rois'], r['masks'], r['class_ids'],\n dataset.class_names, r['scores'], detect=True)\n # show_bbox=False, show_mask=False,\n # title=\"Predictions\",\n # detect=True)\n canvas.print_figure(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"][:-4]))\n # Save to csv file\n submission = \"ImageId,EncodedPixels\\n\" + \"\\n\".join(submission)\n file_path = os.path.join(submit_dir, \"submit.csv\")\n with open(file_path, \"w\") as f:\n f.write(submission)\n print(\"Saved to \", submit_dir)\n############################################################\n# Training\n############################################################\n\nif __name__ == '__main__':\n import argparse\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Train Mask R-CNN to detect rings and robot arms.')\n parser.add_argument(\"command\",\n metavar=\"<command>\",\n help=\"'train', 'detect' or 'splash'\")\n parser.add_argument('--dataset', required=False,\n metavar=\"/home/simon/mask_rcnn/data/surgery\",\n help='Directory of the surgery dataset')\n parser.add_argument('--weights', required=True,\n metavar=\"/home/simon/logs/weights.h5\",\n help=\"Path to weights .h5 file or 'coco'\")\n parser.add_argument('--logs', required=False,\n default=DEFAULT_LOGS_DIR,\n metavar=\"/path/to/logs/\",\n help='Logs and checkpoints directory (default=logs/)')\n parser.add_argument('--image', required=False,\n metavar=\"path or URL to image\",\n help='Image to apply the color splash effect on')\n parser.add_argument('--video', required=False,\n metavar=\"path or URL to video\",\n help='Video to apply the color splash effect on')\n parser.add_argument('--subset', required=False,\n metavar=\"Dataset sub-directory\",\n help=\"Subset of dataset to run prediction on\")\n args = parser.parse_args()\n\n # Validate arguments\n if args.command == \"train\":\n assert args.dataset, \"Argument --dataset is required for training\"\n\n elif args.command == \"splash\":\n assert args.image or args.video,\\\n \"Provide --image or --video to apply color splash\"\n\n print(\"Weights: \", args.weights)\n print(\"Dataset: \", args.dataset)\n print(\"Logs: \", args.logs)\n\n # Configurations\n if args.command == \"train\":\n config = SurgeryConfig()\n else:\n class InferenceConfig(SurgeryConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. 
Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n config = InferenceConfig()\n config.display()\n\n # Create model\n if args.command == \"train\":\n model = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=args.logs)\n else:\n model = modellib.MaskRCNN(mode=\"inference\", config=config,\n model_dir=args.logs)\n\n # Select weights file to load\n if args.weights.lower() == \"coco\":\n weights_path = COCO_WEIGHTS_PATH\n # Download weights file\n if not os.path.exists(weights_path):\n utils.download_trained_weights(weights_path)\n elif args.weights.lower() == \"last\":\n # Find last trained weights\n weights_path = model.find_last()[1]\n elif args.weights.lower() == \"imagenet\":\n # Start from ImageNet trained weights\n weights_path = model.get_imagenet_weights()\n else:\n weights_path = args.weights\n\n # Load weights\n print(\"Loading weights \", weights_path)\n if args.weights.lower() == \"coco\":\n # Exclude the last layers because they require a matching\n # number of classes\n model.load_weights(weights_path, by_name=True, exclude=[\n \"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n else:\n model.load_weights(weights_path, by_name=True)\n\n # Train or evaluate\n if args.command == \"train\":\n train(model)\n elif args.command == \"detect\":\n detect(model, args.dataset, args.subset)\n elif args.command == \"splash\":\n detect_and_color_splash(model, image_path=args.image,\n video_path=args.video)\n else:\n print(\"'{}' is not recognized. \"\n \"Use 'train', 'detect' or 'splash'\".format(args.command))\n\n\n# dataset_dir = '/home/simon/deeplearning/mask_rcnn/data'\n# dataset_train = SurgeryDataset()\n# dataset_train.VIA(dataset_dir, \"train\")\n# # dataset_train.prepare()\n# a, b = dataset_train.load_mask(130)\n# print(a.shape, b.shape)\n# print(b)\n" ]
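The RLE helpers in surgery.py are easiest to sanity-check with a round trip on a tiny mask. The sketch below mirrors the ``rle_encode``/``rle_decode`` logic above in a self-contained form (NumPy only; ``dtype=bool`` replaces the deprecated ``np.bool``, and the 4x4 example mask is invented for illustration):

.. code-block:: python

    import numpy as np

    def rle_encode(mask):
        # Column-wise flatten, then 1-based (start, length) pairs taken at
        # the 0->1 and 1->0 transitions, as in surgery.py.
        m = mask.T.flatten()
        g = np.diff(np.concatenate([[0], m, [0]]), n=1)
        rle = np.where(g != 0)[0].reshape([-1, 2]) + 1
        rle[:, 1] = rle[:, 1] - rle[:, 0]
        return " ".join(map(str, rle.flatten()))

    def rle_decode(rle, shape):
        runs = np.array(list(map(int, rle.split())), dtype=np.int32).reshape([-1, 2])
        runs[:, 1] += runs[:, 0]
        runs -= 1
        mask = np.zeros(shape[0] * shape[1], dtype=bool)
        for s, e in runs:
            mask[s:e] = 1
        return mask.reshape([shape[1], shape[0]]).T

    mask = np.zeros((4, 4), dtype=np.uint8)
    mask[1:3, 1:3] = 1                        # a 2x2 blob in the middle
    rle = rle_encode(mask)
    print(rle)                                # "6 2 10 2"
    assert (rle_decode(rle, mask.shape) == mask.astype(bool)).all()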
[ [ "numpy.reshape", "numpy.concatenate", "matplotlib.pyplot.clf", "matplotlib.pyplot.close", "numpy.argsort", "numpy.array", "numpy.where", "numpy.sum", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sniafas/mlflow
[ "c577c7f199d9efa076344785dabb2121edb7e6c8" ]
[ "mlflow/pyfunc/__init__.py" ]
[ "\"\"\"\nThe ``python_function`` model flavor serves as a default model interface for MLflow Python models.\nAny MLflow Python model is expected to be loadable as a ``python_function`` model.\n\nIn addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format\n<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from\nthis format. The format is self contained in the sense that it includes all necessary information\nfor anyone to load it and use it. Dependencies are either stored directly with the model or\nreferenced via a Conda environment.\n\nThe ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models\nusing frameworks and inference logic that may not be natively included in MLflow. See\n:ref:`pyfunc-create-custom`.\n\n.. _pyfunc-inference-api:\n\n*************\nInference API\n*************\n\nPython function models are loaded as an instance of :py:class:`PyFuncModel\n<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model\nmetadata (MLmodel file). You can score the model by calling the :py:func:`predict()\n<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::\n\n predict(\n model_input: [pandas.DataFrame, numpy.ndarray, scipy.sparse.(csc.csc_matrix | csr.csr_matrix),\n List[Any], Dict[str, Any]]\n ) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]\n\nAll PyFunc models will support `pandas.DataFrame` as input and DL PyFunc models will also support\ntensor inputs in the form of Dict[str, numpy.ndarray] (named tensors) and `numpy.ndarrays`\n(unnamed tensors).\n\n\n.. _pyfunc-filesystem-format:\n\n*****************\nFilesystem format\n*****************\n\nThe Pyfunc format is defined as a directory structure containing all required data, code, and\nconfiguration::\n\n ./dst-path/\n ./MLmodel: configuration\n <code>: code packaged with the model (specified in the MLmodel file)\n <data>: data packaged with the model (specified in the MLmodel file)\n <env>: Conda environment definition (specified in the MLmodel file)\n\nThe directory structure may contain additional contents that can be referenced by the ``MLmodel``\nconfiguration.\n\n.. _pyfunc-model-config:\n\nMLModel configuration\n#####################\n\nA Python model contains an ``MLmodel`` file in **python_function** format in its root with the\nfollowing parameters:\n\n- loader_module [required]:\n Python module that can load the model. Expected as module identifier\n e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.\n The imported module must contain a function with the following signature::\n\n _load_pyfunc(path: string) -> <pyfunc model implementation>\n\n The path argument is specified by the ``data`` parameter and may refer to a file or\n directory. 
The model implementation is expected to be an object with a\n ``predict`` method with the following signature::\n\n predict(\n model_input: [pandas.DataFrame, numpy.ndarray,\n scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], Dict[str, Any]]\n ) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]\n\n- code [optional]:\n Relative path to a directory containing the code packaged with this model.\n All files and directories inside this directory are added to the Python path\n prior to importing the model loader.\n\n- data [optional]:\n Relative path to a file or directory containing model data.\n The path is passed to the model loader.\n\n- env [optional]:\n Relative path to an exported Conda environment. If present this environment\n should be activated prior to running the model.\n\n- Optionally, any additional parameters necessary for interpreting the serialized model in\n ``pyfunc`` format.\n\n.. rubric:: Example\n\n::\n\n tree example/sklearn_iris/mlruns/run1/outputs/linear-lr\n\n::\n\n ├── MLmodel\n ├── code\n │   ├── sklearn_iris.py\n │\n ├── data\n │   └── model.pkl\n └── mlflow_env.yml\n\n::\n\n cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel\n\n::\n\n python_function:\n code: code\n data: data/model.pkl\n loader_module: mlflow.sklearn\n env: mlflow_env.yml\n main: sklearn_iris\n\n.. _pyfunc-create-custom:\n\n******************************\nCreating custom Pyfunc models\n******************************\n\nMLflow's persistence modules provide convenience functions for creating models with the\n``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and\nmore); however, they do not cover every use case. For example, you may want to create an MLflow\nmodel with the ``pyfunc`` flavor using a framework that MLflow does not natively support.\nAlternatively, you may want to build an MLflow model that executes custom logic when evaluating\nqueries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``\nprovides utilities for creating ``pyfunc`` models from arbitrary code and model data.\n\nThe :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows\nfor creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts\nthat the logic may require.\n\nAn `artifact` is a file or directory, such as a serialized model or a CSV. For example, a\nserialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.\n\n.. _pyfunc-create-custom-workflows:\n\nWorkflows\n#########\n\n:meth:`save_model()` and :meth:`log_model()` support the following workflows:\n\n1. Programmatically defining a new MLflow model, including its attributes and artifacts.\n\n Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can\n automatically download artifacts from their URIs and create an MLflow model directory.\n\n In this case, you must define a Python class which inherits from :class:`~PythonModel`,\n defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is\n specified via the ``python_model`` parameter; it is automatically serialized and deserialized\n as a Python class, including all of its attributes.\n\n2. Interpreting pre-existing data as an MLflow model.\n\n If you already have a directory containing model data, :meth:`save_model()` and\n :meth:`log_model()` can import the data as an MLflow model. 
The ``data_path`` parameter\n specifies the local filesystem path to the directory containing model data.\n\n In this case, you must provide a Python module, called a `loader module`. The\n loader module defines a ``_load_pyfunc()`` method that performs the following tasks:\n\n - Load data from the specified ``data_path``. For example, this process may include\n deserializing pickled Python objects or models or parsing CSV files.\n\n - Construct and return a pyfunc-compatible model wrapper. As in the first\n use case, this wrapper must define a ``predict()`` method that is used to evaluate\n queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.\n\n The ``loader_module`` parameter specifies the name of your loader module.\n\n For an example loader module implementation, refer to the `loader module\n implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/\n 74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.\n\n.. _pyfunc-create-custom-selecting-workflow:\n\nWhich workflow is right for my use case?\n########################################\n\nWe consider the first workflow to be more user-friendly and generally recommend it for the\nfollowing reasons:\n\n- It automatically resolves and collects specified model artifacts.\n\n- It automatically serializes and deserializes the ``python_model`` instance and all of\n its attributes, reducing the amount of user logic that is required to load the model\n\n- You can create Models using logic that is defined in the ``__main__`` scope. This allows\n custom models to be constructed in interactive environments, such as notebooks and the Python\n REPL.\n\nYou may prefer the second, lower-level workflow for the following reasons:\n\n- Inference logic is always persisted as code, rather than a Python object. 
This makes logic\n easier to inspect and modify later.\n\n- If you have already collected all of your model data in a single location, the second\n workflow allows it to be saved in MLflow format directly, without enumerating constituent\n artifacts.\n\"\"\"\n\nimport importlib\nimport tempfile\nimport signal\nimport sys\n\nimport numpy as np\nimport os\nimport pandas\nimport yaml\nfrom copy import deepcopy\nimport logging\nimport threading\nimport collections\nimport subprocess\n\nfrom typing import Any, Union, List, Dict, Iterator, Tuple\nimport mlflow\nimport mlflow.pyfunc.model\nfrom mlflow.models import Model, ModelSignature, ModelInputExample\nfrom mlflow.models.model import MLMODEL_FILE_NAME\nfrom mlflow.models.utils import _save_example\nfrom mlflow.pyfunc.model import ( # pylint: disable=unused-import\n PythonModel,\n PythonModelContext,\n get_default_conda_env,\n)\nfrom mlflow.pyfunc.model import get_default_pip_requirements\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.types import DataType, Schema, TensorSpec\nfrom mlflow.types.utils import clean_tensor_type\nfrom mlflow.utils import PYTHON_VERSION, get_major_minor_py_version, _is_in_ipython_notebook\nfrom mlflow.utils.annotations import deprecated\nfrom mlflow.utils.file_utils import _copy_file_or_tree, write_to\nfrom mlflow.utils.model_utils import (\n _get_flavor_configuration,\n _validate_and_copy_code_paths,\n _add_code_from_conf_to_system_path,\n _get_flavor_configuration_from_uri,\n _validate_and_prepare_target_save_path,\n)\nfrom mlflow.utils.uri import append_to_uri_path\nfrom mlflow.utils.environment import (\n _validate_env_arguments,\n _process_pip_requirements,\n _process_conda_env,\n _CONDA_ENV_FILE_NAME,\n _REQUIREMENTS_FILE_NAME,\n _CONSTRAINTS_FILE_NAME,\n _PYTHON_ENV_FILE_NAME,\n _PythonEnv,\n)\nfrom mlflow.utils import env_manager as _EnvManager\nfrom mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS\nfrom mlflow.utils.databricks_utils import is_in_databricks_runtime\nfrom mlflow.utils.file_utils import get_or_create_tmp_dir, get_or_create_nfs_tmp_dir\nfrom mlflow.utils.process import cache_return_value_per_process\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\nfrom mlflow.protos.databricks_pb2 import (\n INVALID_PARAMETER_VALUE,\n RESOURCE_DOES_NOT_EXIST,\n)\nfrom scipy.sparse import csc_matrix, csr_matrix\nfrom mlflow.utils.requirements_utils import (\n _check_requirement_satisfied,\n _parse_requirements,\n)\nfrom mlflow.utils import find_free_port\nfrom mlflow.utils.nfs_on_spark import get_nfs_cache_root_dir\n\nFLAVOR_NAME = \"python_function\"\nMAIN = \"loader_module\"\nCODE = \"code\"\nDATA = \"data\"\nENV = \"env\"\nPY_VERSION = \"python_version\"\n\n_logger = logging.getLogger(__name__)\nPyFuncInput = Union[pandas.DataFrame, np.ndarray, csc_matrix, csr_matrix, List[Any], Dict[str, Any]]\nPyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]\n\n\ndef add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):\n \"\"\"\n Add a ``pyfunc`` spec to the model configuration.\n\n Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model\n flavor out of an existing directory structure. 
For example, other model flavors can use this to\n specify how to use their output as a ``pyfunc``.\n\n NOTE:\n\n All paths are relative to the exported model root directory.\n\n :param model: Existing model.\n :param loader_module: The module to be used to load the model.\n :param data: Path to the model data.\n :param code: Path to the code dependencies.\n :param env: Conda environment.\n :param req: pip requirements file.\n :param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.\n Values must be YAML-serializable.\n :return: Updated model configuration.\n \"\"\"\n params = deepcopy(kwargs)\n params[MAIN] = loader_module\n params[PY_VERSION] = PYTHON_VERSION\n if code:\n params[CODE] = code\n if data:\n params[DATA] = data\n if env:\n params[ENV] = env\n\n return model.add_flavor(FLAVOR_NAME, **params)\n\n\ndef _load_model_env(path):\n \"\"\"\n Get ENV file string from a model configuration stored in Python Function format.\n Returned value is a model-relative path to a Conda Environment file,\n or None if none was specified at model save time\n \"\"\"\n return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None)\n\n\ndef _enforce_mlflow_datatype(name, values: pandas.Series, t: DataType):\n \"\"\"\n Enforce the input column type matches the declared in model input schema.\n\n The following type conversions are allowed:\n\n 1. object -> string\n 2. int -> long (upcast)\n 3. float -> double (upcast)\n 4. int -> double (safe conversion)\n 5. np.datetime64[x] -> datetime (any precision)\n 6. object -> datetime\n\n Any other type mismatch will raise error.\n \"\"\"\n if values.dtype == object and t not in (DataType.binary, DataType.string):\n values = values.infer_objects()\n\n if t == DataType.string and values.dtype == object:\n # NB: the object can contain any type and we currently cannot cast to pandas Strings\n # due to how None is cast\n return values\n\n # NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand\n # side of the comparison operator. It works, however, if pandas type is on the left hand side.\n # That is because pandas is aware of numpy.\n if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:\n # The types are already compatible => conversion is not necessary.\n return values\n\n if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:\n # NB: bytes in numpy have variable itemsize depending on the length of the longest\n # element in the array (column). Since MLflow binary type is length agnostic, we ignore\n # itemsize when matching binary columns.\n return values\n\n if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:\n # NB: datetime values have variable precision denoted by brackets, e.g. datetime64[ns]\n # denotes nanosecond precision. Since MLflow datetime type is precision agnostic, we\n # ignore precision when matching datetime columns.\n return values\n\n if t == DataType.datetime and values.dtype == object:\n # NB: Pyspark date columns get converted to object when converted to a pandas\n # DataFrame. 
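The upcast rules above hinge on comparing dtype widths. A small standalone sketch of the same widening check with plain numpy and pandas:

import numpy as np
import pandas as pd

# int -> long is widening (itemsize grows), so it is treated as a safe upcast.
ints = pd.Series([1, 2, 3], dtype=np.int32)
assert ints.dtype.itemsize <= np.dtype(np.int64).itemsize
longs = ints.astype(np.int64, errors="raise")

# double -> float would shrink itemsize and silently lose precision,
# so that direction is rejected.
doubles = pd.Series([0.1, 0.2], dtype=np.float64)
assert doubles.dtype.itemsize > np.dtype(np.float32).itemsize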
To respect the original typing, we convert the column to datetime.\n try:\n return values.astype(np.datetime64, errors=\"raise\")\n except ValueError:\n raise MlflowException(\n \"Failed to convert column {0} from type {1} to {2}.\".format(name, values.dtype, t)\n )\n\n numpy_type = t.to_numpy()\n if values.dtype.kind == numpy_type.kind:\n is_upcast = values.dtype.itemsize <= numpy_type.itemsize\n elif values.dtype.kind == \"u\" and numpy_type.kind == \"i\":\n is_upcast = values.dtype.itemsize < numpy_type.itemsize\n elif values.dtype.kind in (\"i\", \"u\") and numpy_type == np.float64:\n # allow (u)int => double conversion\n is_upcast = values.dtype.itemsize <= 6\n else:\n is_upcast = False\n\n if is_upcast:\n return values.astype(numpy_type, errors=\"raise\")\n else:\n # NB: conversion between incompatible types (e.g. floats -> ints or\n # double -> float) are not allowed. While supported by pandas and numpy,\n # these conversions alter the values significantly.\n def all_ints(xs):\n return all(pandas.isnull(x) or int(x) == x for x in xs)\n\n hint = \"\"\n if (\n values.dtype == np.float64\n and numpy_type.kind in (\"i\", \"u\")\n and values.hasnans\n and all_ints(values)\n ):\n hint = (\n \" Hint: the type mismatch is likely caused by missing values. \"\n \"Integer columns in python can not represent missing values and are therefore \"\n \"encoded as floats. The best way to avoid this problem is to infer the model \"\n \"schema based on a realistic data sample (training dataset) that includes missing \"\n \"values. Alternatively, you can declare integer columns as doubles (float64) \"\n \"whenever these columns may have missing values. See `Handling Integers With \"\n \"Missing Values <https://www.mlflow.org/docs/latest/models.html#\"\n \"handling-integers-with-missing-values>`_ for more details.\"\n )\n\n raise MlflowException(\n \"Incompatible input types for column {0}. 
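The situation that hint describes is easy to reproduce: pandas promotes an integer column to ``float64`` as soon as it contains a missing value, which is exactly what the ``all_ints`` check detects:

import numpy as np
import pandas as pd

# An integer column with a missing value is silently promoted to float64.
col = pd.Series([1, 2, None])
assert col.dtype == np.float64
# Every non-null entry is still integral, so the mismatch is most likely
# caused by the missing value, and the hint above fires.
assert all(pd.isnull(x) or int(x) == x for x in col)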
\"\n \"Can not safely convert {1} to {2}.{3}\".format(name, values.dtype, numpy_type, hint)\n )\n\n\ndef _enforce_tensor_spec(\n values: Union[np.ndarray, csc_matrix, csr_matrix], tensor_spec: TensorSpec\n):\n \"\"\"\n Enforce the input tensor shape and type matches the provided tensor spec.\n \"\"\"\n expected_shape = tensor_spec.shape\n actual_shape = values.shape\n\n actual_type = values.dtype if isinstance(values, np.ndarray) else values.data.dtype\n\n if len(expected_shape) != len(actual_shape):\n raise MlflowException(\n \"Shape of input {0} does not match expected shape {1}.\".format(\n actual_shape, expected_shape\n )\n )\n for expected, actual in zip(expected_shape, actual_shape):\n if expected == -1:\n continue\n if expected != actual:\n raise MlflowException(\n \"Shape of input {0} does not match expected shape {1}.\".format(\n actual_shape, expected_shape\n )\n )\n if clean_tensor_type(actual_type) != tensor_spec.type:\n raise MlflowException(\n \"dtype of input {0} does not match expected dtype {1}\".format(\n values.dtype, tensor_spec.type\n )\n )\n return values\n\n\ndef _enforce_col_schema(pfInput: PyFuncInput, input_schema: Schema):\n \"\"\"Enforce the input columns conform to the model's column-based signature.\"\"\"\n if input_schema.has_input_names():\n input_names = input_schema.input_names()\n else:\n input_names = pfInput.columns[: len(input_schema.inputs)]\n input_types = input_schema.input_types()\n new_pfInput = pandas.DataFrame()\n for i, x in enumerate(input_names):\n new_pfInput[x] = _enforce_mlflow_datatype(x, pfInput[x], input_types[i])\n return new_pfInput\n\n\ndef _enforce_tensor_schema(pfInput: PyFuncInput, input_schema: Schema):\n \"\"\"Enforce the input tensor(s) conforms to the model's tensor-based signature.\"\"\"\n if input_schema.has_input_names():\n if isinstance(pfInput, dict):\n new_pfInput = dict()\n for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):\n if not isinstance(pfInput[col_name], np.ndarray):\n raise MlflowException(\n \"This model contains a tensor-based model signature with input names,\"\n \" which suggests a dictionary input mapping input name to a numpy\"\n \" array, but a dict with value type {0} was found.\".format(\n type(pfInput[col_name])\n )\n )\n new_pfInput[col_name] = _enforce_tensor_spec(pfInput[col_name], tensor_spec)\n elif isinstance(pfInput, pandas.DataFrame):\n new_pfInput = dict()\n for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):\n new_pfInput[col_name] = _enforce_tensor_spec(\n np.array(pfInput[col_name], dtype=tensor_spec.type), tensor_spec\n )\n else:\n raise MlflowException(\n \"This model contains a tensor-based model signature with input names, which\"\n \" suggests a dictionary input mapping input name to tensor, but an input of\"\n \" type {0} was found.\".format(type(pfInput))\n )\n else:\n if isinstance(pfInput, pandas.DataFrame):\n new_pfInput = _enforce_tensor_spec(pfInput.to_numpy(), input_schema.inputs[0])\n elif isinstance(pfInput, (np.ndarray, csc_matrix, csr_matrix)):\n new_pfInput = _enforce_tensor_spec(pfInput, input_schema.inputs[0])\n else:\n raise MlflowException(\n \"This model contains a tensor-based model signature with no input names,\"\n \" which suggests a numpy array input, but an input of type {0} was\"\n \" found.\".format(type(pfInput))\n )\n return new_pfInput\n\n\ndef _enforce_schema(pfInput: PyFuncInput, input_schema: Schema):\n \"\"\"\n Enforces the provided input matches the model's input schema,\n\n For signatures 
with input names, we check there are no missing inputs and reorder the inputs to\n match the ordering declared in schema if necessary. Any extra columns are ignored.\n\n For column-based signatures, we make sure the types of the input match the type specified in\n the schema or if it can be safely converted to match the input schema.\n\n For tensor-based signatures, we make sure the shape and type of the input matches the shape\n and type specified in model's input schema.\n \"\"\"\n if not input_schema.is_tensor_spec():\n if isinstance(pfInput, (list, np.ndarray, dict)):\n try:\n pfInput = pandas.DataFrame(pfInput)\n except Exception as e:\n raise MlflowException(\n \"This model contains a column-based signature, which suggests a DataFrame\"\n \" input. There was an error casting the input data to a DataFrame:\"\n \" {0}\".format(str(e))\n )\n if not isinstance(pfInput, pandas.DataFrame):\n raise MlflowException(\n \"Expected input to be DataFrame or list. Found: %s\" % type(pfInput).__name__\n )\n\n if input_schema.has_input_names():\n # make sure there are no missing columns\n input_names = input_schema.input_names()\n expected_cols = set(input_names)\n actual_cols = set()\n if len(expected_cols) == 1 and isinstance(pfInput, np.ndarray):\n # for schemas with a single column, match input with column\n pfInput = {input_names[0]: pfInput}\n actual_cols = expected_cols\n elif isinstance(pfInput, pandas.DataFrame):\n actual_cols = set(pfInput.columns)\n elif isinstance(pfInput, dict):\n actual_cols = set(pfInput.keys())\n missing_cols = expected_cols - actual_cols\n extra_cols = actual_cols - expected_cols\n # Preserve order from the original columns, since missing/extra columns are likely to\n # be in same order.\n missing_cols = [c for c in input_names if c in missing_cols]\n extra_cols = [c for c in actual_cols if c in extra_cols]\n if missing_cols:\n raise MlflowException(\n \"Model is missing inputs {0}.\"\n \" Note that there were extra inputs: {1}\".format(missing_cols, extra_cols)\n )\n elif not input_schema.is_tensor_spec():\n # The model signature does not specify column names => we can only verify column count.\n num_actual_columns = len(pfInput.columns)\n if num_actual_columns < len(input_schema.inputs):\n raise MlflowException(\n \"Model inference is missing inputs. The model signature declares \"\n \"{0} inputs but the provided value only has \"\n \"{1} inputs. Note: the inputs were not named in the signature so we can \"\n \"only verify their count.\".format(len(input_schema.inputs), num_actual_columns)\n )\n\n return (\n _enforce_tensor_schema(pfInput, input_schema)\n if input_schema.is_tensor_spec()\n else _enforce_col_schema(pfInput, input_schema)\n )\n\n\nclass PyFuncModel:\n \"\"\"\n MLflow 'python function' model.\n\n Wrapper around model implementation and metadata. This class is not meant to be constructed\n directly. 
Instead, instances of this class are constructed and returned from\n :py:func:`load_model() <mlflow.pyfunc.load_model>`.\n\n ``model_impl`` can be any Python object that implements the `Pyfunc interface\n <https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-inference-api>`_, and is\n returned by invoking the model's ``loader_module``.\n\n ``model_meta`` contains model metadata loaded from the MLmodel file.\n \"\"\"\n\n def __init__(self, model_meta: Model, model_impl: Any):\n if not hasattr(model_impl, \"predict\"):\n raise MlflowException(\"Model implementation is missing required predict method.\")\n if not model_meta:\n raise MlflowException(\"Model is missing metadata.\")\n self._model_meta = model_meta\n self._model_impl = model_impl\n\n def predict(self, data: PyFuncInput) -> PyFuncOutput:\n \"\"\"\n Generate model predictions.\n\n If the model contains a signature, enforce the input schema first before calling the model\n implementation with the sanitized input. If the pyfunc model does not include a model schema,\n the input is passed to the model implementation as is. See `Model Signature Enforcement\n <https://www.mlflow.org/docs/latest/models.html#signature-enforcement>`_ for more details.\n\n :param data: Model input as one of pandas.DataFrame, numpy.ndarray,\n scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], or\n Dict[str, numpy.ndarray]\n :return: Model predictions as one of pandas.DataFrame, pandas.Series, numpy.ndarray, or list.\n \"\"\"\n input_schema = self.metadata.get_input_schema()\n if input_schema is not None:\n data = _enforce_schema(data, input_schema)\n return self._model_impl.predict(data)\n\n @property\n def metadata(self):\n \"\"\"Model metadata.\"\"\"\n if self._model_meta is None:\n raise MlflowException(\"Model is missing metadata.\")\n return self._model_meta\n\n def __repr__(self):\n info = {}\n if self._model_meta is not None:\n if hasattr(self._model_meta, \"run_id\") and self._model_meta.run_id is not None:\n info[\"run_id\"] = self._model_meta.run_id\n if (\n hasattr(self._model_meta, \"artifact_path\")\n and self._model_meta.artifact_path is not None\n ):\n info[\"artifact_path\"] = self._model_meta.artifact_path\n info[\"flavor\"] = self._model_meta.flavors[FLAVOR_NAME][\"loader_module\"]\n return yaml.safe_dump({\"mlflow.pyfunc.loaded_model\": info}, default_flow_style=False)\n\n\ndef _warn_dependency_requirement_mismatches(model_path):\n \"\"\"\n Inspects the model's dependencies and logs a warning if the current Python environment\n doesn't satisfy them.\n \"\"\"\n req_file_path = os.path.join(model_path, _REQUIREMENTS_FILE_NAME)\n if not os.path.exists(req_file_path):\n return\n\n try:\n mismatch_infos = []\n for req in _parse_requirements(req_file_path, is_constraint=False):\n req_line = req.req_str\n mismatch_info = _check_requirement_satisfied(req_line)\n if mismatch_info is not None:\n mismatch_infos.append(str(mismatch_info))\n\n if len(mismatch_infos) > 0:\n mismatch_str = \" - \" + \"\\n - \".join(mismatch_infos)\n warning_msg = (\n \"Detected one or more mismatches between the model's dependencies and the current \"\n f\"Python environment:\\n{mismatch_str}\\n\"\n \"To fix the mismatches, call `mlflow.pyfunc.get_model_dependencies(model_uri)` \"\n \"to fetch the model's environment and install dependencies using the resulting \"\n \"environment file.\"\n )\n _logger.warning(warning_msg)\n\n except Exception as e:\n _logger.warning(\n f\"Encountered an unexpected error ({repr(e)}) while detecting model dependency 
\"\n \"mismatches. Set logging level to DEBUG to see the full traceback.\"\n )\n _logger.debug(\"\", exc_info=True)\n\n\ndef load_model(\n model_uri: str, suppress_warnings: bool = False, dst_path: str = None\n) -> PyFuncModel:\n \"\"\"\n Load a model stored in Python function format.\n\n :param model_uri: The location, in URI format, of the MLflow model. For example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n - ``mlflow-artifacts:/path/to/model``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model\n loading process will be suppressed. If ``False``, these warning\n messages will be emitted.\n :param dst_path: The local filesystem path to which to download the model artifact.\n This directory must already exist. If unspecified, a local output\n path will be created.\n \"\"\"\n local_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)\n\n if not suppress_warnings:\n _warn_dependency_requirement_mismatches(local_path)\n\n model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))\n\n conf = model_meta.flavors.get(FLAVOR_NAME)\n if conf is None:\n raise MlflowException(\n 'Model does not have the \"{flavor_name}\" flavor'.format(flavor_name=FLAVOR_NAME),\n RESOURCE_DOES_NOT_EXIST,\n )\n model_py_version = conf.get(PY_VERSION)\n if not suppress_warnings:\n _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)\n\n _add_code_from_conf_to_system_path(local_path, conf, code_key=CODE)\n data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path\n model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)\n return PyFuncModel(model_meta=model_meta, model_impl=model_impl)\n\n\ndef _download_model_conda_env(model_uri):\n conda_yml_file_name = _get_flavor_configuration_from_uri(model_uri, FLAVOR_NAME)[ENV]\n return _download_artifact_from_uri(append_to_uri_path(model_uri, conda_yml_file_name))\n\n\ndef _get_model_dependencies(model_uri, format=\"pip\"): # pylint: disable=redefined-builtin\n if format == \"pip\":\n req_file_uri = append_to_uri_path(model_uri, _REQUIREMENTS_FILE_NAME)\n try:\n return _download_artifact_from_uri(req_file_uri)\n except Exception as e:\n # fallback to download conda.yaml file and parse the \"pip\" section from it.\n _logger.info(\n f\"Downloading model '{_REQUIREMENTS_FILE_NAME}' file failed, error is {repr(e)}. \"\n \"Falling back to fetching pip requirements from the model's 'conda.yaml' file. 
\"\n \"Other conda dependencies will be ignored.\"\n )\n\n conda_yml_path = _download_model_conda_env(model_uri)\n\n with open(conda_yml_path, \"r\") as yf:\n conda_yml = yaml.safe_load(yf)\n\n conda_deps = conda_yml.get(\"dependencies\", [])\n for index, dep in enumerate(conda_deps):\n if isinstance(dep, dict) and \"pip\" in dep:\n pip_deps_index = index\n break\n else:\n raise MlflowException(\n \"No pip section found in conda.yaml file in the model directory.\",\n error_code=RESOURCE_DOES_NOT_EXIST,\n )\n\n pip_deps = conda_deps.pop(pip_deps_index)[\"pip\"]\n tmp_dir = tempfile.mkdtemp()\n pip_file_path = os.path.join(tmp_dir, _REQUIREMENTS_FILE_NAME)\n with open(pip_file_path, \"w\") as f:\n f.write(\"\\n\".join(pip_deps) + \"\\n\")\n\n if len(conda_deps) > 0:\n _logger.warning(\n \"The following conda dependencies have been excluded from the environment file:\"\n f\" {', '.join(conda_deps)}.\"\n )\n\n return pip_file_path\n\n elif format == \"conda\":\n conda_yml_path = _download_model_conda_env(model_uri)\n return conda_yml_path\n else:\n raise MlflowException(\n f\"Illegal format argument '{format}'.\", error_code=INVALID_PARAMETER_VALUE\n )\n\n\ndef get_model_dependencies(model_uri, format=\"pip\"): # pylint: disable=redefined-builtin\n \"\"\"\n :param model_uri: The uri of the model to get dependencies from.\n :param format: The format of the returned dependency file. If the ``\"pip\"`` format is\n specified, the path to a pip ``requirements.txt`` file is returned.\n If the ``\"conda\"`` format is specified, the path to a ``\"conda.yaml\"``\n file is returned . If the ``\"pip\"`` format is specified but the model\n was not saved with a ``requirements.txt`` file, the ``pip`` section\n of the model's ``conda.yaml`` file is extracted instead, and any\n additional conda dependencies are ignored. Default value is ``\"pip\"``.\n :return: The local filesystem path to either a pip ``requirements.txt`` file\n (if ``format=\"pip\"``) or a ``conda.yaml`` file (if ``format=\"conda\"``)\n specifying the model's dependencies.\n \"\"\"\n dep_file = _get_model_dependencies(model_uri, format)\n\n if format == \"pip\":\n prefix = \"%\" if _is_in_ipython_notebook() else \"\"\n _logger.info(\n \"To install the dependencies that were used to train the model, run the \"\n f\"following command: '{prefix}pip install -r {dep_file}'.\"\n )\n return dep_file\n\n\n@deprecated(\"mlflow.pyfunc.load_model\", 1.0)\ndef load_pyfunc(model_uri, suppress_warnings=False):\n \"\"\"\n Load a model stored in Python function format.\n\n :param model_uri: The location, in URI format, of the MLflow model. For example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n - ``mlflow-artifacts:/path/to/model``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model\n loading process will be suppressed. If ``False``, these warning\n messages will be emitted.\n \"\"\"\n return load_model(model_uri, suppress_warnings)\n\n\ndef _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):\n \"\"\"\n Compares the version of Python that was used to save a given model with the version\n of Python that is currently running. 
If a major or minor version difference is detected,\n logs an appropriate warning.\n \"\"\"\n if model_py_version is None:\n _logger.warning(\n \"The specified model does not have a specified Python version. It may be\"\n \" incompatible with the version of Python that is currently running: Python %s\",\n PYTHON_VERSION,\n )\n elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION):\n _logger.warning(\n \"The version of Python that the model was saved in, `Python %s`, differs\"\n \" from the version of Python that is currently running, `Python %s`,\"\n \" and may be incompatible\",\n model_py_version,\n PYTHON_VERSION,\n )\n\n\ndef _create_model_downloading_tmp_dir(should_use_nfs):\n if should_use_nfs:\n root_tmp_dir = get_or_create_nfs_tmp_dir()\n else:\n root_tmp_dir = get_or_create_tmp_dir()\n\n root_model_cache_dir = os.path.join(root_tmp_dir, \"models\")\n os.makedirs(root_model_cache_dir, exist_ok=True)\n\n tmp_model_dir = tempfile.mkdtemp(dir=root_model_cache_dir)\n # mkdtemp creates a directory with permission 0o700\n # change it to be 0o777 to ensure it can be seen in spark UDF\n os.chmod(tmp_model_dir, 0o777)\n return tmp_model_dir\n\n\n@cache_return_value_per_process\ndef _get_or_create_env_root_dir(should_use_nfs):\n if should_use_nfs:\n root_tmp_dir = get_or_create_nfs_tmp_dir()\n else:\n root_tmp_dir = get_or_create_tmp_dir()\n\n env_root_dir = os.path.join(root_tmp_dir, \"envs\")\n os.makedirs(env_root_dir, exist_ok=True)\n return env_root_dir\n\n\n_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200\n\n\ndef spark_udf(spark, model_uri, result_type=\"double\", env_manager=\"local\"):\n \"\"\"\n A Spark UDF that can be used to invoke the Python function formatted model.\n\n Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names\n are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to\n wrap the input in a struct. In that case, the data will be passed as a DataFrame with column\n names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model\n will get the data as a pandas DataFrame with 2 columns 'x' and 'y').\n\n If a model contains a signature, the UDF can be called without specifying column name\n arguments. In this case, the UDF will be called with column names from signature, so the\n evaluation dataframe's column names must match the model signature's column names.\n\n The predictions are filtered to contain only the columns that can be represented as the\n ``result_type``. If the ``result_type`` is string or array of strings, all predictions are\n converted to string. If the result type is not an array type, the left most column with\n matching type is returned.\n\n NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of\n Spark (2.4 and below).\n\n .. code-block:: python\n :caption: Example\n\n from pyspark.sql.functions import struct\n\n predict = mlflow.pyfunc.spark_udf(spark, \"/my/local/model\")\n df.withColumn(\"prediction\", predict(struct(\"name\", \"age\"))).show()\n\n :param spark: A SparkSession object.\n :param model_uri: The location, in URI format, of the MLflow model with the\n :py:mod:`mlflow.pyfunc` flavor. 
For example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n - ``mlflow-artifacts:/path/to/model``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :param result_type: the return type of the user-defined function. The value can be either a\n ``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive\n type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.\n The following classes of result type are supported:\n\n - \"int\" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an\n ``int32`` or an exception if there is none.\n\n - \"long\" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an\n ``int64`` or an exception if there is none.\n\n - ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested\n size.\n\n - \"float\" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to\n ``float32`` or an exception if there is none.\n\n - \"double\" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to\n ``double`` or an exception if there is none.\n\n - ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or\n an exception if there are no numeric columns.\n\n - \"string\" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.\n\n - ``ArrayType(StringType)``: All columns converted to ``string``.\n\n :param env_manager: The environment manager to use in order to create the python environment\n for model inference. Note that environment is only restored in the context\n of the PySpark UDF; the software environment outside of the UDF is\n unaffected. 
Default value is ``local``, and the following values are\n supported:\n\n - ``conda``: (Recommended) Use Conda to restore the software environment\n that was used to train the model.\n - ``virtualenv``: Use virtualenv to restore the python environment that\n was used to train the model.\n - ``local``: Use the current Python environment for model inference, which\n may differ from the environment used to train the model and may lead to\n errors or invalid predictions.\n\n :return: Spark UDF that applies the model's ``predict`` method to the data and returns a\n type specified by ``result_type``, which by default is a double.\n \"\"\"\n\n # Scope Spark import to this method so users don't need pyspark to use non-Spark-related\n # functionality.\n import functools\n from mlflow.pyfunc.spark_model_cache import SparkModelCache\n from mlflow.utils._spark_utils import _SparkDirectoryDistributor\n from pyspark.sql.functions import pandas_udf\n from pyspark.sql.types import _parse_datatype_string\n from pyspark.sql.types import (\n ArrayType,\n DataType as SparkDataType,\n StructType as SparkStructType,\n )\n from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType\n from mlflow.models.cli import _get_flavor_backend\n\n _EnvManager.validate(env_manager)\n\n # Check whether spark is in local or local-cluster mode\n # this case all executors and driver share the same filesystem\n is_spark_in_local_mode = spark.conf.get(\"spark.master\").startswith(\"local\")\n\n nfs_root_dir = get_nfs_cache_root_dir()\n should_use_nfs = nfs_root_dir is not None\n should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)\n env_root_dir = _get_or_create_env_root_dir(should_use_nfs)\n\n if not isinstance(result_type, SparkDataType):\n result_type = _parse_datatype_string(result_type)\n\n elem_type = result_type\n if isinstance(elem_type, ArrayType):\n elem_type = elem_type.elementType\n\n supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]\n\n if not any(isinstance(elem_type, x) for x in supported_types):\n raise MlflowException(\n message=\"Invalid result_type '{}'. Result type can only be one of or an array of one \"\n \"of the following types: {}\".format(str(elem_type), str(supported_types)),\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n local_model_path = _download_artifact_from_uri(\n artifact_uri=model_uri, output_path=_create_model_downloading_tmp_dir(should_use_nfs)\n )\n\n if env_manager == _EnvManager.LOCAL:\n # Assume spark executor python environment is the same with spark driver side.\n _warn_dependency_requirement_mismatches(local_model_path)\n _logger.warning(\n 'Calling `spark_udf()` with `env_manager=\"local\"` does not recreate the same '\n \"environment that was used during training, which may lead to errors or inaccurate \"\n 'predictions. We recommend specifying `env_manager=\"conda\"`, which automatically '\n \"recreates the environment that was used to train the model and performs inference \"\n \"in the recreated environment.\"\n )\n else:\n _logger.info(\n \"This UDF will use Conda to recreate the model's software environment for inference. 
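Putting the parameters above together, a hedged usage sketch; ``spark`` is assumed to be an active SparkSession, ``df`` an existing Spark DataFrame with the named feature columns, and the model URI a placeholder:

from pyspark.sql.functions import struct
import mlflow.pyfunc

predict = mlflow.pyfunc.spark_udf(
    spark,                      # assumed active SparkSession
    "models:/<model_name>/1",   # placeholder model URI
    result_type="double",
    env_manager="conda",        # recreate the training environment on executors
)
scored = df.withColumn("prediction", predict(struct("feature1", "feature2")))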
\"\n \"This may take extra time during execution.\"\n )\n if not sys.platform.startswith(\"linux\"):\n # TODO: support killing mlflow server launched in UDF task when spark job canceled\n # for non-linux system.\n # https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows\n _logger.warning(\n \"In order to run inference code in restored python environment, PySpark UDF \"\n \"processes spawn MLflow Model servers as child processes. Due to system \"\n \"limitations with handling SIGKILL signals, these MLflow Model server child \"\n \"processes cannot be cleaned up if the Spark Job is canceled.\"\n )\n\n if not should_use_spark_to_broadcast_file:\n # Prepare restored environment in driver side if possible.\n # Note: In databricks runtime, because databricks notebook cell output cannot capture\n # child process output, so that set capture_output to be True so that when `conda prepare\n # env` command failed, the exception message will include command stdout/stderr output.\n # Otherwise user have to check cluster driver log to find command stdout/stderr output.\n # In non-databricks runtime, set capture_output to be False, because the benefit of\n # \"capture_output=False\" is the output will be printed immediately, otherwise you have\n # to wait conda command fail and suddenly get all output printed (included in error\n # message).\n if env_manager != _EnvManager.LOCAL:\n _get_flavor_backend(\n local_model_path,\n env_manager=env_manager,\n install_mlflow=False,\n env_root_dir=env_root_dir,\n ).prepare_env(model_uri=local_model_path, capture_output=is_in_databricks_runtime())\n\n # Broadcast local model directory to remote worker if needed.\n if should_use_spark_to_broadcast_file:\n archive_path = SparkModelCache.add_local_model(spark, local_model_path)\n\n model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))\n\n def _predict_row_batch(predict_fn, args):\n input_schema = model_metadata.get_input_schema()\n pdf = None\n\n for x in args:\n if type(x) == pandas.DataFrame:\n if len(args) != 1:\n raise Exception(\n \"If passing a StructType column, there should be only one \"\n \"input column, but got %d\" % len(args)\n )\n pdf = x\n if pdf is None:\n args = list(args)\n if input_schema is None:\n names = [str(i) for i in range(len(args))]\n else:\n names = input_schema.input_names()\n if len(args) > len(names):\n args = args[: len(names)]\n if len(args) < len(names):\n raise MlflowException(\n \"Model input is missing columns. 
Expected {0} input columns {1},\"\n \" but the model received only {2} unnamed input columns\"\n \" (since the columns were passed unnamed they are expected to be in\"\n \" the order specified by the schema).\".format(len(names), names, len(args))\n )\n pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)\n\n result = predict_fn(pdf)\n\n if not isinstance(result, pandas.DataFrame):\n result = pandas.DataFrame(data=result)\n\n elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type\n\n if type(elem_type) == IntegerType:\n result = result.select_dtypes(\n [np.byte, np.ubyte, np.short, np.ushort, np.int32]\n ).astype(np.int32)\n elif type(elem_type) == LongType:\n result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])\n\n elif type(elem_type) == FloatType:\n result = result.select_dtypes(include=(np.number,)).astype(np.float32)\n\n elif type(elem_type) == DoubleType:\n result = result.select_dtypes(include=(np.number,)).astype(np.float64)\n\n if len(result.columns) == 0:\n raise MlflowException(\n message=\"The model did not produce any values compatible with the requested \"\n \"type '{}'. Consider requesting the udf with StringType or \"\n \"ArrayType(StringType).\".format(str(elem_type)),\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n if type(elem_type) == StringType:\n result = result.applymap(str)\n\n if type(result_type) == ArrayType:\n return pandas.Series(result.to_numpy().tolist())\n else:\n return result[result.columns[0]]\n\n result_type_hint = (\n pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series\n )\n\n @pandas_udf(result_type)\n def udf(\n iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]\n ) -> Iterator[result_type_hint]:\n # importing here to prevent circular import\n from mlflow.pyfunc.scoring_server.client import ScoringServerClient\n\n # Note: this is a pandas udf function in iteration style, which takes an iterator of\n # tuples of pandas.Series and outputs an iterator of pandas.Series.\n\n scoring_server_proc = None\n\n if env_manager != _EnvManager.LOCAL:\n if should_use_spark_to_broadcast_file:\n local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(\n archive_path\n )\n # Create individual conda_env_root_dir for each spark UDF task process.\n env_root_dir_on_executor = _get_or_create_env_root_dir(should_use_nfs)\n else:\n local_model_path_on_executor = local_model_path\n env_root_dir_on_executor = env_root_dir\n\n pyfunc_backend = _get_flavor_backend(\n local_model_path_on_executor,\n workers=1,\n install_mlflow=False,\n env_manager=env_manager,\n env_root_dir=env_root_dir_on_executor,\n )\n\n if should_use_spark_to_broadcast_file:\n # Call \"prepare_env\" in advance in order to reduce scoring server launch time, so\n # that a shorter timeout can be used when calling `client.wait_server_ready`.\n # Otherwise a long timeout would be required, preventing the spark UDF task from\n # failing fast when another exception is raised while the scoring server is\n # launching.\n # Set \"capture_output\" so that if the \"conda env create\" command fails, its\n # stdout/stderr output is attached to the exception message and included in the\n # driver-side exception.\n pyfunc_backend.prepare_env(\n model_uri=local_model_path_on_executor, capture_output=True\n )\n\n # launch scoring server\n server_port = find_free_port()\n scoring_server_proc = pyfunc_backend.serve(\n model_uri=local_model_path_on_executor,\n 
port=server_port,\n host=\"127.0.0.1\",\n timeout=60,\n enable_mlserver=False,\n synchronous=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n\n server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)\n\n def server_redirect_log_thread_func(child_stdout):\n for line in child_stdout:\n if isinstance(line, bytes):\n decoded = line.decode()\n else:\n decoded = line\n server_tail_logs.append(decoded)\n sys.stdout.write(\"[model server] \" + decoded)\n\n server_redirect_log_thread = threading.Thread(\n target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)\n )\n server_redirect_log_thread.setDaemon(True)\n server_redirect_log_thread.start()\n\n client = ScoringServerClient(\"127.0.0.1\", server_port)\n\n try:\n client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)\n except Exception:\n err_msg = \"During spark UDF task execution, mlflow model server failed to launch. \"\n if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:\n err_msg += (\n f\"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} \"\n \"lines of MLflow model server output:\\n\"\n )\n else:\n err_msg += \"MLflow model server output:\\n\"\n err_msg += \"\".join(server_tail_logs)\n raise MlflowException(err_msg)\n\n def batch_predict_fn(pdf):\n return client.invoke(pdf)\n\n elif env_manager == _EnvManager.LOCAL:\n if should_use_spark_to_broadcast_file:\n loaded_model, _ = SparkModelCache.get_or_load(archive_path)\n else:\n loaded_model = mlflow.pyfunc.load_model(local_model_path)\n\n def batch_predict_fn(pdf):\n return loaded_model.predict(pdf)\n\n try:\n for input_batch in iterator:\n # If the UDF is called with only multiple arguments,\n # the `input_batch` is a tuple which composes of several pd.Series/pd.DataFrame\n # objects.\n # If the UDF is called with only one argument,\n # the `input_batch` instance will be an instance of `pd.Series`/`pd.DataFrame`,\n if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):\n # UDF is called with only one argument\n row_batch_args = (input_batch,)\n else:\n row_batch_args = input_batch\n\n yield _predict_row_batch(batch_predict_fn, row_batch_args)\n finally:\n if scoring_server_proc is not None:\n os.kill(scoring_server_proc.pid, signal.SIGTERM)\n\n udf.metadata = model_metadata\n\n @functools.wraps(udf)\n def udf_with_default_cols(*args):\n if len(args) == 0:\n input_schema = model_metadata.get_input_schema()\n\n if input_schema and len(input_schema.inputs) > 0:\n if input_schema.has_input_names():\n input_names = input_schema.input_names()\n return udf(*input_names)\n else:\n raise MlflowException(\n message=\"Cannot apply udf because no column names specified. The udf \"\n \"expects {} columns with types: {}. 
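The UDF above is written in PySpark's iterator-style pandas UDF form so that expensive per-task setup (such as launching the scoring server) happens once per iterator rather than once per batch. A minimal standalone sketch of that style, independent of MLflow; it assumes an active SparkSession when registered:

from typing import Iterator
import pandas as pd
from pyspark.sql.functions import pandas_udf

@pandas_udf("double")
def plus_one(batches: Iterator[pd.Series]) -> Iterator[pd.Series]:
    # One-time setup (e.g. starting a model server) would go here,
    # before the loop, exactly as in the UDF above.
    for batch in batches:
        yield batch + 1.0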
Input column names could not be \"\n \"inferred from the model signature (column names not found).\".format(\n len(input_schema.inputs),\n input_schema.inputs,\n ),\n error_code=INVALID_PARAMETER_VALUE,\n )\n else:\n raise MlflowException(\n \"Attempting to apply udf on zero columns because no column names were \"\n \"specified as arguments or inferred from the model signature.\",\n error_code=INVALID_PARAMETER_VALUE,\n )\n else:\n return udf(*args)\n\n return udf_with_default_cols\n\n\n@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=\"scikit-learn\"))\ndef save_model(\n path,\n loader_module=None,\n data_path=None,\n code_path=None,\n conda_env=None,\n mlflow_model=None,\n python_model=None,\n artifacts=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n pip_requirements=None,\n extra_pip_requirements=None,\n **kwargs,\n):\n \"\"\"\n save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\\\n mlflow_model=Model(), python_model=None, artifacts=None)\n\n Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the\n local filesystem.\n\n For information about the workflows that this method supports, please see :ref:`\"workflows for\n creating custom pyfunc models\" <pyfunc-create-custom-workflows>` and\n :ref:`\"which workflow is right for my use case?\" <pyfunc-create-custom-selecting-workflow>`.\n Note that the parameters for the second workflow: ``loader_module``, ``data_path`` and the\n parameters for the first workflow: ``python_model``, ``artifacts``, cannot be\n specified together.\n\n :param path: The path to which to save the Python model.\n :param loader_module: The name of the Python module that is used to load the model\n from ``data_path``. This module must define a method with the prototype\n ``_load_pyfunc(data_path)``. If not ``None``, this module and its\n dependencies must be included in one of the following locations:\n\n - The MLflow library.\n - Package(s) listed in the model's Conda environment, specified by\n the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_path`` parameter.\n\n :param data_path: Path to a file or directory containing model data.\n :param code_path: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path before the model is loaded.\n :param conda_env: {{ conda_env }}\n :param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the\n **python_function** flavor.\n :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is\n serialized using the CloudPickle library. Any dependencies of the class\n should be included in one of the following locations:\n\n - The MLflow library.\n - Package(s) listed in the model's Conda environment, specified by\n the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_path`` parameter.\n\n Note: If the class is imported from another module, as opposed to being\n defined in the ``__main__`` scope, the defining module should also be\n included in one of the listed locations.\n :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs\n are resolved to absolute filesystem paths, producing a dictionary of\n ``<name, absolute_path>`` entries. 
``python_model`` can reference these\n resolved entries as the ``artifacts`` property of the ``context`` parameter\n in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`\n and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.\n For example, consider the following ``artifacts`` dictionary::\n\n {\n \"my_file\": \"s3://my-bucket/path/to/my/file\"\n }\n\n In this case, the ``\"my_file\"`` artifact is downloaded from S3. The\n ``python_model`` can then refer to ``\"my_file\"`` as an absolute filesystem\n path via ``context.artifacts[\"my_file\"]``.\n\n If ``None``, no artifacts are added to the model.\n\n :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column(\"target_label\")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example can be a Pandas DataFrame where the given\n example will be serialized to json using the Pandas split-oriented\n format, or a numpy array where the example will be serialized to json\n by converting it to a list. Bytes are base64-encoded.\n :param pip_requirements: {{ pip_requirements }}\n :param extra_pip_requirements: {{ extra_pip_requirements }}\n \"\"\"\n _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)\n\n mlflow_model = kwargs.pop(\"model\", mlflow_model)\n if len(kwargs) > 0:\n raise TypeError(\"save_model() got unexpected keyword arguments: {}\".format(kwargs))\n if code_path is not None:\n if not isinstance(code_path, list):\n raise TypeError(\"Argument code_path should be a list, not {}\".format(type(code_path)))\n\n first_argument_set = {\n \"loader_module\": loader_module,\n \"data_path\": data_path,\n }\n second_argument_set = {\n \"artifacts\": artifacts,\n \"python_model\": python_model,\n }\n first_argument_set_specified = any(item is not None for item in first_argument_set.values())\n second_argument_set_specified = any(item is not None for item in second_argument_set.values())\n if first_argument_set_specified and second_argument_set_specified:\n raise MlflowException(\n message=(\n \"The following sets of parameters cannot be specified together: {first_set_keys}\"\n \" and {second_set_keys}. All parameters in one set must be `None`. Instead, found\"\n \" the following values: {first_set_entries} and {second_set_entries}\".format(\n first_set_keys=first_argument_set.keys(),\n second_set_keys=second_argument_set.keys(),\n first_set_entries=first_argument_set,\n second_set_entries=second_argument_set,\n )\n ),\n error_code=INVALID_PARAMETER_VALUE,\n )\n elif (loader_module is None) and (python_model is None):\n msg = (\n \"Either `loader_module` or `python_model` must be specified. A `loader_module` \"\n \"should be a python module. 
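A sketch combining the first workflow with the ``artifacts`` mechanism described above; the S3 URI mirrors the docstring's example, and the class and path names are illustrative:

import pickle
import mlflow.pyfunc

class WrappedModel(mlflow.pyfunc.PythonModel):
    def load_context(self, context):
        # "my_file" has already been resolved to a local path by load time.
        with open(context.artifacts["my_file"], "rb") as f:
            self._model = pickle.load(f)

    def predict(self, context, model_input):
        return self._model.predict(model_input)

mlflow.pyfunc.save_model(
    path="wrapped_model",  # illustrative local path
    python_model=WrappedModel(),
    artifacts={"my_file": "s3://my-bucket/path/to/my/file"},
)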
A `python_model` should be a subclass of PythonModel\"\n )\n raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)\n\n _validate_and_prepare_target_save_path(path)\n if mlflow_model is None:\n mlflow_model = Model()\n if signature is not None:\n mlflow_model.signature = signature\n if input_example is not None:\n _save_example(mlflow_model, input_example, path)\n\n if first_argument_set_specified:\n return _save_model_with_loader_module_and_data_path(\n path=path,\n loader_module=loader_module,\n data_path=data_path,\n code_paths=code_path,\n conda_env=conda_env,\n mlflow_model=mlflow_model,\n pip_requirements=pip_requirements,\n extra_pip_requirements=extra_pip_requirements,\n )\n elif second_argument_set_specified:\n return mlflow.pyfunc.model._save_model_with_class_artifacts_params(\n path=path,\n python_model=python_model,\n artifacts=artifacts,\n conda_env=conda_env,\n code_paths=code_path,\n mlflow_model=mlflow_model,\n pip_requirements=pip_requirements,\n extra_pip_requirements=extra_pip_requirements,\n )\n\n\n@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=\"scikit-learn\"))\ndef log_model(\n artifact_path,\n loader_module=None,\n data_path=None,\n code_path=None,\n conda_env=None,\n python_model=None,\n artifacts=None,\n registered_model_name=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n pip_requirements=None,\n extra_pip_requirements=None,\n):\n \"\"\"\n Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow\n artifact for the current run.\n\n For information about the workflows that this method supports, see :ref:`Workflows for\n creating custom pyfunc models <pyfunc-create-custom-workflows>` and\n :ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`.\n You cannot specify the parameters for the second workflow: ``loader_module``, ``data_path``\n and the parameters for the first workflow: ``python_model``, ``artifacts`` together.\n\n :param artifact_path: The run-relative artifact path to which to log the Python model.\n :param loader_module: The name of the Python module that is used to load the model\n from ``data_path``. This module must define a method with the prototype\n ``_load_pyfunc(data_path)``. If not ``None``, this module and its\n dependencies must be included in one of the following locations:\n\n - The MLflow library.\n - Package(s) listed in the model's Conda environment, specified by\n the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_path`` parameter.\n\n :param data_path: Path to a file or directory containing model data.\n :param code_path: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path before the model is loaded.\n :param conda_env: {{ conda_env }}\n :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is\n serialized using the CloudPickle library. 
Any dependencies of the class\n should be included in one of the following locations:\n\n - The MLflow library.\n - Package(s) listed in the model's Conda environment, specified by\n the ``conda_env`` parameter.\n - One or more of the files specified by the ``code_path`` parameter.\n\n Note: If the class is imported from another module, as opposed to being\n defined in the ``__main__`` scope, the defining module should also be\n included in one of the listed locations.\n :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs\n are resolved to absolute filesystem paths, producing a dictionary of\n ``<name, absolute_path>`` entries. ``python_model`` can reference these\n resolved entries as the ``artifacts`` property of the ``context`` parameter\n in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`\n and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.\n For example, consider the following ``artifacts`` dictionary::\n\n {\n \"my_file\": \"s3://my-bucket/path/to/my/file\"\n }\n\n In this case, the ``\"my_file\"`` artifact is downloaded from S3. The\n ``python_model`` can then refer to ``\"my_file\"`` as an absolute filesystem\n path via ``context.artifacts[\"my_file\"]``.\n\n If ``None``, no artifacts are added to the model.\n :param registered_model_name: This argument may change or be removed in a\n future release without warning. If given, create a model\n version under ``registered_model_name``, also creating a\n registered model if one with the given name does not exist.\n\n :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column(\"target_label\")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example can be a Pandas DataFrame where the given\n example will be serialized to json using the Pandas split-oriented\n format, or a numpy array where the example will be serialized to json\n by converting it to a list. Bytes are base64-encoded.\n :param await_registration_for: Number of seconds to wait for the model version to finish\n being created and is in ``READY`` status. By default, the function\n waits for five minutes. 
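A hedged sketch of logging (rather than saving) a custom model, reusing the docstring's ``infer_signature`` example; ``train``, ``predictions``, the ``AddN`` class from the earlier sketch, and the registry name are all assumptions:

import mlflow
from mlflow.models.signature import infer_signature

signature = infer_signature(train, predictions)  # assumes these exist
with mlflow.start_run():
    model_info = mlflow.pyfunc.log_model(
        artifact_path="model",
        python_model=AddN(n=5),         # the toy model sketched earlier
        signature=signature,
        registered_model_name="add-n",  # illustrative registry name
    )
print(model_info.model_uri)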
Specify 0 or None to skip waiting.\n :param pip_requirements: {{ pip_requirements }}\n :param extra_pip_requirements: {{ extra_pip_requirements }}\n :return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the\n metadata of the logged model.\n \"\"\"\n return Model.log(\n artifact_path=artifact_path,\n flavor=mlflow.pyfunc,\n loader_module=loader_module,\n data_path=data_path,\n code_path=code_path,\n python_model=python_model,\n artifacts=artifacts,\n conda_env=conda_env,\n registered_model_name=registered_model_name,\n signature=signature,\n input_example=input_example,\n await_registration_for=await_registration_for,\n pip_requirements=pip_requirements,\n extra_pip_requirements=extra_pip_requirements,\n )\n\n\ndef _save_model_with_loader_module_and_data_path(\n path,\n loader_module,\n data_path=None,\n code_paths=None,\n conda_env=None,\n mlflow_model=None,\n pip_requirements=None,\n extra_pip_requirements=None,\n):\n \"\"\"\n Export model as a generic Python function model.\n :param path: The path to which to save the Python model.\n :param loader_module: The name of the Python module that is used to load the model\n from ``data_path``. This module must define a method with the prototype\n ``_load_pyfunc(data_path)``.\n :param data_path: Path to a file or directory containing model data.\n :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path before the model is loaded.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decsribes the environment\n this model should be run in.\n :return: Model configuration containing model info.\n \"\"\"\n\n data = None\n\n if data_path is not None:\n model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir=\"data\")\n data = model_file\n\n code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)\n\n if mlflow_model is None:\n mlflow_model = Model()\n\n mlflow.pyfunc.add_to_model(\n mlflow_model,\n loader_module=loader_module,\n code=code_dir_subpath,\n data=data,\n env=_CONDA_ENV_FILE_NAME,\n )\n mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))\n\n if conda_env is None:\n if pip_requirements is None:\n default_reqs = get_default_pip_requirements()\n # To ensure `_load_pyfunc` can successfully load the model during the dependency\n # inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.\n inferred_reqs = mlflow.models.infer_pip_requirements(\n path,\n FLAVOR_NAME,\n fallback=default_reqs,\n )\n default_reqs = sorted(set(inferred_reqs).union(default_reqs))\n else:\n default_reqs = None\n conda_env, pip_requirements, pip_constraints = _process_pip_requirements(\n default_reqs,\n pip_requirements,\n extra_pip_requirements,\n )\n else:\n conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)\n\n with open(os.path.join(path, _CONDA_ENV_FILE_NAME), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n # Save `constraints.txt` if necessary\n if pip_constraints:\n write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), \"\\n\".join(pip_constraints))\n\n # Save `requirements.txt`\n write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), \"\\n\".join(pip_requirements))\n\n _PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))\n return mlflow_model\n\n\nloader_template = \"\"\"\n\nimport importlib\nimport 
os\nimport sys\n\ndef load_pyfunc():\n {update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')\n\n\"\"\"\n" ]
[ [ "pandas.isnull", "numpy.array", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
pearsonlab/python-neo
[ "8915dfe9e55fd3a36be83d820bdd83ab085e9402" ]
[ "neo/io/neuroexplorerio.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nClass for reading data from NeuroExplorer (.nex)\n\nDocumentation for dev :\nhttp://www.neuroexplorer.com/code.html\n\nDepend on:\n\nSupported : Read\n\nAuthor: sgarcia,luc estebanez\n\n\"\"\"\n\nimport os\nimport struct\n\nimport numpy as np\nimport quantities as pq\n\nfrom neo.io.baseio import BaseIO\nfrom neo.core import Segment, AnalogSignal, SpikeTrain, Epoch, Event\n\n\nclass NeuroExplorerIO(BaseIO):\n \"\"\"\n Class for reading nex files.\n\n Usage:\n >>> from neo import io\n >>> r = io.NeuroExplorerIO(filename='File_neuroexplorer_1.nex')\n >>> seg = r.read_segment(lazy=False, cascade=True)\n >>> print seg.analogsignals # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n [<AnalogSignal(array([ 39.0625 , 0. , 0. , ...,\n >>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n [<SpikeTrain(array([ 2.29499992e-02, 6.79249987e-02, ...\n >>> print seg.events # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n [<Event: @21.1967754364 s, @21.2993755341 s, @21.350725174 s, ...\n >>> print seg.epochs # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n [<neo.core.epoch.Epoch object at 0x10561ba90>,\n <neo.core.epoch.Epoch object at 0x10561bad0>]\n \"\"\"\n\n is_readable = True\n is_writable = False\n\n supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]\n readable_objects = [Segment]\n writeable_objects = []\n\n has_header = False\n is_streameable = False\n\n # This is for GUI stuff: a definition for parameters when reading.\n read_params = {Segment: []}\n write_params = None\n\n name = 'NeuroExplorer'\n extensions = ['nex']\n\n mode = 'file'\n\n def __init__(self, filename=None):\n \"\"\"\n This class read a nex file.\n\n Arguments:\n filename: the filename to read\n \"\"\"\n BaseIO.__init__(self)\n self.filename = filename\n\n def read_segment(self, lazy=False, cascade=True):\n fid = open(self.filename, 'rb')\n global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)\n # ~ print globalHeader\n #~ print 'version' , globalHeader['version']\n seg = Segment()\n seg.file_origin = os.path.basename(self.filename)\n seg.annotate(neuroexplorer_version=global_header['version'])\n seg.annotate(comment=global_header['comment'])\n\n if not cascade:\n return seg\n\n offset = 544\n for i in range(global_header['nvar']):\n entity_header = HeaderReader(fid, EntityHeader).read_f(\n offset=offset + i * 208)\n entity_header['name'] = entity_header['name'].replace('\\x00', '')\n\n #print 'i',i, entityHeader['type']\n\n if entity_header['type'] == 0:\n # neuron\n if lazy:\n spike_times = [] * pq.s\n else:\n spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',\n shape=(entity_header['n']),\n offset=entity_header['offset'])\n spike_times = spike_times.astype('f8') / global_header[\n 'freq'] * pq.s\n sptr = SpikeTrain(\n times=spike_times,\n t_start=global_header['tbeg'] /\n global_header['freq'] * pq.s,\n t_stop=global_header['tend'] /\n global_header['freq'] * pq.s,\n name=entity_header['name'])\n if lazy:\n sptr.lazy_shape = entity_header['n']\n sptr.annotate(channel_index=entity_header['WireNumber'])\n seg.spiketrains.append(sptr)\n\n if entity_header['type'] == 1:\n # event\n if lazy:\n event_times = [] * pq.s\n else:\n event_times = np.memmap(self.filename, np.dtype('i4'), 'r',\n shape=(entity_header['n']),\n offset=entity_header['offset'])\n event_times = event_times.astype('f8') / global_header[\n 'freq'] * pq.s\n labels = np.array([''] * event_times.size, dtype='S')\n evar = Event(times=event_times, labels=labels,\n 
channel_name=entity_header['name'])\n if lazy:\n evar.lazy_shape = entity_header['n']\n seg.events.append(evar)\n\n if entity_header['type'] == 2:\n # interval\n if lazy:\n start_times = [] * pq.s\n stop_times = [] * pq.s\n else:\n start_times = np.memmap(self.filename, np.dtype('i4'), 'r',\n shape=(entity_header['n']),\n offset=entity_header['offset'])\n start_times = start_times.astype('f8') / global_header[\n 'freq'] * pq.s\n stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',\n shape=(entity_header['n']),\n offset=entity_header['offset'] +\n entity_header['n'] * 4)\n stop_times = stop_times.astype('f') / global_header[\n 'freq'] * pq.s\n epar = Epoch(times=start_times,\n durations=stop_times - start_times,\n labels=np.array([''] * start_times.size,\n dtype='S'),\n channel_name=entity_header['name'])\n if lazy:\n epar.lazy_shape = entity_header['n']\n seg.epochs.append(epar)\n\n if entity_header['type'] == 3:\n # spiketrain and wavefoms\n if lazy:\n spike_times = [] * pq.s\n waveforms = None\n else:\n\n spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',\n shape=(entity_header['n']),\n offset=entity_header['offset'])\n spike_times = spike_times.astype('f8') / global_header[\n 'freq'] * pq.s\n\n waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',\n shape=(entity_header['n'], 1,\n entity_header['NPointsWave']),\n offset=entity_header['offset'] +\n entity_header['n'] * 4)\n waveforms = (waveforms.astype('f') *\n entity_header['ADtoMV'] +\n entity_header['MVOffset']) * pq.mV\n t_stop = global_header['tend'] / global_header['freq'] * pq.s\n if spike_times.size > 0:\n t_stop = max(t_stop, max(spike_times))\n sptr = SpikeTrain(\n times=spike_times,\n t_start=global_header['tbeg'] /\n global_header['freq'] * pq.s,\n #~ t_stop = max(globalHeader['tend']/\n #~ globalHeader['freq']*pq.s,max(spike_times)),\n t_stop=t_stop, name=entity_header['name'],\n waveforms=waveforms,\n sampling_rate=entity_header['WFrequency'] * pq.Hz,\n left_sweep=0 * pq.ms)\n if lazy:\n sptr.lazy_shape = entity_header['n']\n sptr.annotate(channel_index=entity_header['WireNumber'])\n seg.spiketrains.append(sptr)\n\n if entity_header['type'] == 4:\n # popvectors\n pass\n\n if entity_header['type'] == 5:\n # analog\n timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',\n shape=(entity_header['n']),\n offset=entity_header['offset'])\n timestamps = timestamps.astype('f8') / global_header['freq']\n fragment_starts = np.memmap(self.filename, np.dtype('i4'), 'r',\n shape=(entity_header['n']),\n offset=entity_header['offset'])\n fragment_starts = fragment_starts.astype('f8') / global_header[\n 'freq']\n t_start = timestamps[0] - fragment_starts[0] / float(\n entity_header['WFrequency'])\n del timestamps, fragment_starts\n\n if lazy:\n signal = [] * pq.mV\n else:\n signal = np.memmap(self.filename, np.dtype('i2'), 'r',\n shape=(entity_header['NPointsWave']),\n offset=entity_header['offset'])\n signal = signal.astype('f')\n signal *= entity_header['ADtoMV']\n signal += entity_header['MVOffset']\n signal = signal * pq.mV\n\n ana_sig = AnalogSignal(\n signal=signal, t_start=t_start * pq.s,\n sampling_rate=entity_header['WFrequency'] * pq.Hz,\n name=entity_header['name'],\n channel_index=entity_header['WireNumber'])\n if lazy:\n ana_sig.lazy_shape = entity_header['NPointsWave']\n seg.analogsignals.append(ana_sig)\n\n if entity_header['type'] == 6:\n # markers : TO TEST\n if lazy:\n times = [] * pq.s\n labels = np.array([], dtype='S')\n markertype = None\n else:\n times = np.memmap(self.filename, np.dtype('i4'), 
'r',\n shape=(entity_header['n']),\n offset=entity_header['offset'])\n times = times.astype('f8') / global_header['freq'] * pq.s\n fid.seek(entity_header['offset'] + entity_header['n'] * 4)\n markertype = fid.read(64).replace('\\x00', '')\n labels = np.memmap(\n self.filename, np.dtype(\n 'S' + str(entity_header['MarkerLength'])),\n 'r', shape=(entity_header['n']),\n offset=entity_header['offset'] +\n entity_header['n'] * 4 + 64)\n ea = Event(times=times,\n labels=labels.view(np.ndarray),\n name=entity_header['name'],\n channel_index=entity_header['WireNumber'],\n marker_type=markertype)\n if lazy:\n ea.lazy_shape = entity_header['n']\n seg.events.append(ea)\n\n seg.create_many_to_one_relationship()\n return seg\n\n\nGlobalHeader = [\n ('signature', '4s'),\n ('version', 'i'),\n ('comment', '256s'),\n ('freq', 'd'),\n ('tbeg', 'i'),\n ('tend', 'i'),\n ('nvar', 'i'),\n]\n\nEntityHeader = [\n ('type', 'i'),\n ('varVersion', 'i'),\n ('name', '64s'),\n ('offset', 'i'),\n ('n', 'i'),\n ('WireNumber', 'i'),\n ('UnitNumber', 'i'),\n ('Gain', 'i'),\n ('Filter', 'i'),\n ('XPos', 'd'),\n ('YPos', 'd'),\n ('WFrequency', 'd'),\n ('ADtoMV', 'd'),\n ('NPointsWave', 'i'),\n ('NMarkers', 'i'),\n ('MarkerLength', 'i'),\n ('MVOffset', 'd'),\n ('dummy', '60s'),\n]\n\nMarkerHeader = [\n ('type', 'i'),\n ('varVersion', 'i'),\n ('name', '64s'),\n ('offset', 'i'),\n ('n', 'i'),\n ('WireNumber', 'i'),\n ('UnitNumber', 'i'),\n ('Gain', 'i'),\n ('Filter', 'i'),\n]\n\n\nclass HeaderReader():\n def __init__(self, fid, description):\n self.fid = fid\n self.description = description\n\n def read_f(self, offset=0):\n self.fid.seek(offset)\n d = {}\n for key, fmt in self.description:\n val = struct.unpack(fmt, self.fid.read(struct.calcsize(fmt)))\n if len(val) == 1:\n val = val[0]\n else:\n val = list(val)\n d[key] = val\n return d\n" ]
[ [ "numpy.array", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sam-cts/logistic_lda
[ "405caf212ba0def212feb82f8d9aaec1e491f735" ]
[ "logistic_lda/utils.py" ]
[ "\"\"\"\nCopyright 2019 Twitter, Inc.\nLicensed under the Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef softmax_cross_entropy(targets, logits):\n \"\"\"\n Implements a simple softmax cross entropy.\n\n $$-\\sum_i t_{ni} \\cdot (l_{ni} - \\ln \\sum_j \\exp l_{nj})$$\n\n Targets can be arbitrary vectors and do not have to be one-hot encodings or normalized,\n unlike in some other implementations of cross-entropy.\n\n Args:\n targets: A float tensor of shape [B, K]\n logits: A float tensor of shape [B, K]\n\n Returns:\n A float tensor of shape [B]\n \"\"\"\n\n logprobs = logits - tf.reduce_logsumexp(logits, axis=1, keepdims=True)\n return -tf.reduce_sum(targets * logprobs, axis=1)\n\n\ndef create_table(keys, values=None, name=None):\n \"\"\"\n Creates a hash table which maps the given keys to integers.\n\n Args:\n keys: A list containing possible keys\n values: An list of corresponding values (optional)\n name: A name for the operation (optional)\n\n Returns:\n A `tf.contrib.lookup.HashTable` mapping keys to integers\n \"\"\"\n\n if values is None:\n values = np.arange(len(keys), dtype=np.int64)\n\n return tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(keys=keys, values=values), -1, name=name)\n" ]
[ [ "tensorflow.reduce_sum", "tensorflow.reduce_logsumexp", "tensorflow.contrib.lookup.KeyValueTensorInitializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
sguada/circuit_training
[ "220ca925c83cdc6e67181c305da577f305c602b3" ]
[ "circuit_training/learning/train_ppo_lib.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Circuit Training Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Sample training with distributed collection using a variable container.\"\"\"\n\nimport os\nimport time\n\nfrom absl import flags\nfrom absl import logging\n\nfrom circuit_training.learning import agent\nfrom circuit_training.learning import learner as learner_lib\n\n\nimport reverb\nimport tensorflow as tf\n\nfrom tf_agents.experimental.distributed import reverb_variable_container\nfrom tf_agents.replay_buffers import reverb_replay_buffer\nfrom tf_agents.train import learner as actor_learner\nfrom tf_agents.train import triggers\nfrom tf_agents.train.utils import spec_utils\nfrom tf_agents.train.utils import train_utils\nfrom tf_agents.utils import common\n\n\nflags.DEFINE_string('netlist_file', '',\n 'File path to the netlist file.')\nflags.DEFINE_string('init_placement', '',\n 'File path to the init placement file.')\nflags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),\n 'Root directory for writing logs/summaries/checkpoints.')\nflags.DEFINE_string('replay_buffer_server_address', None,\n 'Replay buffer server address.')\nflags.DEFINE_string('variable_container_server_address', None,\n 'Variable container server address.')\nflags.DEFINE_integer('num_iterations', 10000,\n 'Total number train/eval iterations to perform.')\nflags.DEFINE_integer(\n 'sequence_length', 134,\n 'The sequence length to estimate shuffle size. Depends on the environment.'\n 'Max horizon = T translates to sequence_length T+1 because of the '\n 'additional boundary step (last -> first).')\nflags.DEFINE_integer(\n 'num_episodes_per_iteration', 1024,\n 'This is the number of episodes we train on in each iteration.')\nflags.DEFINE_integer(\n 'global_batch_size', 1024,\n 'Global batch size across all replicas.')\n\nflags.DEFINE_integer(\n 'global_seed', 111,\n 'Used in env and weight initialization, does not impact action sampling.')\n\n\nFLAGS = flags.FLAGS\n\n\ndef train(\n root_dir,\n strategy,\n replay_buffer_server_address,\n variable_container_server_address,\n create_env_fn,\n sequence_length,\n # Training params\n # This is the per replica batch size. 
The global batch size can be computed\n # by this number multiplied by the number of replicas (8 in the case of 2x2\n # TPUs).\n per_replica_batch_size=32,\n num_epochs=4,\n num_iterations=10000,\n # This is the number of episodes we train on in each iteration.\n # num_episodes_per_iteration * epsisode_length * num_epochs =\n # global_step (number of gradient updates) * per_replica_batch_size *\n # num_replicas.\n num_episodes_per_iteration=1024,\n use_model_tpu=False):\n \"\"\"Trains a PPO agent.\"\"\"\n # Get the specs from the environment.\n env = create_env_fn()\n observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (\n spec_utils.get_tensor_specs(env))\n\n # Create the agent.\n with strategy.scope():\n train_step = train_utils.create_train_step()\n model_id = common.create_variable('model_id')\n\n logging.info('Using GRL agent networks.')\n static_features = env.wrapped_env().get_static_obs()\n tf_agent = agent.create_circuit_ppo_grl_agent(\n train_step,\n observation_tensor_spec,\n action_tensor_spec,\n time_step_tensor_spec,\n strategy,\n static_features=static_features,\n use_model_tpu=use_model_tpu)\n\n tf_agent.initialize()\n\n # Create the policy saver which saves the initial model now, then it\n # periodically checkpoints the policy weights.\n saved_model_dir = os.path.join(root_dir, actor_learner.POLICY_SAVED_MODEL_DIR)\n save_model_trigger = triggers.PolicySavedModelTrigger(\n saved_model_dir,\n tf_agent,\n train_step,\n start=-num_episodes_per_iteration,\n interval=num_episodes_per_iteration)\n\n # Create the variable container.\n variables = {\n reverb_variable_container.POLICY_KEY: tf_agent.collect_policy.variables(),\n reverb_variable_container.TRAIN_STEP_KEY: train_step,\n 'model_id': model_id,\n }\n variable_container = reverb_variable_container.ReverbVariableContainer(\n variable_container_server_address,\n table_names=[reverb_variable_container.DEFAULT_TABLE])\n variable_container.push(variables)\n\n # Create the replay buffer.\n reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(\n tf_agent.collect_data_spec,\n sequence_length=None,\n table_name='training_table',\n server_address=replay_buffer_server_address)\n\n # Initialize the dataset.\n def experience_dataset_fn():\n get_dtype = lambda x: x.dtype\n get_shape = lambda x: (None,) + x.shape\n shapes = tf.nest.map_structure(get_shape, tf_agent.collect_data_spec)\n dtypes = tf.nest.map_structure(get_dtype, tf_agent.collect_data_spec)\n\n dataset = reverb.TrajectoryDataset(\n server_address=replay_buffer_server_address,\n table='training_table',\n dtypes=dtypes,\n shapes=shapes,\n # Menger uses learner_iterations_per_call (256). Using 8 here instead\n # because we do not need that much data in the buffer (they have to be\n # filtered out for the next iteration anyways). 
The rule of thumb is\n # 2-3x batch_size.\n max_in_flight_samples_per_worker=8,\n num_workers_per_iterator=-1,\n max_samples_per_stream=-1,\n rate_limiter_timeout_ms=-1,\n )\n\n def broadcast_info(info_traj):\n # Assumes that the first element of traj is shaped\n # (sequence_length, ...); and we extract this length.\n info, traj = info_traj\n first_elem = tf.nest.flatten(traj)[0]\n length = first_elem.shape[0] or tf.shape(first_elem)[0]\n info = tf.nest.map_structure(lambda t: tf.repeat(t, [length]), info)\n return reverb.ReplaySample(info, traj)\n\n dataset = dataset.map(broadcast_info)\n return dataset\n\n # Create the learner.\n learning_triggers = [\n save_model_trigger,\n triggers.StepPerSecondLogTrigger(train_step, interval=1000),\n ]\n\n def per_sequence_fn(sample):\n # At this point, each sample data contains a sequence of trajectories.\n data, info = sample.data, sample.info\n data = tf_agent.preprocess_sequence(data)\n return data, info\n\n learner = learner_lib.CircuittrainingPPOLearner(\n root_dir,\n train_step,\n model_id,\n tf_agent,\n experience_dataset_fn,\n sequence_length,\n num_episodes_per_iteration=num_episodes_per_iteration,\n minibatch_size=per_replica_batch_size,\n shuffle_buffer_size=(num_episodes_per_iteration * sequence_length),\n triggers=learning_triggers,\n summary_interval=1000,\n strategy=strategy,\n num_epochs=num_epochs,\n per_sequence_fn=per_sequence_fn,\n )\n\n # Run the training loop.\n for i in range(num_iterations):\n step_val = train_step.numpy()\n logging.info('Training. Iteration: %d', i)\n start_time = time.time()\n learner.run()\n num_steps = train_step.numpy() - step_val\n run_time = time.time() - start_time\n logging.info('Steps per sec: %s', num_steps / run_time)\n logging.info('Pushing variables at model_id: %d', model_id.numpy())\n variable_container.push(variables)\n logging.info('clearing replay buffer')\n reverb_replay_train.clear()\n" ]
[ [ "tensorflow.repeat", "tensorflow.nest.flatten", "tensorflow.nest.map_structure", "tensorflow.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
troywinter/airflow
[ "ba66ba0d97941c55d9f00f66329a9d3c7ad673e7", "dfe8337ca2d3ed173d9ecc112938271519792c40" ]
[ "airflow/providers/salesforce/hooks/salesforce.py", "tests/providers/salesforce/hooks/test_salesforce.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\"\"\"\nThis module contains a Salesforce Hook which allows you to connect to your Salesforce instance,\nretrieve data from it, and write that data to a file for other uses.\n\n.. note:: this hook also relies on the simple_salesforce package:\n https://github.com/simple-salesforce/simple-salesforce\n\"\"\"\nimport logging\nimport time\n\nimport pandas as pd\nfrom simple_salesforce import Salesforce\n\nfrom airflow.hooks.base_hook import BaseHook\n\nlog = logging.getLogger(__name__)\n\n\nclass SalesforceHook(BaseHook):\n \"\"\"\n Create new connection to Salesforce and allows you to pull data out of SFDC and save it to a file.\n\n You can then use that file with other Airflow operators to move the data into another data source.\n\n :param conn_id: the name of the connection that has the parameters we need to connect to Salesforce.\n The connection should be type `http` and include a user's security token in the `Extras` field.\n :type conn_id: str\n\n .. note::\n For the HTTP connection type, you can include a\n JSON structure in the `Extras` field.\n We need a user's security token to connect to Salesforce.\n So we define it in the `Extras` field as `{\"security_token\":\"YOUR_SECURITY_TOKEN\"}`\n\n For sandbox mode, add `{\"domain\":\"test\"}` in the `Extras` field\n\n \"\"\"\n\n def __init__(self, conn_id):\n super().__init__()\n self.conn_id = conn_id\n self.conn = None\n\n def get_conn(self):\n \"\"\"\n Sign into Salesforce, only if we are not already signed in.\n \"\"\"\n if not self.conn:\n connection = self.get_connection(self.conn_id)\n extras = connection.extra_dejson\n self.conn = Salesforce(\n username=connection.login,\n password=connection.password,\n security_token=extras['security_token'],\n instance_url=connection.host,\n domain=extras.get('domain', None)\n )\n return self.conn\n\n def make_query(self, query, include_deleted=False, query_params=None):\n \"\"\"\n Make a query to Salesforce.\n\n :param query: The query to make to Salesforce.\n :type query: str\n :param include_deleted: True if the query should include deleted records.\n :type include_deleted: bool\n :param query_params: Additional optional arguments\n :type query_params: dict\n :return: The query result.\n :rtype: dict\n \"\"\"\n conn = self.get_conn()\n\n self.log.info(\"Querying for all objects\")\n query_params = query_params or {}\n query_results = conn.query_all(query, include_deleted=include_deleted, **query_params)\n\n self.log.info(\"Received results: Total size: %s; Done: %s\",\n query_results['totalSize'], query_results['done'])\n\n return query_results\n\n def describe_object(self, obj):\n \"\"\"\n Get the description of an object from Salesforce.\n This description is the object's schema and\n some extra 
metadata that Salesforce stores for each object.\n\n :param obj: The name of the Salesforce object that we are getting a description of.\n :type obj: str\n :return: the description of the Salesforce object.\n :rtype: dict\n \"\"\"\n conn = self.get_conn()\n\n return conn.__getattr__(obj).describe()\n\n def get_available_fields(self, obj):\n \"\"\"\n Get a list of all available fields for an object.\n\n :param obj: The name of the Salesforce object that we are getting a description of.\n :type obj: str\n :return: the names of the fields.\n :rtype: list(str)\n \"\"\"\n self.get_conn()\n\n obj_description = self.describe_object(obj)\n\n return [field['name'] for field in obj_description['fields']]\n\n def get_object_from_salesforce(self, obj, fields):\n \"\"\"\n Get all instances of the `object` from Salesforce.\n For each model, only get the fields specified in fields.\n\n All we really do underneath the hood is run:\n SELECT <fields> FROM <obj>;\n\n :param obj: The object name to get from Salesforce.\n :type obj: str\n :param fields: The fields to get from the object.\n :type fields: iterable\n :return: all instances of the object from Salesforce.\n :rtype: dict\n \"\"\"\n query = \"SELECT {} FROM {}\".format(\",\".join(fields), obj)\n\n self.log.info(\"Making query to Salesforce: %s\",\n query if len(query) < 30 else \" ... \".join([query[:15], query[-15:]]))\n\n return self.make_query(query)\n\n @classmethod\n def _to_timestamp(cls, column):\n \"\"\"\n Convert a column of a dataframe to UNIX timestamps if applicable\n\n :param column: A Series object representing a column of a dataframe.\n :type column: pandas.Series\n :return: a new series that maintains the same index as the original\n :rtype: pandas.Series\n \"\"\"\n # try and convert the column to datetimes\n # the column MUST have a four digit year somewhere in the string\n # there should be a better way to do this,\n # but just letting pandas try and convert every column without a format\n # caused it to convert floats as well\n # For example, a column of integers\n # between 0 and 10 are turned into timestamps\n # if the column cannot be converted,\n # just return the original column untouched\n try:\n column = pd.to_datetime(column)\n except ValueError:\n log.error(\"Could not convert field to timestamps: %s\", column.name)\n return column\n\n # now convert the newly created datetimes into timestamps\n # we have to be careful here\n # because NaT cannot be converted to a timestamp\n # so we have to return NaN\n converted = []\n for value in column:\n try:\n converted.append(value.timestamp())\n except (ValueError, AttributeError):\n converted.append(pd.np.NaN)\n\n return pd.Series(converted, index=column.index)\n\n def write_object_to_file(self,\n query_results,\n filename,\n fmt=\"csv\",\n coerce_to_timestamp=False,\n record_time_added=False):\n \"\"\"\n Write query results to file.\n\n Acceptable formats are:\n - csv:\n comma-separated-values file. This is the default format.\n - json:\n JSON array. 
Each element in the array is a different row.\n - ndjson:\n JSON array but each element is new-line delimited instead of comma delimited like in `json`\n\n This requires a significant amount of cleanup.\n Pandas doesn't handle output to CSV and json in a uniform way.\n This is especially painful for datetime types.\n Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.\n\n By default, this function will try and leave all values as they are represented in Salesforce.\n You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).\n This is can be greatly beneficial as it will make all of your datetime fields look the same,\n and makes it easier to work with in other database environments\n\n :param query_results: the results from a SQL query\n :type query_results: list of dict\n :param filename: the name of the file where the data should be dumped to\n :type filename: str\n :param fmt: the format you want the output in. Default: 'csv'\n :type fmt: str\n :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.\n False if you want them to be left in the same format as they were in Salesforce.\n Leaving the value as False will result in datetimes being strings. Default: False\n :type coerce_to_timestamp: bool\n :param record_time_added: True if you want to add a Unix timestamp field\n to the resulting data that marks when the data was fetched from Salesforce. Default: False\n :type record_time_added: bool\n :return: the dataframe that gets written to the file.\n :rtype: pandas.Dataframe\n \"\"\"\n fmt = fmt.lower()\n if fmt not in ['csv', 'json', 'ndjson']:\n raise ValueError(\"Format value is not recognized: {}\".format(fmt))\n\n df = self.object_to_df(query_results=query_results, coerce_to_timestamp=coerce_to_timestamp,\n record_time_added=record_time_added)\n\n # write the CSV or JSON file depending on the option\n # NOTE:\n # datetimes here are an issue.\n # There is no good way to manage the difference\n # for to_json, the options are an epoch or a ISO string\n # but for to_csv, it will be a string output by datetime\n # For JSON we decided to output the epoch timestamp in seconds\n # (as is fairly standard for JavaScript)\n # And for csv, we do a string\n if fmt == \"csv\":\n # there are also a ton of newline objects that mess up our ability to write to csv\n # we remove these newlines so that the output is a valid CSV format\n self.log.info(\"Cleaning data and writing to CSV\")\n possible_strings = df.columns[df.dtypes == \"object\"]\n df[possible_strings] = df[possible_strings].astype(str).apply(\n lambda x: x.str.replace(\"\\r\\n\", \"\").str.replace(\"\\n\", \"\")\n )\n # write the dataframe\n df.to_csv(filename, index=False)\n elif fmt == \"json\":\n df.to_json(filename, \"records\", date_unit=\"s\")\n elif fmt == \"ndjson\":\n df.to_json(filename, \"records\", lines=True, date_unit=\"s\")\n\n return df\n\n def object_to_df(self, query_results, coerce_to_timestamp=False,\n record_time_added=False):\n \"\"\"\n Export query results to dataframe.\n\n By default, this function will try and leave all values as they are represented in Salesforce.\n You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).\n This is can be greatly beneficial as it will make all of your datetime fields look the same,\n and makes it easier to work with in other database environments\n\n :param query_results: the results from a SQL query\n :type query_results: list 
of dict\n :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.\n False if you want them to be left in the same format as they were in Salesforce.\n Leaving the value as False will result in datetimes being strings. Default: False\n :type coerce_to_timestamp: bool\n :param record_time_added: True if you want to add a Unix timestamp field\n to the resulting data that marks when the data was fetched from Salesforce. Default: False\n :type record_time_added: bool\n :return: the dataframe.\n :rtype: pandas.Dataframe\n \"\"\"\n\n # this line right here will convert all integers to floats\n # if there are any None/np.nan values in the column\n # that's because None/np.nan cannot exist in an integer column\n # we should write all of our timestamps as FLOATS in our final schema\n df = pd.DataFrame.from_records(query_results, exclude=[\"attributes\"])\n\n df.columns = [column.lower() for column in df.columns]\n\n # convert columns with datetime strings to datetimes\n # not all strings will be datetimes, so we ignore any errors that occur\n # we get the object's definition at this point and only consider\n # features that are DATE or DATETIME\n if coerce_to_timestamp and df.shape[0] > 0:\n # get the object name out of the query results\n # it's stored in the \"attributes\" dictionary\n # for each returned record\n object_name = query_results[0]['attributes']['type']\n\n self.log.info(\"Coercing timestamps for: %s\", object_name)\n\n schema = self.describe_object(object_name)\n\n # possible columns that can be converted to timestamps\n # are the ones that are either date or datetime types\n # strings are too general and we risk unintentional conversion\n possible_timestamp_cols = [\n field['name'].lower()\n for field in schema['fields']\n if field['type'] in [\"date\", \"datetime\"] and field['name'].lower() in df.columns\n ]\n df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)\n\n if record_time_added:\n fetched_time = time.time()\n df[\"time_fetched_from_salesforce\"] = fetched_time\n\n return df\n", "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport unittest\nfrom unittest.mock import Mock, patch\n\nimport pandas as pd\nfrom simple_salesforce import Salesforce\n\nfrom airflow.models.connection import Connection\nfrom airflow.providers.salesforce.hooks.salesforce import SalesforceHook\n\n\nclass TestSalesforceHook(unittest.TestCase):\n def setUp(self):\n self.salesforce_hook = SalesforceHook(conn_id=\"conn_id\")\n\n def test_get_conn_exists(self):\n self.salesforce_hook.conn = Mock(spec=Salesforce)\n\n self.salesforce_hook.get_conn()\n\n self.assertIsNotNone(self.salesforce_hook.conn.return_value)\n\n @patch(\n \"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.get_connection\",\n return_value=Connection(\n login=\"username\", password=\"password\", extra='{\"security_token\": \"token\", \"domain\": \"test\"}'\n ),\n )\n @patch(\"airflow.providers.salesforce.hooks.salesforce.Salesforce\")\n def test_get_conn(self, mock_salesforce, mock_get_connection):\n self.salesforce_hook.get_conn()\n\n self.assertEqual(self.salesforce_hook.conn, mock_salesforce.return_value)\n mock_salesforce.assert_called_once_with(\n username=mock_get_connection.return_value.login,\n password=mock_get_connection.return_value.password,\n security_token=mock_get_connection.return_value.extra_dejson[\"security_token\"],\n instance_url=mock_get_connection.return_value.host,\n domain=mock_get_connection.return_value.extra_dejson.get(\"domain\", None),\n )\n\n @patch(\"airflow.providers.salesforce.hooks.salesforce.Salesforce\")\n def test_make_query(self, mock_salesforce):\n mock_salesforce.return_value.query_all.return_value = dict(totalSize=123, done=True)\n self.salesforce_hook.conn = mock_salesforce.return_value\n query = \"SELECT * FROM table\"\n\n query_results = self.salesforce_hook.make_query(query, include_deleted=True)\n\n mock_salesforce.return_value.query_all.assert_called_once_with(query, include_deleted=True)\n self.assertEqual(query_results, mock_salesforce.return_value.query_all.return_value)\n\n @patch(\"airflow.providers.salesforce.hooks.salesforce.Salesforce\")\n def test_describe_object(self, mock_salesforce):\n obj = \"obj_name\"\n mock_salesforce.return_value.__setattr__(obj, Mock(spec=Salesforce))\n self.salesforce_hook.conn = mock_salesforce.return_value\n\n obj_description = self.salesforce_hook.describe_object(obj)\n\n mock_salesforce.return_value.__getattr__(obj).describe.assert_called_once_with()\n self.assertEqual(obj_description, mock_salesforce.return_value.__getattr__(obj).describe.return_value)\n\n @patch(\"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.get_conn\")\n @patch(\n \"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.describe_object\",\n return_value={\"fields\": [{\"name\": \"field_1\"}, {\"name\": \"field_2\"}]},\n )\n def test_get_available_fields(self, mock_describe_object, mock_get_conn):\n obj = \"obj_name\"\n\n available_fields = self.salesforce_hook.get_available_fields(obj)\n\n mock_get_conn.assert_called_once_with()\n mock_describe_object.assert_called_once_with(obj)\n self.assertEqual(available_fields, [\"field_1\", \"field_2\"])\n\n @patch(\"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.make_query\")\n def test_get_object_from_salesforce(self, mock_make_query):\n salesforce_objects = self.salesforce_hook.get_object_from_salesforce(\n obj=\"obj_name\", fields=[\"field_1\", \"field_2\"]\n )\n\n mock_make_query.assert_called_once_with(\"SELECT 
field_1,field_2 FROM obj_name\")\n self.assertEqual(salesforce_objects, mock_make_query.return_value)\n\n def test_write_object_to_file_invalid_format(self):\n with self.assertRaises(ValueError):\n self.salesforce_hook.write_object_to_file(query_results=[], filename=\"test\", fmt=\"test\")\n\n @patch(\n \"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records\",\n return_value=pd.DataFrame({\"test\": [1, 2, 3], \"dict\": [None, None, {\"foo\": \"bar\"}]}),\n )\n def test_write_object_to_file_csv(self, mock_data_frame):\n mock_data_frame.return_value.to_csv = Mock()\n filename = \"test\"\n\n data_frame = self.salesforce_hook.write_object_to_file(query_results=[], filename=filename, fmt=\"csv\")\n\n mock_data_frame.return_value.to_csv.assert_called_once_with(filename, index=False)\n pd.testing.assert_frame_equal(\n data_frame, pd.DataFrame({\"test\": [1, 2, 3], \"dict\": [\"None\", \"None\", str({\"foo\": \"bar\"})]})\n )\n\n @patch(\n \"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.describe_object\",\n return_value={\"fields\": [{\"name\": \"field_1\", \"type\": \"date\"}]},\n )\n @patch(\n \"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records\",\n return_value=pd.DataFrame({\"test\": [1, 2, 3], \"field_1\": [\"2019-01-01\", \"2019-01-02\", \"2019-01-03\"]}),\n )\n def test_write_object_to_file_json_with_timestamp_conversion(self, mock_data_frame, mock_describe_object):\n mock_data_frame.return_value.to_json = Mock()\n filename = \"test\"\n obj_name = \"obj_name\"\n\n data_frame = self.salesforce_hook.write_object_to_file(\n query_results=[{\"attributes\": {\"type\": obj_name}}],\n filename=filename,\n fmt=\"json\",\n coerce_to_timestamp=True,\n )\n\n mock_describe_object.assert_called_once_with(obj_name)\n mock_data_frame.return_value.to_json.assert_called_once_with(filename, \"records\", date_unit=\"s\")\n pd.testing.assert_frame_equal(\n data_frame, pd.DataFrame({\"test\": [1, 2, 3], \"field_1\": [1.546301e09, 1.546387e09, 1.546474e09]})\n )\n\n @patch(\"airflow.providers.salesforce.hooks.salesforce.time.time\", return_value=1.23)\n @patch(\n \"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records\",\n return_value=pd.DataFrame({\"test\": [1, 2, 3]}),\n )\n def test_write_object_to_file_ndjson_with_record_time(self, mock_data_frame, mock_time):\n mock_data_frame.return_value.to_json = Mock()\n filename = \"test\"\n\n data_frame = self.salesforce_hook.write_object_to_file(\n query_results=[], filename=filename, fmt=\"ndjson\", record_time_added=True\n )\n\n mock_data_frame.return_value.to_json.assert_called_once_with(\n filename, \"records\", lines=True, date_unit=\"s\"\n )\n pd.testing.assert_frame_equal(\n data_frame,\n pd.DataFrame(\n {\n \"test\": [1, 2, 3],\n \"time_fetched_from_salesforce\": [\n mock_time.return_value,\n mock_time.return_value,\n mock_time.return_value,\n ],\n }\n ),\n )\n\n @patch(\n \"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.describe_object\",\n return_value={\"fields\": [{\"name\": \"field_1\", \"type\": \"date\"}]},\n )\n @patch(\n \"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records\",\n return_value=pd.DataFrame({\"test\": [1, 2, 3], \"field_1\": [\"2019-01-01\", \"2019-01-02\", \"2019-01-03\"]}),\n )\n def test_obect_to_df_with_timestamp_conversion(self, mock_data_frame, mock_describe_object):\n obj_name = \"obj_name\"\n\n data_frame = self.salesforce_hook.object_to_df(\n query_results=[{\"attributes\": {\"type\": obj_name}}],\n 
coerce_to_timestamp=True,\n )\n\n mock_describe_object.assert_called_once_with(obj_name)\n pd.testing.assert_frame_equal(\n data_frame, pd.DataFrame({\"test\": [1, 2, 3], \"field_1\": [1.546301e09, 1.546387e09, 1.546474e09]})\n )\n\n @patch(\"airflow.providers.salesforce.hooks.salesforce.time.time\", return_value=1.23)\n @patch(\n \"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records\",\n return_value=pd.DataFrame({\"test\": [1, 2, 3]}),\n )\n def test_object_to_df_with_record_time(self, mock_data_frame, mock_time):\n data_frame = self.salesforce_hook.object_to_df(\n query_results=[], record_time_added=True\n )\n\n pd.testing.assert_frame_equal(\n data_frame,\n pd.DataFrame(\n {\n \"test\": [1, 2, 3],\n \"time_fetched_from_salesforce\": [\n mock_time.return_value,\n mock_time.return_value,\n mock_time.return_value,\n ],\n }\n ),\n )\n" ]
[ [ "pandas.DataFrame.from_records", "pandas.to_datetime", "pandas.Series" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Lzc06/vega
[ "df51ed9c1d6dbde1deef63f2a037a369f8554406", "df51ed9c1d6dbde1deef63f2a037a369f8554406" ]
[ "vega/algorithms/nas/esr_ea/esr_search.py", "vega/search_space/networks/tensorflow/super_network/cars_darts.py" ]
[ "# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"search algorithm for ESR_EA.\"\"\"\nimport csv\nimport logging\nimport os\nfrom bisect import bisect_right\nfrom random import random, sample\nimport numpy as np\nimport pandas as pd\nfrom vega.core.common.general import General\nfrom .conf import ESRConfig\nfrom vega.core.common import FileOps\nfrom vega.core.common.class_factory import ClassFactory, ClassType\nfrom vega.search_space.search_algs.search_algorithm import SearchAlgorithm\nfrom .esr_ea_individual import ESRIndividual\n\n\[email protected](ClassType.SEARCH_ALGORITHM)\nclass ESRSearch(SearchAlgorithm):\n \"\"\"Evolutionary search algorithm of the efficient super-resolution.\"\"\"\n\n config = ESRConfig()\n\n def __init__(self, search_space=None, **kwargs):\n \"\"\"Construct the ESR EA search class.\n\n :param search_space: config of the search space\n :type search_space: dictionary\n \"\"\"\n super(ESRSearch, self).__init__(search_space, **kwargs)\n self.individual_num = self.config.policy.num_individual\n self.generation_num = self.config.policy.num_generation\n self.elitism_num = self.config.policy.num_elitism\n self.mutation_rate = self.config.policy.mutation_rate\n self.min_active = self.config.range.min_active\n self.max_params = self.config.range.max_params\n self.min_params = self.config.range.min_params\n\n self.indiv_count = 0\n self.evolution_count = 0\n self.initialize_pop()\n self.elitism = [ESRIndividual(self.codec) for _ in range(self.elitism_num)]\n self.elit_fitness = [0] * self.elitism_num\n self.fitness_pop = [0] * self.individual_num\n self.fit_state = [0] * self.individual_num\n\n @property\n def is_completed(self):\n \"\"\"Tell whether the search process is completed.\n\n :return: True is completed, or False otherwise\n :rtype: bool\n \"\"\"\n return self.indiv_count > self.generation_num * self.individual_num\n\n def update_fitness(self, evals):\n \"\"\"Update the fitness of each individual.\n\n :param evals: the evalution\n :type evals: list\n \"\"\"\n for i in range(self.individual_num):\n self.pop[i].update_fitness(evals[i])\n\n def update_elitism(self, evaluations):\n \"\"\"Update the elitism and its fitness.\n\n :param evaluations: evaluations result\n :type evaluations: list\n \"\"\"\n popu_all = [ESRIndividual(self.codec) for _ in range(self.elitism_num + self.individual_num)]\n for i in range(self.elitism_num + self.individual_num):\n if i < self.elitism_num:\n popu_all[i].copy(self.elitism[i])\n else:\n popu_all[i].copy(self.pop[i - self.elitism_num])\n fitness_all = self.elit_fitness + evaluations\n sorted_ind = sorted(range(len(fitness_all)), key=lambda k: fitness_all[k])\n for i in range(self.elitism_num):\n self.elitism[i].copy(popu_all[sorted_ind[len(fitness_all) - 1 - i]])\n self.elit_fitness[i] = fitness_all[sorted_ind[len(fitness_all) - 1 - i]]\n logging.info('Generation: {}, updated elitism fitness: {}'.format(self.evolution_count, self.elit_fitness))\n\n def _log_data(self, net_info_type='active_only', pop=None, value=0):\n \"\"\"Get the evolution and network information of children.\n\n :param net_info_type: defaults to 'active_only'\n 
:type net_info_type: str\n :param pop: defaults to None\n :type pop: list\n :param value: defaults to 0\n :type value: int\n :return: log_list\n :rtype: list\n \"\"\"\n log_list = [value, pop.parameter, pop.flops]\n if net_info_type == 'active_only':\n log_list.append(pop.active_net_list())\n elif net_info_type == 'full':\n log_list += pop.gene.flatten().tolist()\n else:\n pass\n return log_list\n\n def save_results(self):\n \"\"\"Save the results of evolution contains the information of pupulation and elitism.\"\"\"\n _path = FileOps.join_path(self.local_output_path, General.step_name)\n FileOps.make_dir(_path)\n arch_file = FileOps.join_path(_path, 'arch.txt')\n arch_child = FileOps.join_path(_path, 'arch_child.txt')\n sel_arch_file = FileOps.join_path(_path, 'selected_arch.npy')\n sel_arch = []\n with open(arch_file, 'a') as fw_a, open(arch_child, 'a') as fw_ac:\n writer_a = csv.writer(fw_a, lineterminator='\\n')\n writer_ac = csv.writer(fw_ac, lineterminator='\\n')\n writer_ac.writerow(['Population Iteration: ' + str(self.evolution_count + 1)])\n for c in range(self.individual_num):\n writer_ac.writerow(\n self._log_data(net_info_type='active_only', pop=self.pop[c],\n value=self.pop[c].fitness))\n\n writer_a.writerow(['Population Iteration: ' + str(self.evolution_count + 1)])\n for c in range(self.elitism_num):\n writer_a.writerow(self._log_data(net_info_type='active_only',\n pop=self.elitism[c],\n value=self.elit_fitness[c]))\n sel_arch.append(self.elitism[c].gene)\n sel_arch = np.stack(sel_arch)\n np.save(sel_arch_file, sel_arch)\n if self.backup_base_path is not None:\n FileOps.copy_folder(self.local_output_path, self.backup_base_path)\n\n def parent_select(self, parent_num=2, select_type='Tournament'):\n \"\"\"Select parent from a population with Tournament or Roulette.\n\n :param parent_num: number of parents\n :type parent_num: int\n :param select_type: select_type, defaults to 'Tournament'\n :type select_type: str\n :return: the selected parent individuals\n :rtype: list\n \"\"\"\n popu_all = [ESRIndividual(self.codec) for _ in range(self.elitism_num + self.individual_num)]\n parent = [ESRIndividual(self.codec) for _ in range(parent_num)]\n fitness_all = self.elit_fitness\n for i in range(self.elitism_num + self.individual_num):\n if i < self.elitism_num:\n popu_all[i].copy(self.elitism[i])\n else:\n popu_all[i].copy(self.pop[i - self.elitism_num])\n fitness_all = fitness_all + [popu_all[i].fitness]\n fitness_all = np.asarray(fitness_all)\n if select_type == 'Tournament':\n for i in range(parent_num):\n tourn = sample(range(len(popu_all)), 2)\n if fitness_all[tourn[0]] >= fitness_all[tourn[1]]:\n parent[i].copy(popu_all[tourn[0]])\n fitness_all[tourn[0]] = 0\n else:\n parent[i] = popu_all[tourn[1]]\n fitness_all[tourn[1]] = 0\n elif select_type == 'Roulette':\n eval_submean = fitness_all - np.min(fitness_all)\n eval_norm = eval_submean / sum(eval_submean)\n eva_threshold = np.cumsum(eval_norm)\n for i in range(parent_num):\n ran = random()\n selec_id = bisect_right(eva_threshold, ran)\n parent[i].copy(popu_all[selec_id])\n eval_submean[selec_id] = 0\n eval_norm = eval_submean / sum(eval_submean)\n eva_threshold = np.cumsum(eval_norm)\n else:\n logging.info('Wrong selection type')\n return parent\n\n def initialize_pop(self):\n \"\"\"Initialize the population of first generation.\"\"\"\n self.pop = [ESRIndividual(self.codec) for _ in range(self.individual_num)]\n for i in range(self.individual_num):\n while self.pop[i].active_num < self.min_active:\n 
self.pop[i].mutation_using(self.mutation_rate)\n while self.pop[i].parameter > self.max_params or self.pop[i].parameter < self.min_params:\n self.pop[i].mutation_node(self.mutation_rate)\n\n def get_mutate_child(self, muta_num):\n \"\"\"Generate the mutated children of the next offspring with mutation operation.\n\n :param muta_num: number of mutated children\n :type muta_num: int\n \"\"\"\n for i in range(muta_num):\n if int(self.individual_num / 2) == len(self.elitism):\n self.pop[i].copy(self.elitism[i])\n else:\n self.pop[i].copy(sample(self.elitism, 1)[0])\n self.pop[i].mutation_using(self.mutation_rate)\n while self.pop[i].active_num < self.min_active:\n self.pop[i].mutation_using(self.mutation_rate)\n self.pop[i].mutation_node(self.mutation_rate)\n while self.pop[i].parameter > self.max_params or self.pop[i].parameter < self.min_params:\n self.pop[i].mutation_node(self.mutation_rate)\n\n def get_cross_child(self, muta_num):\n \"\"\"Generate the children of the next offspring with crossover operation.\n\n :param muta_num: number of mutated children\n :type muta_num: int\n \"\"\"\n for i in range(int(self.individual_num / 4)):\n pop_id = muta_num + i * 2\n father, mother = self.parent_select(2, 'Roulette')\n length = np.random.randint(4, int(father.gene.shape[0] / 2))\n location = np.random.randint(0, father.gene.shape[0] - length)\n gene_1 = father.gene.copy()\n gene_2 = mother.gene.copy()\n gene_1[location:(location + length), :] = gene_2[location:(location + length), :]\n gene_2[location:(location + length), :] = father.gene[location:(location + length), :]\n self.pop[pop_id].update_gene(gene_1)\n self.pop[pop_id + 1].update_gene(gene_2)\n while self.pop[pop_id].active_num < self.min_active:\n self.pop[pop_id].mutation_using(self.mutation_rate)\n param = self.pop[pop_id].parameter\n while param > self.max_params or param < self.min_params:\n self.pop[pop_id].mutation_node(self.mutation_rate)\n param = self.pop[pop_id].parameter\n while self.pop[pop_id + 1].active_num < self.min_active:\n self.pop[pop_id + 1].mutation_using(self.mutation_rate)\n param = self.pop[pop_id + 1].parameter\n while param > self.max_params or param < self.min_params:\n self.pop[pop_id + 1].mutation_node(self.mutation_rate)\n param = self.pop[pop_id + 1].parameter\n\n def reproduction(self):\n \"\"\"Generate the new offsprings.\"\"\"\n muta_num = self.individual_num - (self.individual_num // 4) * 2\n self.get_mutate_child(muta_num)\n self.get_cross_child(muta_num)\n\n def update(self, record):\n \"\"\"Update function.\n\n :param local_worker_path: the local path that saved `performance.txt`.\n :type local_worker_path: str\n \"\"\"\n worker_id = record.get(\"worker_id\")\n performance = record.get(\"rewards\")\n self.fitness_pop[(worker_id - 1) % self.individual_num] = performance\n self.fit_state[(worker_id - 1) % self.individual_num] = 1\n\n def get_fitness(self):\n \"\"\"Get the evalutation of each individual.\n\n :return: a list of evaluations\n :rtype: list\n \"\"\"\n pd_path = os.path.join(self.local_output_path, 'population_fitness.csv')\n with open(pd_path, \"r\") as file:\n df = pd.read_csv(file)\n fitness_all = df['PSNR'].values\n fitness = fitness_all[fitness_all.size - self.individual_num:]\n return list(fitness)\n\n def search(self):\n \"\"\"Search one random model.\n\n :return: current number of samples, and the model\n :rtype: int and class\n \"\"\"\n if self.indiv_count > 0 and self.indiv_count % self.individual_num == 0:\n if np.sum(np.asarray(self.fit_state)) < self.individual_num:\n 
return\n else:\n self.update_fitness(self.fitness_pop)\n self.update_elitism(self.fitness_pop)\n self.save_results()\n self.reproduction()\n self.evolution_count += 1\n self.fitness_pop = [0] * self.individual_num\n self.fit_state = [0] * self.individual_num\n current_indiv = self.pop[self.indiv_count % self.individual_num]\n indiv_cfg = self.codec.decode(current_indiv)\n self.indiv_count += 1\n logging.info('model parameters:{}, model flops:{}'.format(current_indiv.parameter, current_indiv.flops))\n logging.info('model arch:{}'.format(current_indiv.active_net_list()))\n return self.indiv_count, indiv_cfg\n", "# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"SuperNet for CARS-DARTS.\"\"\"\nimport logging\nimport tensorflow as tf\nfrom vega.search_space.networks import NetworkFactory, NetTypes\nfrom vega.search_space.networks.tensorflow.super_network import DartsNetwork\nimport numpy as np\nimport copy\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](NetTypes.SUPER_NETWORK)\nclass CARSDartsNetwork(DartsNetwork):\n \"\"\"Base CARS-Darts Network of classification.\n\n :param desc: darts description\n :type desc: Config\n \"\"\"\n\n def __init__(self, desc, scope_name='CarsNetwork'):\n \"\"\"Init CARSDartsNetwork.\"\"\"\n super(CARSDartsNetwork, self).__init__(desc)\n self.scope_name = scope_name\n self.steps = self.desc.normal.steps\n self.num_ops = self.num_ops()\n self.len_alpha = self.len_alpha()\n\n def len_alpha(self):\n \"\"\"Get number of path.\"\"\"\n k_normal = len(self.desc.normal.genotype)\n return k_normal\n\n def num_ops(self):\n \"\"\"Get number of candidate operations.\"\"\"\n num_ops = len(self.desc.normal.genotype[0][0])\n return num_ops\n\n def __call__(self, input, alpha=None, training=True):\n \"\"\"Forward a model that specified by alpha.\n\n :param input: An input tensor\n :type input: Tensor\n \"\"\"\n stem_training = training\n if self.data_format == 'channels_first':\n input = tf.transpose(input, [0, 3, 1, 2])\n\n with tf.variable_scope(self.scope_name, reuse=tf.AUTO_REUSE) as scope:\n self.build_network()\n if self.search:\n training = True\n alphas_normal = alpha[:self.len_alpha]\n alphas_reduce = alpha[self.len_alpha:]\n s0, s1 = self.stem(input, training=stem_training)\n for i, cell in enumerate(self.cells):\n if self.search:\n if self.desc.network[i + 1] == 'reduce':\n weights = alphas_reduce\n else:\n weights = alphas_normal\n else:\n weights = None\n s0, s1 = s1, cell(s0, s1, training, weights, drop_prob=self.drop_path_prob)\n if not self.search:\n if self._auxiliary and i == self._auxiliary_layer:\n logits_aux = self.auxiliary_head(s1, training=training)\n out = tf.reduce_mean(s1, [-2, -1], keepdims=True)\n out = tf.reshape(out, [out.get_shape()[0], -1])\n logits = self.classifier(out, units=self._classes)\n if self._auxiliary and not self.search:\n return logits, logits_aux\n else:\n return logits\n" ]
[ [ "pandas.read_csv", "numpy.min", "numpy.asarray", "numpy.cumsum", "numpy.save", "numpy.stack", "numpy.random.randint" ], [ "tensorflow.variable_scope", "tensorflow.transpose", "tensorflow.reduce_mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
mwdchang/delphi
[ "c6177f2d614118883eaaa7f5300f3e46f10ddc7e" ]
[ "scripts/data_processing/process_climis_unicef_ieconomics_data.py" ]
[ "\"\"\" Script for cleaning data for 12 month evaluation. \"\"\"\n\nimport os\nimport re\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom pprint import pprint\nfrom glob import glob\nfrom typing import List, Dict\nfrom delphi.utils.shell import cd\nfrom delphi.paths import data_dir, south_sudan_data\nfrom delphi.utils.fp import grouper\nfrom functools import partial\nfrom itertools import groupby\n\ndef get_state_from_filename(filename, get_state_func):\n return \" \".join(re.findall(\"[A-Z][^A-Z]*\", get_state_func(filename)))\n\n\ndef process_file_with_single_table(\n filename, variable_name_func, get_state_func, country=\"South Sudan\"\n):\n records = []\n df = pd.read_csv(\n filename, index_col=0, names=range(12), header=0, skipinitialspace=True\n )\n for ind in df.index:\n for column in df.columns:\n record = {\n \"Variable\": variable_name_func(ind),\n \"Month\": column + 1,\n \"Value\": df.loc[ind][column],\n \"State\": get_state_from_filename(filename, get_state_func),\n \"Country\": country,\n }\n set_defaults(record)\n records.append(record)\n return records\n\n\ndef set_climis_south_sudan_default_params(\n filename, df, get_state_func=lambda x: x.split(\"_\")[-2]\n):\n df[\"Country\"] = \"South Sudan\"\n df[\"Source\"] = \"CLiMIS\"\n df[\"Year\"] = int(filename.split(\".\")[0].split(\"_\")[-1])\n df[\"State\"] = get_state_from_filename(filename, get_state_func)\n return df\n\n\ndef make_livestock_prices_table(filename):\n df = pd.read_csv(\n filename,\n index_col=[0, 1],\n header=0,\n names=[\"County\", \"Market\"] + list(range(1, 13)),\n skipinitialspace=True,\n thousands=\",\",\n )\n df = df.stack().reset_index(name=\"Value\")\n df.columns = [\"County\", \"Market\", \"Month\", \"Value\"]\n df = df.pivot_table(values=\"Value\", index=[\"County\", \"Month\"])\n df = set_climis_south_sudan_default_params(filename, df)\n df[\"Unit\"] = \"SSP\"\n df[\"Variable\"] = f\"Average price of {filename.split('_')[-3].lower()}\"\n df = df.reset_index()\n return df\n\n\ndef set_defaults(record: Dict):\n record.update(\n {\n \"Year\": 2017,\n \"Country\": \"South Sudan\",\n \"Unit\": \"%\",\n \"Source\": \"CLiMIS\",\n \"County\": None,\n }\n )\n\n\ndef make_group_dict(groups):\n return {k[0][0]: g for k, g in grouper(groups, 2)}\n\n\ndef make_df_from_group(k, v, index_func):\n df = pd.DataFrame(v)\n df.set_index(0, inplace=True)\n df.index = [index_func(k, i) for i in df.index]\n df = df.stack().reset_index(name=\"Value\")\n df.columns = [\"Variable\", \"Month\", \"Value\"]\n df[\"Month\"] = df[\"Month\"].astype(int)\n return df\n\n\ndef process_file_with_multiple_tables(filename, header_dict):\n dfs = []\n df = pd.read_csv(filename, index_col=0, names=range(12), header=0)\n\n # Define a grouping key function to split the CSV by the header rows\n grouping_key_function = lambda _tuple: _tuple[1][1:].isna().all()\n iterrows = filter(lambda r: r[1][0] != \"\", df.iterrows())\n key_group_tuples = groupby(iterrows, grouping_key_function)\n groups = [\n [\n [x[0].strip()] + x[1].values.tolist()\n for x in list(g)\n if isinstance(x[0], str)\n ]\n for k, g in key_group_tuples\n ]\n\n for k, v in make_group_dict(groups).items():\n if v is not None:\n df = make_df_from_group(\n k, v, lambda k, i: header_dict.get(k.strip(), lambda x: k)(i)\n )\n df[\"Value\"] = df[\"Value\"].replace(\" \", np.nan)\n df = df.dropna()\n df[\"County\"] = None\n df = set_climis_south_sudan_default_params(filename, df)\n\n if len(df.Value.values) > 0 and any(\n map(lambda v: \"%\" in v, df[\"Value\"].values)\n 
):\n df.Value = df.Value.str.replace(\"%\", \"\")\n df[\"Unit\"] = \"%\"\n else:\n df[\"Unit\"] = None\n if len(df[\"Variable\"].values) > 0:\n if \"SSP\" in df[\"Variable\"].values[0]:\n df[\"Variable\"] = (\n df[\"Variable\"].str.replace(\"\\(SSP\\)\", \"\").str.strip()\n )\n df[\"Unit\"] = \"SSP\"\n\n if len(df.Value.values) > 0 and \"-\" in df.Value.values[0]:\n # For percentage ranges, take the mean value\n df.Value = (\n df.Value.str.strip()\n .str.split(\"-\")\n .map(lambda x: list(map(float, x)))\n .map(lambda x: np.mean(x))\n )\n\n dfs.append(df)\n\n if len(dfs) > 0:\n return pd.concat(dfs)\n else:\n return None\n\ndef process_climis_crop_production_data(data_dir: str):\n \"\"\" Process CLiMIS crop production data \"\"\"\n\n climis_crop_production_csvs = glob(\n f\"{data_dir}/Climis South Sudan Crop Production Data/\"\n \"Crops_EstimatedProductionConsumptionBalance*.csv\"\n )\n state_county_df = pd.read_csv(\n \"data/south_sudan_data_fewsnet.tsv\", skipinitialspace=True\n )\n combined_records = []\n\n for f in climis_crop_production_csvs:\n year = int(f.split(\"/\")[-1].split(\"_\")[2].split(\".\")[0])\n df = pd.read_csv(f).dropna()\n for i, r in df.iterrows():\n record = {\n \"Year\": year,\n \"Month\": None,\n \"Source\": \"CLiMIS\",\n \"Country\": \"South Sudan\",\n }\n region = r[\"State/County\"].strip()\n\n if region.lower() in state_county_df[\"State\"].str.lower().values:\n record[\"State\"] = region\n record[\"County\"] = None\n else:\n potential_states = state_county_df.loc[\n state_county_df[\"County\"] == region\n ][\"State\"]\n record[\"State\"] = (\n potential_states.iloc[0]\n if len(potential_states) != 0\n else None\n )\n record[\"County\"] = region\n\n for field in r.index:\n if field != \"State/County\":\n if \"Net Cereal production\" in field:\n record[\"Variable\"] = \"Net Cereal Production\"\n record[\"Value\"] = r[field]\n if field.split()[-1].startswith(\"(\"):\n record[\"Unit\"] = field.split()[-1][1:-1].lower()\n else:\n record[\"Unit\"] = None\n\n combined_records.append(record)\n\n df = pd.DataFrame(combined_records)\n return df\n\n\ndef process_climis_livestock_data(data_dir: str):\n \"\"\" Process CLiMIS livestock data. 
\"\"\"\n\n records = []\n\n livestock_data_dir = f\"{data_dir}/Climis South Sudan Livestock Data\"\n\n for filename in glob(\n f\"{livestock_data_dir}/Livestock Body Condition/*2017.csv\"\n ):\n records += process_file_with_single_table(\n filename,\n lambda ind: f\"Percentage of {filename.split('_')[-3].lower()} with body condition {ind.lower()}\",\n lambda f: f.split(\"_\")[-2],\n )\n\n\n for filename in glob(\n f\"{livestock_data_dir}/Livestock Production/*2017.csv\"\n ):\n records += process_file_with_single_table(\n filename,\n lambda ind: \"Percentage of householding at least milking one of their livestocks\",\n lambda f: f.split(\"_\")[1],\n )\n\n disease_acronym_dict = {\n \"FMD\": \"Foot and Mouth Disease (FMD)\",\n \"LSD\": \"Lumpy Skin Disease (LSD)\",\n \"CBPP\": \"Contagious Bovine Pleuropneumonia (CBPP)\",\n \"CCPP\": \"Contagious Caprine Pleuropneumonia (CCPP)\",\n \"NC\": \"NC\",\n \"PPR\": \"Peste des Petits Ruminants (PPR)\",\n \"Others\": \"Other diseases\",\n }\n\n func = (\n lambda k, i: f\"Percentage of livestock with {disease_acronym_dict[k]} that are {i.lower().strip()}\"\n )\n livestock_disease_header_dict = {\n k: partial(func, k) for k in disease_acronym_dict\n }\n\n livestock_migration_header_dict = {\n \"Livestock migration\": lambda i: f\"Percentage of livestock migrating {i.split()[-1].lower()}\",\n \"Distance covered\": lambda i: \"Distance covered by migrating livestock\",\n \"Proportion of livestock that migrated\": lambda i: \"Percentage of livestock that migrated\",\n \"Migration normal at this time of the year\": lambda i: f\"Migration normal at this time of year, {i}\",\n \"Duration in months when the migrated animals are expected to be back after\": lambda i: \"Duration in months when the migrated animals are expected to be back after\",\n \"Reasons for livestock migration\": lambda i: f\"Percentage of livestock migrating due to {i.lower()}\",\n }\n\n def process_directory(dirname, header_dict):\n return pd.concat(\n [\n df\n for df in [\n process_file_with_multiple_tables(f, header_dict)\n for f in glob(f\"{livestock_data_dir}/{dirname}/*2017.csv\")\n ]\n if df is not None\n ]\n )\n\n func2 = (\n lambda k, i: f\"{k.replace('animals', i.lower()).replace('stock', 'stock of '+i.lower()).replace('animal', i.lower())}\"\n )\n livestock_ownership_headers = [\n \"Average current stock per household\",\n \"Average number of animals born per household during last 4 weeks\",\n \"Average number of animals acquired per household during last 4 weeks (dowry, purchase, gift)\",\n \"Average number of animals given out as bride price/gift per household during last 4 weeks per household\",\n \"Average number of animals sold per household during last 4 weeks household\",\n \"Average price of animal sold (SSP)\",\n \"Average number of animals exchanged for grain per household during last 4 weeks\",\n \"Average number of animals died/slaughtered/lost per household during last 4 weeks\",\n ]\n\n livestock_ownership_header_dict = {\n k: partial(func2, k) for k in livestock_ownership_headers\n }\n ownership_df = process_directory(\n \"Livestock Ownership\", livestock_ownership_header_dict\n )\n\n disease_df = process_directory(\n \"Livestock Diseases\", livestock_disease_header_dict\n )\n\n livestock_migration_df = process_directory(\n \"Livestock Migration\", livestock_migration_header_dict\n )\n\n livestock_pasture_header_dict = {\n \"Pasture condtion\": lambda i: f\"Percentage of livestock pasture in {i.lower()} condition\",\n \"Pasture condition compared to similar time 
in a normal year\": lambda i: f\"Percentage of livestock pasture in {i.lower()} condition compared to a similar time in a normal year\",\n \"Browse condition\": lambda i: f\"Percentage of livestock pasture in {i.lower()} browse condition\",\n \"Browse condition compared to similar time in a normal year\": lambda i: f\"Percentage of livestock pasture in {i.lower()} browse condition compared to a similar time in a normal year\",\n \"Presence of constraints in accessing forage\": lambda i: f\"Percentage reporting the {('presence' if i=='Yes' else 'absence')} of constraints in accessing forage\",\n \"Main forage constraints\": lambda i: f\"Percentage reporting {i.lower()} as the main forage constraint\",\n }\n livestock_pasture_df = process_directory(\n \"Livestock Pasture\", livestock_pasture_header_dict\n )\n livestock_water_sources_header_dict = {\n \"Main water sources\": lambda i: f\"Percentage of livestock whose main water source is {i.lower()}\",\n \"Number of days livestock have been watered in the last 7 days\": lambda i: f\"Number of days {i.lower()} have been watered in the last 7 days\",\n }\n\n livestock_water_sources_df = process_directory(\n \"Livestock Water Sources\", livestock_water_sources_header_dict\n )\n\n for filename in glob(f\"{livestock_data_dir}/Livestock Loss/*2017.csv\"):\n records += process_file_with_single_table(\n filename,\n lambda ind: f\"Percentage of {filename.split('_')[-3].lower()} loss accounted for by {ind.lower()}\",\n lambda f: f.split(\"_\")[-2],\n )\n\n\n for record in records:\n if isinstance(record[\"Value\"], str):\n record[\"Value\"] = record[\"Value\"].replace(\"%\", \"\")\n\n livestock_prices_df = pd.concat(\n [\n make_livestock_prices_table(f)\n for f in glob(\n f\"{livestock_data_dir}/Livestock Market Prices/*2017.csv\"\n )\n ]\n )\n\n climis_livestock_data_df = pd.concat(\n [\n pd.DataFrame(records),\n disease_df,\n ownership_df,\n livestock_prices_df,\n livestock_migration_df,\n livestock_pasture_df,\n livestock_water_sources_df,\n ],\n sort=True\n )\n return climis_livestock_data_df\n\n\ndef process_climis_import_data(data_dir: str) -> pd.DataFrame:\n dfs = []\n for f in glob(f\"{data_dir}/CLiMIS Import Data/*.csv\"):\n df = pd.read_csv(f, names=range(1, 13), header=0, thousands=\",\")\n df = df.stack().reset_index(name=\"Value\")\n df.columns = [\"Year\", \"Month\", \"Value\"]\n df[\"Month\"] = df[\"Month\"].astype(int)\n df[\"Year\"] = df[\"Year\"].astype(int)\n dfs.append(df)\n df = (\n pd.concat(dfs)\n .pivot_table(values=\"Value\", index=[\"Year\", \"Month\"], aggfunc=np.sum)\n .reset_index()\n )\n\n df.columns = [\"Year\", \"Month\", \"Value\"]\n df[\"Variable\"] = \"Total amount of cereal grains imported\"\n df[\"Unit\"] = \"metric tonne\"\n df[\"Country\"] = \"South Sudan\"\n df[\"County\"] = None\n df[\"State\"] = None\n return df\n\n\ndef process_climis_rainfall_data(data_dir: str) -> pd.DataFrame:\n dfs = []\n # Read CSV files first\n for f in glob(f\"{data_dir}/CLiMIS South Sudan Rainfall Data in\"\n \" Millimeters/*.csv\"):\n # Get the name of the table without path and extension\n table_name = os.path.basename(f)[:-4]\n # Get state and year from groups\n pattern = r'^(.*) ([0-9]+) Rainfall'\n state, year = re.match(pattern, table_name).groups()\n df = pd.read_csv(f, header=0, thousands=\",\")\n cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',\n 'State', 'County', 'Country']\n df_new = pd.DataFrame(columns=cols)\n df_new['Month'] = range(1, 13)\n df_new['Year'] = int(year)\n df_new['Value'] = df['monthly 
rainfall data ']\n df_new['Variable'] = 'Rainfall'\n df_new['Unit'] = 'millimeters'\n df_new['County'] = None\n df_new['State'] = state\n df_new['Source'] = 'CLiMIS'\n df_new['Country'] = 'South Sudan'\n dfs.append(df_new)\n df1 = pd.concat(dfs)\n\n # Read XLSX file next\n fname = f'{data_dir}/CLiMIS South Sudan Rainfall Data in Millimeters/' + \\\n 'Rainfall-Early_Warning_6month_Summary-2017-data_table.xlsx'\n df = pd.read_excel(fname, sheet_name='Rainfall Data', header=1)\n cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',\n 'State', 'County', 'Country']\n df_new = pd.DataFrame(columns=cols)\n states = []\n counties = []\n years = []\n months = []\n values = []\n for row in df.itertuples():\n state, county, year = row[1:4]\n for month in range(1,13):\n value = row[3 + month]\n if pd.isnull(value):\n continue\n states.append(state)\n counties.append(county)\n years.append(year)\n months.append(month)\n values.append(value)\n df_new['Year'] = years\n df_new['Month'] = months\n df_new['Value'] = values\n df_new['County'] = counties\n df_new['State'] = states\n df_new['Variable'] = 'Rainfall'\n df_new['Unit'] = 'millimeters'\n df_new['Source'] = 'CLiMIS'\n df_new['Country'] = 'South Sudan'\n\n df = pd.concat([df1, df_new])\n return df\n\ndef process_UNHCR_data(data_dir: str):\n df = pd.read_table(f\"{data_dir}/UNHCR Refugee Data/RefugeeData.tsv\",\n index_col=0,\n parse_dates=True, infer_datetime_format=True)\n df[\"Year\"] = df.index.year\n df[\"Month\"] = df.index.month\n df.rename(columns = {\"individuals\":\"Value\"}, inplace=True)\n df[\"Country\"] = \"South Sudan\"\n df[\"State\"] = None\n df[\"County\"] = None\n df[\"Source\"] = \"UNHCR\"\n df[\"Unit\"] = None\n df[\"Variable\"] = \"Number of refugees\"\n del df[\"unix_timestamp\"]\n return df\n\n\ndef create_combined_table(data_dir: str, columns: List[str]) -> pd.DataFrame:\n climis_crop_production_df = process_climis_crop_production_data(data_dir)\n climis_livestock_data_df = process_climis_livestock_data(data_dir)\n climis_import_data_df = process_climis_import_data(data_dir)\n climis_rainfall_data_df = process_climis_rainfall_data(data_dir)\n UNHCR_data_df = process_UNHCR_data(data_dir)\n\n # Severe acute malnutrition and inflation rate indicators from PDFs\n pdf_indicators_df = pd.read_table(f\"{data_dir}/indicator_data_from_pdfs.tsv\")\n\n df = pd.concat(\n [\n climis_crop_production_df,\n climis_livestock_data_df,\n climis_import_data_df,\n climis_rainfall_data_df,\n pdf_indicators_df,\n UNHCR_data_df,\n ],\n sort=True,\n )\n\n return df[columns]\n\n\nif __name__ == \"__main__\":\n columns = [\n \"Variable\",\n \"Year\",\n \"Month\",\n \"Value\",\n \"Unit\",\n \"Source\",\n \"State\",\n \"County\",\n \"Country\",\n ]\n\n data_dir = str(data_dir / \"raw\" / \"wm_12_month_evaluation\")\n df = create_combined_table(data_dir, columns)\n df[\"Year\"] = df[\"Year\"].astype(int)\n df.to_csv(sys.argv[1], index=False, sep=\"\\t\")\n" ]
[ [ "pandas.concat", "pandas.read_excel", "pandas.read_csv", "pandas.isnull", "pandas.DataFrame", "pandas.read_table", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
rndtestnt/riskAnalysis
[ "36601374b60efd3f7680adee84e9e54e2f2b52a2" ]
[ "train.py" ]
[ "import os\nimport warnings\nimport sys\n\nimport pandas as pd\nimport numpy as np\nfrom itertools import cycle\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.linear_model import lasso_path, enet_path\nfilePath = \"PD_Commercial_Train.csv\"\nos.environ['MLFLOW_TRACKING_URI'] = 'http://localhost:5000'\nos.environ['GIT_PYTHON_REFRESH'] = 'quiet'\n\ndf_credit = pd.read_csv(filePath)\n\ncols = df_credit.columns\ndata = df_credit[cols].apply(pd.to_numeric, errors='coerce')\ndata = data.fillna(0)\n\nX = data.drop([\"PD\"], axis=1)\ny = data[[\"PD\"]]\n\n# Import mlflow\nimport mlflow\nimport mlflow.sklearn\n\n\n# Evaluate metrics\ndef eval_metrics(actual, pred):\n rmse = np.sqrt(mean_squared_error(actual, pred))\n mae = mean_absolute_error(actual, pred)\n r2 = r2_score(actual, pred)\n return rmse, mae, r2\n#new\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n np.random.seed(40)\n mlflow.set_experiment('riskAnalysis')\n # Split the data into training and test sets. (0.75, 0.25) split.\n train, test = train_test_split(data)\n\n #Predict PD\n train_x = train.drop([\"PD\"], axis=1)\n test_x = test.drop([\"PD\"], axis=1)\n train_y = train[[\"PD\"]]\n test_y = test[[\"PD\"]]\n\n alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.05\n l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.05\n\n # Run ElasticNet\n lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)\n lr.fit(train_x, train_y)\n predicted_qualities = lr.predict(test_x)\n (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)\n\n # Print out ElasticNet model metrics\n print(\"Elasticnet model (alpha=%f, l1_ratio=%f):\" % (alpha, l1_ratio))\n print(\" RMSE: %s\" % rmse)\n print(\" MAE: %s\" % mae)\n print(\" R2: %s\" % r2)\n\n # Log mlflow attributes for mlflow UI\n mlflow.log_param(\"alpha\", alpha)\n mlflow.log_param(\"l1_ratio\", l1_ratio)\n mlflow.log_metric(\"rmse\", rmse)\n mlflow.log_metric(\"r2\", r2)\n mlflow.log_metric(\"mae\", mae)\n mlflow.sklearn.log_model(lr, \"model\")\n \n " ]
[ [ "pandas.read_csv", "sklearn.metrics.r2_score", "numpy.random.seed", "sklearn.linear_model.ElasticNet", "sklearn.metrics.mean_absolute_error", "sklearn.model_selection.train_test_split", "sklearn.metrics.mean_squared_error" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
alexcwsmith/scTriangulate
[ "ec014a4c575f4fd3270922ee9197493a6ec0846c" ]
[ "sctriangulate/metrics.py" ]
[ "import scanpy as sc\nimport pandas as pd\nimport numpy as np\nimport anndata as ad\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sys\nimport gseapy as gp\nimport math\nimport os\n\ndef check_filter_single_cluster(adata,key):\n vc = adata.obs[key].value_counts()\n exclude_clusters= vc.loc[vc==1].index\n truth = np.logical_not(adata.obs[key].isin(exclude_clusters).values)\n adata_valid = adata[truth,:]\n return adata_valid\n\ndef doublet_compute(adata,key):\n cluster_to_doublet = {}\n for cluster in adata.obs[key].astype('category').cat.categories:\n mean_score = adata[adata.obs[key]==cluster,:].obs['doublet_scores'].values.mean()\n cluster_to_doublet[cluster] = mean_score\n return cluster_to_doublet\n\n\n\ndef compute_combo_score(rank_uns,cluster):\n rank_names = rank_uns['names'][cluster]\n rank_lfc = rank_uns['logfoldchanges'][cluster]\n rank_pval = rank_uns['pvals'][cluster]\n df = pd.DataFrame({'names':rank_names,'lfc':rank_lfc,'pval':rank_pval})\n # filter out down-regulated genes\n df = df.loc[df['lfc'] > 0, :]\n df.set_index(keys=pd.Index(np.arange(df.shape[0])), inplace=True)\n # the rank of each gene by lfc, the larger, the better, make argsort result reverse\n temp = np.flip(np.argsort(df['lfc'].values))\n ranks_lfc = np.empty_like(temp)\n ranks_lfc[temp] = np.arange(len(df['pval'].values))\n # the rank of each gene by pval, the smaller, the better\n temp = np.argsort(df['pval'].values)\n ranks_pval = np.empty_like(temp)\n ranks_pval[temp] = np.arange(len(df['pval'].values))\n # combo rank score\n temp = (ranks_lfc + ranks_pval) / 2\n df['rank_lfc'] = ranks_lfc\n df['rank_pval'] = ranks_pval\n df['combo'] = temp\n df.sort_values(by='combo', inplace=True)\n df.set_index(keys=pd.Index(np.arange(df.shape[0])), inplace=True)\n # filter out the genes if pval > 0.05\n df = df.loc[df['pval']<0.05,:]\n df.set_index(keys=pd.Index(np.arange(df.shape[0])), inplace=True)\n return df\n\ndef run_enrichr(gene_list,key,name,folder):\n # run enrichr\n artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\\t')\n artifact_dict = artifact.groupby(by='class')['genes'].apply(lambda x:x.tolist()).to_dict()\n enr2 = gp.enrichr(gene_list=gene_list,\n description=name,\n gene_sets=artifact_dict,\n background=20000,\n outdir=os.path.join(folder,'scTriangulate_local_mode_enrichr'),\n cutoff=0.1, # adj-p for plotting\n verbose=True)\n enrichr_result = enr2.results\n enrichr_dict = {}\n for metric in artifact_dict.keys():\n if enrichr_result.shape[0] == 0: # no enrichment for any of the above terms\n enrichr_dict[metric] = 0\n else:\n try:\n enrichr_score = -math.log10(enrichr_result.loc[enrichr_result['Term']==metric,:]['Adjusted P-value'].to_list()[0])\n except IndexError:\n enrichr_dict[metric] = 0\n else:\n enrichr_dict[metric] = enrichr_score\n return enrichr_dict\n\ndef run_gsea(gene_list,key,name,folder):\n artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\\t')\n artifact_dict = artifact.groupby(by='class')['genes'].apply(lambda x:x.tolist()).to_dict()\n artifact_dict_keys = list(artifact_dict.keys())\n df = pd.DataFrame({0: gene_list, 1: 1/(np.arange(len(gene_list))+1)}) # col 1 is for descending rank of gene\n gsea_dict = {}\n try:\n pre_res = gp.prerank(rnk=df, gene_sets=artifact_dict,\n permutation_num=100,\n outdir=os.path.join(folder,'scTriangulate_local_mode_gsea/{}/{}'.format(key,name)),\n min_size=1,\n max_size=10000,\n seed=6,\n verbose=True) # run this will cause 
artifact dict decreasing !! Caveats!!!\n except Exception: # no hit return, all metrics are zero\n for metric in artifact_dict_keys:\n gsea_dict[metric] = (0,0) # first is nes, second is #hit\n else:\n gsea_result = pre_res.res2d\n metric_get = set(gsea_result.index.tolist())\n for metric in artifact_dict_keys:\n if metric in metric_get:\n gsea_score = gsea_result.loc[gsea_result.index==metric,:]['nes'].to_list()[0]\n gsea_hits = gsea_result.loc[gsea_result.index==metric,:]['matched_size'].to_list()[0]\n gsea_dict[metric] = (gsea_score, gsea_hits)\n else: # not enriched\n gsea_dict[metric] = (0,0)\n return gsea_dict\n\n\ndef read_artifact_genes(species,criterion):\n '''\n criterion1: all will be artifact\n criterion2: all will be artifact except cellcycle\n criterion3: all will be artifact except cellcycle, ribosome\n criterion4: all will be artifact except cellcycle, ribosome, mitochondrial\n criterion5: all will be artifact except cellcycle, ribosome, mitochondrial, antisense\n criterion6: all will be artifact except cellcycle, ribosome, mitochondrial, antisense, predict_gene\n '''\n artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\\t',index_col=0)\n artifact = artifact.loc[artifact['species']==species,:]\n if criterion == 1:\n artifact = artifact\n elif criterion == 2:\n artifact = artifact.loc[~(artifact['class']=='cellcycle'),:]\n elif criterion == 3:\n artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcycle')),:]\n elif criterion == 4:\n artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcycle')|(artifact['class']=='mitochondrial')),:]\n elif criterion == 5:\n artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcycle')|(artifact['class']=='mitochondrial')|(artifact['class']=='antisense')),:]\n elif criterion == 6:\n artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcycle')|(artifact['class']=='mitochondrial')|(artifact['class']=='antisense')|(artifact['class']=='predict_gene')),:]\n return artifact\n\n\ndef purify_gene(genelist,species,criterion):\n result = []\n artifact = read_artifact_genes(species,criterion)\n artifact_genes = set(artifact.index.to_list())\n for gene in genelist:\n if gene not in artifact_genes:\n result.append(gene)\n return result\n\ndef marker_gene(adata, key, species, criterion, folder):\n # delete previous rank_genes_groups if present\n if adata.uns.get('rank_genes_groups') is not None:\n del adata.uns['rank_genes_groups']\n # perform t-test\n sc.tl.rank_genes_groups(adata, key, method='t-test',n_genes=adata.shape[1])\n all_genes = adata.var_names.values # ndarray, all the genes\n all_clusters = adata.obs[key].cat.categories # pd.Index, all the clusters\n cluster2gene = dict() # {'cluster1':[gene1,gene2..]}\n rank_uns = adata.uns['rank_genes_groups']\n pre_computed_dfs = []\n for cluster in all_clusters:\n cluster2gene[cluster] = []\n df = compute_combo_score(rank_uns, cluster)\n pre_computed_dfs.append(df)\n for gene in all_genes:\n index_store = []\n for i,cluster in enumerate(all_clusters):\n df = pre_computed_dfs[i]\n # get the rank of the gene in each cluster\n try:\n index = np.nonzero(df['names'].values == gene)[0][0] # the rank of this gene in each cluster\n except IndexError:\n index = len(all_genes)\n index_store.append(index)\n if np.all(np.array(index_store) == len(all_genes)):\n continue\n assign = all_clusters[np.argmin(np.array(index_store))] # get argmin, take the 
corresponding cluster\n cluster2gene[assign].append((gene,np.min(index_store)))\n # sort the cluster2gene\n for key_,value in cluster2gene.items():\n gene = [item[0] for item in value]\n rank = [item[1] for item in value]\n temp = sorted(zip(gene,rank),key=lambda x:x[1])\n cluster2gene[key_] = [item[0] for item in temp]\n result = pd.Series(cluster2gene).to_frame()\n result.columns = ['whole_marker_genes']\n\n '''\n now the result is a dataframe\n whole_marker_genes\n cluster1 gene_list\n cluster2 gene_list\n '''\n\n # now let's perform enrichr and GSEA, and get puried marker gene\n col_enrichr = []\n col_gsea = []\n col_purify = [] # genelist that have artifact genes removed\n for cluster in result.index:\n enrichr_dict = run_enrichr(result.loc[cluster,:].to_list()[0],key=key,name=cluster,folder=folder) # [0] because it is a [[gene_list]],we only need [gene_list]\n gsea_dict = run_gsea(result.loc[cluster,:].to_list()[0],key=key,name=cluster,folder=folder)\n purified = purify_gene(result.loc[cluster,:].to_list()[0],species,criterion) # the [0] is explained last line\n col_enrichr.append(enrichr_dict)\n col_gsea.append(gsea_dict)\n col_purify.append(purified)\n\n result['enrichr'] = col_enrichr\n result['gsea'] = col_gsea\n result['purify'] = col_purify\n return result\n\n\ndef reassign_score(adata,key,marker,regress_size=False):\n # get gene pool, slice the adata\n num = 30\n pool = []\n for i in range(marker.shape[0]):\n marker_genes = marker.iloc[i]['purify']\n pick = marker_genes[:num] # if the list doesn't have more than 30 markers, it is oK, python will automatically choose all\n pool.extend(pick)\n pool = list(set(pool))\n adata_now = adata[:,pool].copy()\n\n # mean-centered and divide the std of the data\n tmp = adata_now.X\n from sklearn.preprocessing import scale\n tmp_scaled = scale(tmp,axis=0)\n adata_now.X = tmp_scaled\n \n # reducing dimension \n from sklearn.decomposition import PCA\n reducer = PCA(n_components=30)\n scoring = reducer.fit_transform(X=adata_now.X) \n\n from sklearn.preprocessing import LabelEncoder\n le = LabelEncoder()\n scoring_y = le.fit_transform(adata_now.obs[key].astype('str'))\n order = le.classes_\n\n # compute the centroid of each cluster\n X = np.empty([len(adata_now.obs[key].cat.categories),scoring.shape[1]])\n y = []\n for i,cluster in enumerate(adata_now.obs[key].cat.categories):\n bool_index = adata_now.obs[key]==cluster\n centroid = np.mean(scoring[bool_index,:],axis=0)\n X[i,:] = centroid\n y.append(cluster)\n y = le.fit_transform(y)\n\n\n # train a KNN classifier\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.metrics import confusion_matrix\n # if number of centroid(training data) < N_neighbors, will raise error, we hard code it to be 10\n n_neighbors = 10\n if X.shape[0] < n_neighbors:\n n_neighbors = X.shape[0]\n model = KNeighborsClassifier(n_neighbors=n_neighbors,weights='distance')\n model.fit(X,y)\n pred = model.predict(scoring) # (n_samples,)\n mat = confusion_matrix(scoring_y,pred)\n confusion_reassign = pd.DataFrame(data=mat,index=order,columns=order)\n accuracy = []\n for i in range(mat.shape[0]):\n accuracy.append(mat[i,i]/np.sum(mat[i,:]))\n cluster_to_accuracy = {}\n for i,cluster in enumerate(order):\n cluster_to_accuracy[cluster] = accuracy[i]\n\n # whether to regress out the clutser size effect or not\n if regress_size:\n key_metric_dict = cluster_to_accuracy\n key_size_dict = get_size_in_metrics(adata.obs,key)\n df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1) # index is 
cluster, col1 is metric, col2 is size\n cluster_to_accuracy = regress_size(df_inspect,regressor='GLM',to_dict=True)\n return cluster_to_accuracy, confusion_reassign\n\n'''below is the part for regression score'''\ndef background_normalizer(df,n_neighbors=10,scale=True):\n # df is a two column dataframe where first column is metric and second column is size\n from copy import deepcopy\n df = deepcopy(df)\n df['order'] = np.arange(df.shape[0])\n col = []\n for i in range(df.shape[0]):\n this_metric = df[0][i]\n distance_to_this = (df[0] - this_metric).abs()\n df_tmp = deepcopy(df)\n df_tmp['distance'] = distance_to_this.values\n df_tmp.sort_values(by='distance',inplace=True)\n neighbors_metric = df_tmp.iloc[:,0][:n_neighbors].values\n mean_ = neighbors_metric.mean()\n std_ = neighbors_metric.std()\n if scale:\n if std_ == 0:\n col.append(0)\n else:\n col.append((this_metric-mean_)/std_)\n else:\n col.append(this_metric-mean_)\n df['normalized'] = col\n return df\n\ndef regress_size(df_inspect,regressor='background_zscore',n_neighbors=10,to_dict=False):\n \n # df_inspect, index is cluster name, col1 is metric, col2 is size\n if regressor == 'background_zscore':\n df_now = background_normalizer(df_inspect,n_neighbors,True)\n residual = df_now['normalized'].values\n df_inspect[0] = residual\n normalized_metric_series = df_inspect[0]\n elif regressor == 'background_mean':\n df_now = background_normalizer(df_inspect,n_neighbors,False)\n residual = df_now['normalized'].values\n df_inspect[0] = residual\n normalized_metric_series = df_inspect[0]\n elif regressor == 'GLM':\n endog = df_inspect[0] # metric\n exog = df_inspect[1] # size\n import statsmodels.api as sm\n exog = sm.add_constant(exog,prepend=True)\n model = sm.GLM(endog,exog,family=sm.families.Gaussian())\n res = model.fit()\n residual = res.resid_response\n normalized_metric_series = residual\n elif regressor == 'Huber':\n endog = df_inspect[0] # metric\n exog = df_inspect[1] # size\n from sklearn.linear_model import HuberRegressor\n model = HuberRegressor().fit(exog.values.reshape(-1,1),endog.values)\n prediction = model.predict(exog.values.reshape(-1,1))\n residual = endog.values - prediction\n # outliers = model.outliers_\n df_inspect[0] = residual\n normalized_metric_series = df_inspect[0]\n elif regressor == 'RANSAC':\n endog = df_inspect[0] # metric\n exog = df_inspect[1] # size\n from sklearn.linear_model import RANSACRegressor\n model = RANSACRegressor().fit(exog.values.reshape(-1,1),endog.values)\n prediction = model.predict(exog.values.reshape(-1,1))\n residual = endog.values - prediction\n #outliers = np.logical_not(model.inlier_mask_)\n df_inspect[0] = residual\n normalized_metric_series = df_inspect[0]\n elif regressor == 'TheilSen':\n endog = df_inspect[0] # metric\n exog = df_inspect[1] # size\n from sklearn.linear_model import TheilSenRegressor\n model = TheilSenRegressor().fit(exog.values.reshape(-1,1),endog.values)\n prediction = model.predict(exog.values.reshape(-1,1))\n residual = endog.values - prediction\n df_inspect[0] = residual\n normalized_metric_series = df_inspect[0]\n if to_dict:\n normalized_metric_dict = normalized_metric_series.to_dict()\n final = normalized_metric_dict\n else:\n final = normalized_metric_series\n return final\n\n\n\n\ndef tf_idf_bare_compute(df,cluster):\n '''\n now the df contains all the gene for and an additional column for cluster\n '''\n # compute its tf_idf\n tmp1 = df.loc[df['cluster'] == cluster, :].loc[:,df.columns!='cluster'].values # (n_cells,n_genes)\n tf = 
np.count_nonzero(tmp1,axis=0) / tmp1.shape[0] # (n_genes,)\n tf = tf + 1e-5\n tmp2 = df.loc[:,df.columns!='cluster'].values\n df_ = np.count_nonzero(tmp2,axis=0) / tmp2.shape[0] # (n_genes,)\n df_ = df_ + 1e-5\n idf = -np.log10(df_)\n tf_idf_ori = tf * idf # (n_genes,)\n return tf_idf_ori\n\ndef single_size_query(obs,c):\n # c would be {gs:ERP4}\n key = list(c.keys())[0]\n cluster = list(c.values())[0]\n size = obs.loc[obs[key]==cluster,:].shape[0]\n return size\n\ndef get_size_in_metrics(obs,key):\n key_size_dict = {} # {ERP1:54,ERP2:100....}\n for cluster in obs[key].unique():\n size = single_size_query(obs,{key:cluster})\n key_size_dict[cluster] = size\n return key_size_dict\n\ndef tf_idf10_for_cluster(adata,key,species,criterion,regress_size=False):\n df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names) \n df['cluster'] = adata.obs[key].astype('str').values\n cluster_to_tfidf10 = {} # store tfidf10 score\n cluster_to_exclusive = {} # store exclusivly expressed genes\n for item in adata.obs[key].cat.categories:\n a = tf_idf_bare_compute(df,item)\n a_names = adata.var_names\n test = pd.Series(data=a, index=a_names)\n test.sort_values(ascending=False, inplace=True)\n # remove artifact genes\n artifact = read_artifact_genes(species,criterion)\n artifact_genes = set(artifact.index.to_list())\n test_pure = test.loc[~test.index.isin(artifact_genes)]\n result10 = test_pure.iloc[9] \n cluster_to_tfidf10[item] = result10\n cluster_to_exclusive[item] = test_pure.to_dict()\n exclusive_genes = pd.Series(cluster_to_exclusive,name='genes')\n\n # whether to regress out the clutser size effect or not\n if regress_size:\n key_metric_dict = cluster_to_tfidf10\n key_size_dict = get_size_in_metrics(adata.obs,key)\n df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1) # index is cluster, col1 is metric, col2 is size \n cluster_to_tfidf10 = regress_size(df_inspect,regressor='GLM',to_dict=True)\n return cluster_to_tfidf10, exclusive_genes\n\n\ndef tf_idf5_for_cluster(adata,key,species,criterion,regress_size=False):\n df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names) \n df['cluster'] = adata.obs[key].astype('str').values\n cluster_to_tfidf5 = {} # store tfidf1 score\n for item in adata.obs[key].cat.categories:\n a = tf_idf_bare_compute(df,item)\n a_names = adata.var_names\n test = pd.Series(data=a, index=a_names)\n test.sort_values(ascending=False, inplace=True)\n # remove artifact genes\n artifact = read_artifact_genes(species,criterion)\n artifact_genes = set(artifact.index.to_list())\n test_pure = test.loc[~test.index.isin(artifact_genes)]\n result5 = test_pure.iloc[4] \n cluster_to_tfidf5[item] = result5\n\n # whether to regress out the clutser size effect or not\n if regress_size:\n key_metric_dict = cluster_to_tfidf5\n key_size_dict = get_size_in_metrics(adata.obs,key)\n df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1) # index is cluster, col1 is metric, col2 is size\n cluster_to_tfidf5 = regress_size(df_inspect,regressor='GLM',to_dict=True)\n return cluster_to_tfidf5\n\ndef tf_idf1_for_cluster(adata,key,species,criterion,regress_size=False):\n df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names) \n df['cluster'] = adata.obs[key].astype('str').values\n cluster_to_tfidf1 = {} # store tfidf1 score\n for item in adata.obs[key].cat.categories:\n a = tf_idf_bare_compute(df,item)\n a_names = adata.var_names\n test = pd.Series(data=a, index=a_names)\n 
test.sort_values(ascending=False, inplace=True)\n # remove artifact genes\n artifact = read_artifact_genes(species,criterion)\n artifact_genes = set(artifact.index.to_list())\n test_pure = test.loc[~test.index.isin(artifact_genes)]\n result1 = test_pure.iloc[0] \n cluster_to_tfidf1[item] = result1\n\n # whether to regress out the clutser size effect or not\n if regress_size:\n key_metric_dict = cluster_to_tfidf1\n key_size_dict = get_size_in_metrics(adata.obs,key)\n df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1) # index is cluster, col1 is metric, col2 is size\n cluster_to_tfidf1 = regress_size(df_inspect,regressor='GLM',to_dict=True)\n return cluster_to_tfidf1\n\n\n\n\ndef SCCAF_score(adata, key, species, criterion, scale_sccaf,regress_size=False):\n from sklearn.preprocessing import LabelEncoder\n from sklearn.model_selection import StratifiedShuffleSplit\n from sklearn.linear_model import LogisticRegression\n from sklearn.metrics import confusion_matrix\n # define X and Y and remove artifact genes in the first place\n artifact = read_artifact_genes(species,criterion)\n artifact_genes = set(artifact.index.to_list())\n X = adata[:,~adata.var_names.isin(artifact_genes)].X.copy() # from ArrayView to ndarray\n Y = adata.obs[key].values\n\n # mean-centered and divide the std of the data, if too many cells (>50000), no scale, liblinear solver is robust to unscaled data\n if scale_sccaf:\n tmp = X\n from sklearn.preprocessing import scale\n tmp_scaled = scale(tmp,axis=0)\n X = tmp_scaled\n \n # label encoding Y to numerical values\n le = LabelEncoder()\n Y = le.fit_transform(Y)\n # stratified split to traing and test, train and test, then get confusion matrix\n sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)\n for train_index, test_index in sss.split(X, Y):\n X_train = X[train_index]\n Y_train = Y[train_index]\n X_test = X[test_index]\n Y_test = Y[test_index]\n model = LogisticRegression(penalty='l1', solver='liblinear', max_iter=100000)\n model.fit(X_train, Y_train)\n result = model.predict(X_test)\n m = confusion_matrix(Y_test, result)\n confusion_sccaf = pd.DataFrame(data=m,index=le.classes_,columns=le.classes_)\n # derive cluster reliability from confusion matrix for each cluster\n numeric2reliable = [] # [0.4,0.5...] length is the number of clusters involved in self-projection\n for i in range(m.shape[0]):\n numeric2reliable.append(m[i, i] / m[i, :].sum())\n cluster_to_SCCAF = {}\n for i in range(len(numeric2reliable)):\n cluster_to_SCCAF[le.classes_[i]] = numeric2reliable[i]\n\n # whether to regress out the clustser size effect or not\n if regress_size:\n key_metric_dict = cluster_to_SCCAF\n key_size_dict = get_size_in_metrics(adata.obs,key)\n df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1) # index is cluster, col1 is metric, col2 is size\n cluster_to_SCCAF = regress_size(df_inspect,regressor='GLM',to_dict=True)\n return cluster_to_SCCAF, confusion_sccaf\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.Series", "sklearn.metrics.confusion_matrix", "pandas.DataFrame", "numpy.mean", "sklearn.preprocessing.LabelEncoder", "numpy.empty_like", "numpy.arange", "sklearn.neighbors.KNeighborsClassifier", "numpy.count_nonzero", "sklearn.model_selection.StratifiedShuffleSplit", "sklearn.linear_model.TheilSenRegressor", "numpy.nonzero", "numpy.min", "sklearn.linear_model.RANSACRegressor", "numpy.log10", "numpy.argsort", "numpy.array", "sklearn.preprocessing.scale", "numpy.sum", "sklearn.decomposition.PCA", "sklearn.linear_model.HuberRegressor", "sklearn.linear_model.LogisticRegression" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
DSimonne/bcdi
[ "5740a75576d7c3760ac72358acfb51321d51f82b", "5740a75576d7c3760ac72358acfb51321d51f82b", "5740a75576d7c3760ac72358acfb51321d51f82b", "5740a75576d7c3760ac72358acfb51321d51f82b" ]
[ "bcdi/postprocessing/facet_recognition.py", "bcdi/graph/graph_utils.py", "scripts/utils/bcdi_crop_npz.py", "tests/config.py" ]
[ "# -*- coding: utf-8 -*-\n\n# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data\n# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP\n# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE\n# authors:\n# Jerome Carnis, [email protected]\n\"\"\"Functions related to facet recognition of nanocrystals.\"\"\"\n\nimport sys\nfrom numbers import Real\n\nimport numpy as np\nfrom matplotlib import patches\nfrom matplotlib import pyplot as plt\nfrom scipy import ndimage, stats\nfrom scipy.interpolate import RegularGridInterpolator, griddata\nfrom scipy.ndimage.measurements import center_of_mass\nfrom scipy.signal import convolve\nfrom skimage.feature import corner_peaks\nfrom skimage.segmentation import watershed\n\nfrom bcdi.graph import graph_utils as gu\nfrom bcdi.graph.colormap import ColormapFactory\nfrom bcdi.utils import utilities as util\nfrom bcdi.utils import validation as valid\n\ndefault_cmap = ColormapFactory().cmap\n\n\ndef calc_stereoproj_facet(projection_axis, vectors, radius_mean, stereo_center):\n \"\"\"\n Calculate the coordinates of normals in the stereographic projection.\n\n The calculation depends on the reference axis. See: Nanoscale 10, 4833 (2018).\n\n :param projection_axis: the projection is performed on q plane perpendicular to\n that axis (0, 1 or 2)\n :param vectors: array of vectors to be projected (nb_vectors rows x 3 columns)\n :param radius_mean: q radius from which the projection will be done\n :param stereo_center: offset of the projection plane along the reflection axis,\n in the same unit as radius_mean. If stereo_center = 0, the projection plane will\n be the equator.\n :return: the coordinates of the stereographic projection for the projection from\n the South pole(1st and 2nd columns) and from the North pole (3rd and 4th\n columns) projection, rescaled from radius_mean to 90 degrees\n \"\"\"\n if projection_axis not in [0, 1, 2]:\n raise ValueError(\n \"reflection_axis should be a basis axis of the reconstructed array\"\n )\n\n # calculate u and v from xyz\n stereo_proj = np.zeros((vectors.shape[0], 4), dtype=vectors.dtype)\n # stereo_proj[:, 0] is the euclidian u_south,\n # stereo_proj[:, 1] is the euclidian v_south\n # stereo_proj[:, 2] is the euclidian u_north,\n # stereo_proj[:, 3] is the euclidian v_north\n\n if (\n projection_axis == 0\n ): # q aligned along the 1st axis (Z downstream in CXI convention)\n for idx in range(vectors.shape[0]):\n stereo_proj[idx, 0] = (\n radius_mean\n * vectors[idx, 1]\n / (radius_mean + vectors[idx, 0] - stereo_center)\n ) # u_s\n stereo_proj[idx, 1] = (\n radius_mean\n * vectors[idx, 2]\n / (radius_mean + vectors[idx, 0] - stereo_center)\n ) # v_s\n stereo_proj[idx, 2] = (\n radius_mean\n * vectors[idx, 1]\n / (radius_mean + stereo_center - vectors[idx, 0])\n ) # u_n\n stereo_proj[idx, 3] = (\n radius_mean\n * vectors[idx, 2]\n / (radius_mean + stereo_center - vectors[idx, 0])\n ) # v_n\n uv_labels = (\n \"axis 1\",\n \"axis 2\",\n ) # axes corresponding to u and v respectively, used in plots\n\n elif (\n projection_axis == 1\n ): # q aligned along the 2nd axis (Y vertical up in CXI convention)\n for idx in range(vectors.shape[0]):\n stereo_proj[idx, 0] = (\n radius_mean\n * vectors[idx, 0]\n / (radius_mean + vectors[idx, 1] - stereo_center)\n ) # u_s\n stereo_proj[idx, 1] = (\n radius_mean\n * vectors[idx, 2]\n / (radius_mean + vectors[idx, 1] - stereo_center)\n ) # v_s\n stereo_proj[idx, 2] = (\n radius_mean\n * vectors[idx, 0]\n / (radius_mean + stereo_center - vectors[idx, 1])\n ) # u_n\n 
stereo_proj[idx, 3] = (\n radius_mean\n * vectors[idx, 2]\n / (radius_mean + stereo_center - vectors[idx, 1])\n ) # v_n\n uv_labels = (\n \"axis 0\",\n \"axis 2\",\n ) # axes corresponding to u and v respectively, used in plots\n\n else: # q aligned along the 3rd axis (X outboard in CXI convention)\n for idx in range(vectors.shape[0]):\n stereo_proj[idx, 0] = (\n radius_mean\n * vectors[idx, 0]\n / (radius_mean + vectors[idx, 2] - stereo_center)\n ) # u_s\n stereo_proj[idx, 1] = (\n radius_mean\n * vectors[idx, 1]\n / (radius_mean + vectors[idx, 2] - stereo_center)\n ) # v_s\n stereo_proj[idx, 2] = (\n radius_mean\n * vectors[idx, 0]\n / (radius_mean + stereo_center - vectors[idx, 2])\n ) # u_n\n stereo_proj[idx, 3] = (\n radius_mean\n * vectors[idx, 1]\n / (radius_mean + stereo_center - vectors[idx, 2])\n ) # v_n\n uv_labels = (\n \"axis 0\",\n \"axis 1\",\n ) # axes corresponding to u and v respectively, used in plots\n\n stereo_proj = stereo_proj / radius_mean * 90 # rescale from radius_mean to 90\n\n return stereo_proj, uv_labels\n\n\ndef detect_edges(faces):\n \"\"\"\n Find indices of vertices defining non-shared edges.\n\n :param faces: ndarray of m*3 faces\n :return: 1D list of indices of vertices defining non-shared edges (near hole...)\n \"\"\"\n # Get the three edges per triangle\n edge1 = np.copy(faces[:, 0:2])\n edge2 = np.array([np.copy(faces[:, 0]), np.copy(faces[:, 2])]).T\n edge3 = np.array([np.copy(faces[:, 1]), np.copy(faces[:, 2])]).T\n edge1.sort(axis=1)\n edge2.sort(axis=1)\n edge3.sort(axis=1)\n\n # list of edges without redundancy\n edges = np.concatenate((edge1, edge2, edge3), axis=0)\n edge_list, _, edges_counts = np.unique(\n edges, return_index=True, return_counts=True, axis=0\n )\n\n # isolate non redundant edges\n unique_edges = edge_list[edges_counts == 1].flatten()\n return unique_edges\n\n\ndef distance_threshold(fit, indices, plane_shape, max_distance=0.90):\n \"\"\"\n Filter out pixels depending on their distance to a fit plane.\n\n :param fit: coefficients of the plane (a, b, c, d) such that a*x + b*y + c*z + d = 0\n :param indices: tuple or array of plane indices, x being the 1st tuple element or\n array row, y the 2nd tuple element or array row and z the third tuple element or\n array row\n :param plane_shape: shape of the initial plane array\n :param max_distance: max distance allowed from the fit plane in pixels\n :return: the updated plane, a stop flag\n \"\"\"\n indices = np.asarray(indices)\n plane = np.zeros(plane_shape, dtype=int)\n no_points = False\n if len(indices[0]) == 0:\n no_points = True\n return plane, no_points\n\n # remove outsiders based on their distance to the plane\n plane_normal = np.array(\n [fit[0], fit[1], fit[2]]\n ) # normal is [a, b, c] if ax+by+cz+d=0\n for point in range(len(indices[0])):\n dist = abs(\n fit[0] * indices[0, point]\n + fit[1] * indices[1, point]\n + fit[2] * indices[2, point]\n + fit[3]\n ) / np.linalg.norm(plane_normal)\n if dist < max_distance:\n plane[indices[0, point], indices[1, point], indices[2, point]] = 1\n if plane[plane == 1].sum() == 0:\n print(\"Distance_threshold: no points for plane\")\n no_points = True\n return plane, no_points\n return plane, no_points\n\n\ndef equirectangular_proj(\n normals,\n intensity,\n cmap=default_cmap,\n bw_method=0.03,\n min_distance=10,\n background_threshold=-0.35,\n debugging=False,\n):\n \"\"\"\n Detect facets in an object.\n\n It uses an equirectangular projection of normals to mesh triangles and watershed\n segmentation.\n\n :param normals: normals array\n 
:param intensity: intensity array\n :param cmap: colormap used for plotting\n :param bw_method: bw_method of gaussian_kde\n :param min_distance: min_distance of corner_peaks()\n :param background_threshold: threshold for background determination\n (depth of the KDE)\n :param debugging: if True, show plots for debugging\n :return: ndarray of labelled regions\n \"\"\"\n # check normals for nan\n list_nan = np.argwhere(np.isnan(normals))\n normals = np.delete(normals, list_nan[::3, 0], axis=0)\n intensity = np.delete(intensity, list_nan[::3, 0], axis=0)\n\n # calculate latitude and longitude from xyz,\n # this is equal to the equirectangular flat square projection\n long_lat = np.zeros((normals.shape[0], 2), dtype=normals.dtype)\n for i in range(normals.shape[0]):\n if normals[i, 1] == 0 and normals[i, 0] == 0:\n continue\n long_lat[i, 0] = np.arctan2(normals[i, 1], normals[i, 0]) # longitude\n long_lat[i, 1] = np.arcsin(normals[i, 2]) # latitude\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(long_lat[:, 0], long_lat[:, 1], c=intensity, cmap=cmap)\n ax.set_xlim(-np.pi, np.pi)\n ax.set_ylim(-np.pi / 2, np.pi / 2)\n plt.axis(\"scaled\")\n plt.title(\"Equirectangular projection of the weighted point densities before KDE\")\n plt.pause(0.1)\n\n # kernel density estimation\n kde = stats.gaussian_kde(long_lat.T, bw_method=bw_method)\n # input should be a 2D array with shape (# of dims, # of data)\n\n # Create a regular 3D grid\n yi, xi = np.mgrid[\n -np.pi / 2 : np.pi / 2 : 150j, -np.pi : np.pi : 300j\n ] # vertical, horizontal\n\n # Evaluate the KDE on a regular grid...\n coords = np.vstack([item.ravel() for item in [xi, yi]])\n # coords is a contiguous flattened array of coordinates of shape (2, size(xi))\n\n density = -1 * kde(coords).reshape(\n xi.shape\n ) # inverse density for later watershed segmentation\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n scatter = ax.scatter(xi, yi, c=density, cmap=cmap, vmin=-1.5, vmax=0)\n ax.set_xlim(-np.pi, np.pi)\n ax.set_ylim(-np.pi / 2, np.pi / 2)\n fig.colorbar(scatter)\n plt.axis(\"scaled\")\n plt.title(\"Equirectangular projection of the KDE\")\n plt.pause(0.1)\n\n # identification of local minima\n density[density > background_threshold] = 0 # define the background\n mask = np.copy(density)\n mask[mask != 0] = 1\n\n plt.figure()\n plt.imshow(mask, cmap=cmap, interpolation=\"nearest\")\n plt.title(\"Background mask\")\n plt.gca().invert_yaxis()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n scatter = ax.scatter(xi, yi, c=density, cmap=cmap)\n ax.set_xlim(-np.pi, np.pi)\n ax.set_ylim(-np.pi / 2, np.pi / 2)\n fig.colorbar(scatter)\n plt.axis(\"scaled\")\n plt.title(\"KDE after background definition\")\n plt.pause(0.1)\n\n # Generate the markers as local minima of the distance to the background\n distances = ndimage.distance_transform_edt(density)\n if debugging:\n plt.figure()\n plt.imshow(distances, cmap=cmap, interpolation=\"nearest\")\n plt.title(\"Distances\")\n plt.gca().invert_yaxis()\n plt.pause(0.1)\n\n # find peaks\n local_maxi = corner_peaks(\n distances, exclude_border=False, min_distance=min_distance, indices=False\n ) #\n if debugging:\n plt.figure()\n plt.imshow(local_maxi, interpolation=\"nearest\")\n plt.title(\"local_maxi\")\n plt.gca().invert_yaxis()\n plt.pause(0.1)\n\n # define markers for each peak\n markers = ndimage.label(local_maxi)[0]\n if debugging:\n plt.figure()\n plt.imshow(markers, interpolation=\"nearest\")\n plt.title(\"markers\")\n plt.colorbar()\n plt.gca().invert_yaxis()\n plt.pause(0.1)\n\n # 
watershed segmentation\n labels = watershed(-1 * distances, markers, mask=mask)\n print(\"There are\", str(labels.max()), \"facets\") # label 0 is the background\n\n plt.figure()\n plt.imshow(labels, cmap=cmap, interpolation=\"nearest\")\n plt.title(\"Separated objects\")\n plt.colorbar()\n plt.gca().invert_yaxis()\n plt.pause(0.1)\n\n return labels, long_lat\n\n\ndef find_facet(\n refplane_indices,\n surf_indices,\n original_shape,\n step_shift,\n plane_label,\n plane_coeffs,\n min_points,\n debugging=False,\n):\n \"\"\"\n Shift a fit plane along its normal until it reaches the surface of a faceted object.\n\n :param refplane_indices: a tuple of 3 arrays (1D, length N) describing the\n coordinates of the plane voxels, x values being the 1st tuple element, y values\n the 2nd tuple element and z values the 3rd tuple element (output of np.nonzero)\n :param surf_indices: a tuple of 3 arrays (1D, length N) describing the coordinates\n of the surface voxels, x values being the 1st tuple element, y values the 2nd\n tuple element and z values the 3rd tuple element (output of np.nonzero)\n :param original_shape: the shape of the full dataset (amplitude object,\n eventually upsampled)\n :param step_shift: the amplitude of the shift to be applied to the plane\n along its normal\n :param plane_label: the label of the plane, used in comments\n :param plane_coeffs: a tuple of coefficient (a, b, c, d) such that ax+by+cz+d=0\n :param min_points: threshold, minimum number of points that should coincide\n between the fit plane and the object surface\n :param debugging: True to see debugging plots\n :return: the shift that needs to be applied to the fit plane in order to best\n match with the object surface\n \"\"\"\n if not isinstance(refplane_indices, tuple):\n raise ValueError(\"refplane_indices should be a tuple of 3 1D ndarrays\")\n if not isinstance(surf_indices, tuple):\n raise ValueError(\"surf_indices should be a tuple of 3 1D ndarrays\")\n surf0, surf1, surf2 = surf_indices\n plane_normal = np.array(\n [plane_coeffs[0], plane_coeffs[1], plane_coeffs[2]]\n ) # normal is [a, b, c] if ax+by+cz+d=0\n # loop until the surface is crossed or the iteration limit is reached\n common_previous = 0\n found_plane = 0\n nbloop = 1\n crossed_surface = 0\n shift_direction = 0\n while found_plane == 0:\n common_points = 0\n nb_points = len(surf0)\n\n # shift indices\n plane_newindices0, plane_newindices1, plane_newindices2 = offset_plane(\n indices=refplane_indices,\n offset=nbloop * step_shift,\n plane_normal=plane_normal,\n )\n nb_newpoints = len(plane_newindices0)\n for point in range(nb_newpoints):\n for point2 in range(nb_points):\n if (\n plane_newindices0[point] == surf0[point2]\n and plane_newindices1[point] == surf1[point2]\n and plane_newindices2[point] == surf2[point2]\n ):\n common_points = common_points + 1\n\n if debugging:\n temp_coeff3 = plane_coeffs[3] - nbloop * step_shift\n dist = np.zeros(nb_points)\n for point in range(nb_points):\n dist[point] = (\n plane_coeffs[0] * surf0[point]\n + plane_coeffs[1] * surf1[point]\n + plane_coeffs[2] * surf2[point]\n + temp_coeff3\n ) / np.linalg.norm(plane_normal)\n temp_mean_dist = dist.mean()\n plane = np.zeros(original_shape)\n plane[plane_newindices0, plane_newindices1, plane_newindices2] = 1\n\n # plot plane points overlaid with the support\n gu.scatter_plot_overlaid(\n arrays=(\n np.concatenate(\n (\n plane_newindices0[:, np.newaxis],\n plane_newindices1[:, np.newaxis],\n plane_newindices2[:, np.newaxis],\n ),\n axis=1,\n ),\n np.concatenate(\n (\n 
surf0[:, np.newaxis],\n surf1[:, np.newaxis],\n surf2[:, np.newaxis],\n ),\n axis=1,\n ),\n ),\n markersizes=(8, 2),\n markercolors=(\"b\", \"r\"),\n labels=(\"axis 0\", \"axis 1\", \"axis 2\"),\n title=\"Plane\"\n + str(plane_label)\n + \" after shifting - iteration\"\n + str(nbloop),\n )\n\n print(\n \"(while) iteration \",\n nbloop,\n \"- Mean distance of the plane to outer shell = \"\n + str(\"{:.2f}\".format(temp_mean_dist))\n + \"\\n pixels - common_points = \",\n common_points,\n )\n\n if common_points != 0: # some plane points are in commun with the surface layer\n if common_points >= common_previous:\n found_plane = 0\n common_previous = common_points\n print(\n \"(while, common_points != 0), iteration \",\n nbloop,\n \" - \",\n common_previous,\n \"points belonging to the facet for plane \",\n plane_label,\n )\n nbloop = nbloop + 1\n crossed_surface = 1\n elif (\n common_points < min_points\n ): # try to keep enough points for statistics, half step back\n found_plane = 1\n print(\n \"(while, common_points != 0), \"\n \"exiting while loop after threshold reached - \",\n common_previous,\n \"points belonging to the facet for plane \",\n plane_label,\n \"- next step common points=\",\n common_points,\n )\n else:\n found_plane = 0\n common_previous = common_points\n print(\n \"(while, common_points != 0), iteration \",\n nbloop,\n \" - \",\n common_previous,\n \"points belonging to the facet for plane \",\n plane_label,\n )\n nbloop = nbloop + 1\n crossed_surface = 1\n else: # no commun points, the plane is not intersecting the surface layer\n if crossed_surface == 1: # found the outer shell, which is 1 step before\n found_plane = 1\n print(\n \"(while, common_points = 0), exiting while loop - \",\n common_previous,\n \"points belonging to the facet for plane \",\n plane_label,\n \"- next step common points=\",\n common_points,\n )\n elif not shift_direction:\n if nbloop < 5: # continue to scan\n print(\n \"(while, common_points = 0), iteration \",\n nbloop,\n \" - \",\n common_previous,\n \"points belonging to the facet for plane \",\n plane_label,\n )\n nbloop = nbloop + 1\n else: # scan in the other direction\n shift_direction = 1\n print(\"Shift scanning direction\")\n step_shift = -1 * step_shift\n nbloop = 1\n else: # shift_direction = 1\n if nbloop < 10:\n print(\n \"(while, common_points = 0), iteration \",\n nbloop,\n \" - \",\n common_previous,\n \"points belonging to the facet for plane \",\n plane_label,\n )\n nbloop = nbloop + 1\n else: # we were already unsuccessfull in the other direction, give up\n print(\n \"(while, common_points = 0),\"\n \" no point from support is intersecting the plane \",\n plane_label,\n )\n break\n\n return (nbloop - 1) * step_shift\n\n\ndef find_neighbours(vertices, faces):\n \"\"\"\n Get the list of neighbouring vertices for each vertex.\n\n :param vertices: ndarray of n*3 vertices\n :param faces: ndarray of m*3 faces\n :return: list of lists of indices\n \"\"\"\n neighbors = [None] * vertices.shape[0]\n\n nb_faces = faces.shape[0]\n for indx in range(nb_faces):\n if neighbors[faces[indx, 0]] is None:\n neighbors[faces[indx, 0]] = [faces[indx, 1], faces[indx, 2]]\n else:\n neighbors[faces[indx, 0]].append(faces[indx, 1])\n neighbors[faces[indx, 0]].append(faces[indx, 2])\n if neighbors[faces[indx, 1]] is None:\n neighbors[faces[indx, 1]] = [faces[indx, 2], faces[indx, 0]]\n else:\n neighbors[faces[indx, 1]].append(faces[indx, 2])\n neighbors[faces[indx, 1]].append(faces[indx, 0])\n if neighbors[faces[indx, 2]] is None:\n 
neighbors[faces[indx, 2]] = [faces[indx, 0], faces[indx, 1]]\n else:\n neighbors[faces[indx, 2]].append(faces[indx, 0])\n neighbors[faces[indx, 2]].append(faces[indx, 1])\n\n for indx, neighbor in enumerate(neighbors):\n # remove None values\n temp_list = [point for point in neighbor if point is not None]\n\n # remove redundant indices in each sublist\n neighbors[indx] = list(set(temp_list))\n\n return neighbors\n\n\ndef fit_plane(plane, label, debugging=False):\n \"\"\"\n Fit a plane to labelled indices using the equation a*x+ b*y + c*z + d = 0.\n\n :param plane: 3D binary array, where the voxels belonging to the plane are set\n to 1 and others are set to 0.\n :param label: int, label of the plane used for the title in plots\n :param debugging: show plots for debugging\n :return: fit parameters (a, b, c, d), plane indices after filtering,\n errors associated, a stop flag\n \"\"\"\n indices = np.asarray(np.nonzero(plane))\n no_points = False\n\n if len(indices[0]) == 0:\n no_points = True\n return 0, indices, 0, no_points\n\n for idx in range(2):\n # remove isolated points, which probably do not belong to the plane\n if debugging:\n gu.scatter_plot(\n np.asarray(np.nonzero(plane)).transpose(),\n labels=(\"axis 0\", \"axis 1\", \"axis 2\"),\n title=\"Points before coordination threshold plane \"\n + str(label)\n + f\"\\niteration {idx}\",\n )\n\n for point in range(indices.shape[1]):\n neighbors = plane[\n indices[0, point] - 2 : indices[0, point] + 3,\n indices[1, point] - 2 : indices[1, point] + 3,\n indices[2, point] - 2 : indices[2, point] + 3,\n ].sum()\n if neighbors < 5:\n plane[indices[0, point], indices[1, point], indices[2, point]] = 0\n\n print(\n \"Fit plane\",\n label,\n \", \",\n str(indices.shape[1] - plane[plane == 1].sum()),\n \"points isolated, \",\n str(plane[plane == 1].sum()),\n \"remaining\",\n )\n if debugging:\n gu.scatter_plot(\n np.asarray(np.nonzero(plane)).transpose(),\n labels=(\"axis 0\", \"axis 1\", \"axis 2\"),\n title=\"Points after coordination threshold plane \"\n + str(label)\n + f\"\\niteration {idx}\",\n )\n\n # update plane indices\n indices = np.asarray(np.nonzero(plane))\n if len(indices[0]) == 0:\n no_points = True\n return 0, indices, 0, no_points\n\n # remove also points farther away than the median distance to the COM\n dist = np.zeros(indices.shape[1])\n x_com, y_com, z_com = center_of_mass(plane)\n for point in range(indices.shape[1]):\n dist[point] = np.sqrt(\n (indices[0, point] - x_com) ** 2\n + (indices[1, point] - y_com) ** 2\n + (indices[2, point] - z_com) ** 2\n )\n median_dist = np.median(dist)\n if debugging:\n gu.scatter_plot(\n np.asarray(np.nonzero(plane)).transpose(),\n labels=(\"axis 0\", \"axis 1\", \"axis 2\"),\n title=\"Points before distance threshold plane \"\n + str(label)\n + f\"\\niteration {idx}\",\n )\n\n for point in range(indices.shape[1]):\n if dist[point] > median_dist:\n plane[indices[0, point], indices[1, point], indices[2, point]] = 0\n print(\n \"Fit plane\",\n label,\n \", \",\n str(indices.shape[1] - plane[plane == 1].sum()),\n \"points too far from COM, \",\n str(plane[plane == 1].sum()),\n \"remaining\",\n )\n if debugging:\n gu.scatter_plot(\n np.asarray(np.nonzero(plane)).transpose(),\n labels=(\"axis 0\", \"axis 1\", \"axis 2\"),\n title=\"Points after distance threshold plane \"\n + str(label)\n + f\"\\niteration {idx}\",\n )\n\n # update plane indices and check if enough points remain\n indices = np.asarray(np.nonzero(plane))\n if len(indices[0]) < 5:\n no_points = True\n return 0, indices, 0, 
no_points\n\n # the fit parameters are (a, b, c, d) such that a*x + b*y + c*z + d = 0\n params, std_param, valid_plane = util.plane_fit(\n indices=indices, label=label, threshold=1, debugging=debugging\n )\n if not valid_plane:\n plane[indices] = 0\n no_points = True\n return params, indices, std_param, no_points\n\n\ndef grow_facet(fit, plane, label, support, max_distance=0.90, debugging=True):\n \"\"\"\n Find voxels of the object which belong to a facet.\n\n It uses the facet plane equation and the distance to the plane to find such voxels.\n\n :param fit: coefficients of the plane (a, b, c, d) such that a*x + b*y + c*z + d = 0\n :param plane: 3D binary support of the plane, with shape of the full dataset\n :param label: the label of the plane processed\n :param support: 3D binary support of the reconstructed object,\n with shape of the full dataset\n :param max_distance: in pixels, maximum allowed distance to the facet plane\n of a voxel\n :param debugging: set to True to see plots\n :return: the updated plane, a stop flag\n \"\"\"\n nbz, nby, nbx = plane.shape\n indices = np.nonzero(plane)\n if len(indices[0]) == 0:\n no_points = True\n return plane, no_points\n kernel = np.ones((3, 3, 3))\n\n start_z = max(indices[0].min() - 20, 0)\n stop_z = min(indices[0].max() + 21, nbz)\n start_y = max(indices[1].min() - 20, 0)\n stop_y = min(indices[1].max() + 21, nby)\n start_x = max(indices[2].min() - 20, 0)\n stop_x = min(indices[2].max() + 21, nbx)\n\n # find nearby voxels using the coordination number\n obj = np.copy(plane[start_z:stop_z, start_y:stop_y, start_x:stop_x])\n coord = np.rint(convolve(obj, kernel, mode=\"same\"))\n coord = coord.astype(int)\n coord[np.nonzero(coord)] = 1\n if debugging:\n gu.scatter_plot_overlaid(\n arrays=(np.asarray(np.nonzero(coord)).T, np.asarray(np.nonzero(obj)).T),\n markersizes=(2, 8),\n markercolors=(\"b\", \"r\"),\n labels=(\"x\", \"y\", \"z\"),\n title=\"Plane\" + str(label) + \" before facet growing and coord matrix\",\n )\n\n # update plane with new voxels\n temp_plane = np.copy(plane)\n temp_plane[start_z:stop_z, start_y:stop_y, start_x:stop_x] = coord\n # remove voxels not belonging to the support\n temp_plane[support == 0] = 0\n # check distance of new voxels to the plane\n\n plane, no_points = distance_threshold(\n fit=fit,\n indices=np.nonzero(temp_plane),\n plane_shape=temp_plane.shape,\n max_distance=max_distance,\n )\n\n plane_normal = fit[:-1] # normal is [a, b, c] if ax+by+cz+d=0\n\n # calculate the local gradient for each point of the plane,\n # gradients is a list of arrays of 3 vector components\n indices = np.nonzero(plane)\n gradients = surface_gradient(\n list(zip(indices[0], indices[1], indices[2])), support=support\n )\n\n count_grad = 0\n nb_indices = len(indices[0])\n for idx in range(nb_indices):\n if np.dot(plane_normal, gradients[idx]) < 0.75:\n # 0.85 is too restrictive checked CH4760 S11 plane 1\n plane[indices[0][idx], indices[1][idx], indices[2][idx]] = 0\n count_grad += 1\n\n indices = np.nonzero(plane)\n if debugging and len(indices[0]) != 0:\n gu.scatter_plot(\n array=np.asarray(indices).T,\n labels=(\"x\", \"y\", \"z\"),\n title=\"Plane\" + str(label) + \" after 1 cycle of facet growing\",\n )\n print(f\"{count_grad} points excluded by gradient filtering\")\n print(str(len(indices[0])) + \" after 1 cycle of facet growing\")\n return plane, no_points\n\n\ndef offset_plane(indices, offset, plane_normal):\n \"\"\"\n Shift plane indices by the offset value in order to scan perpendicular to the plane.\n\n :param indices: 
tuple of 3 1D ndarrays (array shape = nb_points)\n :param offset: offset to be applied to the indices (offset of the plane)\n :param plane_normal: ndarray of 3 elements, normal to the plane\n :return: offseted indices\n \"\"\"\n if not isinstance(indices, tuple):\n raise ValueError(\"indices should be a tuple of 3 1D ndarrays\")\n new_indices0 = np.rint(\n indices[0]\n + offset\n * np.dot(np.array([1, 0, 0]), plane_normal / np.linalg.norm(plane_normal))\n ).astype(int)\n new_indices1 = np.rint(\n indices[1]\n + offset\n * np.dot(np.array([0, 1, 0]), plane_normal / np.linalg.norm(plane_normal))\n ).astype(int)\n new_indices2 = np.rint(\n indices[2]\n + offset\n * np.dot(np.array([0, 0, 1]), plane_normal / np.linalg.norm(plane_normal))\n ).astype(int)\n return new_indices0, new_indices1, new_indices2\n\n\ndef remove_duplicates(vertices, faces, debugging=False):\n \"\"\"\n Remove duplicates in a list of vertices and faces.\n\n A face is a triangle made of three vertices.\n\n :param vertices: a ndarray of vertices, shape (N, 3)\n :param faces: a ndarray of vertex indices, shape (M, 3)\n :param debugging: True to see which vertices are duplicated and how lists are\n modified\n :return: the updated vertices and faces with duplicates removed in place\n \"\"\"\n # find indices which are duplicated\n uniq_vertices, uniq_inverse = np.unique(vertices, axis=0, return_inverse=True)\n indices, count = np.unique(uniq_inverse, return_counts=True)\n duplicated_indices = indices[count != 1] # list of vertices which are not unique\n\n # for each duplicated vertex, build the list of the corresponding identical vertices\n list_duplicated = []\n for idx, value in enumerate(duplicated_indices):\n same_vertices = np.argwhere(vertices == uniq_vertices[value, :])\n # same_vertices is a ndarray of the form\n # [[ind0, 0], [ind0, 1], [ind0, 2], [ind1, 0], [ind1, 1], [ind1, 2],...]\n list_duplicated.append(list(same_vertices[::3, 0]))\n\n # remove duplicates in vertices\n remove_vertices = [value for sublist in list_duplicated for value in sublist[1:]]\n vertices = np.delete(vertices, remove_vertices, axis=0)\n print(len(remove_vertices), \"duplicated vertices removed\")\n\n # remove duplicated_vertices in faces\n for idx, temp_array in enumerate(list_duplicated):\n for idy in range(1, len(temp_array)):\n duplicated_value = temp_array[idy]\n faces[faces == duplicated_value] = temp_array[0]\n # temp_array[0] is the unique value, others are duplicates\n\n # all indices above duplicated_value have to be decreased by 1\n # to keep the match with the number of vertices\n faces[faces > duplicated_value] = faces[faces > duplicated_value] - 1\n\n # update accordingly all indices above temp_array[idy]\n if debugging:\n print(\"temp_array before\", temp_array)\n print(\"list_duplicated before\", list_duplicated)\n temp_array = [\n (value - 1) if value > duplicated_value else value\n for value in temp_array\n ]\n list_duplicated = [\n [\n (value - 1) if value > duplicated_value else value\n for value in sublist\n ]\n for sublist in list_duplicated\n ]\n if debugging:\n print(\"temp_array after\", temp_array)\n print(\"list_duplicated after\", list_duplicated)\n\n # look for faces with 2 identical vertices\n # (cannot define later a normal to these faces)\n remove_faces = []\n for idx in range(faces.shape[0]):\n if np.unique(faces[idx, :], axis=0).shape[0] != faces[idx, :].shape[0]:\n remove_faces.append(idx)\n faces = np.delete(faces, remove_faces, axis=0)\n print(len(remove_faces), \"faces with identical vertices removed\")\n\n 
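# Hedged illustration of the re-indexing above, with hypothetical values: if
# vertex 5 duplicates vertex 2, face indices equal to 5 are remapped to 2 and
# all indices greater than 5 are decreased by 1, so that they keep matching
# the shortened vertex array.
import numpy as np

_faces = np.array([[5, 6, 7]])
_faces[_faces == 5] = 2  # remap the duplicate to its unique vertex
_faces[_faces > 5] -= 1  # shift the indices above the removed vertex
assert (_faces == np.array([[2, 5, 6]])).all()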
return vertices, faces\n\n\ndef surface_indices(surface, plane_indices, margin=3):\n \"\"\"\n Find surface indices potentially belonging to a plane.\n\n It crops the surface around the plane with a certain margin, and find corresponding\n surface indices.\n\n :param surface: the 3D surface binary array\n :param plane_indices: tuple of 3 1D-arrays of plane indices\n :param margin: margin to include aroung plane indices, in pixels\n :return: 3*1D arrays of surface indices\n \"\"\"\n valid.valid_ndarray(surface, ndim=3)\n if not isinstance(plane_indices, tuple):\n plane_indices = tuple(plane_indices)\n\n surf_indices = np.nonzero(\n surface[\n plane_indices[0].min() - margin : plane_indices[0].max() + margin,\n plane_indices[1].min() - margin : plane_indices[1].max() + margin,\n plane_indices[2].min() - margin : plane_indices[2].max() + margin,\n ]\n )\n surf0 = (\n surf_indices[0] + plane_indices[0].min() - margin\n ) # add margin plane_indices[0].min() - margin\n surf1 = (\n surf_indices[1] + plane_indices[1].min() - margin\n ) # add margin plane_indices[1].min() - margin\n surf2 = (\n surf_indices[2] + plane_indices[2].min() - margin\n ) # add margin plane_indices[2].min() - margin\n return surf0, surf1, surf2\n\n\ndef stereographic_proj(\n normals,\n intensity,\n max_angle,\n savedir,\n voxel_size,\n projection_axis,\n min_distance=10,\n background_south=-1000,\n background_north=-1000,\n save_txt=False,\n cmap=default_cmap,\n planes_south=None,\n planes_north=None,\n plot_planes=True,\n scale=\"linear\",\n comment_fig=\"\",\n debugging=False,\n):\n \"\"\"\n Detect facets in an object.\n\n It uses a stereographic projection of normals to mesh triangles and watershed\n segmentation.\n\n :param normals: array of normals to mesh triangles (nb_normals rows x 3 columns)\n :param intensity: array of intensities (nb_normals rows x 1 column)\n :param max_angle: maximum angle in degree of the stereographic projection\n (should be larger than 90)\n :param savedir: directory for saving figures\n :param voxel_size: tuple of three numbers corresponding to the real-space\n voxel size in each dimension\n :param projection_axis: the projection is performed on a plane perpendicular to\n that axis (0, 1 or 2)\n :param min_distance: min_distance of corner_peaks()\n :param background_south: threshold for background determination in the projection\n from South\n :param background_north: threshold for background determination in the projection\n from North\n :param save_txt: if True, will save coordinates in a .txt file\n :param cmap: colormap used for plotting pole figures\n :param planes_south: dictionnary of crystallographic planes, e.g.\n {'111':angle_with_reflection}\n :param planes_north: dictionnary of crystallographic planes, e.g.\n {'111':angle_with_reflection}\n :param plot_planes: if True, will draw circles corresponding to crystallographic\n planes in the pole figure\n :param scale: 'linear' or 'log', scale for the colorbar of the plot\n :param comment_fig: string, comment for the filename when saving figures\n :param debugging: show plots for debugging\n :return:\n - labels_south and labels_north as 2D arrays for each projection from South and\n North\n - a (Nx4) array: projected coordinates of normals from South (u column 0,\n v column 1) and North (u column2 , v column 3). 
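# Back-of-the-envelope sketch of a stereographic projection (the actual
# mapping is delegated to calc_stereoproj_facet below; the textbook
# South-pole convention is assumed here, not taken from this module): a unit
# vector (x, y, z) maps to u = x / (1 + z) and v = y / (1 + z), rescaled to
# degrees.
import numpy as np

_normal = np.array([0.0, 0.0, 1.0])  # direction along the projection axis
_u = _normal[0] / (1 + _normal[2]) * 90
_v = _normal[1] / (1 + _normal[2]) * 90
assert (_u, _v) == (0.0, 0.0)  # the opposite pole projects onto the origin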
The coordinates are in\n degrees, not indices.\n - the list of rows to remove\n\n \"\"\"\n\n def mouse_move(event):\n \"\"\"Write the density value at the position of the mouse pointer.\"\"\"\n nonlocal density_south, density_north, u_grid, v_grid, ax0, ax1\n if event.inaxes == ax0:\n index_u = util.find_nearest(u_grid[0, :], event.xdata, width=None)\n index_v = util.find_nearest(v_grid[:, 0], event.ydata, width=None)\n sys.stdout.write(\n \"\\rKDE South:\" + str(\"{:.0f}\".format(density_south[index_v, index_u]))\n )\n sys.stdout.flush()\n elif event.inaxes == ax1:\n index_u = util.find_nearest(u_grid[0, :], event.xdata, width=None)\n index_v = util.find_nearest(v_grid[:, 0], event.ydata, width=None)\n sys.stdout.write(\n \"\\rKDE North:\" + str(\"{:.0f}\".format(density_north[index_v, index_u]))\n )\n sys.stdout.flush()\n else:\n pass\n\n if comment_fig and comment_fig[-1] != \"_\":\n comment_fig = comment_fig + \"_\"\n radius_mean = 1 # normals are normalized\n stereo_center = 0 # COM of the weighted point density,\n # where the projection plane intersects the reference axis\n # since the normals have their origin at 0,\n # the projection plane is the equator and stereo_center=0\n\n # check normals for nan\n list_nan = np.argwhere(np.isnan(normals))\n normals = np.delete(normals, list_nan[::3, 0], axis=0)\n intensity = np.delete(intensity, list_nan[::3, 0], axis=0)\n\n # recalculate normals considering the anisotropy of voxel sizes\n # (otherwise angles are wrong)\n # the stereographic projection is in reciprocal space,\n # therefore we need to use the reciprocal voxel sizes\n iso_normals = np.copy(normals)\n iso_normals[:, 0] = iso_normals[:, 0] * 2 * np.pi / voxel_size[0]\n iso_normals[:, 1] = iso_normals[:, 1] * 2 * np.pi / voxel_size[1]\n iso_normals[:, 2] = iso_normals[:, 2] * 2 * np.pi / voxel_size[2]\n # normalize iso_normals\n iso_normals_length = np.sqrt(\n iso_normals[:, 0] ** 2 + iso_normals[:, 1] ** 2 + iso_normals[:, 2] ** 2\n )\n iso_normals = iso_normals / iso_normals_length[:, np.newaxis]\n\n # calculate the normalized Euclidian metric coordinates u and v from xyz\n stereo_proj, uv_labels = calc_stereoproj_facet(\n projection_axis=projection_axis,\n vectors=iso_normals,\n radius_mean=radius_mean,\n stereo_center=stereo_center,\n )\n # stereo_proj[:, 0] is the euclidian u_south,\n # stereo_proj[:, 1] is the euclidian v_south\n # stereo_proj[:, 2] is the euclidian u_north,\n # stereo_proj[:, 3] is the euclidian v_north\n\n # remove intensity where stereo_proj is infinite\n list_bad = np.argwhere(\n np.isinf(stereo_proj) | np.isnan(stereo_proj)\n ) # elementwise or\n remove_row = list(set(list_bad[:, 0])) # remove duplicated row indices\n print(\n \"remove_row indices (the stereographic projection is infinite or nan): \",\n remove_row,\n \"\\n\",\n )\n stereo_proj = np.delete(stereo_proj, remove_row, axis=0)\n intensity = np.delete(intensity, remove_row, axis=0)\n\n fig, _ = gu.contour_stereographic(\n euclidian_u=stereo_proj[:, 0],\n euclidian_v=stereo_proj[:, 1],\n color=intensity,\n radius_mean=radius_mean,\n planes=planes_south,\n max_angle=max_angle,\n scale=scale,\n title=\"Projection from\\nSouth pole\",\n plot_planes=plot_planes,\n uv_labels=uv_labels,\n debugging=debugging,\n )\n fig.savefig(savedir + comment_fig + \"South pole_\" + scale + \".png\")\n fig, _ = gu.contour_stereographic(\n euclidian_u=stereo_proj[:, 2],\n euclidian_v=stereo_proj[:, 3],\n color=intensity,\n radius_mean=radius_mean,\n planes=planes_north,\n max_angle=max_angle,\n scale=scale,\n 
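# Hedged numerical sketch of the anisotropy correction above, reduced to a
# two-component analogue with hypothetical voxel sizes of 5 and 10 nm: each
# component is weighted by 2*pi/voxel_size before renormalization, so the
# short-voxel-size axis gains weight in reciprocal space.
import numpy as np

_voxel_size = np.array([5.0, 10.0])
_normal = np.array([1.0, 1.0])
_iso = _normal * 2 * np.pi / _voxel_size
_iso = _iso / np.linalg.norm(_iso)
assert _iso[0] > _iso[1]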
title=\"Projection from\\nNorth pole\",\n plot_planes=plot_planes,\n uv_labels=uv_labels,\n debugging=debugging,\n )\n fig.savefig(savedir + comment_fig + \"North pole_\" + scale + \".png\")\n\n # regrid stereo_proj\n # stereo_proj[:, 0] is the euclidian u_south,\n # stereo_proj[:, 1] is the euclidian v_south\n # stereo_proj[:, 2] is the euclidian u_north,\n # stereo_proj[:, 3] is the euclidian v_north\n nb_points = 4 * max_angle + 1\n v_grid, u_grid = np.mgrid[\n -max_angle : max_angle : (nb_points * 1j),\n -max_angle : max_angle : (nb_points * 1j),\n ]\n # v_grid changes vertically, u_grid horizontally\n nby, nbx = u_grid.shape\n density_south = griddata(\n (stereo_proj[:, 0], stereo_proj[:, 1]),\n intensity,\n (u_grid, v_grid),\n method=\"linear\",\n ) # S\n density_north = griddata(\n (stereo_proj[:, 2], stereo_proj[:, 3]),\n intensity,\n (u_grid, v_grid),\n method=\"linear\",\n ) # N\n\n # normalize for plotting\n density_south = density_south / density_south[density_south > 0].max() * 10000\n density_north = density_north / density_north[density_north > 0].max() * 10000\n\n if save_txt:\n # save metric coordinates in text file\n density_south[np.isnan(density_south)] = 0.0\n density_north[np.isnan(density_north)] = 0.0\n with open(savedir + \"CDI_poles.dat\", \"w\") as file:\n for ii in range(len(v_grid)):\n for jj in range(len(u_grid)):\n file.write(\n str(v_grid[ii, 0])\n + \"\\t\"\n + str(u_grid[0, jj])\n + \"\\t\"\n + str(density_south[ii, jj])\n + \"\\t\"\n + str(v_grid[ii, 0])\n + \"\\t\"\n + str(u_grid[0, jj])\n + \"\\t\"\n + str(density_north[ii, jj])\n + \"\\n\"\n )\n\n # inverse densities for watershed segmentation\n density_south = -1 * density_south\n density_north = -1 * density_north\n\n fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(12, 9))\n img0 = ax0.scatter(u_grid, v_grid, c=density_south, cmap=cmap)\n ax0.set_xlim(-max_angle, max_angle)\n ax0.set_ylim(-max_angle, max_angle)\n ax0.axis(\"scaled\")\n gu.colorbar(img0)\n ax0.set_title(\"KDE \\nSouth pole\")\n img1 = ax1.scatter(u_grid, v_grid, c=density_north, cmap=cmap)\n ax1.set_xlim(-max_angle, max_angle)\n ax1.set_ylim(-max_angle, max_angle)\n ax1.axis(\"scaled\")\n gu.colorbar(img1)\n ax1.set_title(\"KDE \\nNorth pole\")\n fig.text(0.32, 0.90, \"Read the threshold value in the console\", size=16)\n fig.text(0.32, 0.85, \"Click on the figure to resume the execution\", size=16)\n fig.tight_layout()\n cid = plt.connect(\"motion_notify_event\", mouse_move)\n fig.waitforbuttonpress()\n plt.disconnect(cid)\n print(\"\\n\")\n\n # identification of local minima\n density_south[\n density_south > background_south\n ] = 0 # define the background in the density of normals\n mask_south = np.copy(density_south)\n mask_south[mask_south != 0] = 1\n\n density_north[\n density_north > background_north\n ] = 0 # define the background in the density of normals\n mask_north = np.copy(density_north)\n mask_north[mask_north != 0] = 1\n\n fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2, figsize=(12, 9))\n ax0.imshow(mask_south, cmap=cmap, interpolation=\"nearest\")\n ax0.set_title(\"Background mask South\")\n ax0.invert_yaxis()\n img1 = ax1.scatter(u_grid, v_grid, c=density_south, cmap=cmap)\n ax1.set_xlim(-max_angle, max_angle)\n ax1.set_ylim(-max_angle, max_angle)\n ax1.axis(\"scaled\")\n gu.colorbar(img1)\n ax1.set_title(\"KDE South pole\\nafter background definition\")\n circle = patches.Circle((0, 0), 90, color=\"w\", fill=False, linewidth=1.5)\n ax1.add_artist(circle)\n ax2.imshow(mask_north, cmap=cmap, 
interpolation=\"nearest\")\n ax2.set_title(\"Background mask North\")\n ax2.invert_yaxis()\n img3 = ax3.scatter(u_grid, v_grid, c=density_north, cmap=cmap)\n ax3.set_xlim(-max_angle, max_angle)\n ax3.set_ylim(-max_angle, max_angle)\n ax3.axis(\"scaled\")\n gu.colorbar(img3)\n ax3.set_title(\"KDE North pole\\nafter background definition\")\n circle = patches.Circle((0, 0), 90, color=\"w\", fill=False, linewidth=1.5)\n ax3.add_artist(circle)\n fig.tight_layout()\n plt.pause(0.1)\n\n ##########################################################################\n # Generate the markers as local maxima of the distance to the background #\n ##########################################################################\n distances_south = ndimage.distance_transform_edt(density_south)\n distances_north = ndimage.distance_transform_edt(density_north)\n if debugging:\n fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)\n img0 = ax0.imshow(distances_south, cmap=cmap, interpolation=\"nearest\")\n ax0.set_title(\"Distances South\")\n gu.colorbar(img0)\n ax0.invert_yaxis()\n img1 = ax1.imshow(distances_north, cmap=cmap, interpolation=\"nearest\")\n ax1.set_title(\"Distances North\")\n gu.colorbar(img1)\n ax1.invert_yaxis()\n fig.tight_layout()\n plt.pause(0.1)\n\n local_maxi_south = corner_peaks(\n distances_south, exclude_border=False, min_distance=min_distance, indices=False\n )\n local_maxi_north = corner_peaks(\n distances_north, exclude_border=False, min_distance=min_distance, indices=False\n )\n if debugging:\n fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)\n ax0.imshow(local_maxi_south, interpolation=\"nearest\")\n ax0.set_title(\"local_maxi South before filtering\")\n ax0.invert_yaxis()\n circle = patches.Ellipse(\n (nbx // 2, nby // 2), 361, 361, color=\"r\", fill=False, linewidth=1.5\n )\n ax0.add_artist(circle)\n ax1.imshow(local_maxi_north, interpolation=\"nearest\")\n ax1.set_title(\"local_maxi North before filtering\")\n ax1.invert_yaxis()\n circle = patches.Ellipse(\n (nbx // 2, nby // 2), 361, 361, color=\"r\", fill=False, linewidth=1.5\n )\n ax1.add_artist(circle)\n fig.tight_layout()\n plt.pause(0.1)\n\n # define the marker for each peak\n markers_south = ndimage.label(local_maxi_south)[0] # range from 0 to nb_peaks\n # define non overlaping markers for the North projection:\n # the first marker value is (markers_south.max()+1)\n markers_north = ndimage.label(local_maxi_north)[0] + markers_south.max(initial=None)\n # markers_north.min() is 0 since it is the background\n markers_north[markers_north == markers_south.max(initial=None)] = 0\n if debugging:\n fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)\n ax0.imshow(\n markers_south, interpolation=\"nearest\", cmap=\"binary\", vmin=0, vmax=1\n )\n ax0.set_title(\"markers South\")\n ax0.invert_yaxis()\n circle = patches.Ellipse(\n (nbx // 2, nby // 2), 361, 361, color=\"r\", fill=False, linewidth=1.5\n )\n ax0.add_artist(circle)\n ax1.imshow(\n markers_north, interpolation=\"nearest\", cmap=\"binary\", vmin=0, vmax=1\n )\n ax1.set_title(\"markers North\")\n ax1.invert_yaxis()\n circle = patches.Ellipse(\n (nbx // 2, nby // 2), 361, 361, color=\"r\", fill=False, linewidth=1.5\n )\n ax1.add_artist(circle)\n fig.tight_layout()\n plt.pause(0.1)\n\n ##########################\n # watershed segmentation #\n ##########################\n labels_south = watershed(-1 * distances_south, markers_south, mask=mask_south)\n labels_north = watershed(-1 * distances_north, markers_north, mask=mask_north)\n fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, 
figsize=(12, 9))\n img0 = ax0.imshow(labels_south, cmap=cmap, interpolation=\"nearest\")\n ax0.set_title(\"Labels South\")\n ax0.invert_yaxis()\n circle = patches.Ellipse(\n (nbx // 2, nby // 2), 361, 361, color=\"r\", fill=False, linewidth=1.5\n )\n ax0.add_artist(circle)\n gu.colorbar(img0, numticks=int(labels_south.max() + 1))\n img1 = ax1.imshow(labels_north, cmap=cmap, interpolation=\"nearest\")\n ax1.set_title(\"Labels North\")\n ax1.invert_yaxis()\n circle = patches.Ellipse(\n (nbx // 2, nby // 2), 361, 361, color=\"r\", fill=False, linewidth=1.5\n )\n ax1.add_artist(circle)\n gu.colorbar(img1, numticks=int(labels_north.max() + 1))\n fig.tight_layout()\n plt.pause(0.1)\n fig.savefig(savedir + comment_fig + \"labels.png\")\n\n return labels_south, labels_north, stereo_proj, remove_row\n\n\ndef surface_gradient(points, support, width=2):\n \"\"\"\n Calculate the support gradient at point.\n\n :param points: tuple or list of tuples of 3 integers (z, y, x), position where\n to calculate the gradient vector\n :param support: 3D numpy binary array, being 1 in the crystal and 0 outside\n :param width: half-width of the window where the gradient will be calculated\n (the support gradient is nonzero on a single layer, it avoids missing it)\n :return: a list of normalized vector(s) (array(s) of 3 numbers) oriented\n towards the exterior of the cristal\n \"\"\"\n gradz, grady, gradx = np.gradient(support, 1) # support\n vectors = []\n if not isinstance(points, list):\n points = [points]\n\n for _, point in enumerate(points):\n # round the point to integer numbers\n point = [int(np.rint(point[idx])) for idx in range(3)]\n\n # calculate the gradient in a small window around point\n # (gradient will be nonzero on a single layer)\n gradz_slice = gradz[\n point[0] - width : point[0] + width + 1,\n point[1] - width : point[1] + width + 1,\n point[2] - width : point[2] + width + 1,\n ]\n val = (gradz_slice != 0).sum()\n if val == 0:\n vector_z = 0\n else:\n vector_z = gradz_slice.sum() / val\n\n grady_slice = grady[\n point[0] - width : point[0] + width + 1,\n point[1] - width : point[1] + width + 1,\n point[2] - width : point[2] + width + 1,\n ]\n val = (grady_slice != 0).sum()\n if val == 0:\n vector_y = 0\n else:\n vector_y = grady_slice.sum() / val\n\n gradx_slice = gradx[\n point[0] - width : point[0] + width + 1,\n point[1] - width : point[1] + width + 1,\n point[2] - width : point[2] + width + 1,\n ]\n val = (gradx_slice != 0).sum()\n if val == 0:\n vector_x = 0\n else:\n vector_x = gradx_slice.sum() / val\n\n # support was 1 inside, 0 outside,\n # the vector needs to be flipped to point towards the outside\n vectors.append(\n [-vector_z, -vector_y, -vector_x]\n / np.linalg.norm([-vector_z, -vector_y, -vector_x])\n )\n return vectors\n\n\ndef taubin_smooth(\n faces,\n vertices,\n cmap=default_cmap,\n iterations=10,\n lamda=0.33,\n mu=0.34,\n radius=0.1,\n debugging=False,\n):\n \"\"\"\n Perform Taubin's smoothing of a mesh.\n\n It performs a back and forward Laplacian smoothing \"without shrinking\" of a\n triangulated mesh, as described by Gabriel Taubin (ICCV '95)\n\n :param faces: m*3 ndarray of m faces defined by 3 indices of vertices\n :param vertices: n*3 ndarray of n vertices defined by 3 positions\n :param cmap: colormap used for plotting\n :param iterations: number of iterations for smoothing\n :param lamda: smoothing variable 0 < lambda < mu < 1\n :param mu: smoothing variable 0 < lambda < mu < 1\n :param radius: radius around which the normals are integrated in the calculation\n of 
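# Minimal check of the sign convention used in surface_gradient above: for a
# support equal to 1 inside and 0 outside, np.gradient points toward the
# inside at the surface, hence the sign flip to obtain an outward vector
# (hypothetical support).
import numpy as np

_support = np.zeros((5, 5, 5))
_support[:, :, :3] = 1  # the crystal occupies the low-x half
_gradz, _grady, _gradx = np.gradient(_support, 1)
assert _gradx[2, 2, 3] < 0  # gradient along -x, i.e. toward the interior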
the density of normals\n :param debugging: show plots for debugging\n :return: smoothened vertices (ndarray n*3), normals to triangle (ndarray m*3),\n weighted density of normals, updated faces, errors\n \"\"\"\n from mpl_toolkits.mplot3d import Axes3D\n\n plt.ion()\n\n print(\"Original number of vertices:\", vertices.shape[0])\n print(\"Original number of faces:\", faces.shape[0])\n new_vertices = np.copy(vertices)\n\n for k in range(iterations):\n # check the unicity of vertices otherwise 0 distance would happen\n if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:\n print(\"\\nTaubin smoothing / lambda: duplicated vertices at iteration\", k)\n new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)\n vertices = np.copy(new_vertices)\n neighbours = find_neighbours(\n vertices, faces\n ) # get the indices of neighboring vertices for each vertex\n indices_edges = detect_edges(\n faces\n ) # find indices of vertices defining non-shared edges (near hole...)\n\n for i in range(vertices.shape[0]):\n indices = neighbours[i] # list of indices\n distances = np.sqrt(\n np.sum((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)\n )\n weights = distances ** (-1)\n vectoren = weights[:, np.newaxis] * vertices[indices, :]\n totaldist = sum(weights)\n new_vertices[i, :] = vertices[i, :] + lamda * (\n np.sum(vectoren, axis=0) / totaldist - vertices[i, :]\n )\n\n if indices_edges.size != 0:\n new_vertices[indices_edges, :] = vertices[indices_edges, :]\n\n # check the unicity of vertices otherwise 0 distance would happen\n if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:\n print(\"\\nTaubin smoothing / mu: duplicated vertices at iteration\", k)\n new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)\n vertices = np.copy(new_vertices)\n neighbours = find_neighbours(\n vertices, faces\n ) # get the indices of neighboring vertices for each vertex\n indices_edges = detect_edges(\n faces\n ) # find indices of vertices defining non-shared edges (near hole...)\n\n for i in range(vertices.shape[0]):\n\n indices = neighbours[i] # list of indices\n distances = np.sqrt(\n np.sum((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)\n )\n weights = distances ** (-1)\n vectoren = weights[:, np.newaxis] * vertices[indices, :]\n totaldist = sum(weights)\n new_vertices[i, :] = vertices[i, :] - mu * (\n sum(vectoren) / totaldist - vertices[i, :]\n )\n\n if indices_edges.size != 0:\n new_vertices[indices_edges, :] = vertices[indices_edges, :]\n\n # check the unicity of vertices otherwise 0 distance would happen\n if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:\n print(\"\\nTaubin smoothing / exiting loop: duplicated vertices\")\n new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)\n\n nan_vertices = np.argwhere(np.isnan(new_vertices[:, 0]))\n print(\n \"Number of nan in new_vertices:\",\n nan_vertices.shape[0],\n \"; Total number of vertices:\",\n new_vertices.shape[0],\n )\n\n # Create an indexed view into the vertex array using\n # the array of three indices for triangles\n tris = new_vertices[faces]\n # Calculate the normal for all the triangles,\n # by taking the cross product of the vectors v1-v0,\n # and v2-v0 in each triangle\n normals = np.cross(tris[:, 1] - tris[:, 0], tris[:, 2] - tris[::, 0])\n areas = np.array([1 / 2 * np.linalg.norm(normal) for normal in normals])\n normals_length = np.sqrt(\n normals[:, 0] ** 2 + normals[:, 1] ** 2 + normals[:, 2] ** 2\n )\n normals = -1 * normals / 
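# Hedged sketch of one inverse-distance-weighted umbrella step from the loops
# above (hypothetical neighbours): the vertex moves toward the weighted
# average of its neighbours by a factor lamda, and away from it by mu in the
# second pass.
import numpy as np

_vertex = np.array([0.0, 0.0, 0.0])
_neighbours = np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
_weights = 1 / np.sqrt(((_neighbours - _vertex) ** 2).sum(axis=1))
_average = (_weights[:, np.newaxis] * _neighbours).sum(axis=0) / _weights.sum()
assert np.allclose(_average, [2 / 3, 2 / 3, 0.0])
_new_vertex = _vertex + 0.33 * (_average - _vertex)  # lamda = 0.33 step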
normals_length[:, np.newaxis] # flip and normalize normals\n # n is now an array of normalized normals, one per triangle.\n\n # calculate the colormap for plotting\n # the weighted point density of normals on a sphere\n intensity = np.zeros(normals.shape[0], dtype=normals.dtype)\n for i in range(normals.shape[0]):\n distances = np.sqrt(\n np.sum((normals - normals[i, :]) ** 2, axis=1)\n ) # ndarray of normals.shape[0]\n intensity[i] = np.multiply(\n areas[distances < radius], distances[distances < radius]\n ).sum()\n # normals are weighted by the area of mesh triangles\n\n intensity = intensity / max(intensity)\n if debugging:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(normals[:, 0], normals[:, 1], normals[:, 2], c=intensity, cmap=cmap)\n ax.set_xlim(-1, 1)\n ax.set_xlabel(\"z\")\n ax.set_ylim(-1, 1)\n ax.set_ylabel(\"y\")\n ax.set_zlim(-1, 1)\n ax.set_zlabel(\"x\")\n plt.title(\"Weighted point densities before KDE\")\n plt.pause(0.1)\n err_normals = np.argwhere(np.isnan(normals[:, 0]))\n normals[err_normals, :] = normals[err_normals - 1, :]\n plt.ioff()\n\n # check normals for nan\n list_nan = np.argwhere(np.isnan(normals))\n normals = np.delete(normals, list_nan[::3, 0], axis=0)\n intensity = np.delete(intensity, list_nan[::3, 0], axis=0)\n\n return new_vertices, normals, areas, intensity, faces, err_normals\n\n\ndef update_logfile(\n support,\n strain_array,\n summary_file,\n allpoints_file,\n label=0,\n angle_plane=np.nan,\n plane_coeffs=(0, 0, 0, 0),\n plane_normal=(0, 0, 0),\n):\n \"\"\"\n Update log files use in the facet_strain.py script.\n\n :param support: the 3D binary support defining voxels to be saved in the logfile\n :param strain_array: the 3D strain array\n :param summary_file: the handle for the file summarizing strain statistics per facet\n :param allpoints_file: the handle for the file giving the strain and the label\n for each voxel\n :param label: the label of the plane\n :param angle_plane: the angle of the plane with the measurement direction\n :param plane_coeffs: the fit coefficients (a,b,c,d) of the plane such\n that ax+by+cz+d=0\n :param plane_normal: the normal to the plane\n :return: nothing\n \"\"\"\n if (support.ndim != 3) or (strain_array.ndim != 3):\n raise ValueError(\"The support and the strain arrays should be 3D arrays\")\n\n support_indices = np.nonzero(support == 1)\n ind_z = support_indices[0]\n ind_y = support_indices[1]\n ind_x = support_indices[2]\n nb_points = len(support_indices[0])\n for idx in range(nb_points):\n if strain_array[ind_z[idx], ind_y[idx], ind_x[idx]] != 0:\n # remove the artefact from YY reconstrutions at the bottom facet\n allpoints_file.write(\n \"{0: <10}\".format(str(label))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.3f}\".format(angle_plane)))\n + \"\\t\"\n + \"{0: <10}\".format(\n str(\n \"{:.7f}\".format(\n strain_array[ind_z[idx], ind_y[idx], ind_x[idx]]\n )\n )\n )\n + \"\\t\"\n + \"{0: <10}\".format(str(ind_z[idx]))\n + \"\\t\"\n + \"{0: <10}\".format(str(ind_y[idx]))\n + \"\\t\"\n + \"{0: <10}\".format(str(ind_x[idx]))\n + \"\\n\"\n )\n\n str_array = strain_array[support == 1]\n str_array[\n str_array == 0\n ] = np.nan # remove the artefact from YY reconstrutions at the bottom facet\n support_strain = np.mean(str_array[~np.isnan(str_array)])\n support_deviation = np.std(str_array[~np.isnan(str_array)])\n\n # support_strain = np.mean(strain_array[support == 1])\n # support_deviation = np.std(strain_array[support == 1])\n summary_file.write(\n \"{0: <10}\".format(str(label))\n + \"\\t\"\n + \"{0: 
<10}\".format(str(\"{:.3f}\".format(angle_plane)))\n + \"\\t\"\n + \"{0: <10}\".format(str(nb_points))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.7f}\".format(support_strain)))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.7f}\".format(support_deviation)))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.5f}\".format(plane_coeffs[0])))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.5f}\".format(plane_coeffs[1])))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.5f}\".format(plane_coeffs[2])))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.5f}\".format(plane_coeffs[3])))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.5f}\".format(plane_normal[0])))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.5f}\".format(plane_normal[1])))\n + \"\\t\"\n + \"{0: <10}\".format(str(\"{:.5f}\".format(plane_normal[2])))\n + \"\\n\"\n )\n\n\ndef upsample(array, upsampling_factor, voxelsizes=None, title=\"\", debugging=False):\n \"\"\"\n Upsample array using a factor of upsampling.\n\n :param array: the real array to be upsampled\n :param upsampling_factor: int, the upsampling factor\n :param voxelsizes: list, the voxel sizes of array\n :param title: title for the debugging plot\n :param debugging: True to see plots\n :return: the upsampled array\n \"\"\"\n valid.valid_ndarray(array, ndim=(2, 3))\n ndim = array.ndim\n\n valid.valid_item(\n value=upsampling_factor,\n allowed_types=int,\n min_included=1,\n name=\"utils.upsample\",\n )\n if voxelsizes is None:\n voxelsizes = (1,) * ndim\n valid.valid_container(\n voxelsizes,\n container_types=(list, tuple, np.ndarray),\n length=ndim,\n item_types=Real,\n min_excluded=0,\n name=\"utils.upsample\",\n )\n\n vmin, vmax = array.min(), array.max()\n\n if ndim == 3:\n if debugging:\n gu.multislices_plot(\n array,\n sum_frames=False,\n title=title + \" before upsampling\",\n vmin=vmin,\n vmax=vmax,\n scale=\"linear\",\n plot_colorbar=True,\n reciprocal_space=False,\n is_orthogonal=True,\n )\n nbz, nby, nbx = array.shape\n numz, numy, numx = (\n nbz * upsampling_factor,\n nby * upsampling_factor,\n nbx * upsampling_factor,\n )\n newvoxelsizes = [voxsize / upsampling_factor for voxsize in voxelsizes]\n\n newz, newy, newx = np.meshgrid(\n np.arange(-numz // 2, numz // 2, 1) * newvoxelsizes[0],\n np.arange(-numy // 2, numy // 2, 1) * newvoxelsizes[1],\n np.arange(-numx // 2, numx // 2, 1) * newvoxelsizes[2],\n indexing=\"ij\",\n )\n\n rgi = RegularGridInterpolator(\n (\n np.arange(-nbz // 2, nbz // 2) * voxelsizes[0],\n np.arange(-nby // 2, nby // 2) * voxelsizes[1],\n np.arange(-nbx // 2, nbx // 2) * voxelsizes[2],\n ),\n array,\n method=\"linear\",\n bounds_error=False,\n fill_value=0,\n )\n\n obj = rgi(\n np.concatenate(\n (\n newz.reshape((1, newz.size)),\n newy.reshape((1, newz.size)),\n newx.reshape((1, newz.size)),\n )\n ).transpose()\n )\n\n obj = obj.reshape((numz, numy, numx)).astype(array.dtype)\n\n if debugging:\n gu.multislices_plot(\n obj,\n sum_frames=False,\n title=title + \" after upsampling\",\n vmin=vmin,\n vmax=vmax,\n scale=\"linear\",\n plot_colorbar=True,\n reciprocal_space=False,\n is_orthogonal=True,\n )\n\n else: # 2D case\n if debugging:\n gu.imshow_plot(\n array,\n title=title + \" before upsampling\",\n vmin=vmin,\n vmax=vmax,\n scale=\"linear\",\n plot_colorbar=True,\n reciprocal_space=False,\n is_orthogonal=True,\n )\n nby, nbx = array.shape\n numy, numx = nby * upsampling_factor, nbx * upsampling_factor\n newvoxelsizes = [voxsize / upsampling_factor for voxsize in voxelsizes]\n\n newy, newx = np.meshgrid(\n np.arange(-numy // 2, numy // 2, 1) * 
newvoxelsizes[0],\n np.arange(-numx // 2, numx // 2, 1) * newvoxelsizes[1],\n indexing=\"ij\",\n )\n\n rgi = RegularGridInterpolator(\n (\n np.arange(-nby // 2, nby // 2) * voxelsizes[0],\n np.arange(-nbx // 2, nbx // 2) * voxelsizes[1],\n ),\n array,\n method=\"linear\",\n bounds_error=False,\n fill_value=0,\n )\n\n obj = rgi(\n np.concatenate(\n (newy.reshape((1, newy.size)), newx.reshape((1, newy.size)))\n ).transpose()\n )\n\n obj = obj.reshape((numy, numx)).astype(array.dtype)\n\n if debugging:\n gu.imshow_plot(\n obj,\n title=title + \" after upsampling\",\n vmin=vmin,\n vmax=vmax,\n scale=\"linear\",\n plot_colorbar=True,\n reciprocal_space=False,\n is_orthogonal=True,\n )\n\n return obj, newvoxelsizes\n", "# -*- coding: utf-8 -*-\n\n# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data\n# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP\n# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE\n# authors:\n# Jerome Carnis, [email protected]\n\"\"\"Functions related to visualization.\"\"\"\n\nimport logging\nimport os\nimport pathlib\nimport sys\nfrom numbers import Real\nfrom operator import itemgetter\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport matplotlib as mpl\nimport matplotlib.colors as colors\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport numpy as np\nfrom lmfit import Parameters, minimize\nfrom matplotlib.path import Path\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom scipy.interpolate import griddata\nfrom scipy.ndimage import map_coordinates\nfrom scipy.signal import find_peaks\n\nfrom bcdi.graph.colormap import ColormapFactory\nfrom bcdi.utils import utilities as util\nfrom bcdi.utils import validation as valid\n\ndefault_cmap = ColormapFactory(colormap=\"turbo\").cmap\nmodule_logger = logging.getLogger(__name__)\n\n\ndef close_event(event):\n \"\"\"Handle closing events on plots.\"\"\"\n print(event, \"Click on the figure instead of closing it!\")\n sys.exit()\n\n\ndef colorbar(mappable, scale=\"linear\", numticks=10, label=None, pad=0.05):\n \"\"\"\n Generate a colorbar whose height (or width) in sync with the master axes.\n\n :param mappable: the image where to put the colorbar\n :param scale: 'linear' or 'log', used for tick location\n :param numticks: number of ticks for the colorbar\n :param label: label for the colorbar\n :param pad: float (default 0.05). 
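# Standalone reproduction of the colorbar-sizing recipe used by this helper
# (standard matplotlib pattern with make_axes_locatable; hypothetical data):
# the colorbar axes is carved out of the image axes, so both stay in sync
# when the figure is resized.
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable

_fig, _ax = plt.subplots()
_img = _ax.imshow(np.arange(64).reshape(8, 8))
_cax = make_axes_locatable(_ax).append_axes("right", size="5%", pad=0.05)
_fig.colorbar(_img, cax=_cax)
plt.close(_fig)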
Fraction of original axes between colorbar and\n new image axes.\n :return: the colorbar instance\n \"\"\"\n last_axes = plt.gca()\n try:\n ax = mappable.axes\n except AttributeError: # QuadContourSet\n ax = mappable.ax\n fig = ax.figure\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=pad)\n cbar = fig.colorbar(mappable, cax=cax)\n if scale == \"linear\":\n cbar.locator = ticker.LinearLocator(numticks=numticks)\n elif scale == \"log\":\n cbar.locator = ticker.LogLocator(numticks=numticks)\n else:\n raise ValueError(\"Incorrect value for the scale parameter\")\n if label is not None:\n cbar.ax.set_ylabel(label)\n cbar.update_ticks()\n plt.sca(last_axes)\n return cbar\n\n\ndef combined_plots(\n tuple_array,\n tuple_sum_frames,\n tuple_colorbar,\n tuple_title,\n tuple_scale,\n tuple_sum_axis=None,\n cmap=default_cmap,\n tick_direction=\"out\",\n tick_width=1,\n tick_length=4,\n pixel_spacing=None,\n tuple_width_v=None,\n tuple_width_h=None,\n tuple_vmin=np.nan,\n tuple_vmax=np.nan,\n is_orthogonal=False,\n reciprocal_space=False,\n **kwargs,\n):\n \"\"\"\n Subplots of a 1D, 2D or 3D datasets using user-defined parameters.\n\n :param tuple_array: tuple of 1D, 2D or 3D arrays of real numbers\n :param tuple_sum_frames: boolean or tuple of boolean values. If True, will sum the\n data along sum_axis\n :param tuple_sum_axis: tuple of axis along which to sum or to take the middle slice\n :param tuple_width_v: int or tuple of user-defined zoom vertical width, should be\n smaller than the actual data size. Set it to None if you do not need it.\n :param tuple_width_h: int or tuple of user-defined zoom horizontal width, should be\n smaller than the actual data size. Set it to None if you do not need it.\n :param tuple_colorbar: boolean or tuple of boolean values. Set it to True in order\n to plot the colorbar\n :param tuple_vmin: float or tuple of lower boundaries for the colorbar,\n set to np.nan if you do not need it\n :param tuple_vmax: float or tuple of higher boundaries for the colorbar,\n set to np.nan if you do not need it\n :param tuple_title: string or tuple of strings, set to '' if you do not need it\n :param tuple_scale: string ot tuple of strings with value 'linear' or 'log'\n :param cmap: colormap to be used\n :param tick_direction: 'out', 'in', 'inout'\n :param tick_width: width of tickes in plots\n :param tick_length: length of tickes in plots\n :param pixel_spacing: pixel_spacing = desired tick_spacing (in nm) / voxel_size\n of the reconstruction(in nm). 
It can be a positive number or a tuple of\n array.ndim positive numbers\n :param is_orthogonal: set to True is the frame is orthogonal, False otherwise\n (detector frame) Used for plot labels.\n :param reciprocal_space: True if the data is in reciprocal space, False otherwise.\n Used for plot labels.\n :param kwargs:\n - 'xlabel' , label of the horizontal axis for plots: string or tuple of strings\n - 'ylabel' , label of the vertical axis for plots: string or tuple of strings\n - 'position' , tuple of subplot positions in the format 231 (2 rows, 3 columns,\n first subplot)\n - 'invert_y': boolean, True to invert the vertical axis of the plot.\n Will overwrite the default behavior.\n\n :return: the figure instance\n \"\"\"\n mpl.rcParams[\"axes.linewidth\"] = tick_width # set the linewidth globally\n ####################\n # check parameters #\n ####################\n tuple_sum_axis = tuple_sum_axis or 0\n invert_yaxis = False\n\n if isinstance(tuple_array, np.ndarray):\n tuple_array = (tuple_array,)\n valid.valid_ndarray(tuple_array, ndim=(1, 2, 3), fix_ndim=False)\n nb_subplots = len(tuple_array)\n\n if isinstance(tuple_sum_frames, bool):\n tuple_sum_frames = (tuple_sum_frames,) * nb_subplots\n valid.valid_container(\n obj=tuple_sum_frames,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=bool,\n name=\"graph_utils.combined_plots\",\n )\n if isinstance(tuple_sum_axis, int):\n tuple_sum_axis = (tuple_sum_axis,) * nb_subplots\n valid.valid_container(\n obj=tuple_sum_axis,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=int,\n allow_none=True,\n min_included=0,\n name=\"graph_utils.combined_plots\",\n )\n if any(sum_axis not in {0, 1, 2} for sum_axis in tuple_sum_axis):\n raise ValueError(\"sum_axis should be either 0, 1 or 2\")\n\n if isinstance(tuple_width_v, int) or tuple_width_v is None:\n tuple_width_v = (tuple_width_v,) * nb_subplots\n valid.valid_container(\n obj=tuple_width_v,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=int,\n allow_none=True,\n min_excluded=0,\n name=\"graph_utils.combined_plots\",\n )\n if isinstance(tuple_width_h, int) or tuple_width_h is None:\n tuple_width_h = (tuple_width_h,) * nb_subplots\n valid.valid_container(\n obj=tuple_width_h,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=int,\n allow_none=True,\n min_excluded=0,\n name=\"graph_utils.combined_plots\",\n )\n if isinstance(tuple_colorbar, bool):\n tuple_colorbar = (tuple_colorbar,) * nb_subplots\n valid.valid_container(\n obj=tuple_colorbar,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=bool,\n name=\"graph_utils.combined_plots\",\n )\n if isinstance(tuple_vmin, Real):\n tuple_vmin = (tuple_vmin,) * nb_subplots\n valid.valid_container(\n obj=tuple_vmin,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=Real,\n name=\"graph_utils.combined_plots\",\n )\n if isinstance(tuple_vmax, Real):\n tuple_vmax = (tuple_vmax,) * nb_subplots\n valid.valid_container(\n obj=tuple_vmax,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=Real,\n name=\"graph_utils.combined_plots\",\n )\n if any(\n vmin >= vmax\n for vmin, vmax in zip(tuple_vmin, tuple_vmax)\n if not np.isnan(vmin) and not np.isnan(vmax)\n ):\n raise ValueError(\"vmin should be strictly smaller than vmax\")\n\n if isinstance(tuple_title, str):\n tuple_title = (tuple_title,) * nb_subplots\n valid.valid_container(\n obj=tuple_title,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=str,\n 
name=\"graph_utils.combined_plots\",\n )\n if isinstance(tuple_scale, str):\n tuple_scale = (tuple_scale,) * nb_subplots\n valid.valid_container(\n obj=tuple_scale,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=str,\n name=\"graph_utils.combined_plots\",\n )\n if any(scale not in {\"linear\", \"log\"} for scale in tuple_scale):\n raise ValueError('scale should be either \"linear\" or \"log\"')\n\n #########################\n # load and check kwargs #\n #########################\n valid.valid_kwargs(\n kwargs=kwargs,\n allowed_kwargs={\"xlabel\", \"ylabel\", \"position\", \"invert_y\"},\n name=\"graph_utils.combined_plots\",\n )\n xlabel = kwargs.get(\"xlabel\", \"\")\n ylabel = kwargs.get(\"ylabel\", \"\")\n position = kwargs.get(\"position\")\n invert_y = kwargs.get(\"invert_y\", [None for _ in range(nb_subplots)])\n\n if isinstance(xlabel, str):\n xlabel = (xlabel,) * nb_subplots\n valid.valid_container(\n obj=xlabel,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=str,\n name=\"graph_utils.combined_plots\",\n )\n if isinstance(ylabel, str):\n ylabel = (ylabel,) * nb_subplots\n valid.valid_container(\n obj=ylabel,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=str,\n name=\"graph_utils.combined_plots\",\n )\n if position is None:\n nb_columns = nb_subplots // 2\n nb_rows = nb_subplots // nb_columns + nb_subplots % nb_columns\n position = [\n nb_rows * 100 + nb_columns * 10 + index\n for index in range(1, nb_subplots + 1)\n ]\n valid.valid_container(\n obj=position,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=int,\n name=\"graph_utils.combined_plots\",\n )\n if isinstance(invert_y, bool):\n invert_y = (invert_y,) * nb_subplots\n valid.valid_container(\n obj=invert_y,\n container_types=(tuple, list),\n length=nb_subplots,\n item_types=bool,\n allow_none=True,\n name=\"graph_utils.combined_plots\",\n )\n\n ##############################\n # plot subplots sequentially #\n ##############################\n plt.ion()\n fig = plt.figure(figsize=(12, 9))\n for idx in range(nb_subplots):\n\n axis = plt.subplot(position[idx])\n\n array = tuple_array[idx]\n sum_frames = tuple_sum_frames[idx]\n sum_axis = tuple_sum_axis[idx]\n width_v = tuple_width_v[idx]\n width_h = tuple_width_h[idx]\n plot_colorbar = tuple_colorbar[idx]\n vmin = tuple_vmin[idx]\n vmax = tuple_vmax[idx]\n title = tuple_title[idx]\n scale = tuple_scale[idx]\n\n nb_dim = array.ndim\n if nb_dim in {2, 3}:\n if isinstance(pixel_spacing, Real):\n pixel_spacing = (pixel_spacing,) * nb_dim\n valid.valid_container(\n obj=pixel_spacing,\n container_types=(tuple, list),\n length=nb_dim,\n item_types=Real,\n min_excluded=0,\n allow_none=True,\n name=\"graph_utils.combined_plots\",\n )\n\n if nb_dim not in {1, 2, 3}:\n print(\"array \", idx, \": wrong number of dimensions\")\n continue\n\n if nb_dim == 1:\n\n if np.isnan(vmin):\n tmp_array = np.copy(array).astype(float)\n tmp_array[np.isnan(array)] = np.inf\n tmp_array[\n np.isinf(tmp_array)\n ] = np.inf # set -inf to +inf to find the min\n vmin = tmp_array.min()\n if np.isnan(vmax):\n tmp_array = np.copy(array).astype(float)\n tmp_array[np.isnan(array)] = -1 * np.inf\n tmp_array[np.isinf(tmp_array)] = (\n -1 * np.inf\n ) # set +inf to -inf to find the max\n vmax = tmp_array.max()\n if np.isclose(vmax, vmin):\n vmax = vmin + 1\n\n axis.plot(array)\n axis.set_title(title)\n axis.set_ylim(vmin, vmax)\n axis.set_yscale(scale)\n axis.set_xlabel(xlabel[idx])\n axis.set_ylabel(ylabel[idx])\n\n continue\n\n if 
nb_dim == 3: # 3D, needs to be reduced to 2D by slicing or projecting\n invert_yaxis = bool(is_orthogonal and sum_axis == 0)\n\n slice_names, ver_labels, hor_labels = define_labels(\n reciprocal_space=reciprocal_space,\n is_orthogonal=is_orthogonal,\n sum_frames=sum_frames,\n )\n nbz, nby, nbx = array.shape\n width_v = width_v or max(nbz, nby, nbx)\n width_h = width_h or max(nbz, nby, nbx)\n\n if sum_axis == 0:\n dim_v = nby\n dim_h = nbx\n if pixel_spacing is not None:\n pixel_spacing = (\n pixel_spacing[1],\n pixel_spacing[2],\n ) # vertical, horizontal\n if not sum_frames:\n array = array[nbz // 2, :, :]\n else:\n array = array.sum(axis=sum_axis)\n default_xlabel = hor_labels[0]\n default_ylabel = ver_labels[0]\n elif sum_axis == 1:\n dim_v = nbz\n dim_h = nbx\n if pixel_spacing is not None:\n pixel_spacing = (\n pixel_spacing[0],\n pixel_spacing[2],\n ) # vertical, horizontal\n if not sum_frames:\n array = array[:, nby // 2, :]\n else:\n array = array.sum(axis=sum_axis)\n default_xlabel = hor_labels[1]\n default_ylabel = ver_labels[1]\n else: # sum_axis == 2:\n dim_v = nbz\n dim_h = nby\n if pixel_spacing is not None:\n pixel_spacing = (\n pixel_spacing[0],\n pixel_spacing[1],\n ) # vertical, horizontal\n if not sum_frames:\n array = array[:, :, nbx // 2]\n else:\n array = array.sum(axis=sum_axis)\n default_xlabel = hor_labels[2]\n default_ylabel = ver_labels[2]\n\n slice_name = slice_names[sum_axis]\n\n else: # 2D\n nby, nbx = array.shape\n width_v = width_v or max(nby, nbx)\n width_h = width_h or max(nby, nbx)\n\n dim_v = nby\n dim_h = nbx\n slice_name = \"\"\n default_xlabel = \"\"\n default_ylabel = \"\"\n\n ############################\n # now array is 2D, plot it #\n ############################\n if invert_y[idx] is not None: # overwrite invert_yaxis parameter\n invert_yaxis = invert_y[idx]\n\n width_v = min(width_v, dim_v)\n width_h = min(width_h, dim_h)\n array = array[\n int(np.rint(dim_v / 2 - width_v / 2)) : int(\n np.rint(dim_v / 2 - width_v / 2)\n )\n + width_v,\n int(np.rint(dim_h // 2 - width_h // 2)) : int(\n np.rint(dim_h // 2 - width_h // 2)\n )\n + width_h,\n ]\n\n if scale == \"linear\":\n if np.isnan(vmin):\n tmp_array = np.copy(array)\n tmp_array[np.isnan(array)] = np.inf\n tmp_array[\n np.isinf(tmp_array)\n ] = np.inf # set -inf to +inf to find the min\n vmin = tmp_array.min()\n if np.isnan(vmax):\n tmp_array = np.copy(array)\n tmp_array[np.isnan(array)] = -1 * np.inf\n tmp_array[np.isinf(tmp_array)] = (\n -1 * np.inf\n ) # set +inf to -inf to find the max\n vmax = tmp_array.max()\n if np.isclose(vmax, vmin):\n vmax = vmin + 1\n plot = axis.imshow(array, vmin=vmin, vmax=vmax, cmap=cmap)\n else: # 'log'\n if np.isnan(vmin):\n tmp_array = np.copy(array)\n tmp_array[np.isnan(array)] = np.inf\n tmp_array[\n np.isinf(tmp_array)\n ] = np.inf # set -inf to +inf to find the min\n vmin = np.log10(abs(tmp_array).min())\n if np.isinf(vmin):\n vmin = 0\n if np.isnan(vmax):\n tmp_array = np.copy(array)\n tmp_array[np.isnan(array)] = -1 * np.inf\n tmp_array[np.isinf(tmp_array)] = (\n -1 * np.inf\n ) # set +inf to -inf to find the max\n vmax = np.log10(abs(tmp_array).max())\n if np.isclose(vmax, vmin):\n vmax = vmin + 1\n plot = axis.imshow(np.log10(abs(array)), vmin=vmin, vmax=vmax, cmap=cmap)\n\n axis.set_title(title + slice_name)\n if len(xlabel[idx]) != 0:\n axis.set_xlabel(xlabel[idx])\n else:\n axis.set_xlabel(default_xlabel)\n if len(ylabel[idx]) != 0:\n axis.set_ylabel(ylabel[idx])\n else:\n axis.set_ylabel(default_ylabel)\n plt.axis(\"scaled\")\n 
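# Hedged sketch of the nan-robust colour-limit logic above (hypothetical
# array): nan values are sent to +inf before taking the minimum (and to -inf
# before taking the maximum), so they cannot pollute the colour scale.
import numpy as np

_arr = np.array([np.nan, 1.0, 3.0])
_tmp = np.copy(_arr)
_tmp[np.isnan(_arr)] = np.inf
assert _tmp.min() == 1.0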
axis.tick_params(direction=tick_direction, length=tick_length, width=tick_width)\n if pixel_spacing is not None:\n axis.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[1]))\n axis.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[0]))\n axis.tick_params(labelbottom=False, labelleft=False, top=True, right=True)\n if invert_yaxis: # Y is axis 0, need to be flipped\n axis.invert_yaxis()\n if plot_colorbar:\n cbar = colorbar(plot, numticks=5)\n cbar.ax.tick_params(length=tick_length, width=tick_width)\n\n plt.tight_layout() # avoids the overlap of subplots with axes labels\n plt.pause(0.1)\n plt.ioff()\n\n return fig\n\n\ndef contour_slices(\n array,\n q_coordinates,\n sum_frames=False,\n slice_position=None,\n levels=150,\n width_z=None,\n width_y=None,\n width_x=None,\n plot_colorbar=False,\n cmap=default_cmap,\n title=\"\",\n scale=\"linear\",\n is_orthogonal=False,\n reciprocal_space=True,\n):\n \"\"\"\n Create a figure with three 2D contour plots from a 3D dataset.\n\n :param array: 3D array of real numbers\n :param q_coordinates: a tuple of (qx, qz, qy) 1D-coordinates corresponding to the\n (Z, Y, X) of the cxi convention\n :param sum_frames: if True, will sum the data along the 3rd axis\n :param slice_position: tuple of three integers where to slice the 3D array\n :param levels: int n, will use n data intervals and draw n+1 contour lines\n :param width_z: user-defined zoom width along axis 0 (rocking angle), should be\n smaller than the actual data size\n :param width_y: user-defined zoom width along axis 1 (vertical), should be smaller\n than the actual data size\n :param width_x: user-defined zoom width along axis 2 (horizontal), should be\n smaller than the actual data size\n :param plot_colorbar: set it to True in order to plot the colorbar\n :param cmap: colormap to be used\n :param title: string to include in the plot\n :param scale: 'linear' or 'log'\n :param is_orthogonal: set to True is the frame is orthogonal, False otherwise\n (detector frame) Used for plot labels.\n :param reciprocal_space: True if the data is in reciprocal space, False otherwise.\n Used for plot labels.\n :return: fig, (ax0, ax1, ax2, ax3), (plt0, plt1, plt2) instances\n \"\"\"\n #########################\n # check some parameters #\n #########################\n valid.valid_ndarray(array, ndim=3)\n if scale not in {\"linear\", \"log\"}:\n raise ValueError('scale should be either \"linear\" or \"log\"')\n if any(len(qval) != shape for qval, shape in zip(q_coordinates, array.shape)):\n raise ValueError(\"Coordinates shape is not compatible with data shape\")\n\n nbz, nby, nbx = array.shape\n qx, qz, qy = q_coordinates\n\n width_z = width_z or nbz\n width_y = width_y or nby\n width_x = width_x or nbx\n\n if not sum_frames:\n slice_position = slice_position or (int(nbz // 2), int(nby // 2), int(nbx // 2))\n valid.valid_container(\n obj=slice_position,\n container_types=(tuple, list),\n length=3,\n item_types=int,\n min_included=0,\n name=\"graph_utils.contour_slices\",\n )\n\n #######################################\n # create the figure and plot subplots #\n #######################################\n slice_names, ver_labels, hor_labels = define_labels(\n reciprocal_space=reciprocal_space,\n is_orthogonal=is_orthogonal,\n sum_frames=sum_frames,\n )\n plt.ion()\n fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2, figsize=(12, 9))\n\n ##########\n # axis 0 #\n ##########\n temp_array = np.copy(array)\n if not sum_frames:\n temp_array = temp_array[slice_position[0], :, :]\n 
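# Note on the three panels below: each follows the same recipe, namely take
# either the central slice or the projection along one axis, crop
# symmetrically around the centre to the requested width, then contour-plot
# the result against the matching pair of q coordinates.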
else:\n temp_array = temp_array.sum(axis=0)\n # now array is 2D\n temp_array = temp_array[\n int(np.rint(nby / 2 - min(width_y, nby) / 2)) : int(\n np.rint(nby / 2 - min(width_y, nby) / 2)\n )\n + min(width_y, nby),\n int(np.rint(nbx // 2 - min(width_x, nbx) // 2)) : int(\n np.rint(nbx // 2 - min(width_x, nbx) // 2)\n )\n + min(width_x, nbx),\n ]\n\n if scale == \"linear\":\n plt0 = ax0.contourf(qy, qz, temp_array, levels, cmap=cmap)\n else: # 'log'\n plt0 = ax0.contourf(qy, qz, np.log10(abs(temp_array)), levels, cmap=cmap)\n\n ax0.set_aspect(\"equal\")\n ax0.set_xlabel(hor_labels[0])\n ax0.set_ylabel(ver_labels[0])\n ax0.set_title(title + slice_names[0])\n if plot_colorbar:\n colorbar(plt0, numticks=5)\n\n ##########\n # axis 1 #\n ##########\n temp_array = np.copy(array)\n if not sum_frames:\n temp_array = temp_array[:, slice_position[1], :]\n else:\n temp_array = temp_array.sum(axis=1)\n # now array is 2D\n temp_array = temp_array[\n int(np.rint(nbz / 2 - min(width_z, nbz) / 2)) : int(\n np.rint(nbz / 2 - min(width_z, nbz) / 2)\n )\n + min(width_z, nbz),\n int(np.rint(nbx // 2 - min(width_x, nbx) // 2)) : int(\n np.rint(nbx // 2 - min(width_x, nbx) // 2)\n )\n + min(width_x, nbx),\n ]\n\n if scale == \"linear\":\n plt1 = ax1.contourf(qy, qx, temp_array, levels, cmap=cmap)\n else: # 'log'\n plt1 = ax1.contourf(qy, qx, np.log10(abs(temp_array)), levels, cmap=cmap)\n\n ax1.set_aspect(\"equal\")\n ax1.set_xlabel(hor_labels[1])\n ax1.set_ylabel(ver_labels[1])\n ax1.set_title(title + slice_names[1])\n if plot_colorbar:\n colorbar(plt1, numticks=5)\n\n ##########\n # axis 2 #\n ##########\n temp_array = np.copy(array)\n if not sum_frames:\n temp_array = temp_array[:, :, slice_position[2]]\n else:\n temp_array = temp_array.sum(axis=2)\n # now array is 2D\n temp_array = temp_array[\n int(np.rint(nbz / 2 - min(width_z, nbz) / 2)) : int(\n np.rint(nbz / 2 - min(width_z, nbz) / 2)\n )\n + min(width_z, nbz),\n int(np.rint(nby // 2 - min(width_y, nby) // 2)) : int(\n np.rint(nby // 2 - min(width_y, nby) // 2)\n )\n + min(width_y, nby),\n ]\n\n if scale == \"linear\":\n plt2 = ax2.contourf(qz, qx, temp_array, levels, cmap=cmap)\n else: # 'log'\n plt2 = ax2.contourf(qz, qx, np.log10(abs(temp_array)), levels, cmap=cmap)\n\n ax2.set_aspect(\"equal\")\n ax2.set_xlabel(hor_labels[2])\n ax2.set_ylabel(ver_labels[2])\n ax2.set_title(title + slice_names[2])\n if plot_colorbar:\n colorbar(plt2, numticks=5)\n\n ##########\n # axis 3 #\n ##########\n ax3.set_visible(False)\n\n plt.tight_layout() # avoids the overlap of subplots with axes labels\n plt.pause(0.1)\n plt.ioff()\n return fig, (ax0, ax1, ax2, ax3), (plt0, plt1, plt2)\n\n\ndef contour_stereographic(\n euclidian_u,\n euclidian_v,\n color,\n radius_mean,\n planes=None,\n title=\"\",\n plot_planes=True,\n contour_range=None,\n max_angle=95,\n cmap=default_cmap,\n uv_labels=(\"\", \"\"),\n hide_axis=False,\n scale=\"linear\",\n debugging=False,\n):\n \"\"\"\n Plot the stereographic projection with some cosmetics.\n\n :param euclidian_u: flattened array, normalized Euclidian metric coordinates\n (points can be not on a regular grid)\n :param euclidian_v: flattened array, normalized Euclidian metric coordinates\n (points can be not on a regular grid)\n :param color: flattened array, intensity of density kernel estimation at radius_mean\n :param radius_mean: radius of the sphere in reciprocal space from which the\n projection is done\n :param planes: dictionnary of crystallographic planes, e.g.\n {'111':angle_with_reflection}\n :param title: title for the 
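# Numerical note for the circle radii drawn below: sin(t)/(1 + cos(t)) is the
# half-angle tangent, so a direction at angle t from the pole lands at
# 90*tan(t/2) degrees from the centre of the pole figure (t in radians here,
# hypothetical value).
import numpy as np

_t = np.radians(54.7)
assert np.isclose(np.sin(_t) / (1 + np.cos(_t)), np.tan(_t / 2))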
stereographic plot\n    :param plot_planes: if True, will draw circles corresponding to crystallographic\n     planes in the pole figure\n    :param contour_range: range for the plot contours\n    :param max_angle: maximum angle in degrees of the stereographic projection\n     (should be larger than 90)\n    :param cmap: colormap to be used\n    :param uv_labels: tuple of strings, labels for the u axis and the v axis,\n     respectively\n    :param hide_axis: hide the axis frame, ticks and tick labels\n    :param scale: 'linear' or 'log', scale for the colorbar of the plot\n    :param debugging: True to see the scatter plot of euclidian coordinates\n    :return: figure and axis instances\n    \"\"\"\n    if scale not in {\"linear\", \"log\"}:\n        raise ValueError('scale should be either \"linear\" or \"log\"')\n    if contour_range is None:\n        if scale == \"linear\":\n            contour_range = range(0, 10001, 250)\n        else:  # 'log'\n            contour_range = np.logspace(0, 4, num=20, endpoint=True, base=10.0)\n\n    if debugging:\n        color2 = np.copy(color)\n        color2 = color2 / abs(color2[~np.isnan(color2)]).max() * 10000\n        _, ax0 = plt.subplots(nrows=1, ncols=1)\n        plt0 = ax0.scatter(\n            euclidian_u,\n            euclidian_v,\n            s=6,\n            c=color2,\n            cmap=default_cmap,\n            norm=colors.LogNorm(\n                vmin=max(color2[~np.isnan(color2)].min(), 1),\n                vmax=color2[~np.isnan(color2)].max(),\n            ),\n        )\n        circle = patches.Circle((0, 0), 90, color=\"k\", fill=False, linewidth=1.5)\n        ax0.add_artist(circle)\n        ax0.axis(\"scaled\")\n        ax0.set_xlim(-max_angle, max_angle)\n        ax0.set_ylim(-max_angle, max_angle)\n        ax0.set_xlabel(\"u \" + uv_labels[0])\n        ax0.set_ylabel(\"v \" + uv_labels[1])\n        ax0.set_title(title)\n        colorbar(plt0, scale=\"log\", numticks=5)\n\n    nb_points = 5 * max_angle + 1\n    v_grid, u_grid = np.mgrid[\n        -max_angle : max_angle : (nb_points * 1j),\n        -max_angle : max_angle : (nb_points * 1j),\n    ]\n    # v_grid varies along the vertical axis,\n    # u_grid varies along the horizontal axis\n    intensity_grid = griddata(\n        (euclidian_v, euclidian_u), color, (v_grid, u_grid), method=\"linear\"\n    )\n    nan_indices = np.isnan(intensity_grid)\n    # normalize the intensity for easier plotting\n    intensity_grid = intensity_grid / abs(intensity_grid[~nan_indices]).max() * 10000\n\n    #####################################\n    # plot the stereographic projection #\n    #####################################\n    plt.ion()\n    fig, ax0 = plt.subplots(\n        nrows=1, ncols=1, figsize=(12, 9), facecolor=\"w\", edgecolor=\"k\"\n    )\n    if scale == \"linear\":\n        plt0 = ax0.contourf(u_grid, v_grid, intensity_grid, contour_range, cmap=cmap)\n        colorbar(plt0, scale=\"linear\", numticks=5)\n    else:  # log\n        plt0 = ax0.contourf(\n            u_grid,\n            v_grid,\n            intensity_grid,\n            contour_range,\n            cmap=cmap,\n            norm=colors.LogNorm(\n                vmin=max(intensity_grid[~nan_indices].min(), 1),\n                vmax=intensity_grid[~nan_indices].max(),\n            ),\n        )\n        colorbar(plt0, scale=\"log\", numticks=5)\n    ax0.axis(\"equal\")\n\n    # add the projection of the elevation angle, depending on the center of projection\n    for ii in range(15, 90, 5):\n        circle = patches.Circle(\n            (0, 0),\n            radius_mean\n            * np.sin(ii * np.pi / 180)\n            / (1 + np.cos(ii * np.pi / 180))\n            * 90\n            / radius_mean,\n            color=\"grey\",\n            fill=False,\n            linestyle=\"dotted\",\n            linewidth=0.5,\n        )\n        ax0.add_artist(circle)\n    for ii in range(10, 90, 20):\n        circle = patches.Circle(\n            (0, 0),\n            radius_mean\n            * np.sin(ii * np.pi / 180)\n            / (1 + np.cos(ii * np.pi / 180))\n            * 90\n            / radius_mean,\n            color=\"grey\",\n            fill=False,\n            linestyle=\"dotted\",\n            linewidth=1,\n        )\n        ax0.add_artist(circle)\n    for ii in range(10, 95, 20):\n        
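# label the elevation rings along the negative u axis: the projection maps\n        # a polar angle theta to the plot radius 90 * sin(theta) / (1 + cos(theta)),\n        # which is equal to 90 * tan(theta / 2)\n        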
ax0.text(\n            -radius_mean\n            * np.sin(ii * np.pi / 180)\n            / (1 + np.cos(ii * np.pi / 180))\n            * 90\n            / radius_mean,\n            0,\n            str(ii) + r\"$^\\circ$\",\n            fontsize=10,\n            color=\"k\",\n        )\n    circle = patches.Circle((0, 0), 90, color=\"k\", fill=False, linewidth=1.5)\n    ax0.add_artist(circle)\n\n    # add azimuthal lines every 5 and 20 degrees\n    for ii in range(5, 365, 5):\n        ax0.plot(\n            [0, 90 * np.cos(ii * np.pi / 180)],\n            [0, 90 * np.sin(ii * np.pi / 180)],\n            color=\"grey\",\n            linestyle=\"dotted\",\n            linewidth=0.5,\n        )\n    for ii in range(0, 365, 20):\n        ax0.plot(\n            [0, 90 * np.cos(ii * np.pi / 180)],\n            [0, 90 * np.sin(ii * np.pi / 180)],\n            color=\"grey\",\n            linestyle=\"dotted\",\n            linewidth=1,\n        )\n\n    # draw circles corresponding to particular reflections\n    if planes and plot_planes == 1:\n        indx = 0\n        for key, value in planes.items():\n            circle = patches.Circle(\n                (0, 0),\n                radius_mean\n                * np.sin(value * np.pi / 180)\n                / (1 + np.cos(value * np.pi / 180))\n                * 90\n                / radius_mean,\n                color=\"g\",\n                fill=False,\n                linestyle=\"dotted\",\n                linewidth=1.5,\n            )\n            ax0.add_artist(circle)\n            ax0.text(\n                np.cos(indx * np.pi / 180)\n                * radius_mean\n                * np.sin(value * np.pi / 180)\n                / (1 + np.cos(value * np.pi / 180))\n                * 90\n                / radius_mean,\n                np.sin(indx * np.pi / 180)\n                * radius_mean\n                * np.sin(value * np.pi / 180)\n                / (1 + np.cos(value * np.pi / 180))\n                * 90\n                / radius_mean,\n                key,\n                fontsize=10,\n                color=\"k\",\n                fontweight=\"bold\",\n            )\n            indx = indx + 6\n            print(key + \": \", str(\"{:.2f}\".format(value)))\n    print(\"\\n\")\n    ax0.set_xlabel(\"u \" + uv_labels[0])\n    ax0.set_ylabel(\"v \" + uv_labels[1])\n    if hide_axis:\n        ax0.axis(\"off\")\n        ax0.set_title(title + \"\\nu horizontal, v vertical\")\n    else:\n        ax0.set_title(title)\n        ax0.axis(\"scaled\")\n    plt.pause(0.1)\n    plt.ioff()\n    return fig, ax0\n\n\ndef define_labels(reciprocal_space, is_orthogonal, sum_frames, labels=None):\n    \"\"\"\n    Define default labels for plots.\n\n    :param reciprocal_space: True if the data is in reciprocal space, False otherwise\n    :param is_orthogonal: True if the frame is orthogonal, False otherwise\n     (detector frame)\n    :param sum_frames: True if the data is summed along some axis\n    :param labels: tuple of two strings (vertical label, horizontal label)\n    :return: three tuples of three elements: slice_names, vertical labels,\n     horizontal labels. 
The first element in the tuple corresponds to the first\n subplot and so on.\n \"\"\"\n labels = labels or (\"\",) * 2\n\n if reciprocal_space:\n if is_orthogonal:\n if sum_frames:\n slice_names = (\n \" sum along Q$_x$\",\n \" sum along Q$_z$\",\n \" sum along Q$_y$\",\n )\n else:\n slice_names = (\" slice in Q$_x$\", \" slice in Q$_z$\", \" slice in Q$_y$\")\n ver_labels = (\n labels[0] + r\" Q$_z$\",\n labels[0] + r\" Q$_x$\",\n labels[0] + r\" Q$_x$\",\n )\n hor_labels = (\n labels[1] + r\" Q$_y$\",\n labels[1] + r\" Q$_y$\",\n labels[1] + r\" Q$_z$\",\n )\n else: # detector frame\n if sum_frames:\n slice_names = (\" sum along Z\", \" sum along Y\", \" sum along X\")\n else:\n slice_names = (\" slice in Z\", \" slice in Y\", \" slice in X\")\n ver_labels = (\n labels[0] + \" Y\",\n labels[0] + \" rocking angle\",\n labels[0] + \" rocking angle\",\n )\n hor_labels = (labels[1] + \" X\", labels[1] + \" X\", labels[1] + \" Y\")\n else:\n if is_orthogonal:\n if sum_frames:\n slice_names = (\" sum along z\", \" sum along y\", \" sum along x\")\n else:\n slice_names = (\" slice in z\", \" slice in y\", \" slice in x\")\n ver_labels = (labels[0] + \" y\", labels[0] + \" z\", labels[0] + \" z\")\n hor_labels = (labels[1] + \" x\", labels[1] + \" x\", labels[1] + \" y\")\n else: # detector frame\n if sum_frames:\n slice_names = (\" sum along Z\", \" sum along Y\", \" sum along X\")\n else:\n slice_names = (\" slice in Z\", \" slice in Y\", \" slice in X\")\n ver_labels = (\n labels[0] + \" Y\",\n labels[0] + \" rocking angle\",\n labels[0] + \" rocking angle\",\n )\n hor_labels = (labels[1] + \" X\", labels[1] + \" X\", labels[1] + \" Y\")\n\n return slice_names, ver_labels, hor_labels\n\n\ndef fit_linecut(\n array: np.ndarray,\n indices: Optional[List[List[Tuple[int, int]]]] = None,\n fit_derivative: bool = False,\n support_threshold: float = 0.25,\n voxel_sizes: Optional[List[float]] = None,\n filename: Optional[str] = None,\n label: str = \"linecut\",\n) -> Dict:\n \"\"\"\n Perform a linecut on an array and optionally fit its gradient.\n\n :param array: a 2D or 3D array, typically the modulus of a real space object after\n phase retrieval\n :param indices: a list of ndim lists of ndim tuples (start, stop), ndim being the\n number of dimensions of the array (one list per linecut)\n :param fit_derivative: True to fit the gradient of the linecut with a gaussian line\n shape\n :param support_threshold: float, threshold used to define the support, for the\n determination of the location of crystal boundaries\n :param voxel_sizes: list of voxels sizes, in each dimension of the array\n :param filename: name for saving plots\n :param label: labels for the vertical axis of plots\n :return: a dictionary containing linecuts, fits and fitted parameters\n \"\"\"\n # check parameters\n valid.valid_ndarray(array, fix_shape=True, name=\"array\")\n shape = array.shape\n ndim = len(shape)\n if indices is None:\n # default to the linecut through the center of the array in each dimension\n indices = []\n\n for idx, shp in enumerate(shape):\n default = [(val // 2, val // 2) for val in shape]\n default[idx] = (0, shp - 1)\n indices.append(default)\n\n valid.valid_container(\n indices,\n container_types=list,\n item_types=list,\n length=ndim,\n name=\"indices\",\n )\n for _, item in enumerate(indices):\n valid.valid_container(\n item,\n container_types=list,\n item_types=tuple,\n length=ndim,\n name=\"indices sublists\",\n )\n if not isinstance(fit_derivative, bool):\n raise TypeError(f\"fit_derivative should be a 
bool, got {type(fit_derivative)}\")\n\n # generate a linecut for each dimension of the array\n result: Dict[str, Any] = {}\n for idx in range(ndim):\n result[f\"dimension_{idx}\"] = {}\n # generate the linecut\n cut = linecut(array=array, indices=indices[idx])\n result[f\"dimension_{idx}\"][\"linecut\"] = np.vstack(\n (np.arange(indices[idx][idx][0], indices[idx][idx][1] + 1), cut)\n )\n\n # optionally fit the derivative at the edge of the support\n if fit_derivative:\n support = np.zeros(shape)\n support[array > support_threshold] = 1\n support_cut = linecut(array=support, indices=indices[idx])\n\n peaks, _ = find_peaks(\n abs(np.gradient(support_cut)), height=0.1, distance=10, width=1\n )\n dcut = abs(np.gradient(cut))\n\n # setup data and parameters for fitting\n combined_xaxis = []\n combined_data = []\n fit_params = Parameters()\n for peak_id, peak in enumerate(peaks):\n cropped_xaxis = np.arange(peak - 10, peak + 10)\n cropped_dcut = dcut[peak - 10 : peak + 10]\n result[f\"dimension_{idx}\"][f\"derivative_{peak_id}\"] = np.vstack(\n (cropped_xaxis, cropped_dcut)\n )\n combined_xaxis.append(cropped_xaxis)\n combined_data.append(cropped_dcut)\n\n cen = peak\n fit_params.add(\"amp_%i\" % peak_id, value=1, min=0.0, max=10)\n fit_params.add(\"cen_%i\" % peak_id, value=cen, min=cen - 1, max=cen + 1)\n fit_params.add(\"sig_%i\" % peak_id, value=2, min=0.1, max=10)\n\n # fit the data\n fit_result = minimize(\n util.objective_lmfit,\n fit_params,\n args=(\n np.asarray(combined_xaxis),\n np.asarray(combined_data),\n \"gaussian\",\n ),\n )\n\n # generate fit curves\n for peak_id, peak in enumerate(peaks):\n interp_xaxis = util.upsample(combined_xaxis[peak_id], factor=4)\n # interp_xaxis = combined_xaxis[peak_id]\n y_fit = util.function_lmfit(\n params=fit_result.params,\n iterator=peak_id,\n x_axis=interp_xaxis,\n distribution=\"gaussian\",\n )\n result[f\"dimension_{idx}\"][f\"fit_{peak_id}\"] = np.vstack(\n (interp_xaxis, y_fit)\n )\n result[f\"dimension_{idx}\"][f\"param_{peak_id}\"] = {\n \"amp\": fit_result.params[f\"amp_{peak_id}\"].value,\n \"sig\": fit_result.params[f\"sig_{peak_id}\"].value,\n \"cen\": fit_result.params[f\"cen_{peak_id}\"].value,\n }\n\n # plot the cut and optionally the fits\n plot_linecut(\n linecuts=result, filename=filename, voxel_sizes=voxel_sizes, label=label\n )\n return result\n\n\ndef imshow_plot(\n array,\n sum_frames=False,\n sum_axis=0,\n width_v=None,\n width_h=None,\n plot_colorbar=False,\n vmin=np.nan,\n vmax=np.nan,\n cmap=default_cmap,\n title=\"\",\n labels=None,\n scale=\"linear\",\n tick_direction=\"out\",\n tick_width=1,\n tick_length=4,\n pixel_spacing=None,\n is_orthogonal=False,\n reciprocal_space=False,\n **kwargs,\n):\n \"\"\"\n 2D imshow plot of a 2D or 3D dataset using user-defined parameters.\n\n :param array: 2D or 3D array of real numbers\n :param sum_frames: if True, will sum the data along sum_axis\n :param sum_axis: axis along which to sum\n :param width_v: user-defined zoom vertical width, should be smaller than the\n actual data size\n :param width_h: user-defined zoom horizontal width, should be smaller than the\n actual data size\n :param plot_colorbar: set it to True in order to plot the colorbar\n :param vmin: lower boundary for the colorbar\n :param vmax: higher boundary for the colorbar\n :param cmap: colormap to be used\n :param title: string to include in the plot\n :param labels: tuple of two strings (vertical label, horizontal label)\n :param scale: 'linear' or 'log'\n :param tick_direction: 'out', 'in', 'inout'\n :param 
tick_width: width of ticks in plots\n    :param tick_length: length of ticks in plots\n    :param pixel_spacing: pixel_spacing = desired tick_spacing (in nm) / voxel_size\n     of the reconstruction (in nm). It can be a positive number or a tuple of\n     array.ndim positive numbers\n    :param is_orthogonal: True if the array is in an orthogonal basis,\n     False otherwise (detector frame). Used for plot labels.\n    :param reciprocal_space: True if the data is in reciprocal space,\n     False otherwise. Used for plot labels.\n    :param kwargs:\n     - 'invert_y': boolean, True to invert the vertical axis of the plot.\n       Will overwrite the default behavior.\n\n    :return: fig, axis, plot instances\n    \"\"\"\n    mpl.rcParams[\"axes.linewidth\"] = tick_width  # set the linewidth globally\n    #########################\n    # check some parameters #\n    #########################\n    valid.valid_ndarray(array, ndim=(2, 3))\n    if sum_axis not in {0, 1, 2}:\n        raise ValueError(\"sum_axis should be either 0, 1 or 2\")\n    if not isinstance(sum_frames, bool):\n        raise TypeError(\"sum_frames should be a boolean\")\n    if scale not in {\"linear\", \"log\"}:\n        raise ValueError('scale should be either \"linear\" or \"log\"')\n    if not np.isnan(vmin) and not np.isnan(vmax) and vmin >= vmax:\n        raise ValueError(\"vmin should be strictly smaller than vmax\")\n    ###############\n    # load kwargs #\n    ###############\n    valid.valid_kwargs(\n        kwargs=kwargs, allowed_kwargs={\"invert_y\"}, name=\"graph_utils.imshow_plot\"\n    )\n    invert_y = kwargs.get(\"invert_y\")\n\n    nb_dim = array.ndim\n\n    if isinstance(pixel_spacing, Real):\n        pixel_spacing = (pixel_spacing,) * nb_dim\n    valid.valid_container(\n        obj=pixel_spacing,\n        container_types=(tuple, list),\n        length=nb_dim,\n        item_types=Real,\n        min_excluded=0,\n        allow_none=True,\n        name=\"graph_utils.imshow_plot\",\n    )\n\n    labels = labels or (\"\",) * 2\n    valid.valid_container(\n        obj=labels,\n        container_types=(tuple, list),\n        length=2,\n        item_types=str,\n        name=\"graph_utils.imshow_plot\",\n    )\n\n    array = array.astype(float)\n    plt.ion()\n\n    if nb_dim == 3:\n        invert_yaxis = bool(is_orthogonal)\n\n        slice_names, ver_labels, hor_labels = define_labels(\n            reciprocal_space=reciprocal_space,\n            labels=labels,\n            is_orthogonal=is_orthogonal,\n            sum_frames=sum_frames,\n        )\n\n        nbz, nby, nbx = array.shape\n        width_v = width_v or max(nbz, nby, nbx)\n        width_h = width_h or max(nbz, nby, nbx)\n\n        if sum_axis == 0:\n            dim_v = nby\n            dim_h = nbx\n            if pixel_spacing is not None:\n                pixel_spacing = (\n                    pixel_spacing[1],\n                    pixel_spacing[2],\n                )  # vertical, horizontal\n            if not sum_frames:\n                array = array[nbz // 2, :, :]\n            else:\n                array = array.sum(axis=sum_axis)\n        elif sum_axis == 1:\n            dim_v = nbz\n            dim_h = nbx\n            if pixel_spacing is not None:\n                pixel_spacing = (\n                    pixel_spacing[0],\n                    pixel_spacing[2],\n                )  # vertical, horizontal\n            if not sum_frames:\n                array = array[:, nby // 2, :]\n            else:\n                array = array.sum(axis=sum_axis)\n        else:  # 2\n            dim_v = nbz\n            dim_h = nby\n            if pixel_spacing is not None:\n                pixel_spacing = (\n                    pixel_spacing[0],\n                    pixel_spacing[1],\n                )  # vertical, horizontal\n            if not sum_frames:\n                array = array[:, :, nbx // 2]\n            else:\n                array = array.sum(axis=sum_axis)\n\n        slice_name = slice_names[sum_axis]\n        ver_label = ver_labels[sum_axis]\n        hor_label = hor_labels[sum_axis]\n\n    else:  # array is 2D\n        invert_yaxis = False\n        nby, nbx = array.shape\n        width_v = width_v or max(nby, nbx)\n        width_h = width_h or max(nby, nbx)\n\n        dim_v = nby\n        dim_h = nbx\n        slice_name, ver_label, hor_label = \"\", labels[0], labels[1]\n\n    ############################\n    # now array 
is 2D, plot it #\n ############################\n if invert_y is not None: # overwrite invert_yaxis parameter\n invert_yaxis = invert_y\n\n fig, axis = plt.subplots(nrows=1, ncols=1, figsize=(12, 9))\n width_v = min(width_v, dim_v)\n width_h = min(width_h, dim_h)\n array = array[\n int(np.rint(dim_v / 2 - width_v / 2)) : int(np.rint(dim_v / 2 - width_v / 2))\n + width_v,\n int(np.rint(dim_h // 2 - width_h // 2)) : int(\n np.rint(dim_h // 2 - width_h // 2)\n )\n + width_h,\n ]\n\n if scale == \"linear\":\n if np.isnan(vmin):\n tmp_array = np.copy(array)\n tmp_array[np.isnan(array)] = np.inf\n tmp_array[np.isinf(tmp_array)] = np.inf # set -inf to +inf to find the min\n vmin = tmp_array.min()\n if np.isnan(vmax):\n tmp_array = np.copy(array)\n tmp_array[np.isnan(array)] = -1 * np.inf\n tmp_array[np.isinf(tmp_array)] = (\n -1 * np.inf\n ) # set +inf to -inf to find the max\n vmax = tmp_array.max()\n plot = axis.imshow(array, vmin=vmin, vmax=vmax, cmap=cmap)\n else: # 'log'\n if np.isnan(vmin):\n tmp_array = np.copy(array)\n tmp_array[np.isnan(array)] = np.inf\n tmp_array[np.isinf(tmp_array)] = np.inf # set -inf to +inf to find the min\n vmin = np.log10(abs(tmp_array).min())\n if np.isinf(vmin):\n vmin = 0\n if np.isnan(vmax):\n tmp_array = np.copy(array)\n tmp_array[np.isnan(array)] = -1 * np.inf\n tmp_array[np.isinf(tmp_array)] = (\n -1 * np.inf\n ) # set +inf to -inf to find the max\n vmax = np.log10(abs(tmp_array).max())\n plot = axis.imshow(np.log10(abs(array)), vmin=vmin, vmax=vmax, cmap=cmap)\n\n if invert_yaxis and sum_axis == 0: # detector Y is axis 0, need to be flipped\n axis = plt.gca()\n axis.invert_yaxis()\n axis.set_xlabel(hor_label)\n axis.set_ylabel(ver_label)\n plt.title(title + slice_name)\n plt.axis(\"scaled\")\n axis.tick_params(direction=tick_direction, length=tick_length, width=tick_width)\n if pixel_spacing is not None:\n axis.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[1]))\n axis.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[0]))\n axis.tick_params(labelbottom=False, labelleft=False, top=True, right=True)\n if plot_colorbar:\n cbar = colorbar(plot, numticks=5)\n cbar.ax.tick_params(length=tick_length, width=tick_width)\n plt.pause(0.1)\n plt.ioff()\n return fig, axis, plot\n\n\ndef linecut(\n array: np.ndarray,\n indices: List[Tuple[int, int]],\n interp_order: int = 1,\n) -> np.ndarray:\n \"\"\"\n Linecut through a 2D or 3D array.\n\n The user must input indices of the starting voxel and of the end voxel.\n\n :param array: a numpy array\n :param indices: list of tuples of (start, stop) indices, one tuple for each\n dimension of the array. 
e.g. [(start0, stop0), (start1, stop1)] for a 2D array\n    :param interp_order: order of the spline interpolation, default is 1.\n     The order has to be in the range 1-5.\n    :return: a 1D array interpolated between the start and stop indices\n    \"\"\"\n    # check parameters\n    valid.valid_ndarray(array)\n    array = np.asarray(array)\n    if array.dtype in [\"int8\", \"int16\", \"int32\", \"int64\"]:\n        array = array.astype(float)\n\n    if not isinstance(indices, list):\n        raise TypeError(f\"'indices' should be a list, got {type(indices)}\")\n    for _, item in enumerate(indices):\n        valid.valid_container(\n            item,\n            container_types=tuple,\n            item_types=int,\n            min_included=0,\n            length=2,\n            name=\"indices\",\n        )\n    valid.valid_item(\n        interp_order, allowed_types=int, min_included=1, name=\"interp_order\"\n    )\n\n    num_points = int(\n        np.sqrt(sum((val[1] - val[0] + 1) ** 2 for _, val in enumerate(indices)))\n    )\n\n    cut = map_coordinates(\n        input=array,\n        coordinates=np.vstack(\n            (\n                [\n                    np.linspace(val[0], val[1], endpoint=True, num=num_points)\n                    for _, val in enumerate(indices)\n                ]\n            )\n        ),\n        order=interp_order,\n    )\n    return np.asarray(cut)\n\n\ndef loop_thru_scan(\n    key,\n    array,\n    figure,\n    scale,\n    dim,\n    idx,\n    savedir,\n    cmap=default_cmap,\n    vmin=None,\n    vmax=None,\n):\n    \"\"\"\n    Update the plot while removing the parasitic diffraction intensity in 3D dataset.\n\n    :param key: the keyboard key which was pressed\n    :param array: the 3D data array\n    :param figure: the figure instance\n    :param scale: 'linear' or 'log'\n    :param dim: the axis over which the loop is performed (axis 0, 1 or 2)\n    :param idx: the frame index in the current axis\n    :param savedir: path of the directory for saving images\n    :param cmap: colormap to be used\n    :param vmin: the lower boundary for the colorbar\n    :param vmax: the higher boundary for the colorbar\n    :return: updated controls\n    \"\"\"\n    valid.valid_ndarray(array, ndim=3)\n\n    nbz, nby, nbx = array.shape\n    exit_flag = False\n    if dim not in {0, 1, 2}:\n        raise ValueError(\"dim should be 0, 1 or 2\")\n\n    vmin = vmin or array.min()\n    vmax = vmax or array.max()\n\n    axis = figure.gca()\n    xmin, xmax = axis.get_xlim()\n    ymin, ymax = axis.get_ylim()\n    if key == \"u\":  # show next frame\n        idx = idx + 1\n        if dim == 0:\n            if idx > nbz - 1:\n                idx = 0\n        elif dim == 1:\n            if idx > nby - 1:\n                idx = 0\n        elif dim == 2 and idx > nbx - 1:\n            idx = 0\n\n    elif key == \"d\":  # show previous frame\n        idx = idx - 1\n        if dim == 0:\n            if idx < 0:\n                idx = nbz - 1\n        elif dim == 1:\n            if idx < 0:\n                idx = nby - 1\n        elif dim == 2 and idx < 0:\n            idx = nbx - 1\n\n    elif key == \"right\":  # increase colorbar max\n        if scale == \"linear\":\n            vmax = vmax * 2\n        else:\n            vmax = vmax + 1\n\n    elif key == \"left\":  # reduce colorbar max\n        if scale == \"linear\":\n            vmax = vmax / 2\n        else:\n            vmax = vmax - 1\n        vmax = max(vmax, 1)\n\n    elif key == \"p\":  # plot full image\n        if dim == 0:\n            xmin, xmax = -0.5, nbx - 0.5\n            ymin, ymax = nby - 0.5, -0.5  # pointing down\n        elif dim == 1:\n            xmin, xmax = -0.5, nbx - 0.5\n            ymin, ymax = nbz - 0.5, -0.5  # pointing down\n        elif dim == 2:\n            xmin, xmax = -0.5, nby - 0.5\n            ymin, ymax = nbz - 0.5, -0.5  # pointing down\n\n    elif key == \"q\":\n        exit_flag = True\n\n    elif key == \"r\":\n        filename = \"frame\" + str(idx) + \"_dim\" + str(dim) + \".png\"\n        plt.savefig(savedir + filename)\n\n    # get the images on axis\n    im = axis.images\n    # get and remove the existing colorbar\n    cb = im[0].colorbar  # there is only one image in the list im\n    cb.remove()\n\n    axis.cla()\n    if dim == 0:\n        if scale == \"linear\":\n            plot = 
axis.imshow(array[idx, :, :], vmin=vmin, vmax=vmax, cmap=cmap)\n        else:  # 'log'\n            plot = axis.imshow(\n                np.log10(array[idx, :, :]), vmin=vmin, vmax=vmax, cmap=cmap\n            )\n        axis.set_title(\n            \"Frame \"\n            + str(idx + 1)\n            + \"/\"\n            + str(nbz)\n            + \"\\nq quit ; u next frame ; d previous frame ; p unzoom\\n\"\n            \"right darker ; left brighter ; r save 2D frame\"\n        )\n        colorbar(plot, numticks=5)\n    elif dim == 1:\n        if scale == \"linear\":\n            plot = axis.imshow(array[:, idx, :], vmin=vmin, vmax=vmax, cmap=cmap)\n        else:  # 'log'\n            plot = axis.imshow(\n                np.log10(array[:, idx, :]), vmin=vmin, vmax=vmax, cmap=cmap\n            )\n        axis.set_title(\n            \"Frame \"\n            + str(idx + 1)\n            + \"/\"\n            + str(nby)\n            + \"\\nq quit ; u next frame ; d previous frame ; p unzoom\\n\"\n            \"right darker ; left brighter ; r save 2D frame\"\n        )\n        colorbar(plot, numticks=5)\n    elif dim == 2:\n        if scale == \"linear\":\n            plot = axis.imshow(array[:, :, idx], vmin=vmin, vmax=vmax, cmap=cmap)\n        else:  # 'log'\n            plot = axis.imshow(\n                np.log10(array[:, :, idx]), vmin=vmin, vmax=vmax, cmap=cmap\n            )\n        axis.set_title(\n            \"Frame \"\n            + str(idx + 1)\n            + \"/\"\n            + str(nbx)\n            + \"\\nq quit ; u next frame ; d previous frame ; p unzoom\\n\"\n            \"right darker ; left brighter ; r save 2D frame\"\n        )\n        colorbar(plot, numticks=5)\n    axis.set_xlim([xmin, xmax])\n    axis.set_ylim([ymin, ymax])\n    plt.draw()\n\n    return vmax, idx, exit_flag\n\n\ndef multislices_plot(\n    array,\n    sum_frames=False,\n    slice_position=None,\n    width_z=None,\n    width_y=None,\n    width_x=None,\n    plot_colorbar=False,\n    cmap=default_cmap,\n    title=\"\",\n    scale=\"linear\",\n    vmin=np.nan,\n    vmax=np.nan,\n    tick_direction=\"out\",\n    tick_width=1,\n    tick_length=4,\n    pixel_spacing=None,\n    is_orthogonal=False,\n    reciprocal_space=False,\n    ipynb_layout=False,\n    save_as: Optional[str] = None,\n    **kwargs,\n):\n    \"\"\"\n    Create a figure with three 2D imshow plots from a 3D dataset.\n\n    :param array: 3D array of real numbers\n    :param sum_frames: if True, will sum the data along the 3rd axis\n    :param slice_position: tuple of three integers where to slice the 3D array\n    :param width_z: zoom width along axis 0 (rocking angle), should be smaller\n     than the actual data size\n    :param width_y: zoom width along axis 1 (vertical), should be smaller\n     than the actual data size\n    :param width_x: zoom width along axis 2 (horizontal), should be\n     smaller than the actual data size\n    :param plot_colorbar: set it to True in order to plot the colorbar\n    :param cmap: colormap to be used\n    :param title: string to include in the plot\n    :param scale: 'linear' or 'log'\n    :param tick_direction: 'out', 'in', 'inout'\n    :param tick_width: width of ticks in plots\n    :param tick_length: length of ticks in plots\n    :param pixel_spacing: pixel_spacing=desired tick_spacing (in nm)/voxel_size of\n     the reconstruction (in nm). It can be a positive number or a tuple of 3 positive\n     numbers\n    :param is_orthogonal: set to True if the frame is orthogonal, False otherwise\n     (detector frame). Used for plot labels.\n    :param reciprocal_space: True if the data is in reciprocal space,\n     False otherwise. Used for plot labels.\n    :param vmin: lower boundary for the colorbar. Float or tuple of 3 floats\n    :param vmax: higher boundary for the colorbar. 
Float or tuple of 3 floats\n    :param ipynb_layout: toggle for 3 plots in a row, cleaner in a Jupyter Notebook\n    :param save_as: if string, saves the figure at this path\n    :param kwargs:\n     - 'invert_y': boolean, True to invert the vertical axis of the plot.\n       Will overwrite the default behavior.\n\n    :return: fig, (ax0, ax1, ax2, ax3), (plt0, plt1, plt2) instances (ax3 is\n     omitted from the returned axes when ipynb_layout is True)\n    \"\"\"\n    mpl.rcParams[\"axes.linewidth\"] = tick_width  # set the linewidth globally\n    ###############\n    # load kwargs #\n    ###############\n    valid.valid_kwargs(\n        kwargs=kwargs, allowed_kwargs={\"invert_y\"}, name=\"graph_utils.multislices_plot\"\n    )\n    invert_y = kwargs.get(\"invert_y\")\n\n    #########################\n    # check some parameters #\n    #########################\n    if not isinstance(sum_frames, bool):\n        raise TypeError(\"sum_frames should be a boolean\")\n    if scale not in {\"linear\", \"log\"}:\n        raise ValueError('scale should be either \"linear\" or \"log\"')\n    valid.valid_ndarray(array, ndim=3)\n    nb_dim = array.ndim\n\n    nbz, nby, nbx = array.shape\n\n    if isinstance(vmin, Real):\n        vmin = [vmin, vmin, vmin]\n    valid.valid_container(\n        obj=vmin,\n        container_types=(tuple, list),\n        length=3,\n        item_types=Real,\n        name=\"graph_utils.multislices_plot\",\n    )\n    min_value = vmin\n\n    if isinstance(vmax, Real):\n        vmax = [vmax, vmax, vmax]\n    valid.valid_container(\n        obj=vmax,\n        container_types=(tuple, list),\n        length=3,\n        item_types=Real,\n        name=\"graph_utils.multislices_plot\",\n    )\n    max_value = vmax\n    if any(\n        v_min >= v_max\n        for v_min, v_max in zip(min_value, max_value)\n        if not np.isnan(v_min) and not np.isnan(v_max)\n    ):\n        raise ValueError(\"vmin should be strictly smaller than vmax\")\n\n    if not sum_frames:\n        slice_position = slice_position or (int(nbz // 2), int(nby // 2), int(nbx // 2))\n        valid.valid_container(\n            obj=slice_position,\n            container_types=(tuple, list),\n            length=3,\n            item_types=int,\n            min_included=0,\n            name=\"graph_utils.multislices_plot\",\n        )\n\n    if isinstance(pixel_spacing, Real):\n        pixel_spacing = (pixel_spacing,) * nb_dim\n    valid.valid_container(\n        obj=pixel_spacing,\n        container_types=(tuple, list),\n        length=nb_dim,\n        item_types=Real,\n        min_excluded=0,\n        allow_none=True,\n        name=\"graph_utils.multislices_plot\",\n    )\n    width_z = width_z or nbz\n    width_y = width_y or nby\n    width_x = width_x or nbx\n\n    invert_yaxis = bool(is_orthogonal)\n    if invert_y is not None:  # override the default behavior for invert_yaxis\n        invert_yaxis = invert_y\n\n    ####################################\n    # create the labels and the figure #\n    ####################################\n    slice_names, ver_labels, hor_labels = define_labels(\n        reciprocal_space=reciprocal_space,\n        is_orthogonal=is_orthogonal,\n        sum_frames=sum_frames,\n    )\n\n    plt.ion()\n    if ipynb_layout:\n        fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(15, 4.5))\n        ax3 = None\n    else:\n        fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2, figsize=(12, 9))\n\n    ##########\n    # axis 0 #\n    ##########\n    temp_array = np.copy(array)\n    if not sum_frames:\n        temp_array = temp_array[slice_position[0], :, :]\n    else:\n        temp_array = temp_array.sum(axis=0)\n    # now array is 2D\n    temp_array = temp_array[\n        int(np.rint(nby // 2 - min(width_y, nby) // 2)) : int(\n            np.rint(nby // 2 - min(width_y, nby) // 2)\n        )\n        + min(width_y, nby),\n        int(np.rint(nbx // 2 - min(width_x, nbx) // 2)) : int(\n            np.rint(nbx // 2 - min(width_x, nbx) // 2)\n        )\n        + min(width_x, nbx),\n    ]\n    if scale == \"linear\":\n        if np.isnan(min_value[0]):\n            try:\n                min_value[0] = temp_array[~np.isnan(temp_array)].min()\n            
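# temp_array[~np.isnan(temp_array)] is empty for an all-NaN slice, in\n            # which case .min() raises a ValueError; fall back to 0 below\n            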
except ValueError:\n min_value[0] = 0\n if np.isnan(max_value[0]):\n try:\n max_value[0] = temp_array[~np.isnan(temp_array)].max()\n except ValueError:\n max_value[0] = 1\n plt0 = ax0.imshow(temp_array, vmin=min_value[0], vmax=max_value[0], cmap=cmap)\n else: # 'log'\n if np.isnan(min_value[0]):\n try:\n min_value[0] = np.log10(abs(temp_array[~np.isnan(temp_array)]).min())\n except ValueError:\n min_value[0] = 0\n if np.isinf(min_value[0]):\n min_value[0] = 0\n if np.isnan(max_value[0]):\n try:\n max_value[0] = np.log10(abs(temp_array[~np.isnan(temp_array)]).max())\n except ValueError:\n max_value[0] = 1\n plt0 = ax0.imshow(\n np.log10(abs(temp_array)), vmin=min_value[0], vmax=max_value[0], cmap=cmap\n )\n\n ax0.set_xlabel(hor_labels[0])\n ax0.set_ylabel(ver_labels[0])\n ax0.set_title(title + slice_names[0])\n if invert_yaxis: # detector Y is axis 0, need to be flipped\n ax0.invert_yaxis()\n plt.axis(\"scaled\")\n if plot_colorbar:\n cbar = colorbar(plt0, numticks=5)\n cbar.ax.tick_params(length=tick_length, width=tick_width)\n ax0.tick_params(direction=tick_direction, length=tick_length, width=tick_width)\n if pixel_spacing is not None:\n ax0.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[2]))\n ax0.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[1]))\n ax0.tick_params(labelbottom=False, labelleft=False, top=True, right=True)\n\n ##########\n # axis 1 #\n ##########\n temp_array = np.copy(array)\n if not sum_frames:\n temp_array = temp_array[:, slice_position[1], :]\n else:\n temp_array = temp_array.sum(axis=1)\n # now array is 2D\n temp_array = temp_array[\n int(np.rint(nbz // 2 - min(width_z, nbz) // 2)) : int(\n np.rint(nbz // 2 - min(width_z, nbz) // 2)\n )\n + min(width_z, nbz),\n int(np.rint(nbx // 2 - min(width_x, nbx) // 2)) : int(\n np.rint(nbx // 2 - min(width_x, nbx) // 2)\n )\n + min(width_x, nbx),\n ]\n if scale == \"linear\":\n if np.isnan(min_value[1]):\n try:\n min_value[1] = temp_array[~np.isnan(temp_array)].min()\n except ValueError:\n min_value[1] = 0\n if np.isnan(max_value[1]):\n try:\n max_value[1] = temp_array[~np.isnan(temp_array)].max()\n except ValueError:\n max_value[1] = 1\n plt1 = ax1.imshow(temp_array, vmin=min_value[1], vmax=max_value[1], cmap=cmap)\n else: # 'log'\n if np.isnan(min_value[1]):\n try:\n min_value[1] = np.log10(abs(temp_array[~np.isnan(temp_array)]).min())\n except ValueError:\n min_value[1] = 0\n if np.isinf(min_value[1]):\n min_value[1] = 0\n if np.isnan(max_value[1]):\n try:\n max_value[1] = np.log10(abs(temp_array[~np.isnan(temp_array)]).max())\n except ValueError:\n max_value[1] = 1\n plt1 = ax1.imshow(\n np.log10(abs(temp_array)), vmin=min_value[1], vmax=max_value[1], cmap=cmap\n )\n\n ax1.set_xlabel(hor_labels[1])\n ax1.set_ylabel(ver_labels[1])\n ax1.set_title(title + slice_names[1])\n plt.axis(\"scaled\")\n if plot_colorbar:\n cbar = colorbar(plt1, numticks=5)\n cbar.ax.tick_params(length=tick_length, width=tick_width)\n ax1.tick_params(direction=tick_direction, length=tick_length, width=tick_width)\n if pixel_spacing is not None:\n ax1.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[2]))\n ax1.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[0]))\n ax1.tick_params(labelbottom=False, labelleft=False, top=True, right=True)\n\n ##########\n # axis 2 #\n ##########\n temp_array = np.copy(array)\n if not sum_frames:\n temp_array = temp_array[:, :, slice_position[2]]\n else:\n temp_array = temp_array.sum(axis=2)\n # now array is 2D\n temp_array = temp_array[\n int(np.rint(nbz // 2 - 
min(width_z, nbz) // 2)) : int(\n            np.rint(nbz // 2 - min(width_z, nbz) // 2)\n        )\n        + min(width_z, nbz),\n        int(np.rint(nby // 2 - min(width_y, nby) // 2)) : int(\n            np.rint(nby // 2 - min(width_y, nby) // 2)\n        )\n        + min(width_y, nby),\n    ]\n    if scale == \"linear\":\n        if np.isnan(min_value[2]):\n            try:\n                min_value[2] = temp_array[~np.isnan(temp_array)].min()\n            except ValueError:\n                min_value[2] = 0\n        if np.isnan(max_value[2]):\n            try:\n                max_value[2] = temp_array[~np.isnan(temp_array)].max()\n            except ValueError:\n                max_value[2] = 1\n        plt2 = ax2.imshow(temp_array, vmin=min_value[2], vmax=max_value[2], cmap=cmap)\n    else:  # 'log'\n        if np.isnan(min_value[2]):\n            try:\n                min_value[2] = np.log10(abs(temp_array[~np.isnan(temp_array)]).min())\n            except ValueError:\n                min_value[2] = 0\n        if np.isinf(min_value[2]):\n            min_value[2] = 0\n        if np.isnan(max_value[2]):\n            try:\n                max_value[2] = np.log10(abs(temp_array[~np.isnan(temp_array)]).max())\n            except ValueError:\n                max_value[2] = 1\n        plt2 = ax2.imshow(\n            np.log10(abs(temp_array)), vmin=min_value[2], vmax=max_value[2], cmap=cmap\n        )\n\n    ax2.set_xlabel(hor_labels[2])\n    ax2.set_ylabel(ver_labels[2])\n    ax2.set_title(title + slice_names[2])\n    plt.axis(\"scaled\")\n\n    if plot_colorbar:\n        cbar = colorbar(plt2, numticks=5)\n        cbar.ax.tick_params(length=tick_length, width=tick_width)\n    ax2.tick_params(direction=tick_direction, length=tick_length, width=tick_width)\n    if pixel_spacing is not None:\n        ax2.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[1]))\n        ax2.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing[0]))\n        ax2.tick_params(labelbottom=False, labelleft=False, top=True, right=True)\n\n    ##########\n    # axis 3 #\n    ##########\n    if not ipynb_layout and ax3 is not None:\n        # hide axis 3\n        ax3.set_visible(False)\n\n    plt.tight_layout()  # avoids the overlap of subplots with axes labels\n    plt.pause(0.1)\n    plt.ioff()\n\n    if isinstance(save_as, str):\n        pathlib.Path(save_as).parent.mkdir(parents=True, exist_ok=True)\n        fig.savefig(save_as)\n\n    if ipynb_layout:\n        return fig, (ax0, ax1, ax2), (plt0, plt1, plt2)\n    return fig, (ax0, ax1, ax2, ax3), (plt0, plt1, plt2)\n\n\ndef plot_linecut(\n    linecuts: Dict,\n    filename: Optional[str] = None,\n    voxel_sizes: Optional[List[float]] = None,\n    label: str = \"linecut\",\n) -> None:\n    \"\"\"\n    Plot linecuts and optionally corresponding fits.\n\n    Expected structure for linecuts::\n\n        linecuts = {\n            'dimension_0': {\n                'linecut': np.ndarray (2, M),\n                'derivative_0': np.ndarray (2, N),\n                'derivative_1': np.ndarray (2, O),\n                'fit_0': np.ndarray (2, P),\n                'fit_1': np.ndarray (2, P),\n                'param_0': {'amp': float, 'sig': float, 'cen': float},\n                'param_1': {'amp': float, 'sig': float, 'cen': float},\n            },\n            'dimension_1': {...}\n            ...\n        }\n\n    :param linecuts: a dictionary containing linecuts, with keys 'dimension_0',\n     'dimension_1', ...\n    :param filename: str, the figure will be saved there\n    :param voxel_sizes: tuple of voxel sizes for each linecut (1 per dimension)\n    :param label: str, label for the vertical axis of the linecut plots\n    \"\"\"\n    if filename is not None and not isinstance(filename, str):\n        raise TypeError(f\"filename should be a string, got {type(filename)}\")\n    if not isinstance(linecuts, dict):\n        raise TypeError(f\"expected a dictionary, got {type(linecuts)}\")\n    if voxel_sizes is not None:\n        unit = \"nm\"\n        valid.valid_container(\n            voxel_sizes,\n            container_types=(tuple, list),\n            item_types=(float, int),\n            length=len(linecuts),\n            name=\"voxel_sizes\",\n        )\n    else:\n        unit = \"pixels\"\n\n    labels = {\n        \"dimension_0\": f\"Z ({unit})\",\n        
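# axis labels follow the CXI convention used throughout this module:\n        # axis 0 = Z (downstream), axis 1 = Y (vertical up), axis 2 = X\n        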
\"dimension_1\": f\"Y ({unit})\",\n \"dimension_2\": f\"X ({unit})\",\n }\n\n # plot the linecuts\n fig, axes = plt.subplots(nrows=len(linecuts), ncols=1, figsize=(12, 9))\n for idx, key in enumerate(linecuts.keys()):\n factor = voxel_sizes[idx] if voxel_sizes is not None else 1\n\n axes[idx].plot(\n linecuts[key][\"linecut\"][0] * factor, linecuts[key][\"linecut\"][1], \".-b\"\n )\n axes[idx].set_xlabel(labels.get(key, key))\n axes[idx].set_ylabel(label)\n\n plt.tight_layout() # avoids the overlap of subplots with axes labels\n plt.pause(0.1)\n plt.ioff()\n\n if filename:\n fig.savefig(filename)\n\n # plot the derivatives and the fits\n fig, axes = plt.subplots(nrows=len(linecuts), ncols=2, figsize=(12, 9))\n for idx, key in enumerate(linecuts.keys()):\n factor = voxel_sizes[idx] if voxel_sizes is not None else 1\n for subkey in linecuts[key].keys():\n if subkey.startswith(\"derivative\"):\n index = int(subkey[-1])\n (line1,) = axes[idx][index].plot(\n linecuts[key][subkey][0] * factor,\n linecuts[key][subkey][1],\n \".b\",\n label=\"derivative\",\n )\n (line2,) = axes[idx][index].plot(\n linecuts[key][f\"fit_{index}\"][0] * factor,\n linecuts[key][f\"fit_{index}\"][1],\n \"-r\",\n label=\"gaussian fit\",\n )\n axes[idx][index].set_xlabel(labels.get(key, key))\n axes[idx][index].legend(handles=[line1, line2])\n fwhm = (\n 2\n * np.sqrt(2 * np.log(2))\n * factor\n * linecuts[key][f\"param_{index}\"][\"sig\"]\n )\n\n axes[idx][index].text(\n x=0.05,\n y=0.9,\n s=f\"FWHM={fwhm:.2f} {unit}\",\n transform=axes[idx][index].transAxes,\n )\n\n plt.tight_layout() # avoids the overlap of subplots with axes labels\n plt.pause(0.1)\n plt.ioff()\n\n if filename:\n base, _ = os.path.splitext(filename)\n fig.savefig(base + \"_fits.png\")\n\n\ndef plot_3dmesh(\n vertices, faces, data_shape, title=\"Mesh - z axis flipped because of CXI convention\"\n):\n \"\"\"\n Plot a 3D mesh defined by its vertices and faces.\n\n :param vertices: n*3 ndarray of n vertices defined by 3 positions\n :param faces: m*3 ndarray of m faces defined by 3 indices of vertices\n :param data_shape: tuple corresponding to the 3d data shape\n :param title: title for the plot\n :return: figure and axe instances\n \"\"\"\n plt.ion()\n fig = plt.figure(figsize=(10, 10))\n ax0 = Axes3D(fig)\n mymesh = Poly3DCollection(vertices[faces])\n mymesh.set_edgecolor(\"k\")\n ax0.add_collection3d(mymesh)\n ax0.set_xlim(0, data_shape[0])\n ax0.set_xlabel(\"Z\")\n ax0.set_ylim(0, data_shape[1])\n ax0.set_ylabel(\"Y\")\n ax0.set_zlim(0, data_shape[2])\n ax0.set_zlabel(\"X\")\n plt.title(title)\n\n plt.pause(0.1)\n plt.ioff()\n return fig, ax0\n\n\ndef savefig(\n savedir,\n figure,\n axes,\n xlabels=\"\",\n ylabels=\"\",\n titles=\"\",\n filename=\"\",\n tick_direction=\"out\",\n tick_width=2,\n tick_length=10,\n tick_labelsize=16,\n legend_labelsize=16,\n label_size=20,\n title_size=20,\n only_labels=False,\n **kwargs,\n):\n \"\"\"\n Save a template figures for publication, without and with labels.\n\n :param savedir: str, the directory where to save the figures\n :param figure: a matplotlib figure instance\n :param axes: a matplotlib axis or a tuple of axes\n :param xlabels: str, horizontal labels (one per axis)\n :param ylabels: str, vertical labels (one per axis)\n :param titles: str, title (one per axis)\n :param filename: name of the file for saving the figure\n :param tick_direction: 'in', 'out' or 'inout'\n :param tick_width: tick width in points\n :param tick_length: tick length in points\n :param tick_labelsize: label size in points of tick 
labels\n    :param legend_labelsize: label size in points of the legend\n    :param label_size: label size in points of axis labels\n    :param title_size: label size in points of titles\n    :param only_labels: bool, if True only the figure with all labels will be saved\n    :param kwargs:\n     - 'bottom', 'top', 'left', 'right': bool, whether to draw the respective ticks.\n     - 'labelbottom', 'labeltop', 'labelleft', 'labelright': bool, whether to draw\n       the respective tick labels.\n     - 'legend': bool, whether to show the legend or not\n     - 'text': dict, a dictionary of dictionaries containing the parameters for\n       matplotlib.pyplot.text function e.g. {0: {'x': 0.4, 'y': 0.4, 's': 'test',\n       'fontsize': 12}, 1:{'x': 0.4, 'y': 0.5, 's': 'res', 'fontsize': 12}}\n\n    \"\"\"\n    #########################\n    # check and load kwargs #\n    #########################\n    valid.valid_kwargs(\n        kwargs=kwargs,\n        allowed_kwargs={\n            \"labelbottom\",\n            \"labeltop\",\n            \"labelleft\",\n            \"labelright\",\n            \"bottom\",\n            \"top\",\n            \"left\",\n            \"right\",\n            \"legend\",\n            \"text\",\n        },\n        name=\"kwargs\",\n    )\n    labelbottom = kwargs.get(\"labelbottom\", True)\n    labeltop = kwargs.get(\"labeltop\", False)\n    labelleft = kwargs.get(\"labelleft\", True)\n    labelright = kwargs.get(\"labelright\", False)\n    bottom = kwargs.get(\"bottom\", True)\n    top = kwargs.get(\"top\", False)\n    left = kwargs.get(\"left\", True)\n    right = kwargs.get(\"right\", False)\n    legend = kwargs.get(\"legend\", False)\n    text = kwargs.get(\"text\")\n\n    ####################\n    # check parameters #\n    ####################\n    fname = \"savefig\"\n    valid.valid_container(savedir, container_types=str, min_length=1, name=fname)\n    if not isinstance(figure, mpl.figure.Figure):\n        raise TypeError(\"figure should be a matplotlib Figure\")\n    if isinstance(axes, mpl.axes.Axes):\n        axes = (axes,)\n    valid.valid_container(axes, container_types=(tuple, list), min_length=1, name=fname)\n    for ax in axes:\n        if not isinstance(ax, mpl.axes.Axes):\n            raise TypeError(\"axes should be a tuple of matplotlib axes\")\n    nb_axes = len(axes)\n    if isinstance(labelbottom, bool):\n        labelbottom = (labelbottom,) * nb_axes\n    valid.valid_container(\n        labelbottom,\n        container_types=(tuple, list),\n        item_types=bool,\n        length=nb_axes,\n        name=fname,\n    )\n    if isinstance(labeltop, bool):\n        labeltop = (labeltop,) * nb_axes\n    valid.valid_container(\n        labeltop,\n        container_types=(tuple, list),\n        item_types=bool,\n        length=nb_axes,\n        name=fname,\n    )\n    if isinstance(labelleft, bool):\n        labelleft = (labelleft,) * nb_axes\n    valid.valid_container(\n        labelleft,\n        container_types=(tuple, list),\n        item_types=bool,\n        length=nb_axes,\n        name=fname,\n    )\n    if isinstance(labelright, bool):\n        labelright = (labelright,) * nb_axes\n    valid.valid_container(\n        labelright,\n        container_types=(tuple, list),\n        item_types=bool,\n        length=nb_axes,\n        name=fname,\n    )\n    if isinstance(bottom, bool):\n        bottom = (bottom,) * nb_axes\n    valid.valid_container(\n        bottom,\n        container_types=(tuple, list),\n        item_types=bool,\n        length=nb_axes,\n        name=fname,\n    )\n    if isinstance(top, bool):\n        top = (top,) * nb_axes\n    valid.valid_container(\n        top, container_types=(tuple, list), item_types=bool, length=nb_axes, name=fname\n    )\n    if isinstance(left, bool):\n        left = (left,) * nb_axes\n    valid.valid_container(\n        left, container_types=(tuple, list), item_types=bool, length=nb_axes, name=fname\n    )\n    if isinstance(right, bool):\n        right = (right,) * nb_axes\n    valid.valid_container(\n        right,\n        container_types=(tuple, list),\n        item_types=bool,\n        length=nb_axes,\n        name=fname,\n    )\n    if 
isinstance(legend, bool):\n        legend = (legend,) * nb_axes\n    valid.valid_container(\n        legend,\n        container_types=(tuple, list),\n        item_types=bool,\n        length=nb_axes,\n        name=fname,\n    )\n    if isinstance(xlabels, str):\n        xlabels = (xlabels,) * nb_axes\n    valid.valid_container(\n        xlabels,\n        container_types=(tuple, list),\n        item_types=str,\n        length=nb_axes,\n        name=fname,\n    )\n    if isinstance(ylabels, str):\n        ylabels = (ylabels,) * nb_axes\n    valid.valid_container(\n        ylabels,\n        container_types=(tuple, list),\n        item_types=str,\n        length=nb_axes,\n        name=fname,\n    )\n    if isinstance(titles, str):\n        titles = (titles,) * nb_axes\n    valid.valid_container(\n        titles,\n        container_types=(tuple, list),\n        item_types=str,\n        length=nb_axes,\n        name=fname,\n    )\n    valid.valid_container(filename, container_types=str, name=fname)\n    filename = filename.replace(\n        \".png\", \"\"\n    )  # in case the user put the extension in the filename\n    if tick_direction not in {\"in\", \"out\", \"inout\"}:\n        raise ValueError(\n            f\"Invalid value {tick_direction} for tick_direction,\"\n            \" allowed are 'in', 'out', 'inout'\"\n        )\n    valid.valid_item(tick_width, allowed_types=int, min_excluded=0, name=fname)\n    valid.valid_item(tick_length, allowed_types=int, min_excluded=0, name=fname)\n    valid.valid_item(tick_labelsize, allowed_types=int, min_excluded=0, name=fname)\n    valid.valid_item(legend_labelsize, allowed_types=int, min_excluded=0, name=fname)\n    valid.valid_item(label_size, allowed_types=int, min_excluded=0, name=fname)\n    valid.valid_item(title_size, allowed_types=int, min_excluded=0, name=fname)\n    valid.valid_container(\n        text,\n        container_types=dict,\n        item_types=int,\n        allow_none=True,\n        min_length=1,\n        min_included=0,\n        name=fname,\n    )\n    valid.valid_item(only_labels, allowed_types=bool, name=fname)\n\n    #########################\n    # plot and save figures #\n    #########################\n    xlims = []\n    ylims = []\n    xlocs = []\n    ylocs = []\n    plt.ion()\n    for idx, ax in enumerate(axes):\n        ax.tick_params(\n            labelbottom=False,\n            labelleft=False,\n            labelright=False,\n            labeltop=False,\n            bottom=bottom[idx],\n            top=top[idx],\n            left=left[idx],\n            right=right[idx],\n            direction=tick_direction,\n            length=tick_length,\n            width=tick_width,\n            labelsize=tick_labelsize,\n        )\n        ax.set_xlabel(xlabels[idx], fontsize=label_size, visible=False)\n        ax.set_ylabel(ylabels[idx], fontsize=label_size, visible=False)\n        ax.set_title(titles[idx], fontsize=title_size, visible=False)\n        ax.spines[\"right\"].set_linewidth(tick_width)\n        ax.spines[\"left\"].set_linewidth(tick_width)\n        ax.spines[\"top\"].set_linewidth(tick_width)\n        ax.spines[\"bottom\"].set_linewidth(tick_width)\n        try:  # Check if there is a colorbar\n            cbar = ax.images[0].colorbar\n            cbar.ax.tick_params(\n                labelright=False,\n                length=tick_length,\n                width=tick_width,\n                labelsize=tick_labelsize,\n            )\n            cbar.outline.set_linewidth(tick_width)\n        except IndexError:\n            cbar = None\n        xlims.append(ax.get_xlim())\n        ylims.append(ax.get_ylim())\n        xlocs.append(ax.xaxis.get_ticklocs())\n        ylocs.append(ax.yaxis.get_ticklocs())\n\n    if not only_labels:\n        figure.savefig(savedir + filename + \".png\")\n\n    for idx, ax in enumerate(axes):\n        ax.tick_params(\n            labelbottom=labelbottom[idx],\n            labelleft=labelleft[idx],\n            labelright=labelright[idx],\n            labeltop=labeltop[idx],\n            axis=\"both\",\n            which=\"major\",\n            labelsize=label_size,\n        )\n        ax.set_xlabel(xlabels[idx], fontsize=label_size, visible=True)\n        ax.set_ylabel(ylabels[idx], fontsize=label_size, visible=True)\n        ax.set_title(titles[idx], fontsize=title_size, visible=True)\n        if legend[idx]:\n            
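# draw the legend only on the axes where it was requested\n            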
ax.legend(fontsize=legend_labelsize)\n        ax.set_xticks(xlocs[idx])\n        ax.set_yticks(ylocs[idx])\n        ax.set_xlim(left=xlims[idx][0], right=xlims[idx][1])\n        ax.set_ylim(bottom=ylims[idx][0], top=ylims[idx][1])\n\n    if text is not None:\n        for _, value in text.items():\n            figure.text(**value)\n    if cbar is not None:\n        cbar.ax.tick_params(labelright=True)\n\n    figure.tight_layout()\n    figure.savefig(savedir + filename + \"_labels.png\")\n    plt.ioff()\n\n\ndef save_to_vti(\n    filename,\n    voxel_size,\n    tuple_array,\n    tuple_fieldnames,\n    origin=(0, 0, 0),\n    amplitude_threshold=0.01,\n    **kwargs,\n):\n    \"\"\"\n    Save arrays defined by their name in a single vti file.\n\n    Paraview expects data in an orthonormal basis (x,y,z). For BCDI data in the .cxi\n    convention (hence: z, y, x) it is necessary to flip the last axis. The data sent\n    to Paraview will be in the orthonormal frame (z,y,-x), therefore Paraview_x is z\n    (downstream), Paraview_y is y (vertical up), Paraview_z is -x (inboard) of the\n    .cxi convention.\n\n    :param filename: the file name of the vti file\n    :param voxel_size: tuple (voxel_size_axis0, voxel_size_axis1, voxel_size_axis2)\n    :param tuple_array: tuple of arrays of the same dimension\n    :param tuple_fieldnames: tuple of strings for the field names, same number of\n     elements as tuple_array\n    :param origin: tuple of points for vtk SetOrigin()\n    :param amplitude_threshold: lower threshold for saving the reconstruction\n     modulus (save memory space)\n    :param kwargs:\n\n     - 'logger': an optional logger\n\n    :return: nothing\n    \"\"\"\n    import vtk\n    from vtk.util import numpy_support\n\n    logger = kwargs.get(\"logger\", module_logger)\n    #########################\n    # check some parameters #\n    #########################\n    valid.valid_container(\n        obj=voxel_size,\n        container_types=(tuple, list),\n        length=3,\n        item_types=Real,\n        min_excluded=0,\n        name=\"voxel_size\",\n    )\n\n    if isinstance(tuple_array, np.ndarray):\n        tuple_array = (tuple_array,)\n    valid.valid_ndarray(tuple_array, ndim=3)\n    nb_arrays = len(tuple_array)\n    nbz, nby, nbx = tuple_array[0].shape\n\n    if isinstance(tuple_fieldnames, str):\n        tuple_fieldnames = (tuple_fieldnames,)\n    valid.valid_container(\n        obj=tuple_fieldnames,\n        container_types=(tuple, list),\n        length=nb_arrays,\n        item_types=str,\n        name=\"tuple_fieldnames\",\n    )\n\n    #############################\n    # initialize the VTK object #\n    #############################\n    image_data = vtk.vtkImageData()\n    image_data.SetOrigin(origin[0], origin[1], origin[2])\n    image_data.SetSpacing(voxel_size[0], voxel_size[1], voxel_size[2])\n    image_data.SetExtent(0, nbz - 1, 0, nby - 1, 0, nbx - 1)\n\n    #######################################\n    # check if one of the fields is 'amp' #\n    #######################################\n    # it will use the thresholded normalized 'amp' as support\n    # when saving other fields, in order to save disk space\n    try:\n        index_first = tuple_fieldnames.index(\"amp\")\n        first_array = tuple_array[index_first]\n        first_array = first_array / first_array.max()\n        first_array[\n            first_array < amplitude_threshold\n        ] = 0  # threshold low amplitude values in order to save disk space\n        is_amp = True\n    except ValueError:\n        logger.info('\"amp\" not in fieldnames, will save arrays without thresholding')\n        index_first = 0\n        first_array = tuple_array[0]\n        is_amp = False\n\n    first_arr = np.transpose(np.flip(first_array, 2)).reshape(first_array.size)\n    first_arr = numpy_support.numpy_to_vtk(first_arr)\n    pd = image_data.GetPointData()\n    pd.SetScalars(first_arr)\n    
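# the first field becomes the active scalar array; the remaining fields are\n    # appended below as additional named arrays on the same vtkImageData.\n    # A minimal sketch of a typical call, assuming 3D arrays amp and phase:\n    #   save_to_vti(\"output.vti\", (5, 5, 5), (amp, phase), (\"amp\", \"phase\"))\n    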
pd.GetArray(0).SetName(tuple_fieldnames[index_first])\n counter = 1\n for idx in range(nb_arrays):\n if idx == index_first:\n continue\n temp_array = tuple_array[idx]\n if is_amp:\n temp_array[\n first_array == 0\n ] = 0 # use the thresholded amplitude as a support\n # in order to save disk space\n temp_array = np.transpose(np.flip(temp_array, 2)).reshape(temp_array.size)\n temp_array = numpy_support.numpy_to_vtk(temp_array)\n pd.AddArray(temp_array)\n pd.GetArray(counter).SetName(tuple_fieldnames[idx])\n pd.Update()\n counter = counter + 1\n\n # export data to file\n writer = vtk.vtkXMLImageDataWriter()\n writer.SetFileName(filename)\n writer.SetInputData(image_data)\n writer.Write()\n\n\ndef scatter_plot(array, labels, markersize=4, markercolor=\"b\", title=\"\"):\n \"\"\"\n 2D or 3D Scatter plot of a 2D ndarray.\n\n :param array: 2D ndarray, the number of columns is the number of dimensions\n of the scatter plot (2 or 3)\n :param labels: tuple of string labels (length = number of columns in array)\n :param markersize: number corresponding to the marker size\n :param markercolor: string corresponding to the marker color\n :param title: string, title for the scatter plot\n :return: figure, axes instances\n \"\"\"\n valid.valid_ndarray(array, ndim=2, fix_shape=False)\n ndim = array.shape[1]\n if isinstance(labels, tuple):\n if len(labels) != ndim:\n raise ValueError(\n \"len(labels) is different from the number of columns in the array\"\n )\n else: # it is a string or a number\n labels = (labels,) * ndim\n\n plt.ion()\n fig = plt.figure()\n\n if ndim == 2:\n ax = plt.subplot(111)\n ax.scatter(array[:, 0], array[:, 1], s=markersize, color=markercolor)\n plt.title(title)\n ax.set_xlabel(\n labels[0]\n ) # first dimension is x for scatter plots, but z for NEXUS convention\n ax.set_ylabel(labels[1])\n plt.pause(0.1)\n elif ndim == 3:\n ax = plt.subplot(111, projection=\"3d\")\n ax.scatter(\n array[:, 0], array[:, 1], array[:, 2], s=markersize, color=markercolor\n )\n plt.title(title)\n ax.set_xlabel(\n labels[0]\n ) # first dimension is x for scatter plots, but z for NEXUS convention\n ax.set_ylabel(labels[1])\n ax.set_zlabel(labels[2])\n else:\n raise ValueError(\"There should be 2 or 3 columns in the array\")\n if ndim == 2:\n plt.axis(\"scaled\")\n plt.pause(0.1)\n plt.ioff()\n return fig, ax\n\n\ndef scatter_plot_overlaid(arrays, markersizes, markercolors, labels, title=\"\"):\n \"\"\"\n Overlaid scatter plot of 2D ndarrays having the same number of columns.\n\n :param arrays: tuple of 2D ndarrays, the number of columns is the number\n of dimensions of the scatter plot (2 or 3)\n :param markersizes: tuple of numbers corresponding to the marker sizes\n (length = number of arrays)\n :param markercolors: tuple of strings corresponding to the marker color\n (length = number of arrays)\n :param labels: tuple of string labels (length = number of columns in arrays)\n :param title: string, title for the scatter plot\n :return: figure, axes instances\n \"\"\"\n if isinstance(arrays, np.ndarray):\n fig, ax = scatter_plot(\n array=arrays,\n markersize=markersizes,\n markercolor=markercolors,\n labels=labels,\n title=title,\n )\n return fig, ax\n\n valid.valid_ndarray(arrays, ndim=2, fix_shape=False)\n\n ndim = arrays[0].shape[1]\n nb_arrays = len(arrays)\n\n if isinstance(labels, tuple):\n if len(labels) != ndim:\n raise ValueError(\n \"len(labels) is different from the number of columns in the array\"\n )\n else: # it is a string or a number\n labels = (labels,) * ndim\n try:\n if len(markersizes) 
!= nb_arrays:\n raise ValueError(\"len(markersizes) is different from the number of arrays\")\n except TypeError: # it is a number\n markersizes = (markersizes,) * nb_arrays\n if isinstance(markercolors, tuple):\n if len(markercolors) != nb_arrays:\n raise ValueError(\"len(markercolors) is different from the number of arrays\")\n else: # it is a string or a number\n markercolors = (markercolors,) * nb_arrays\n\n plt.ion()\n fig = plt.figure()\n if ndim == 2:\n ax = plt.subplot(111)\n elif ndim == 3:\n ax = plt.subplot(111, projection=\"3d\")\n else:\n raise ValueError(\"There should be 2 or 3 columns in the array\")\n\n for idx in range(nb_arrays):\n array = arrays[idx]\n if array.shape[1] != ndim:\n raise ValueError(\"All arrays should have the same number of columns\")\n\n if ndim == 2:\n ax.scatter(\n array[:, 0], array[:, 1], s=markersizes[idx], color=markercolors[idx]\n )\n else: # 3D\n ax.scatter(\n array[:, 0],\n array[:, 1],\n array[:, 2],\n s=markersizes[idx],\n color=markercolors[idx],\n )\n\n plt.title(title)\n if ndim == 2:\n ax.set_xlabel(\n labels[0]\n ) # first dimension is x for scatter plots, but z for NEXUS convention\n ax.set_ylabel(labels[1])\n else:\n ax.set_xlabel(\n labels[0]\n ) # first dimension is x for scatter plots, but z for NEXUS convention\n ax.set_ylabel(labels[1])\n ax.set_zlabel(labels[2])\n if ndim == 2:\n plt.axis(\"scaled\")\n plt.pause(0.1)\n plt.ioff()\n return fig, ax\n\n\ndef scatter_stereographic(\n euclidian_u,\n euclidian_v,\n color,\n title=\"\",\n max_angle=95,\n cmap=default_cmap,\n uv_labels=(\"\", \"\"),\n):\n \"\"\"\n Plot the stereographic projection of the real scattered positions of data points.\n\n :param euclidian_u: flattened array, normalized Euclidian metric coordinates\n (points can be not on a regular grid)\n :param euclidian_v: flattened array, normalized Euclidian metric coordinates\n (points can be not on a regular grid)\n :param color: flattened array, intensity of density kernel estimation at radius_mean\n :param title: title for the stereographic plot\n :param max_angle: maximum angle in degrees of the stereographic projection\n (should be larger than 90)\n :param cmap: colormap to be used\n :param uv_labels: tuple of strings, labels for the u axis and the v axis,\n respectively\n :return: figure and axe instances\n \"\"\"\n fig, ax0 = plt.subplots(nrows=1, ncols=1)\n plt0 = ax0.scatter(\n euclidian_u,\n euclidian_v,\n s=6,\n c=color,\n cmap=cmap,\n norm=colors.LogNorm(\n vmin=max(color[~np.isnan(color)].min(), 1),\n vmax=color[~np.isnan(color)].max(),\n ),\n )\n circle = patches.Circle((0, 0), 90, color=\"k\", fill=False, linewidth=1.5)\n ax0.add_artist(circle)\n ax0.axis(\"scaled\")\n ax0.set_xlim(-max_angle, max_angle)\n ax0.set_ylim(-max_angle, max_angle)\n ax0.set_xlabel(\"u \" + uv_labels[0])\n ax0.set_ylabel(\"v \" + uv_labels[1])\n ax0.set_title(title)\n colorbar(plt0, scale=\"log\", numticks=5)\n plt.pause(0.1)\n plt.ioff()\n return fig, ax0\n\n\ndef update_aliens(\n key,\n pix,\n piy,\n original_data,\n original_mask,\n updated_data,\n updated_mask,\n figure,\n width,\n dim,\n idx,\n vmax,\n vmin=0,\n invert_yaxis=False,\n):\n \"\"\"\n Update the plot while removing the parasitic diffraction intensity in 3D dataset.\n\n :param key: the keyboard key which was pressed\n :param pix: the x value of the mouse pointer\n :param piy: the y value of the mouse pointer\n :param original_data: the 3D data array before masking aliens\n :param original_mask: the 3D mask array before masking aliens\n :param updated_data: the 
current 3D data array\n :param updated_mask: the current 3D mask array\n :param figure: the figure instance\n :param width: the half_width of the masking window\n :param dim: the axis currently under review (axis 0, 1 or 2)\n :param idx: the frame index in the current axis\n :param vmax: the higher boundary for the colorbar\n :param vmin: the lower boundary for the colorbar\n :param invert_yaxis: True to invert the y axis of imshow plots\n :return: updated data, mask and controls\n \"\"\"\n # check some parameters\n valid.valid_ndarray(\n arrays=(original_data, updated_data, original_mask, updated_mask),\n ndim=3,\n )\n if dim not in {0, 1, 2}:\n raise ValueError(\"dim should be 0, 1 or 2\")\n\n # process arrays\n nbz, nby, nbx = original_data.shape\n stop_masking = False\n if dim == 0:\n current_nby = nby\n current_nbx = nbx\n elif dim == 1:\n current_nby = nbz\n current_nbx = nbx\n else: # dim = 2\n current_nby = nbz\n current_nbx = nby\n\n axs = figure.gca()\n xmin, xmax = axs.get_xlim()\n ymin, ymax = axs.get_ylim()\n if key == \"u\": # show next frame\n idx = idx + 1\n if dim == 0:\n if idx > nbz - 1:\n idx = 0\n elif dim == 1:\n if idx > nby - 1:\n idx = 0\n else: # dim=2\n if idx > nbx - 1:\n idx = 0\n\n elif key == \"d\": # show previous frame\n idx = idx - 1\n if dim == 0:\n if idx < 0:\n idx = nbz - 1\n elif dim == 1:\n if idx < 0:\n idx = nby - 1\n else: # dim=2\n if idx < 0:\n idx = nbx - 1\n\n elif key == \"up\":\n width = width + 1\n\n elif key == \"down\":\n width = width - 1\n width = max(width, 0)\n\n elif key == \"right\": # increase colobar max\n vmax = vmax * 2\n\n elif key == \"left\": # reduce colobar max\n vmax = vmax / 2\n vmax = max(vmax, 1)\n\n elif key == \"m\": # mask intensities\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - width)\n if stopx > current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n if dim == 0:\n updated_data[idx, starty:stopy, startx:stopx] = 0\n updated_mask[idx, starty:stopy, startx:stopx] = 1\n elif dim == 1:\n updated_data[starty:stopy, idx, startx:stopx] = 0\n updated_mask[starty:stopy, idx, startx:stopx] = 1\n else: # dim=2\n updated_data[starty:stopy, startx:stopx, idx] = 0\n updated_mask[starty:stopy, startx:stopx, idx] = 1\n\n elif key == \"b\": # back to measured intensities\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - 
width)\n if stopx > current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n if dim == 0:\n updated_data[idx, starty:stopy, startx:stopx] = original_data[\n idx, starty:stopy, startx:stopx\n ]\n updated_mask[idx, starty:stopy, startx:stopx] = original_mask[\n idx, starty:stopy, startx:stopx\n ]\n\n elif dim == 1:\n updated_data[starty:stopy, idx, startx:stopx] = original_data[\n starty:stopy, idx, startx:stopx\n ]\n updated_mask[starty:stopy, idx, startx:stopx] = original_mask[\n starty:stopy, idx, startx:stopx\n ]\n else: # dim=2\n updated_data[starty:stopy, startx:stopx, idx] = original_data[\n starty:stopy, startx:stopx, idx\n ]\n updated_mask[starty:stopy, startx:stopx, idx] = original_mask[\n starty:stopy, startx:stopx, idx\n ]\n\n elif key in {\"p\", \"a\"}: # plot full image or restart masking\n if dim == 0:\n xmin, xmax = -0.5, nbx - 0.5\n if invert_yaxis:\n ymin, ymax = -0.5, nby - 0.5 # pointing up\n else:\n ymin, ymax = nby - 0.5, -0.5 # pointing down\n elif dim == 1:\n xmin, xmax = -0.5, nbx - 0.5\n ymin, ymax = nbz - 0.5, -0.5 # pointing down\n else: # dim=2\n xmin, xmax = -0.5, nby - 0.5\n ymin, ymax = nbz - 0.5, -0.5 # pointing down\n if key == \"a\": # restart masking\n updated_data[:] = original_data[:]\n updated_mask[:] = original_mask[:]\n\n elif key == \"q\":\n stop_masking = True\n\n else:\n return updated_data, updated_mask, width, vmax, idx, stop_masking\n\n axs.cla()\n if dim == 0:\n axs.imshow(updated_data[idx, :, :], vmin=vmin, vmax=vmax)\n axs.set_title(\n \"XY - Frame \" + str(idx + 1) + \"/\" + str(nbz) + \"\\n\"\n \"m mask ; b unmask ; q quit ; u next frame ; d previous frame\\n\"\n \"up larger ; down smaller ; right darker ; left brighter\"\n )\n elif dim == 1:\n axs.imshow(updated_data[:, idx, :], vmin=vmin, vmax=vmax)\n axs.set_title(\n \"XZ - Frame \" + str(idx + 1) + \"/\" + str(nby) + \"\\n\"\n \"m mask ; b unmask ; q quit ; u next frame ; d previous frame\\n\"\n \"up larger ; down smaller ; right darker ; left brighter\"\n )\n elif dim == 2:\n axs.imshow(updated_data[:, :, idx], vmin=vmin, vmax=vmax)\n axs.set_title(\n \"YZ - Frame \" + str(idx + 1) + \"/\" + str(nbx) + \"\\n\"\n \"m mask ; b unmask ; q quit ; u next frame ; d previous frame\\n\"\n \"up larger ; down smaller ; right darker ; left brighter\"\n )\n if invert_yaxis:\n axs.invert_yaxis()\n axs.set_xlim([xmin, xmax])\n axs.set_ylim([ymin, ymax])\n plt.draw()\n\n return updated_data, updated_mask, width, vmax, idx, stop_masking\n\n\ndef update_aliens_combined(\n key,\n pix,\n piy,\n original_data,\n original_mask,\n updated_data,\n updated_mask,\n axes,\n width,\n dim,\n frame_index,\n vmax,\n vmin=0,\n cmap=default_cmap,\n invert_yaxis=False,\n):\n \"\"\"\n Update the plot while removing the parasitic diffraction intensity in 3D dataset.\n\n :param key: the keyboard key which was pressed\n :param pix: the x value of the mouse pointer\n :param piy: the y value of the mouse pointer\n :param original_data: the 3D data array before masking aliens\n :param original_mask: the 3D mask array before masking aliens\n :param updated_data: the current 3D data array\n :param updated_mask: the current 3D mask array\n :param axes: tuple of the 4 axes instances in a plt.subplots(nrows=2, ncols=2)\n :param width: the half_width of the masking window\n :param dim: the axis currently under review (axis 0, 1 or 2)\n :param frame_index: list of 3 frame indices (one per axis)\n :param vmax: the higher boundary for the colorbar\n :param vmin: the lower boundary for the colorbar\n :param 
cmap: colormap to be used\n :param invert_yaxis: True to invert the y axis of imshow plots\n :return: updated data, mask (-1 filled, 0 non masked, 1 masked voxel) and controls\n \"\"\"\n # check some parameters\n valid.valid_ndarray(\n arrays=(original_data, updated_data, original_mask, updated_mask),\n ndim=3,\n )\n if dim not in {0, 1, 2}:\n raise ValueError(\"dim should be 0, 1 or 2\")\n\n # process arrays\n nbz, nby, nbx = original_data.shape\n stop_masking = False\n if dim == 0:\n current_nby = nby\n current_nbx = nbx\n elif dim == 1:\n current_nby = nbz\n current_nbx = nbx\n else: # dim = 2\n current_nby = nbz\n current_nbx = nby\n\n xmin0, xmax0 = axes[0].get_xlim()\n ymin0, ymax0 = axes[0].get_ylim()\n xmin1, xmax1 = axes[1].get_xlim()\n ymin1, ymax1 = axes[1].get_ylim()\n xmin2, xmax2 = axes[2].get_xlim()\n ymin2, ymax2 = axes[2].get_ylim()\n\n if key == \"u\": # show next frame\n if dim == 0:\n frame_index[0] = frame_index[0] + 1\n if frame_index[0] > nbz - 1:\n frame_index[0] = 0\n elif dim == 1:\n frame_index[1] = frame_index[1] + 1\n if frame_index[1] > nby - 1:\n frame_index[1] = 0\n else: # dim=2\n frame_index[2] = frame_index[2] + 1\n if frame_index[2] > nbx - 1:\n frame_index[2] = 0\n\n elif key == \"d\": # show previous frame\n if dim == 0:\n frame_index[0] = frame_index[0] - 1\n if frame_index[0] < 0:\n frame_index[0] = nbz - 1\n elif dim == 1:\n frame_index[1] = frame_index[1] - 1\n if frame_index[1] < 0:\n frame_index[1] = nby - 1\n else: # dim=2\n frame_index[2] = frame_index[2] - 1\n if frame_index[2] < 0:\n frame_index[2] = nbx - 1\n\n elif key == \"up\":\n width = width + 1\n\n elif key == \"down\":\n width = width - 1\n width = max(width, 0)\n\n elif key == \"right\": # increase colobar max\n vmax = vmax * 2\n\n elif key == \"left\": # reduce colobar max\n vmax = vmax / 2\n vmax = max(vmax, 1)\n\n elif key == \"m\": # mask intensities\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - width)\n if stopx > current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n if dim == 0:\n updated_data[frame_index[0], starty:stopy, startx:stopx] = 0\n updated_mask[frame_index[0], starty:stopy, startx:stopx] = 1\n elif dim == 1:\n updated_data[starty:stopy, frame_index[1], startx:stopx] = 0\n updated_mask[starty:stopy, frame_index[1], startx:stopx] = 1\n else: # dim=2\n updated_data[starty:stopy, startx:stopx, frame_index[2]] = 0\n updated_mask[starty:stopy, startx:stopx, frame_index[2]] = 1\n\n elif key == \"b\": # back to measured intensities\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # 
(horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - width)\n if stopx > current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n if dim == 0:\n updated_data[\n frame_index[0], starty:stopy, startx:stopx\n ] = original_data[frame_index[0], starty:stopy, startx:stopx]\n updated_mask[\n frame_index[0], starty:stopy, startx:stopx\n ] = original_mask[frame_index[0], starty:stopy, startx:stopx]\n elif dim == 1:\n updated_data[\n starty:stopy, frame_index[1], startx:stopx\n ] = original_data[starty:stopy, frame_index[1], startx:stopx]\n updated_mask[\n starty:stopy, frame_index[1], startx:stopx\n ] = original_mask[starty:stopy, frame_index[1], startx:stopx]\n else: # dim=2\n updated_data[\n starty:stopy, startx:stopx, frame_index[2]\n ] = original_data[starty:stopy, startx:stopx, frame_index[2]]\n updated_mask[\n starty:stopy, startx:stopx, frame_index[2]\n ] = original_mask[starty:stopy, startx:stopx, frame_index[2]]\n\n elif key == \"f\": # fill empty voxels\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - width)\n if stopx > current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n if dim == 0:\n updated_data[\n frame_index[0], starty:stopy, startx:stopx\n ] = original_data.max()\n updated_mask[frame_index[0], starty:stopy, startx:stopx] = -1\n elif dim == 1:\n updated_data[\n starty:stopy, frame_index[1], startx:stopx\n ] = original_data.max()\n updated_mask[starty:stopy, frame_index[1], startx:stopx] = -1\n else: # dim=2\n updated_data[\n starty:stopy, startx:stopx, frame_index[2]\n ] = original_data.max()\n updated_mask[starty:stopy, startx:stopx, frame_index[2]] = -1\n\n elif key in {\"p\", \"a\"}: # plot full image or restart masking\n xmin0, xmax0 = -0.5, nbx - 0.5\n if invert_yaxis:\n ymin0, ymax0 = -0.5, nby - 0.5 # pointing up\n else:\n ymin0, ymax0 = nby - 0.5, -0.5 # pointing down\n xmin1, xmax1 = -0.5, nbx - 0.5\n ymin1, ymax1 = nbz - 0.5, -0.5 # pointing down\n xmin2, xmax2 = -0.5, nby - 0.5\n ymin2, ymax2 = nbz - 0.5, -0.5 # pointing down\n if key == \"a\": # restart masking\n updated_data[:] = original_data[:]\n updated_mask[:] = original_mask[:]\n\n elif key == \"q\":\n stop_masking = True\n\n else:\n return updated_data, updated_mask, width, vmax, frame_index, stop_masking\n\n axes[0].cla()\n axes[1].cla()\n axes[2].cla()\n axes[0].imshow(updated_data[frame_index[0], :, :], vmin=vmin, vmax=vmax, cmap=cmap)\n axes[1].imshow(updated_data[:, frame_index[1], :], vmin=vmin, vmax=vmax, cmap=cmap)\n axes[2].imshow(updated_data[:, :, frame_index[2]], vmin=vmin, vmax=vmax, cmap=cmap)\n axes[0].set_title(\"XY - Frame \" + str(frame_index[0] + 1) + \"/\" + str(nbz))\n axes[0].axis(\"scaled\")\n if invert_yaxis:\n axes[0].invert_yaxis()\n axes[0].set_xlim([xmin0, xmax0])\n 
axes[0].set_ylim([ymin0, ymax0])\n axes[1].set_title(\"XZ - Frame \" + str(frame_index[1] + 1) + \"/\" + str(nby))\n axes[1].axis(\"scaled\")\n axes[1].set_xlim([xmin1, xmax1])\n axes[1].set_ylim([ymin1, ymax1])\n axes[2].set_title(\"YZ - Frame \" + str(frame_index[2] + 1) + \"/\" + str(nbx))\n axes[2].axis(\"scaled\")\n axes[2].set_xlim([xmin2, xmax2])\n axes[2].set_ylim([ymin2, ymax2])\n plt.draw()\n\n return updated_data, updated_mask, width, vmax, frame_index, stop_masking\n\n\ndef update_aliens_2d(\n key,\n pix,\n piy,\n original_data,\n original_mask,\n updated_data,\n updated_mask,\n figure,\n width,\n vmax,\n vmin=0,\n invert_yaxis=False,\n):\n \"\"\"\n Update the plot while removing the parasitic diffraction intensity in 2D dataset.\n\n :param key: the keyboard key which was pressed\n :param pix: the x value of the mouse pointer\n :param piy: the y value of the mouse pointer\n :param original_data: the 2D data array before masking aliens\n :param original_mask: the 2D mask array before masking aliens\n :param updated_data: the current 2D data array\n :param updated_mask: the current 2D mask array\n :param figure: the figure instance\n :param width: the half_width of the masking window\n :param vmax: the higher boundary for the colorbar\n :param vmin: the lower boundary for the colorbar\n :param invert_yaxis: True to invert the y axis of imshow plots\n :return: updated data, mask and controls\n \"\"\"\n # check some parameters\n valid.valid_ndarray(\n arrays=(original_data, updated_data, original_mask, updated_mask),\n ndim=2,\n )\n\n # process arrays\n nby, nbx = original_data.shape\n stop_masking = False\n\n axs = figure.gca()\n xmin, xmax = axs.get_xlim()\n ymin, ymax = axs.get_ylim()\n\n if key == \"up\":\n width = width + 1\n\n elif key == \"down\":\n width = width - 1\n width = max(width, 0)\n\n elif key == \"right\":\n vmax = vmax * 2\n\n elif key == \"left\":\n vmax = vmax / 2\n vmax = max(vmax, 1)\n\n elif key == \"m\":\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= nby:\n stopy = max(nby, piy - width)\n if stopy > nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= nbx:\n stopx = max(nbx, pix - width)\n if stopx > nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n updated_data[starty:stopy, startx:stopx] = 0\n updated_mask[starty:stopy, startx:stopx] = 1\n\n elif key == \"b\":\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= nby:\n stopy = max(nby, piy - width)\n if stopy > nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= nbx:\n stopx = max(nbx, pix - width)\n if stopx > nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n updated_data[starty:stopy, startx:stopx] = original_data[\n 
starty:stopy, startx:stopx\n ]\n updated_mask[starty:stopy, startx:stopx] = original_mask[\n starty:stopy, startx:stopx\n ]\n\n elif key in {\"p\", \"a\"}: # plot full image or restart masking\n xmin, xmax = -0.5, nbx - 0.5\n if invert_yaxis:\n ymin, ymax = -0.5, nby - 0.5 # pointing up\n else:\n ymin, ymax = nby - 0.5, -0.5 # pointing down\n if key == \"a\": # restart masking\n updated_data[:] = original_data[:]\n updated_mask[:] = original_mask[:]\n\n elif key == \"q\":\n stop_masking = True\n\n else:\n return updated_data, updated_mask, width, vmax, stop_masking\n\n axs.cla()\n axs.imshow(updated_data, vmin=vmin, vmax=vmax)\n axs.set_title(\n \"m mask ; b unmask ; q quit ; u next frame ; d previous frame\\n\"\n \"up larger ; down smaller ; right darker ; left brighter\"\n )\n if invert_yaxis:\n axs.invert_yaxis()\n axs.set_xlim([xmin, xmax])\n axs.set_ylim([ymin, ymax])\n plt.draw()\n\n return updated_data, updated_mask, width, vmax, stop_masking\n\n\ndef update_background(\n key, distances, data, figure, flag_pause, xy, scale=\"log\", xlim=None, ylim=None\n):\n \"\"\"\n Define the background for a 1D reciprocal space dataset.\n\n :param key: the keyboard key which was pressed\n :param distances: x axis for data\n :param data: the 1D data before background subtraction\n :param figure: the figure instance\n :param flag_pause: set to 1 to stop registering vertices using mouse clicks\n :param xy: the list of vertices which defines a polygon to be masked\n :param scale: scale of data, 'linear' or 'log'\n :param xlim: x axis plot limits\n :param ylim: y axis plot limits\n :return: updated background and controls\n \"\"\"\n valid.valid_ndarray(data, ndim=1)\n axs = figure.gca()\n if xlim is None:\n xmin, xmax = axs.get_xlim()\n else:\n xmin, xmax = xlim\n if ylim is None:\n ymin, ymax = axs.get_ylim()\n else:\n ymin, ymax = ylim\n\n stop_masking = False\n xy = sorted(xy, key=itemgetter(0))\n\n if key == \"b\": # remove the last selected background point\n xy.pop()\n\n elif key == \"a\": # restart background selection from the beginning\n xy = []\n print(\"restart background selection\")\n\n elif key == \"p\": # plot background\n pass\n\n elif key == \"x\":\n if not flag_pause:\n flag_pause = True\n print(\"pause for pan/zoom\")\n else:\n flag_pause = False\n print(\"resume masking\")\n\n elif key == \"q\":\n stop_masking = True\n\n else:\n return flag_pause, xy, stop_masking\n\n background = np.asarray(xy)\n axs.cla()\n if len(xy) != 0:\n if scale == \"linear\":\n axs.plot(distances, data, \".-r\", background[:, 0], background[:, 1], \"b\")\n else:\n axs.plot(\n distances,\n np.log10(data),\n \".-r\",\n background[:, 0],\n background[:, 1],\n \"b\",\n ) # background is in log scale directly\n else: # restart background selection\n if scale == \"linear\":\n axs.plot(distances, data, \".-r\")\n else:\n axs.plot(distances, np.log10(data), \".-r\")\n axs.set_xlim([xmin, xmax])\n axs.set_ylim([ymin, ymax])\n axs.set_xlabel(\"q (1/nm)\")\n axs.set_ylabel(\"Angular average (A.U.)\")\n axs.set_title(\n \"Click to select background points\\nx to pause/resume for pan/zoom\\n\"\n \"a restart ; p plot background ; q quit\"\n )\n plt.draw()\n\n return flag_pause, xy, stop_masking\n\n\ndef update_mask(\n key,\n pix,\n piy,\n original_data,\n original_mask,\n updated_data,\n updated_mask,\n figure,\n flag_pause,\n points,\n xy,\n width,\n dim,\n vmax,\n vmin=0,\n masked_color=0.1,\n invert_yaxis=False,\n):\n \"\"\"\n Update the mask corresponding to parasitic intensities in a 3D dataset.\n\n The GUI 
contains one 2D projection of the mask, the projection axis is\n determined by the parameter \"dim\".\n\n :param key: the keyboard key which was pressed\n :param pix: the x value of the mouse pointer\n :param piy: the y value of the mouse pointer\n :param original_data: the 3D data array before masking\n :param original_mask: the 3D mask array before masking\n :param updated_data: the current 3D data array\n :param updated_mask: the temporary 2D mask array with updated points\n :param figure: the figure instance\n :param flag_pause: set to 1 to stop registering vertices using mouse clicks\n :param points: list of all point coordinates: points=np.stack((x, y), axis=0).T\n with x=x.flatten() , y = y.flatten() given x,y=np.meshgrid(np.arange(nx),\n np.arange(ny))\n :param xy: the list of vertices which defines a polygon to be masked\n :param width: the half_width of the masking window\n :param dim: the axis currently under review (axis 0, 1 or 2)\n :param vmax: the higher boundary for the colorbar\n :param vmin: the lower boundary for the colorbar\n :param masked_color: the value that detector gaps should have in plots\n :param invert_yaxis: True to invert the y axis of imshow plots\n :return: updated data, mask and controls\n \"\"\"\n # check some parameters\n valid.valid_ndarray(\n arrays=(original_data, updated_data, original_mask),\n ndim=3,\n )\n valid.valid_ndarray(updated_mask, ndim=2)\n if dim not in {0, 1, 2}:\n raise ValueError(\"dim should be 0, 1 or 2\")\n\n # process arrays\n nbz, nby, nbx = original_data.shape\n stop_masking = False\n if dim == 0:\n current_nby = nby\n current_nbx = nbx\n elif dim == 1:\n current_nby = nbz\n current_nbx = nbx\n else: # dim = 2\n current_nby = nbz\n current_nbx = nby\n\n axs = figure.gca()\n xmin, xmax = axs.get_xlim()\n ymin, ymax = axs.get_ylim()\n\n if key == \"up\":\n width = width + 1\n\n elif key == \"down\":\n width = width - 1\n width = max(width, 0)\n\n elif key == \"right\":\n vmax = vmax + 1\n\n elif key == \"left\":\n vmax = vmax - 1\n vmax = max(vmax, 1)\n\n elif key == \"m\":\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - width)\n if stopx > current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n updated_mask[starty:stopy, startx:stopx] = 1\n\n elif key == \"b\":\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - width)\n if stopx 
> current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n updated_mask[starty:stopy, startx:stopx] = 0\n if dim == 0:\n updated_data[:, starty:stopy, startx:stopx] = original_data[\n :, starty:stopy, startx:stopx\n ]\n elif dim == 1:\n updated_data[starty:stopy, :, startx:stopx] = original_data[\n starty:stopy, :, startx:stopx\n ]\n else: # dim=2\n updated_data[starty:stopy, startx:stopx, :] = original_data[\n starty:stopy, startx:stopx, :\n ]\n\n elif key == \"a\": # restart mask from beginning\n updated_data[:] = original_data[:]\n xy = []\n print(\"Restart masking...\")\n if dim == 0:\n updated_data[original_mask == 1] = (\n masked_color / nbz\n ) # masked pixels plotted with the value of masked_pixel\n updated_mask = np.zeros((nby, nbx))\n xmin, xmax = -0.5, nbx - 0.5\n if invert_yaxis:\n ymin, ymax = -0.5, nby - 0.5 # pointing up\n else:\n ymin, ymax = nby - 0.5, -0.5 # pointing down\n elif dim == 1:\n updated_data[original_mask == 1] = (\n masked_color / nby\n ) # masked pixels plotted with the value of masked_pixel\n updated_mask = np.zeros((nbz, nbx))\n xmin, xmax = -0.5, nbx - 0.5\n ymin, ymax = nbz - 0.5, -0.5 # pointing down\n else: # dim=2\n updated_data[original_mask == 1] = (\n masked_color / nbx\n ) # masked pixels plotted with the value of masked_pixel\n updated_mask = np.zeros((nbz, nby))\n xmin, xmax = -0.5, nby - 0.5\n ymin, ymax = nbz - 0.5, -0.5 # pointing down\n\n elif key == \"p\": # plot full image\n if dim == 0:\n xmin, xmax = -0.5, nbx - 0.5\n if invert_yaxis:\n ymin, ymax = -0.5, nby - 0.5 # pointing up\n else:\n ymin, ymax = nby - 0.5, -0.5 # pointing down\n elif dim == 1:\n xmin, xmax = -0.5, nbx - 0.5\n ymin, ymax = nbz - 0.5, -0.5 # pointing down\n else: # dim=2\n xmin, xmax = -0.5, nby - 0.5\n ymin, ymax = nbz - 0.5, -0.5 # pointing down\n if not flag_pause and len(xy) != 0:\n xy.append(xy[0])\n print(xy)\n if dim == 0:\n ind = Path(np.array(xy)).contains_points(points).reshape((nby, nbx))\n elif dim == 1:\n ind = Path(np.array(xy)).contains_points(points).reshape((nbz, nbx))\n else: # dim=2\n ind = Path(np.array(xy)).contains_points(points).reshape((nbz, nby))\n updated_mask[ind] = 1\n xy = [] # allow to mask a different area\n\n elif key == \"r\":\n xy = []\n\n elif key == \"x\":\n if not flag_pause:\n flag_pause = True\n print(\"pause for pan/zoom\")\n else:\n flag_pause = False\n print(\"resume masking\")\n\n elif key == \"q\":\n stop_masking = True\n\n else:\n return updated_data, updated_mask, flag_pause, xy, width, vmax, stop_masking\n\n array = updated_data.sum(axis=dim) # updated_data is not modified\n array[updated_mask == 1] = masked_color\n\n axs.cla()\n axs.imshow(np.log10(abs(array)), vmin=vmin, vmax=vmax)\n if invert_yaxis:\n axs.invert_yaxis()\n axs.set_xlim([xmin, xmax])\n axs.set_ylim([ymin, ymax])\n axs.set_title(\n \"x to pause/resume masking for pan/zoom \\n\"\n \"p plot mask ; a restart ; click to select vertices\\n\"\n \"m mask ; b unmask ; q quit ; u next frame ; d previous frame\\n\"\n \"up larger ; down smaller ; right darker ; left brighter\"\n )\n plt.draw()\n\n return updated_data, updated_mask, flag_pause, xy, width, vmax, stop_masking\n\n\ndef update_mask_combined(\n key,\n pix,\n piy,\n original_data,\n original_mask,\n updated_data,\n updated_mask,\n axes,\n flag_pause,\n points,\n xy,\n width,\n dim,\n click_dim,\n info_text,\n vmax,\n vmin=0,\n cmap=default_cmap,\n invert_yaxis=False,\n):\n \"\"\"\n Update the mask to remove parasitic intensities in a 3D dataset.\n\n The GUI contains the three 2D 
projections of the mask, the axis being modified is\n determined by the parameter \"dim\".\n\n :param key: the keyboard key which was pressed\n :param pix: the x value of the mouse pointer\n :param piy: the y value of the mouse pointer\n :param original_data: the 3D data array before masking\n :param original_mask: the 3D mask array before masking\n :param updated_data: the current 3D data array\n :param updated_mask: the temporary 3D mask array with updated points\n :param axes: tuple of the 4 axes instances in a plt.subplots(nrows=2, ncols=2)\n :param flag_pause: set to 1 to stop registering vertices using mouse clicks\n :param points: list of all point coordinates: points=np.stack((x, y), axis=0).T\n with x=x.flatten() , y = y.flatten() given x,y=np.meshgrid(np.arange(nx),\n np.arange(ny))\n :param xy: the list of vertices which defines a polygon to be masked\n :param width: the half_width of the masking window\n :param dim: the axis currently under review (axis 0, 1 or 2)\n :param click_dim: the dimension (0, 1 or 2) where the selection of mask polygon\n vertices by clicking was performed\n :param info_text: text instance in the figure\n :param vmax: the higher boundary for the colorbar\n :param vmin: the lower boundary for the colorbar\n :param cmap: colormap to be used\n :param invert_yaxis: True to invert the y axis of imshow plots\n :return: updated data, mask (-1 filled, 0 non masked, 1 masked voxel) and controls\n \"\"\"\n # check some parameters\n valid.valid_ndarray(\n arrays=(original_data, updated_data, original_mask, updated_mask),\n ndim=3,\n )\n if dim not in {0, 1, 2}:\n raise ValueError(\"dim should be 0, 1 or 2\")\n\n # process arrays\n nbz, nby, nbx = original_data.shape\n stop_masking = False\n update_fig = False\n if dim == 0:\n current_nby = nby\n current_nbx = nbx\n elif dim == 1:\n current_nby = nbz\n current_nbx = nbx\n else: # dim = 2\n current_nby = nbz\n current_nbx = nby\n\n xmin0, xmax0 = axes[0].get_xlim()\n ymin0, ymax0 = axes[0].get_ylim()\n xmin1, xmax1 = axes[1].get_xlim()\n ymin1, ymax1 = axes[1].get_ylim()\n xmin2, xmax2 = axes[2].get_xlim()\n ymin2, ymax2 = axes[2].get_ylim()\n\n if key == \"up\":\n width = width + 1\n\n elif key == \"down\":\n width = width - 1\n width = max(width, 0)\n\n elif key == \"right\":\n vmax = vmax + 1\n update_fig = True\n\n elif key == \"left\":\n vmax = vmax - 1\n vmax = max(vmax, 1)\n update_fig = True\n\n elif key == \"m\":\n skip = False\n update_fig = True\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - width)\n if stopx > current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n if dim == 0:\n updated_mask[:, starty:stopy, startx:stopx] = 1\n elif dim == 1:\n updated_mask[starty:stopy, :, startx:stopx] = 1\n else: # dim=2\n updated_mask[starty:stopy, startx:stopx, :] = 1\n\n elif key == \"b\":\n skip = False\n update_fig = True\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D 
plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - width)\n if stopx > current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n if dim == 0:\n updated_mask[:, starty:stopy, startx:stopx] = 0\n updated_data[:, starty:stopy, startx:stopx] = original_data[\n :, starty:stopy, startx:stopx\n ]\n elif dim == 1:\n updated_mask[starty:stopy, :, startx:stopx] = 0\n updated_data[starty:stopy, :, startx:stopx] = original_data[\n starty:stopy, :, startx:stopx\n ]\n else: # dim=2\n updated_mask[starty:stopy, startx:stopx, :] = 0\n updated_data[starty:stopy, startx:stopx, :] = original_data[\n starty:stopy, startx:stopx, :\n ]\n\n elif key == \"f\": # fill with ones\n skip = False\n update_fig = True\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= current_nby:\n stopy = max(current_nby, piy - width)\n if stopy > current_nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= current_nbx:\n stopx = max(current_nbx, pix - width)\n if stopx > current_nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n if dim == 0:\n updated_mask[:, starty:stopy, startx:stopx] = -1\n updated_data[:, starty:stopy, startx:stopx] = original_data.max()\n elif dim == 1:\n updated_mask[starty:stopy, :, startx:stopx] = -1\n updated_data[starty:stopy, :, startx:stopx] = original_data.max()\n else: # dim=2\n updated_mask[starty:stopy, startx:stopx, :] = -1\n updated_data[starty:stopy, startx:stopx, :] = original_data.max()\n\n elif key == \"a\": # restart mask from beginning\n update_fig = True\n updated_data = np.copy(original_data)\n xy = []\n click_dim = None\n print(\"Restart masking...\")\n xmin0, xmax0 = -0.5, nbx - 0.5\n if invert_yaxis:\n ymin0, ymax0 = -0.5, nby - 0.5 # pointing up\n else:\n ymin0, ymax0 = nby - 0.5, -0.5 # pointing down\n xmin1, xmax1 = -0.5, nbx - 0.5\n ymin1, ymax1 = nbz - 0.5, -0.5 # pointing down\n xmin2, xmax2 = -0.5, nby - 0.5\n ymin2, ymax2 = nbz - 0.5, -0.5 # pointing down\n\n updated_data[:] = original_data[:]\n updated_mask = np.zeros((nbz, nby, nbx))\n\n elif key == \"p\": # plot full image\n update_fig = True\n xmin0, xmax0 = -0.5, nbx - 0.5\n if invert_yaxis:\n ymin0, ymax0 = -0.5, nby - 0.5 # pointing up\n else:\n ymin0, ymax0 = nby - 0.5, -0.5 # pointing down\n xmin1, xmax1 = -0.5, nbx - 0.5\n ymin1, ymax1 = nbz - 0.5, -0.5 # pointing down\n xmin2, xmax2 = -0.5, nby - 0.5\n ymin2, ymax2 = nbz - 0.5, -0.5 # pointing down\n if not flag_pause and len(xy) != 0:\n xy.append(xy[0])\n print(xy)\n if click_dim == 0:\n ind = Path(np.array(xy)).contains_points(points).reshape((nby, nbx))\n temp_mask = np.zeros((nby, nbx))\n temp_mask[ind] = 1\n updated_mask[\n 
np.repeat(temp_mask[np.newaxis, :, :], repeats=nbz, axis=0) == 1\n ] = 1\n elif click_dim == 1:\n ind = Path(np.array(xy)).contains_points(points).reshape((nbz, nbx))\n temp_mask = np.zeros((nbz, nbx))\n temp_mask[ind] = 1\n updated_mask[\n np.repeat(temp_mask[:, np.newaxis, :], repeats=nby, axis=1) == 1\n ] = 1\n else: # dim=2\n ind = Path(np.array(xy)).contains_points(points).reshape((nbz, nby))\n temp_mask = np.zeros((nbz, nby))\n temp_mask[ind] = 1\n updated_mask[\n np.repeat(temp_mask[:, :, np.newaxis], repeats=nbx, axis=2) == 1\n ] = 1\n xy = [] # allow to mask a different area\n click_dim = None\n\n elif key == \"r\":\n xy = []\n\n elif key == \"x\":\n if not flag_pause:\n flag_pause = True\n print(\"pause for pan/zoom\")\n else:\n flag_pause = False\n print(\"resume masking\")\n\n elif key == \"q\":\n stop_masking = True\n\n else:\n return (\n updated_data,\n updated_mask,\n flag_pause,\n xy,\n width,\n vmax,\n click_dim,\n stop_masking,\n info_text,\n )\n\n if update_fig:\n updated_data[original_mask == 1] = 0\n updated_data[updated_mask == 1] = 0\n\n axes[0].cla()\n axes[1].cla()\n axes[2].cla()\n axes[0].imshow(\n np.log10(updated_data.sum(axis=0)), vmin=vmin, vmax=vmax, cmap=cmap\n )\n axes[1].imshow(\n np.log10(updated_data.sum(axis=1)), vmin=vmin, vmax=vmax, cmap=cmap\n )\n axes[2].imshow(\n np.log10(updated_data.sum(axis=2)), vmin=vmin, vmax=vmax, cmap=cmap\n )\n axes[0].set_title(\"XY\")\n axes[0].axis(\"scaled\")\n if invert_yaxis:\n axes[0].invert_yaxis()\n axes[0].set_xlim([xmin0, xmax0])\n axes[0].set_ylim([ymin0, ymax0])\n axes[1].set_title(\"XZ\")\n axes[1].axis(\"scaled\")\n axes[1].set_xlim([xmin1, xmax1])\n axes[1].set_ylim([ymin1, ymax1])\n axes[2].set_title(\"YZ\")\n axes[2].axis(\"scaled\")\n axes[2].set_xlim([xmin2, xmax2])\n axes[2].set_ylim([ymin2, ymax2])\n fig = plt.gcf()\n info_text.remove()\n if flag_pause:\n info_text = fig.text(0.6, 0.05, \"masking paused\", size=16)\n else:\n info_text = fig.text(0.6, 0.05, \"masking enabled\", size=16)\n plt.draw()\n\n return (\n updated_data,\n updated_mask,\n flag_pause,\n xy,\n width,\n vmax,\n click_dim,\n stop_masking,\n info_text,\n )\n\n\ndef update_mask_2d(\n key,\n pix,\n piy,\n original_data,\n original_mask,\n updated_data,\n updated_mask,\n figure,\n flag_pause,\n points,\n xy,\n width,\n vmax,\n vmin=0,\n masked_color=0.1,\n invert_yaxis=False,\n):\n \"\"\"\n Update the mask corresponding to parasitic intensities in a 2D dataset.\n\n :param key: the keyboard key which was pressed\n :param pix: the x value of the mouse pointer\n :param piy: the y value of the mouse pointer\n :param original_data: the 2D data array before masking\n :param original_mask: the 2D mask array before masking\n :param updated_data: the current 2D data array\n :param updated_mask: the temporary 2D mask array with updated points\n :param figure: the figure instance\n :param flag_pause: set to 1 to stop registering vertices using mouse clicks\n :param points: list of all point coordinates: points=np.stack((x, y), axis=0).T\n with x=x.flatten() , y = y.flatten() given x,y=np.meshgrid(np.arange(nx),\n np.arange(ny))\n :param xy: the list of vertices which defines a polygon to be masked\n :param width: the half_width of the masking window\n :param vmax: the higher boundary for the colorbar\n :param vmin: the lower boundary for the colorbar\n :param masked_color: the value that detector gaps should have in plots\n :param invert_yaxis: True to invert the y axis of imshow plots\n :return: updated data, mask and controls\n \"\"\"\n # check 
some parameters\n valid.valid_ndarray(\n arrays=(original_data, updated_data, original_mask, updated_mask),\n ndim=2,\n )\n # process arrays\n nby, nbx = original_data.shape\n stop_masking = False\n\n axs = figure.gca()\n xmin, xmax = axs.get_xlim()\n ymin, ymax = axs.get_ylim()\n\n if key == \"up\":\n width = width + 1\n\n elif key == \"down\":\n width = width - 1\n width = max(width, 0)\n\n elif key == \"right\":\n vmax = vmax + 1\n updated_data[updated_mask == 1] = masked_color\n\n elif key == \"left\":\n vmax = vmax - 1\n vmax = max(vmax, 1)\n updated_data[updated_mask == 1] = masked_color\n\n elif key == \"m\":\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= nby:\n stopy = max(nby, piy - width)\n if stopy > nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= nbx:\n stopx = max(nbx, pix - width)\n if stopx > nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n updated_mask[starty:stopy, startx:stopx] = 1\n updated_data[updated_mask == 1] = masked_color\n\n elif key == \"b\":\n skip = False\n\n # check if the masking window fit in the data range\n # (vertical axis of the 2D plot)\n if (piy - width) < 0:\n starty = min(0, piy + width)\n if starty < 0:\n skip = True\n else:\n starty = piy - width\n if (piy + width) >= nby:\n stopy = max(nby, piy - width)\n if stopy > nby:\n skip = True\n else:\n stopy = piy + width + 1\n\n # check if the masking window fit in the data range\n # (horizontal axis of the 2D plot)\n if (pix - width) < 0:\n startx = min(0, pix + width)\n if startx < 0:\n skip = True\n else:\n startx = pix - width\n if (pix + width) >= nbx:\n stopx = max(nbx, pix - width)\n if stopx > nbx:\n skip = True\n else:\n stopx = pix + width + 1\n\n if not skip:\n updated_mask[starty:stopy, startx:stopx] = 0\n updated_data[updated_mask == 1] = masked_color\n\n elif key == \"a\": # restart mask from beginning\n updated_data = np.copy(original_data)\n xy = []\n print(\"restart masking\")\n updated_data[\n original_mask == 1\n ] = masked_color # masked pixels plotted with the value of masked_pixel\n updated_mask = np.zeros((nby, nbx))\n xmin, xmax = -0.5, nbx - 0.5\n if invert_yaxis:\n ymin, ymax = -0.5, nby - 0.5 # pointing up\n else:\n ymin, ymax = nby - 0.5, -0.5 # pointing down\n\n elif key == \"p\": # plot full image\n xmin, xmax = -0.5, nbx - 0.5\n if invert_yaxis:\n ymin, ymax = -0.5, nby - 0.5 # pointing up\n else:\n ymin, ymax = nby - 0.5, -0.5 # pointing down\n if not flag_pause and len(xy) != 0:\n xy.append(xy[0])\n print(xy)\n ind = Path(np.array(xy)).contains_points(points).reshape((nby, nbx))\n updated_mask[ind] = 1\n\n updated_data[updated_mask == 1] = masked_color\n xy = [] # allow to mask a different area\n\n elif key == \"r\":\n xy = []\n\n elif key == \"x\":\n if not flag_pause:\n flag_pause = True\n print(\"pause for pan/zoom\")\n else:\n flag_pause = False\n print(\"resume masking\")\n\n elif key == \"q\":\n stop_masking = True\n\n else:\n return updated_data, updated_mask, flag_pause, xy, width, vmax, stop_masking\n\n axs.cla()\n axs.imshow(np.log10(abs(updated_data)), vmin=vmin, vmax=vmax)\n if invert_yaxis:\n 
axs.invert_yaxis()\n axs.set_xlim([xmin, xmax])\n axs.set_ylim([ymin, ymax])\n axs.set_title(\n \"x to pause/resume masking for pan/zoom \\n\"\n \"p plot mask ; a restart ; click to select vertices\\n\"\n \"m mask ; b unmask ; q quit ; u next frame ; d previous frame\\n\"\n \"up larger ; down smaller ; right darker ; left brighter\"\n )\n plt.draw()\n\n return updated_data, updated_mask, flag_pause, xy, width, vmax, stop_masking\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data\n# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP\n# (c) 07/2019-present : DESY PHOTON SCIENCE\n# authors:\n# Jerome Carnis, [email protected]\n\nimport gc\nimport tkinter as tk\nfrom tkinter import filedialog\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport bcdi.graph.graph_utils as gu\nimport bcdi.utils.utilities as util\nfrom bcdi.graph.colormap import ColormapFactory\n\nhelptext = \"\"\"\nCrop a stacked 3D dataset saved in NPZ format, to the desired region of interest.\n\"\"\"\n\nscan = 24 # scan number, used in the filename when saving\ndatadir = \"D:/data/Longfei/data/B15_syn_S1_2_00024/pynxraw/\"\ncrop_center = [75, 128, 90] # center of the region of interest\nroi_size = (\n 144,\n 256,\n 180,\n) # size of the region of interest to crop centered on crop_center, before binning\nbinning = (1, 1, 1) # binning to apply further to the cropped data\nload_mask = True # True to load the mask and crop it\nload_qvalues = False # True to load the q values and crop it\nis_orthogonal = False # True if the data is in an orthogonal frame, only used for plots\nreciprocal_space = True # True if the data is in reciprocal space, only used for plots\ndebug = True # True to see more plots\ncomment = \"\" # should start with _\n##################################\n# end of user-defined parameters #\n##################################\n\n###################\n# define colormap #\n###################\nmy_cmap = ColormapFactory().cmap\n\n#################\n# load the data #\n#################\nroot = tk.Tk()\nroot.withdraw()\nfile_path = filedialog.askopenfilename(\n initialdir=datadir,\n title=\"Select the data file\",\n filetypes=[(\"NPZ\", \"*.npz\"), (\"CXI\", \"*.cxi\"), (\"HDF5\", \"*.h5\")],\n)\ndata, _ = util.load_file(file_path)\ndata = data.astype(float)\nnbz, nby, nbx = data.shape\n\n#################################################################\n# check parameters depending on the shape of the reference scan #\n#################################################################\ncrop_center = list(\n crop_center or [nbz // 2, nby // 2, nbx // 2]\n) # if None, default to the middle of the array\nif len(crop_center) != 3:\n raise ValueError(\"crop_center should be a list or tuple of three indices\")\nif not np.all(np.asarray(crop_center) - np.asarray(roi_size) // 2 >= 0):\n raise ValueError(\"crop_center incompatible with roi_size\")\nif not (\n crop_center[0] + roi_size[0] // 2 <= nbz\n and crop_center[1] + roi_size[1] // 2 <= nby\n and crop_center[2] + roi_size[2] // 2 <= nbx\n):\n raise ValueError(\"crop_center incompatible with roi_size\")\n\n#######################################################\n# crop the data, and optionally the mask and q values #\n#######################################################\ndata = util.crop_pad(\n data, output_shape=roi_size, crop_center=crop_center, debugging=debug\n)\ndata = util.bin_data(data, binning=binning, debugging=debug)\ncomment = (\n 
f\"{data.shape[0]}_{data.shape[1]}_{data.shape[2]}_\"\n f\"{binning[0]}_{binning[1]}_{binning[2]}\" + comment\n)\nnp.savez_compressed(datadir + \"S\" + str(scan) + \"_pynx\" + comment + \".npz\", data=data)\n\nfig, _, _ = gu.multislices_plot(\n data,\n sum_frames=True,\n scale=\"log\",\n plot_colorbar=True,\n vmin=0,\n title=\"Cropped data\",\n is_orthogonal=is_orthogonal,\n reciprocal_space=reciprocal_space,\n)\nfig.savefig(datadir + \"S\" + str(scan) + \"_pynx\" + comment + \".png\")\nplt.close(fig)\ndel data\ngc.collect()\n\nif load_mask:\n file_path = filedialog.askopenfilename(\n initialdir=datadir,\n title=\"Select the mask file\",\n filetypes=[(\"NPZ\", \"*.npz\"), (\"CXI\", \"*.cxi\"), (\"HDF5\", \"*.h5\")],\n )\n mask, _ = util.load_file(file_path)\n mask = mask.astype(float)\n mask = util.crop_pad(\n mask, output_shape=roi_size, crop_center=crop_center, debugging=debug\n )\n mask = util.bin_data(mask, binning=binning, debugging=debug)\n\n mask[np.nonzero(mask)] = 1\n mask = mask.astype(int)\n np.savez_compressed(\n datadir + \"S\" + str(scan) + \"_maskpynx\" + comment + \".npz\", mask=mask\n )\n fig, _, _ = gu.multislices_plot(\n mask,\n sum_frames=True,\n scale=\"linear\",\n plot_colorbar=True,\n vmin=0,\n title=\"Cropped mask\",\n is_orthogonal=is_orthogonal,\n reciprocal_space=reciprocal_space,\n )\n fig.savefig(datadir + \"S\" + str(scan) + \"_maskpynx\" + comment + \".png\")\n plt.close(fig)\n del mask\n gc.collect()\n\nif load_qvalues:\n file_path = filedialog.askopenfilename(\n initialdir=datadir,\n title=\"Select the file containing q values\",\n filetypes=[(\"NPZ\", \"*.npz\")],\n )\n q_values = np.load(file_path)\n qx = q_values[\"qx\"] # 1D array\n qy = q_values[\"qy\"] # 1D array\n qz = q_values[\"qz\"] # 1D array\n qx = util.crop_pad_1d(qx, roi_size[0], crop_center=crop_center[0]) # qx along z\n qy = util.crop_pad_1d(qy, roi_size[2], crop_center=crop_center[2]) # qy along x\n qz = util.crop_pad_1d(qz, roi_size[1], crop_center=crop_center[1]) # qz along y\n\n numz, numy, numx = len(qx), len(qz), len(qy)\n qx = qx[: numz - (numz % binning[0]) : binning[0]] # along z downstream\n qz = qz[: numy - (numy % binning[1]) : binning[1]] # along y vertical\n qy = qy[: numx - (numx % binning[2]) : binning[2]] # along x outboard\n\n np.savez_compressed(\n datadir + \"S\" + str(scan) + \"_qvalues_\" + comment + \".npz\", qx=qx, qz=qz, qy=qy\n )\n\nprint(\"End of script\")\nplt.ioff()\nplt.show()\n", "# -*- coding: utf-8 -*-\n\n# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data\n# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP\n# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE\n# authors:\n# Jerome Carnis, [email protected]\n\nimport unittest\n\nimport matplotlib\n\n\ndef has_backend(backend: str) -> bool:\n \"\"\"Check if the desired backend is available on the runner.\"\"\"\n try:\n matplotlib.use(backend)\n except ImportError:\n return False\n return True\n\n\ndef run_tests(test_class):\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n runner = unittest.TextTestRunner(verbosity=2)\n return runner.run(suite)\n" ]
[ [ "numpy.dot", "matplotlib.pyplot.imshow", "matplotlib.pyplot.connect", "numpy.sqrt", "numpy.asarray", "numpy.concatenate", "numpy.arctan2", "scipy.stats.gaussian_kde", "scipy.ndimage.measurements.center_of_mass", "scipy.interpolate.griddata", "numpy.cross", "matplotlib.pyplot.gca", "numpy.unique", "numpy.arcsin", "numpy.arange", "scipy.ndimage.distance_transform_edt", "numpy.copy", "matplotlib.pyplot.axis", "matplotlib.pyplot.disconnect", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.nonzero", "numpy.isnan", "numpy.multiply", "numpy.median", "numpy.rint", "matplotlib.patches.Circle", "scipy.ndimage.label", "numpy.delete", "numpy.array", "matplotlib.pyplot.ion", "scipy.signal.convolve", "numpy.sum", "matplotlib.patches.Ellipse", "numpy.isinf", "numpy.gradient", "matplotlib.pyplot.subplots", "numpy.linalg.norm", "numpy.ones", "numpy.argwhere", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.ioff", "matplotlib.pyplot.pause" ], [ "matplotlib.ticker.MultipleLocator", "numpy.linspace", "numpy.asarray", "scipy.interpolate.griddata", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.arange", "matplotlib.pyplot.gcf", "numpy.sin", "matplotlib.ticker.LinearLocator", "numpy.copy", "matplotlib.pyplot.subplot", "matplotlib.pyplot.axis", "numpy.repeat", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.isclose", "numpy.log", "matplotlib.pyplot.title", "numpy.isnan", "numpy.logspace", "numpy.rint", "matplotlib.patches.Circle", "matplotlib.pyplot.savefig", "numpy.log10", "matplotlib.pyplot.ion", "numpy.flip", "numpy.array", "numpy.isinf", "numpy.gradient", "matplotlib.pyplot.sca", "matplotlib.pyplot.subplots", "matplotlib.pyplot.draw", "numpy.cos", "matplotlib.pyplot.ioff", "matplotlib.ticker.LogLocator", "matplotlib.pyplot.pause", "numpy.vstack" ], [ "numpy.nonzero", "numpy.asarray", "matplotlib.pyplot.ioff", "matplotlib.pyplot.close", "numpy.load", "matplotlib.pyplot.show" ], [ "matplotlib.use" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.10", "1.3", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vinnamkim/examples
[ "f1838619141d7f2a2553f7282c642a6f51a4df48", "f1838619141d7f2a2553f7282c642a6f51a4df48" ]
[ "regression/main.py", "word_language_model/main.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import print_function\nfrom itertools import count\n\nimport torch\nimport torch.nn.functional as F\n\nPOLY_DEGREE = 4\nW_target = torch.randn(POLY_DEGREE, 1) * 5\nb_target = torch.randn(1) * 5\n\n\ndef make_features(x):\n \"\"\"Builds features i.e. a matrix with columns [x, x^2, x^3, x^4].\"\"\"\n x = x.unsqueeze(1)\n return torch.cat([x ** i for i in range(1, POLY_DEGREE+1)], 1)\n\n\ndef f(x):\n \"\"\"Approximated function.\"\"\"\n return x.mm(W_target) + b_target.item()\n\n\ndef poly_desc(W, b):\n \"\"\"Creates a string description of a polynomial.\"\"\"\n result = 'y = '\n for i, w in enumerate(W):\n result += '{:+.2f} x^{} '.format(w, len(W) - i)\n result += '{:+.2f}'.format(b[0])\n return result\n\n\ndef get_batch(batch_size=32):\n \"\"\"Builds a batch i.e. (x, f(x)) pair.\"\"\"\n random = torch.randn(batch_size)\n x = make_features(random)\n y = f(x)\n return x, y\n\n\n# Define model\nfc = torch.nn.Linear(W_target.size(0), 1)\n\nfor batch_idx in count(1):\n # Get data\n batch_x, batch_y = get_batch()\n\n # Reset gradients\n fc.zero_grad()\n\n # Forward pass\n output = F.smooth_l1_loss(fc(batch_x), batch_y)\n loss = output.item()\n\n # Backward pass\n output.backward()\n\n # Apply gradients\n for param in fc.parameters():\n param.data.add_(-0.1 * param.grad.data)\n\n # Stop criterion\n if loss < 1e-3:\n break\n\nprint('Loss: {:.6f} after {} batches'.format(loss, batch_idx))\nprint('==> Learned function:\\t' + poly_desc(fc.weight.view(-1), fc.bias))\nprint('==> Actual function:\\t' + poly_desc(W_target.view(-1), b_target))\n", "# coding: utf-8\nimport argparse\nimport time\nimport math\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.onnx\n\nimport data\nimport model\n\nparser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM Language Model')\nparser.add_argument('--data', type=str, default='./data/wikitext-2',\n help='location of the data corpus')\nparser.add_argument('--model', type=str, default='LSTM',\n help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU, Transformer)')\nparser.add_argument('--emsize', type=int, default=200,\n help='size of word embeddings')\nparser.add_argument('--nhid', type=int, default=200,\n help='number of hidden units per layer')\nparser.add_argument('--nlayers', type=int, default=2,\n help='number of layers')\nparser.add_argument('--lr', type=float, default=20,\n help='initial learning rate')\nparser.add_argument('--clip', type=float, default=0.25,\n help='gradient clipping')\nparser.add_argument('--epochs', type=int, default=40,\n help='upper epoch limit')\nparser.add_argument('--batch_size', type=int, default=20, metavar='N',\n help='batch size')\nparser.add_argument('--bptt', type=int, default=35,\n help='sequence length')\nparser.add_argument('--dropout', type=float, default=0.2,\n help='dropout applied to layers (0 = no dropout)')\nparser.add_argument('--tied', action='store_true',\n help='tie the word embedding and softmax weights')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA')\nparser.add_argument('--log-interval', type=int, default=200, metavar='N',\n help='report interval')\nparser.add_argument('--save', type=str, default='model.pt',\n help='path to save the final model')\nparser.add_argument('--onnx-export', type=str, default='',\n help='path to export the final model in onnx format')\n\nparser.add_argument('--nhead', type=int, default=2,\n help='the number of heads in 
the encoder/decoder of the transformer model')\n\nargs = parser.parse_args()\n\n# Set the random seed manually for reproducibility.\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n###############################################################################\n# Load data\n###############################################################################\n\ncorpus = data.Corpus(args.data)\n\n# Starting from sequential data, batchify arranges the dataset into columns.\n# For instance, with the alphabet as the sequence and batch size 4, we'd get\n# ┌ a g m s ┐\n# │ b h n t │\n# │ c i o u │\n# │ d j p v │\n# │ e k q w │\n# └ f l r x ┘.\n# These columns are treated as independent by the model, which means that the\n# dependence of e. g. 'g' on 'f' can not be learned, but allows more efficient\n# batch processing.\n\ndef batchify(data, bsz):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = data.size(0) // bsz\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * bsz)\n # Evenly divide the data across the bsz batches.\n data = data.view(bsz, -1).t().contiguous()\n return data.to(device)\n\neval_batch_size = 10\ntrain_data = batchify(corpus.train, args.batch_size)\nval_data = batchify(corpus.valid, eval_batch_size)\ntest_data = batchify(corpus.test, eval_batch_size)\n\n###############################################################################\n# Build the model\n###############################################################################\n\nntokens = len(corpus.dictionary)\nif args.model == 'Transformer':\n model = model.TransformerModel(ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout).to(device)\nelse:\n model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied).to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\n###############################################################################\n# Training code\n###############################################################################\n\ndef repackage_hidden(h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(repackage_hidden(v) for v in h)\n\n\n# get_batch subdivides the source data into chunks of length args.bptt.\n# If source is equal to the example output of the batchify function, with\n# a bptt-limit of 2, we'd get the following two Variables for i = 0:\n# ┌ a g m s ┐ ┌ b h n t ┐\n# └ b h n t ┘ └ c i o u ┘\n# Note that despite the name of the function, the subdivison of data is not\n# done along the batch dimension (i.e. dimension 1), since that was handled\n# by the batchify function. 
The chunks are along dimension 0, corresponding\n# to the seq_len dimension in the LSTM.\n\ndef get_batch(source, i):\n seq_len = min(args.bptt, len(source) - 1 - i)\n data = source[i:i+seq_len]\n target = source[i+1:i+1+seq_len].view(-1)\n return data, target\n\n\ndef evaluate(data_source):\n # Turn on evaluation mode which disables dropout.\n model.eval()\n total_loss = 0.\n ntokens = len(corpus.dictionary)\n if args.model != 'Transformer':\n hidden = model.init_hidden(eval_batch_size)\n with torch.no_grad():\n for i in range(0, data_source.size(0) - 1, args.bptt):\n data, targets = get_batch(data_source, i)\n if args.model == 'Transformer':\n output = model(data)\n else:\n output, hidden = model(data, hidden)\n hidden = repackage_hidden(hidden)\n output_flat = output.view(-1, ntokens)\n total_loss += len(data) * criterion(output_flat, targets).item()\n return total_loss / (len(data_source) - 1)\n\n\ndef train():\n # Turn on training mode which enables dropout.\n model.train()\n total_loss = 0.\n start_time = time.time()\n ntokens = len(corpus.dictionary)\n if args.model != 'Transformer':\n hidden = model.init_hidden(args.batch_size)\n for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):\n data, targets = get_batch(train_data, i)\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n model.zero_grad()\n if args.model == 'Transformer':\n output = model(data)\n else:\n hidden = repackage_hidden(hidden)\n output, hidden = model(data, hidden)\n loss = criterion(output.view(-1, ntokens), targets)\n loss.backward()\n\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)\n for p in model.parameters():\n p.data.add_(-lr, p.grad.data)\n\n total_loss += loss.item()\n\n if batch % args.log_interval == 0 and batch > 0:\n cur_loss = total_loss / args.log_interval\n elapsed = time.time() - start_time\n print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '\n 'loss {:5.2f} | ppl {:8.2f}'.format(\n epoch, batch, len(train_data) // args.bptt, lr,\n elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))\n total_loss = 0\n start_time = time.time()\n\n\ndef export_onnx(path, batch_size, seq_len):\n print('The model is also exported in ONNX format at {}'.\n format(os.path.realpath(args.onnx_export)))\n model.eval()\n dummy_input = torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)\n hidden = model.init_hidden(batch_size)\n torch.onnx.export(model, (dummy_input, hidden), path)\n\n\n# Loop over epochs.\nlr = args.lr\nbest_val_loss = None\n\n# At any point you can hit Ctrl + C to break out of training early.\ntry:\n for epoch in range(1, args.epochs+1):\n epoch_start_time = time.time()\n train()\n val_loss = evaluate(val_data)\n print('-' * 89)\n print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '\n 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),\n val_loss, math.exp(val_loss)))\n print('-' * 89)\n # Save the model if the validation loss is the best we've seen so far.\n if not best_val_loss or val_loss < best_val_loss:\n with open(args.save, 'wb') as f:\n torch.save(model, f)\n best_val_loss = val_loss\n else:\n # Anneal the learning rate if no improvement has been seen in the validation dataset.\n lr /= 4.0\nexcept KeyboardInterrupt:\n print('-' * 89)\n print('Exiting from 
training early')\n\n# Load the best saved model.\nwith open(args.save, 'rb') as f:\n model = torch.load(f)\n # after load the rnn params are not a continuous chunk of memory\n # this makes them a continuous chunk, and will speed up forward pass\n # Currently, only rnn model supports flatten_parameters function.\n if args.model in ['RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU']:\n model.rnn.flatten_parameters()\n\n# Run on test data.\ntest_loss = evaluate(test_data)\nprint('=' * 89)\nprint('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(\n test_loss, math.exp(test_loss)))\nprint('=' * 89)\n\nif len(args.onnx_export) > 0:\n # Export the model in ONNX format.\n export_onnx(args.onnx_export, batch_size=1, seq_len=args.bptt)\n" ]
[ [ "torch.randn" ], [ "torch.nn.CrossEntropyLoss", "torch.onnx.export", "torch.LongTensor", "torch.load", "torch.manual_seed", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
skmatz/frcnn
[ "eae9d42f964a5883f72dc294984c019b3c75e837" ]
[ "frcnn/viewer.py" ]
[ "\"\"\"Module for (demo) viewer.\"\"\"\n\nimport os\nfrom dataclasses import dataclass\nfrom glob import glob\nfrom logging import getLogger\nfrom os.path import basename, join\nfrom typing import List, Optional, Tuple\n\nimport cv2\nimport numpy as np\nimport seaborn as sns\nimport torch\nimport torch.cuda\nimport torchvision\nfrom hydra.utils import to_absolute_path\n\nfrom frcnn.labels import COCO91\nfrom frcnn.models import FasterRCNN, fasterrcnn_resnet50_fpn\n\n__all__ = [\"ImageViewer\"]\n\nlogger = getLogger(__name__)\n\nColorType = Tuple[int, int, int]\n\n\n@dataclass\nclass BasicConfig:\n gpu: bool\n conf: float\n display: bool\n weights: Optional[str]\n\n\n@dataclass\nclass ImageConfig:\n root: str\n outputs: str\n\n\n@dataclass\nclass Config:\n basic: BasicConfig\n image: ImageConfig\n\n\n@dataclass\nclass FasterRCNNOutput:\n boxes: torch.Tensor\n labels: torch.Tensor\n scores: torch.Tensor\n\n\nclass ImageViewer:\n COLORS: List[ColorType] = [\n tuple(int(c * 255) for c in color) for color in sns.color_palette(n_colors=len(COCO91)) # type: ignore\n ]\n\n def __init__(self, cfg: Config):\n self._cfg = cfg\n self._model = self._load_model(cfg.basic.weights)\n self._paths = sorted(glob(join(to_absolute_path(cfg.image.root), \"*\")))\n self._device = \"cuda\" if cfg.basic.gpu and torch.cuda.is_available() else \"cpu\"\n\n os.makedirs(cfg.image.outputs, exist_ok=True)\n\n @torch.no_grad()\n def run(self):\n self._model = self._model.to(self._device).eval()\n\n for i, path in enumerate(self._paths):\n image_bgr: np.ndarray = cv2.imread(path)\n image_rgb: np.ndarray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)\n image_tensor: torch.Tensor = torchvision.transforms.functional.to_tensor(image_rgb).to(self._device)\n\n # only the first element because input only one image\n output = FasterRCNNOutput(**self._model([image_tensor])[0])\n\n boxes = output.boxes.cpu().numpy()\n labels = output.labels.cpu().numpy()\n scores = output.scores.cpu().numpy()\n\n logger.debug(\n f\"[{i + 1}/{len(self._paths)}] Detect {len([s for s in scores if s >= self._cfg.basic.conf]):2d} \"\n + f\"objects in {path}\",\n )\n\n image_bgr = self._draw_results(image_bgr, boxes, labels, scores)\n\n if self._cfg.basic.display:\n cv2.imshow(\"\", image_bgr)\n cv2.waitKey(1)\n\n cv2.imwrite(join(self._cfg.image.outputs, basename(path)), image_bgr)\n\n @staticmethod\n def _load_model(weights: Optional[str]) -> FasterRCNN:\n logger.debug(f\"Load weights: {weights}\")\n\n if weights is None:\n model = fasterrcnn_resnet50_fpn(pretrained=True)\n else:\n model = fasterrcnn_resnet50_fpn(pretrained=False)\n # load_state_dict mutates the model in place and returns key-matching info, not the model, so don't reassign\n model.load_state_dict(torch.load(weights))\n\n return model\n\n def _draw_results(self, image: np.ndarray, boxes: np.ndarray, labels: np.ndarray, scores: np.ndarray) -> np.ndarray:\n \"\"\"Draw texts and rectangles to the image (BGR).\"\"\"\n\n for box, label, score in zip(boxes, labels, scores):\n if score < self._cfg.basic.conf:\n continue\n\n image = cv2.putText(\n image,\n COCO91[label],\n (round(box[0]), round(box[1])),\n fontFace=cv2.FONT_HERSHEY_DUPLEX,\n fontScale=1,\n color=self.COLORS[label],\n thickness=2,\n )\n\n image = cv2.rectangle(\n image,\n (round(box[0]), round(box[1])),\n (round(box[2]), round(box[3])),\n color=self.COLORS[label],\n thickness=2,\n )\n\n return image\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhangqx/movie_recommender
[ "93eddb89f7ac2a8358bbe5c91b26e7e2b4184c38" ]
[ "wang/trainModel.py" ]
[ "import os\n#\n#\n# % matplotlib inline\n# % config InlineBackend.figure_format = 'retina'\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport time\nimport datetime\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport pickle\nimport tensorflow as tf\n\n# from wang.buildModel import *\n\n# os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n#\n# from ml-1m import *\n\n\n# Read the data from local disk\ntitle_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig = pickle.load(open('preprocess.p', mode='rb'))\n\n\ndef save_params(params):\n \"\"\"\n Save parameters to file\n \"\"\"\n pickle.dump(params, open('params.p', 'wb'))\n\n\ndef load_params():\n \"\"\"\n Load parameters from file\n \"\"\"\n return pickle.load(open('params.p', mode='rb'))\n\n# Implementation\n# Implementation\n# Implementation\n\n\n#Dimension of the embedding matrices\nembed_dim = 32\n#Number of user IDs\nuid_max = max(features.take(0,1)) + 1 # 6040\n#Number of genders\ngender_max = max(features.take(2,1)) + 1 # 1 + 1 = 2\n#Number of age categories\nage_max = max(features.take(3,1)) + 1 # 6 + 1 = 7\n#Number of occupations\njob_max = max(features.take(4,1)) + 1# 20 + 1 = 21\n\n#Number of movie IDs\nmovie_id_max = max(features.take(1,1)) + 1 # 3952\n#Number of movie genres\nmovie_categories_max = max(genres2int.values()) + 1 # 18 + 1 = 19\n#Number of words in movie titles\nmovie_title_max = len(title_set) # 5216\n\n#Flag for summing the movie-genre embedding vectors; using mean to average was considered, but mean was not implemented\ncombiner = \"sum\"\n\n#Length of a movie title\nsentences_size = title_count # = 15\n#Sliding windows of the text convolution, spanning 2, 3, 4 and 5 words respectively\nwindow_sizes = {2, 3, 4, 5}\n#Number of text convolution kernels\nfilter_num = 8\n\n#Dictionary mapping movie ID to index; in the dataset movie IDs and row indices differ, e.g. the movie ID in row 5 is not necessarily 5\nmovieid2idx = {val[0]:i for i, val in enumerate(movies.values)}\n\n\n\n# Hyperparameters\n\n\n# Number of Epochs\nnum_epochs = 5\n# Batch Size\nbatch_size = 256\n\ndropout_keep = 0.5\n# Learning Rate\nlearning_rate = 0.0001\n# Show stats for every n number of batches\nshow_every_n_batches = 20\n\nsave_dir = './save'\n\n\n# Inputs\n\n\ndef get_inputs():\n uid = tf.placeholder(tf.int32, [None, 1], name=\"uid\")\n user_gender = tf.placeholder(tf.int32, [None, 1], name=\"user_gender\")\n user_age = tf.placeholder(tf.int32, [None, 1], name=\"user_age\")\n user_job = tf.placeholder(tf.int32, [None, 1], name=\"user_job\")\n\n movie_id = tf.placeholder(tf.int32, [None, 1], name=\"movie_id\")\n movie_categories = tf.placeholder(tf.int32, [None, 18], name=\"movie_categories\")\n movie_titles = tf.placeholder(tf.int32, [None, 15], name=\"movie_titles\")\n targets = tf.placeholder(tf.int32, [None, 1], name=\"targets\")\n LearningRate = tf.placeholder(tf.float32, name=\"LearningRate\")\n dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n return uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, LearningRate, dropout_keep_prob\n\n\n# Build the neural network\n# Build the neural network\n# Build the neural network\n\n# Define the user embedding matrices\ndef get_user_embedding(uid, user_gender, user_age, user_job):\n with tf.name_scope(\"user_embedding\"):\n uid_embed_matrix = tf.Variable(tf.random_uniform([uid_max, embed_dim], -1, 1), name=\"uid_embed_matrix\")\n uid_embed_layer = tf.nn.embedding_lookup(uid_embed_matrix, uid, name=\"uid_embed_layer\")\n\n gender_embed_matrix = tf.Variable(tf.random_uniform([gender_max, embed_dim // 2], -1, 1),\n name=\"gender_embed_matrix\")\n gender_embed_layer = tf.nn.embedding_lookup(gender_embed_matrix, user_gender, name=\"gender_embed_layer\")\n\n age_embed_matrix = tf.Variable(tf.random_uniform([age_max, embed_dim // 2], -1, 1), name=\"age_embed_matrix\")\n age_embed_layer = tf.nn.embedding_lookup(age_embed_matrix, user_age, name=\"age_embed_layer\")\n\n job_embed_matrix = tf.Variable(tf.random_uniform([job_max, embed_dim // 2], -1, 1), name=\"job_embed_matrix\")\n job_embed_layer = tf.nn.embedding_lookup(job_embed_matrix, user_job, name=\"job_embed_layer\")\n return uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer\n\n\n# Fully connect the user embedding layers together to produce the user features\ndef get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer):\n with tf.name_scope(\"user_fc\"):\n # First fully connected layer\n uid_fc_layer = tf.layers.dense(uid_embed_layer, embed_dim, name=\"uid_fc_layer\", activation=tf.nn.relu)\n gender_fc_layer = tf.layers.dense(gender_embed_layer, embed_dim, name=\"gender_fc_layer\", activation=tf.nn.relu)\n age_fc_layer = tf.layers.dense(age_embed_layer, embed_dim, name=\"age_fc_layer\", activation=tf.nn.relu)\n job_fc_layer = tf.layers.dense(job_embed_layer, embed_dim, name=\"job_fc_layer\", activation=tf.nn.relu)\n\n # Second fully connected layer\n user_combine_layer = tf.concat([uid_fc_layer, gender_fc_layer, age_fc_layer, job_fc_layer], 2) # (?, 1, 128)\n user_combine_layer = tf.contrib.layers.fully_connected(user_combine_layer, 200, tf.tanh) # (?, 1, 200)\n\n user_combine_layer_flat = tf.reshape(user_combine_layer, [-1, 200])\n return user_combine_layer, user_combine_layer_flat\n\n\n# Define the movie id embedding matrix\ndef get_movie_id_embed_layer(movie_id):\n with tf.name_scope(\"movie_embedding\"):\n movie_id_embed_matrix = tf.Variable(tf.random_uniform([movie_id_max, embed_dim], -1, 1), name = \"movie_id_embed_matrix\")\n movie_id_embed_layer = tf.nn.embedding_lookup(movie_id_embed_matrix, movie_id, name = \"movie_id_embed_layer\")\n return movie_id_embed_layer\n\n# Sum the multiple embedding vectors of the movie genres\ndef get_movie_categories_layers(movie_categories):\n with tf.name_scope(\"movie_categories_layers\"):\n movie_categories_embed_matrix = tf.Variable(tf.random_uniform([movie_categories_max, embed_dim], -1, 1), name = \"movie_categories_embed_matrix\")\n movie_categories_embed_layer = tf.nn.embedding_lookup(movie_categories_embed_matrix, movie_categories, name = \"movie_categories_embed_layer\")\n if combiner == \"sum\":\n movie_categories_embed_layer = tf.reduce_sum(movie_categories_embed_layer, axis=1, keep_dims=True)\n # elif combiner == \"mean\":\n\n return movie_categories_embed_layer\n\n\n\n# Text convolutional network for the movie title\n\ndef get_movie_cnn_layer(movie_titles):\n # Look up the embedding vector of each word of the movie title from the embedding matrix\n with tf.name_scope(\"movie_embedding\"):\n movie_title_embed_matrix = tf.Variable(tf.random_uniform([movie_title_max, embed_dim], -1, 1),\n name=\"movie_title_embed_matrix\")\n movie_title_embed_layer = tf.nn.embedding_lookup(movie_title_embed_matrix, movie_titles,\n name=\"movie_title_embed_layer\")\n movie_title_embed_layer_expand = tf.expand_dims(movie_title_embed_layer, -1)\n\n # Apply convolution and max pooling with kernels of different sizes to the text embedding layer\n pool_layer_lst = []\n for window_size in window_sizes:\n with tf.name_scope(\"movie_txt_conv_maxpool_{}\".format(window_size)):\n filter_weights = tf.Variable(tf.truncated_normal([window_size, embed_dim, 1, filter_num], stddev=0.1),\n name=\"filter_weights\")\n filter_bias = tf.Variable(tf.constant(0.1, shape=[filter_num]), name=\"filter_bias\")\n\n conv_layer = tf.nn.conv2d(movie_title_embed_layer_expand, filter_weights, [1, 1, 1, 1], padding=\"VALID\",\n name=\"conv_layer\")\n relu_layer = tf.nn.relu(tf.nn.bias_add(conv_layer, filter_bias), name=\"relu_layer\")\n\n maxpool_layer = tf.nn.max_pool(relu_layer, [1, sentences_size - window_size + 1, 1, 1], [1, 1, 1, 1],\n padding=\"VALID\", name=\"maxpool_layer\")\n pool_layer_lst.append(maxpool_layer)\n\n # Dropout layer\n with tf.name_scope(\"pool_dropout\"):\n pool_layer = tf.concat(pool_layer_lst, 3, name=\"pool_layer\")\n max_num = len(window_sizes) * filter_num\n pool_layer_flat = tf.reshape(pool_layer, [-1, 1, max_num], name=\"pool_layer_flat\")\n\n dropout_layer = tf.nn.dropout(pool_layer_flat, dropout_keep_prob, name=\"dropout_layer\")\n return pool_layer_flat, dropout_layer\n\n\n# Fully connect the movie layers together\n\ndef get_movie_feature_layer(movie_id_embed_layer, movie_categories_embed_layer, dropout_layer):\n with tf.name_scope(\"movie_fc\"):\n # First fully connected layer\n movie_id_fc_layer = tf.layers.dense(movie_id_embed_layer, embed_dim, name=\"movie_id_fc_layer\",\n activation=tf.nn.relu)\n movie_categories_fc_layer = tf.layers.dense(movie_categories_embed_layer, embed_dim,\n name=\"movie_categories_fc_layer\", activation=tf.nn.relu)\n\n # Second fully connected layer\n movie_combine_layer = tf.concat([movie_id_fc_layer, movie_categories_fc_layer, dropout_layer], 2) # (?, 1, 96)\n movie_combine_layer = tf.contrib.layers.fully_connected(movie_combine_layer, 200, tf.tanh) # (?, 1, 200)\n\n movie_combine_layer_flat = tf.reshape(movie_combine_layer, [-1, 200])\n return movie_combine_layer, movie_combine_layer_flat\n\n\n\n# Build the computation graph\n# Build the computation graph\n# Build the computation graph\n\ntf.reset_default_graph()\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n #Get the input placeholders\n uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob = get_inputs()\n #Get the 4 user embedding vectors\n uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer = get_user_embedding(uid, user_gender, user_age, user_job)\n #Get the user features\n user_combine_layer, user_combine_layer_flat = get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer)\n #Get the movie ID embedding vector\n movie_id_embed_layer = get_movie_id_embed_layer(movie_id)\n #Get the movie genre embedding vectors\n movie_categories_embed_layer = get_movie_categories_layers(movie_categories)\n #Get the movie title feature vector\n pool_layer_flat, dropout_layer = get_movie_cnn_layer(movie_titles)\n #Get the movie features\n movie_combine_layer, movie_combine_layer_flat = get_movie_feature_layer(movie_id_embed_layer,\n movie_categories_embed_layer,\n dropout_layer)\n #Compute the rating; note that the two alternative schemes give inference different names (name values), and when recommending later the tensor must be fetched by name\n with tf.name_scope(\"inference\"):\n #Scheme that takes the user and movie features as input and outputs a single value through a fully connected layer\n# inference_layer = tf.concat([user_combine_layer_flat, movie_combine_layer_flat], 1) #(?, 200)\n# inference = tf.layers.dense(inference_layer, 1,\n# kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n# kernel_regularizer=tf.nn.l2_loss, name=\"inference\")\n #Simply matrix-multiply the user and movie features to get a predicted rating\n# inference = tf.matmul(user_combine_layer_flat, tf.transpose(movie_combine_layer_flat))\n inference = tf.reduce_sum(user_combine_layer_flat * movie_combine_layer_flat, axis=1)\n inference = tf.expand_dims(inference, axis=1)\n\n with tf.name_scope(\"loss\"):\n # MSE loss, regressing the computed value to the rating\n cost = tf.losses.mean_squared_error(targets, inference )\n loss = tf.reduce_mean(cost)\n # Optimize the loss\n# train_op = tf.train.AdamOptimizer(lr).minimize(loss) #cost\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(lr)\n gradients = optimizer.compute_gradients(loss) #cost\n train_op = optimizer.apply_gradients(gradients, global_step=global_step)\n\n\n# Fetch batches\ndef get_batches(Xs, ys, batch_size):\n for start in range(0, len(Xs), batch_size):\n end = min(start + batch_size, len(Xs))\n yield Xs[start:end], ys[start:end]\n\n# Train the network\n# Train the network\n# Train the network\n\n\nlosses = {'train': [], 'test': []}\n\nwith tf.Session(graph=train_graph) as sess:\n # Collect data for TensorBoard\n # Keep track of gradient values and sparsity\n grad_summaries = []\n for g, v in gradients:\n if g is not None:\n grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name.replace(':', '_')), g)\n sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name.replace(':', '_')),\n tf.nn.zero_fraction(g))\n grad_summaries.append(grad_hist_summary)\n grad_summaries.append(sparsity_summary)\n grad_summaries_merged = tf.summary.merge(grad_summaries)\n\n # Output directory for models and summaries\n timestamp = str(int(time.time()))\n out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n print(\"Writing to {}\\n\".format(out_dir))\n\n # Summaries for loss and accuracy\n loss_summary = tf.summary.scalar(\"loss\", loss)\n\n # Train Summaries\n train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])\n train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n # Inference summaries\n inference_summary_op = tf.summary.merge([loss_summary])\n inference_summary_dir = os.path.join(out_dir, \"summaries\", \"inference\")\n inference_summary_writer = tf.summary.FileWriter(inference_summary_dir, sess.graph)\n\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for epoch_i in range(num_epochs):\n\n # Split the dataset into training and test sets; the random seed is not fixed\n train_X, test_X, train_y, test_y = train_test_split(features,\n targets_values,\n test_size=0.2,\n random_state=0)\n\n train_batches = get_batches(train_X, train_y, batch_size)\n test_batches = get_batches(test_X, test_y, batch_size)\n\n # Training iterations; save the training loss\n for batch_i in range(len(train_X) // batch_size):\n x, y = next(train_batches)\n\n categories = np.zeros([batch_size, 18])\n for i in range(batch_size):\n categories[i] = x.take(6, 1)[i]\n\n titles = np.zeros([batch_size, sentences_size])\n for i in range(batch_size):\n titles[i] = x.take(5, 1)[i]\n\n feed = {\n uid: np.reshape(x.take(0, 1), [batch_size, 1]),\n user_gender: np.reshape(x.take(2, 1), [batch_size, 1]),\n user_age: np.reshape(x.take(3, 1), [batch_size, 1]),\n user_job: np.reshape(x.take(4, 1), [batch_size, 1]),\n movie_id: np.reshape(x.take(1, 1), [batch_size, 1]),\n movie_categories: categories, # x.take(6,1)\n movie_titles: titles, # x.take(5,1)\n targets: np.reshape(y, [batch_size, 1]),\n dropout_keep_prob: dropout_keep, # dropout_keep\n lr: learning_rate}\n\n step, train_loss, summaries, _ = sess.run([global_step, loss, train_summary_op, train_op], feed) # cost\n losses['train'].append(train_loss)\n train_summary_writer.add_summary(summaries, step) #\n\n # Show every <show_every_n_batches> batches\n if (epoch_i * (len(train_X) // batch_size) + batch_i) % show_every_n_batches == 0:\n time_str = datetime.datetime.now().isoformat()\n print('{}: Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(\n time_str,\n epoch_i,\n batch_i,\n (len(train_X) // batch_size),\n train_loss))\n\n # Iterations over the test data\n for batch_i in range(len(test_X) // batch_size):\n x, y = next(test_batches)\n\n categories = np.zeros([batch_size, 18])\n for i in range(batch_size):\n categories[i] = x.take(6, 1)[i]\n\n titles = np.zeros([batch_size, sentences_size])\n for i in range(batch_size):\n titles[i] = x.take(5, 1)[i]\n\n feed = {\n uid: np.reshape(x.take(0, 1), [batch_size, 1]),\n user_gender: np.reshape(x.take(2, 1), [batch_size, 1]),\n user_age: np.reshape(x.take(3, 1), [batch_size, 1]),\n user_job: np.reshape(x.take(4, 1), [batch_size, 1]),\n movie_id: np.reshape(x.take(1, 1), [batch_size, 1]),\n movie_categories: categories, # x.take(6,1)\n movie_titles: titles, # x.take(5,1)\n targets: np.reshape(y, [batch_size, 1]),\n dropout_keep_prob: 1,\n lr: learning_rate}\n\n step, test_loss, summaries = sess.run([global_step, loss, inference_summary_op], feed) # cost\n\n # Save the test loss\n losses['test'].append(test_loss)\n inference_summary_writer.add_summary(summaries, step) #\n\n time_str = datetime.datetime.now().isoformat()\n if (epoch_i * (len(test_X) // batch_size) + batch_i) % show_every_n_batches == 0:\n print('{}: Epoch {:>3} Batch {:>4}/{} test_loss = {:.3f}'.format(\n time_str,\n epoch_i,\n batch_i,\n (len(test_X) // batch_size),\n test_loss))\n\n # Save Model\n saver.save(sess, save_dir) # , global_step=epoch_i\n print('Model Trained and Saved')\n\n# Save parameters\nsave_params((save_dir))\n\nload_dir = load_params()\nprint('load_dir: ', load_dir)\n\n# Plot the training loss\nplt.plot(losses['train'], label='Training loss')\nplt.legend()\n_ = plt.ylim()\n\n# Plot the test loss\nplt.plot(losses['test'], label='Test loss')\nplt.legend()\n_ = plt.ylim()\nplt.show()\n\n\n# Get tensors: use get_tensor_by_name() to fetch tensors from loaded_graph; the recommendation features later depend on them\n\ndef get_tensors(loaded_graph):\n\n uid = loaded_graph.get_tensor_by_name(\"uid:0\")\n user_gender = loaded_graph.get_tensor_by_name(\"user_gender:0\")\n user_age = loaded_graph.get_tensor_by_name(\"user_age:0\")\n user_job = loaded_graph.get_tensor_by_name(\"user_job:0\")\n movie_id = loaded_graph.get_tensor_by_name(\"movie_id:0\")\n movie_categories = loaded_graph.get_tensor_by_name(\"movie_categories:0\")\n movie_titles = loaded_graph.get_tensor_by_name(\"movie_titles:0\")\n targets = loaded_graph.get_tensor_by_name(\"targets:0\")\n dropout_keep_prob = loaded_graph.get_tensor_by_name(\"dropout_keep_prob:0\")\n lr = loaded_graph.get_tensor_by_name(\"LearningRate:0\")\n #The two alternative schemes for computing the predicted rating fetch the inference tensor by different names\n# inference = loaded_graph.get_tensor_by_name(\"inference/inference/BiasAdd:0\")\n inference = loaded_graph.get_tensor_by_name(\"inference/ExpandDims:0\") # previously MatMul:0; since the inference code was changed, this had to change as well. Thanks to user @清歌 for pointing out the problem\n movie_combine_layer_flat = loaded_graph.get_tensor_by_name(\"movie_fc/Reshape:0\")\n user_combine_layer_flat = loaded_graph.get_tensor_by_name(\"user_fc/Reshape:0\")\n return uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, inference, movie_combine_layer_flat, user_combine_layer_flat\n\n\n# Rate a specified user and movie; this is just a forward pass through the network that computes the predicted rating\ndef rating_movie(user_id_val, movie_id_val):\n loaded_graph = tf.Graph() #\n with tf.Session(graph=loaded_graph) as sess: #\n # Load saved model\n loader = tf.train.import_meta_graph(load_dir + '.meta')\n loader.restore(sess, load_dir)\n\n # Get Tensors from loaded model\n uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, inference, _, __ = get_tensors(\n loaded_graph) # loaded_graph\n\n categories = np.zeros([1, 18])\n categories[0] = movies.values[movieid2idx[movie_id_val]][2]\n\n titles = np.zeros([1, sentences_size])\n titles[0] = movies.values[movieid2idx[movie_id_val]][1]\n\n feed = {\n uid: np.reshape(users.values[user_id_val - 1][0], [1, 1]),\n user_gender: np.reshape(users.values[user_id_val - 1][1], [1, 1]),\n user_age: np.reshape(users.values[user_id_val - 1][2], [1, 1]),\n user_job: np.reshape(users.values[user_id_val - 1][3], [1, 1]),\n movie_id: np.reshape(movies.values[movieid2idx[movie_id_val]][0], [1, 1]),\n movie_categories: categories, # x.take(6,1)\n movie_titles: titles, # x.take(5,1)\n dropout_keep_prob: 1}\n\n # Get Prediction\n inference_val = sess.run([inference], feed)\n\n return (inference_val)\n\n# ?????????\n\n\nrating_movie(234, 1401)\n\n\n# Build the movie feature matrix: combine the trained movie features into a movie feature matrix and save it locally\n\nloaded_graph = tf.Graph() #\nmovie_matrics = []\nwith tf.Session(graph=loaded_graph) as sess: #\n # Load saved model\n loader = tf.train.import_meta_graph(load_dir + '.meta')\n loader.restore(sess, load_dir)\n\n # Get Tensors from loaded model\n uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, movie_combine_layer_flat, __ = get_tensors(loaded_graph) #loaded_graph\n\n for item in movies.values:\n categories = np.zeros([1, 18])\n categories[0] = item.take(2)\n\n titles = np.zeros([1, sentences_size])\n titles[0] = item.take(1)\n\n feed = {\n movie_id: np.reshape(item.take(0), [1, 1]),\n movie_categories: categories, #x.take(6,1)\n movie_titles: titles, #x.take(5,1)\n dropout_keep_prob: 1}\n\n movie_combine_layer_flat_val = sess.run([movie_combine_layer_flat], feed)\n movie_matrics.append(movie_combine_layer_flat_val)\n\npickle.dump((np.array(movie_matrics).reshape(-1, 200)), open('movie_matrics.p', 'wb'))\nmovie_matrics = pickle.load(open('movie_matrics.p', mode='rb'))\n\n# movie_matrics = pickle.load(open('movie_matrics.p', mode='rb'))\n\n\n\n\n#Build the user feature matrix: combine the trained user features into a user feature matrix and save it locally\n\nloaded_graph = tf.Graph() #\nusers_matrics = []\nwith tf.Session(graph=loaded_graph) as sess: #\n # Load saved model\n loader = tf.train.import_meta_graph(load_dir + '.meta')\n loader.restore(sess, load_dir)\n\n # Get Tensors from loaded model\n uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, __,user_combine_layer_flat = get_tensors(loaded_graph) #loaded_graph\n\n for item in users.values:\n\n feed = {\n uid: np.reshape(item.take(0), [1, 1]),\n user_gender: np.reshape(item.take(1), [1, 1]),\n user_age: np.reshape(item.take(2), [1, 1]),\n user_job: np.reshape(item.take(3), [1, 1]),\n dropout_keep_prob: 1}\n\n user_combine_layer_flat_val = sess.run([user_combine_layer_flat], feed)\n users_matrics.append(user_combine_layer_flat_val)\n\npickle.dump((np.array(users_matrics).reshape(-1, 200)), open('users_matrics.p', 'wb'))\n# users_matrics = pickle.load(open('users_matrics.p', mode='rb'))\n\n# users_matrics = pickle.load(open('users_matrics.p', mode='rb'))\n\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.concat", "tensorflow.nn.max_pool", "tensorflow.reduce_sum", "matplotlib.pyplot.plot", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.nn.conv2d", "tensorflow.Graph", "tensorflow.Variable", "numpy.reshape", "tensorflow.layers.dense", "tensorflow.train.import_meta_graph", "tensorflow.reset_default_graph", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.train.Saver", "numpy.zeros", "tensorflow.nn.dropout", "tensorflow.truncated_normal", "matplotlib.pyplot.ylim", "tensorflow.placeholder", "sklearn.model_selection.train_test_split", "tensorflow.global_variables_initializer", "matplotlib.pyplot.show", "numpy.array", "tensorflow.nn.embedding_lookup", "tensorflow.summary.merge", "tensorflow.nn.bias_add", "tensorflow.losses.mean_squared_error", "tensorflow.summary.FileWriter", "tensorflow.constant", "tensorflow.reduce_mean", "matplotlib.use", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.contrib.layers.fully_connected", "tensorflow.nn.zero_fraction", "tensorflow.random_uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
santamm/DeepNet
[ "fd05804200eb1bd62fb3a80a793b22794e4ec7d2" ]
[ "dnn_utils.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\n\n\ndef sigmoid(Z):\n \"\"\"\n Implements the sigmoid activation in numpy\n\n Arguments:\n Z -- numpy array of any shape\n\n Returns:\n A -- output of sigmoid(z), same shape as Z\n cache -- returns Z as well, useful during backpropagation\n \"\"\"\n\n A = 1/(1+np.exp(-Z))\n cache = Z\n\n return A, cache\n\ndef relu(Z):\n \"\"\"\n Implement the RELU function.\n\n Arguments:\n Z -- Output of the linear layer, of any shape\n\n Returns:\n A -- Post-activation parameter, of the same shape as Z\n cache -- a python dictionary containing \"A\" ; stored for computing the backward pass efficiently\n \"\"\"\n\n A = np.maximum(0,Z)\n\n assert(A.shape == Z.shape)\n\n cache = Z\n return A, cache\n\n\ndef relu_backward(dA, cache):\n \"\"\"\n Implement the backward propagation for a single RELU unit.\n\n Arguments:\n dA -- post-activation gradient, of any shape\n cache -- 'Z' where we store for computing backward propagation efficiently\n\n Returns:\n dZ -- Gradient of the cost with respect to Z\n \"\"\"\n\n Z = cache\n dZ = np.array(dA, copy=True) # just converting dz to a correct object.\n\n # When z <= 0, you should set dz to 0 as well.\n dZ[Z <= 0] = 0\n\n assert (dZ.shape == Z.shape)\n\n return dZ\n\ndef sigmoid_backward(dA, cache):\n \"\"\"\n Implement the backward propagation for a single SIGMOID unit.\n\n Arguments:\n dA -- post-activation gradient, of any shape\n cache -- 'Z' where we store for computing backward propagation efficiently\n\n Returns:\n dZ -- Gradient of the cost with respect to Z\n \"\"\"\n\n Z = cache\n\n s = 1/(1+np.exp(-Z))\n dZ = dA * s * (1-s)\n\n assert (dZ.shape == Z.shape)\n\n return dZ\n\n\ndef load_data(train_dataset, test_dataset):\n train_dataset = h5py.File(train_dataset, \"r\")\n\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File(test_dataset, \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\n\n\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n\n Returns:\n parameters -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n\n np.random.seed(1)\n\n W1 = np.random.randn(n_h, n_x)*0.01\n b1 = np.zeros((n_h, 1))\n W2 = np.random.randn(n_y, n_h)*0.01\n b2 = np.zeros((n_y, 1))\n\n assert(W1.shape == (n_h, n_x))\n assert(b1.shape == (n_h, 1))\n assert(W2.shape == (n_y, n_h))\n assert(b2.shape == (n_y, 1))\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n\n return parameters\n\n\ndef initialize_parameters_deep(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n\n Returns:\n parameters -- python dictionary containing your parameters 
\"W1\", \"b1\", ..., \"WL\", \"bL\":\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \"\"\"\n\n np.random.seed(1)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1]) #*0.01\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n\n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n\n\n return parameters\n\ndef linear_forward(A, W, b):\n \"\"\"\n Implement the linear part of a layer's forward propagation.\n\n Arguments:\n A -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n\n Returns:\n Z -- the input of the activation function, also called pre-activation parameter\n cache -- a python dictionary containing \"A\", \"W\" and \"b\" ; stored for computing the backward pass efficiently\n \"\"\"\n\n Z = W.dot(A) + b\n\n assert(Z.shape == (W.shape[0], A.shape[1]))\n cache = (A, W, b)\n\n return Z, cache\n\ndef linear_activation_forward(A_prev, W, b, activation):\n \"\"\"\n Implement the forward propagation for the LINEAR->ACTIVATION layer\n\n Arguments:\n A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\n Returns:\n A -- the output of the activation function, also called the post-activation value\n cache -- a python dictionary containing \"linear_cache\" and \"activation_cache\";\n stored for computing the backward pass efficiently\n \"\"\"\n\n if activation == \"sigmoid\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = sigmoid(Z)\n\n elif activation == \"relu\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = relu(Z)\n\n assert (A.shape == (W.shape[0], A_prev.shape[1]))\n cache = (linear_cache, activation_cache)\n\n return A, cache\n\ndef L_model_forward(X, parameters):\n \"\"\"\n Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation\n\n Arguments:\n X -- data, numpy array of shape (input size, number of examples)\n parameters -- output of initialize_parameters_deep()\n\n Returns:\n AL -- last post-activation value\n caches -- list of caches containing:\n every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)\n the cache of linear_sigmoid_forward() (there is one, indexed L-1)\n \"\"\"\n\n caches = []\n A = X\n L = len(parameters) // 2 # number of layers in the neural network\n\n # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n for l in range(1, L):\n A_prev = A\n A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation = \"relu\")\n caches.append(cache)\n\n # Implement LINEAR -> SIGMOID. 
Add \"cache\" to the \"caches\" list.\n AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation = \"sigmoid\")\n caches.append(cache)\n\n assert(AL.shape == (1,X.shape[1]))\n\n return AL, caches\n\ndef compute_cost(AL, Y):\n \"\"\"\n Implement the cost function defined by equation (7).\n\n Arguments:\n AL -- probability vector corresponding to your label predictions, shape (1, number of examples)\n Y -- true \"label\" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)\n\n Returns:\n cost -- cross-entropy cost\n \"\"\"\n\n m = Y.shape[1]\n\n # Compute loss from aL and y.\n cost = (1./m) * (-np.dot(Y,np.log(AL).T) - np.dot(1-Y, np.log(1-AL).T))\n\n cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).\n assert(cost.shape == ())\n\n return cost\n\ndef linear_backward(dZ, cache):\n \"\"\"\n Implement the linear portion of backward propagation for a single layer (layer l)\n\n Arguments:\n dZ -- Gradient of the cost with respect to the linear output (of current layer l)\n cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer\n\n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n dW = 1./m * np.dot(dZ,A_prev.T)\n db = 1./m * np.sum(dZ, axis = 1, keepdims = True)\n dA_prev = np.dot(W.T,dZ)\n\n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n\n return dA_prev, dW, db\n\ndef linear_activation_backward(dA, cache, activation):\n \"\"\"\n Implement the backward propagation for the LINEAR->ACTIVATION layer.\n\n Arguments:\n dA -- post-activation gradient for current layer l\n cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n linear_cache, activation_cache = cache\n\n if activation == \"relu\":\n dZ = relu_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n\n elif activation == \"sigmoid\":\n dZ = sigmoid_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n\n return dA_prev, dW, db\n\ndef L_model_backward(AL, Y, caches):\n \"\"\"\n Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group\n\n Arguments:\n AL -- probability vector, output of the forward propagation (L_model_forward())\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n caches -- list of caches containing:\n every cache of linear_activation_forward() with \"relu\" (there are (L-1) or them, indexes from 0 to L-2)\n the cache of linear_activation_forward() with \"sigmoid\" (there is one, index L-1)\n\n Returns:\n grads -- A dictionary with the gradients\n grads[\"dA\" + str(l)] = ...\n grads[\"dW\" + str(l)] = ...\n grads[\"db\" + str(l)] = 
...\n \"\"\"\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n\n # Initializing the backpropagation\n dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n\n # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: \"AL, Y, caches\". Outputs: \"grads[\"dAL\"], grads[\"dWL\"], grads[\"dbL\"]\n current_cache = caches[L-1]\n grads[\"dA\" + str(L-1)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dAL, current_cache, activation = \"sigmoid\")\n\n for l in reversed(range(L-1)):\n # lth layer: (RELU -> LINEAR) gradients.\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 1)], current_cache, activation = \"relu\")\n grads[\"dA\" + str(l)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n\n return grads\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n\n Arguments:\n parameters -- python dictionary containing your parameters\n grads -- python dictionary containing your gradients, output of L_model_backward\n\n Returns:\n parameters -- python dictionary containing your updated parameters\n parameters[\"W\" + str(l)] = ...\n parameters[\"b\" + str(l)] = ...\n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L):\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * grads[\"db\" + str(l+1)]\n\n return parameters\n\ndef predict(X, y, parameters):\n \"\"\"\n This function is used to predict the results of a L-layer neural network.\n\n Arguments:\n X -- data set of examples you would like to label\n parameters -- parameters of the trained model\n\n Returns:\n p -- predictions for the given dataset X\n Accuracy -- accuracy\n \"\"\"\n\n m = X.shape[1]\n n = len(parameters) // 2 # number of layers in the neural network\n p = np.zeros((1,m))\n\n # Forward propagation\n probas, caches = L_model_forward(X, parameters)\n\n\n # convert probas to 0/1 predictions\n for i in range(0, probas.shape[1]):\n if probas[0,i] > 0.5:\n p[0,i] = 1\n else:\n p[0,i] = 0\n\n #print results\n #print (\"predictions: \" + str(p))\n #print (\"true labels: \" + str(y))\n accuracy = np.sum((p == y)/m)\n #print(\"Accuracy: \" + str(accuracy))\n\n return p, accuracy\n\ndef print_mislabeled_images(classes, X, y, p):\n \"\"\"\n Plots images where predictions and truth were different.\n X -- dataset\n y -- true labels\n p -- predictions\n \"\"\"\n a = p + y\n mislabeled_indices = np.asarray(np.where(a == 1))\n plt.rcParams['figure.figsize'] = (40.0, 40.0) # set default size of plots\n num_images = len(mislabeled_indices[0])\n plt.figure(figsize=(10,10))\n for i in range(num_images):\n index = mislabeled_indices[1][i]\n plt.subplot(2, num_images, i + 1)\n plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')\n plt.axis('off')\n plt.title(\"Prediction: \" + classes[int(p[0,index])].decode(\"utf-8\") + \" \\n Class: \" + classes[y[0,index]].decode(\"utf-8\"))\n plt.show()\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.maximum", "numpy.sqrt", "numpy.random.seed", "numpy.squeeze", "numpy.divide", "matplotlib.pyplot.subplot", "numpy.random.randn", "numpy.where", "matplotlib.pyplot.axis", "numpy.exp", "numpy.array", "numpy.zeros", "numpy.sum", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
akusumoto/sample_dash
[ "431d4b8a0e524ed8eb6f616594afbb67f5dcd428" ]
[ "extract.py" ]
[ "import numpy as np\r\n\r\n\r\ndef random(texture, num):\r\n# idx = np.random.choice(texture.shape[0], num, replace=False) # When drawing random indices without allowing duplicates (but this tends to raise an error when the number of samples is small)\r\n idx = np.random.choice(texture.shape[0], num) # When drawing random indices while allowing duplicates (stable even when the number of samples is small)\r\n return texture[idx]\r\n\r\n\r\ndef stat(texture, num):\r\n pass\r\n\r\n\r\ndef hybrid(texture, num):\r\n pass\r\n\r\n\r\nmethod = {'random': random, 'STAT': stat, 'HybridIA': hybrid}\r\n" ]
[ [ "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
paulc/streamlit-hello
[ "f41623efe049db982ac63af725ee2316477360e3" ]
[ "t1.py" ]
[ "\nimport streamlit as st\nimport pandas as pd\n\nst.write(\"Here's our first attempt at using data to create a table:\")\nst.write(pd.DataFrame({\n 'first column': [1, 2, 3, 4],\n 'second column': [10, 20, 30, 40]\n}))\n\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
a8/discovergy
[ "7766a6eb74e8c3cf9b09dfdac21d79b31f5922e5" ]
[ "src/discovergy/utils.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\n\nDiscovergy shared helper code\n\n\"\"\"\n__author__ = \"Frank Becker <[email protected]>\"\n__copyright__ = \"Frank Becker\"\n__license__ = \"mit\"\n\nimport gzip\nimport json\nimport os\nimport re\nimport sys\n\nfrom contextlib import ContextDecorator\nfrom pathlib import Path\nfrom timeit import default_timer\nfrom typing import Any, Callable, Dict, List, NamedTuple, Optional, Union\n\nimport pandas as pd # type: ignore\nimport pystore\n\nfrom box import Box # type: ignore\nfrom loguru import logger as log\nfrom tenacity import _utils # type: ignore\n\n\nclass TimeStampedValue(NamedTuple):\n timestamp: float\n value: Any\n\n\nclass ValueUnit(NamedTuple):\n value: Union[float, int]\n unit: str\n\n\nclass measure_duration(ContextDecorator):\n \"\"\"A context manager that measures time from enter to exit.\"\"\"\n\n def __enter__(self):\n self.start = default_timer()\n return self\n\n def __exit__(self, *exc):\n self.duration = default_timer() - self.start\n return False\n\n\ndef start_logging(config: Box) -> None:\n \"\"\"Start console and file logging\"\"\"\n log_dir = Path(config.file_location.log_dir).expanduser()\n if not log_dir.is_dir():\n sys.stderr.write(f\"Could not find the log dir {log_dir}. Creating it ...\\n\")\n os.makedirs(log_dir.as_posix())\n log_config = {\n \"handlers\": [\n {\n \"sink\": sys.stderr,\n \"format\": \"{time:YYYY-MM-DD HH:mm:ss} | <level>{level}</level> | {message}\",\n \"colorize\": True,\n \"level\": \"DEBUG\",\n \"backtrace\": True,\n },\n {\n \"sink\": log_dir / \"discovergy_{time}.log\",\n \"rotation\": \"1 day\",\n \"compression\": \"gz\",\n \"format\": \"{time:YYYY-MM-DDTHH:mm:ss} | {level} | {message}\",\n \"backtrace\": True,\n \"serialize\": False,\n },\n ],\n \"extra\": {\"user\": \"someone\"},\n }\n log.configure(**log_config) # type: ignore\n\n\ndef before_log(logger: Any, log_level: str) -> Callable:\n \"\"\"Before call strategy that logs to some logger the attempt.\"\"\"\n\n def log_it(retry_state):\n logger = getattr(log, log_level)\n logger(\n f\"Starting call to '{_utils.get_callback_name(retry_state.fn)}', \"\n f\"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.\"\n )\n\n return log_it\n\n\ndef split_df_by_month(*, df) -> List[pd.DataFrame]:\n \"\"\"Return data frames split by month.\"\"\"\n data_frames = []\n intervals = sorted(set([(e.year, e.month) for e in df.index.unique()]))\n if len(intervals) == 1:\n # One month only, early return\n data_frames.append(df)\n return data_frames\n date_range = pd.date_range(\n \"{}-{:02d}\".format(intervals[0][0], intervals[0][1]),\n periods=len(intervals),\n freq=\"M\",\n tz=\"UTC\",\n )\n prev_month = date_range[0]\n data_frames.append(df[df.index <= date_range[0]])\n for date in date_range[1:]:\n df_per_month = df[(prev_month < df.index) & (df.index <= date)]\n data_frames.append(df_per_month)\n prev_month = date\n return data_frames\n\n\ndef split_df_by_day(*, df) -> List[pd.DataFrame]:\n \"\"\"Return data frames split by day.\"\"\"\n data_frames = []\n intervals = sorted(set([(e.year, e.month, e.day) for e in df.index.unique()]))\n if len(intervals) == 1:\n # One day only, early return\n data_frames.append(df)\n return data_frames\n date_range = pd.date_range(\n \"{}-{:02d}-{:02d}\".format(intervals[0][0], intervals[0][1], intervals[0][2]),\n periods=len(intervals),\n freq=\"D\",\n # tz=\"UTC\",\n )\n # date_range starts at 0h 00m 00s\n prev_day = date_range[0]\n for date in date_range[1:]:\n df_per_day = df[(prev_day < df.index) 
& (df.index <= date)]\n data_frames.append(df_per_day)\n prev_day = date\n return data_frames\n\n\ndef str2bool(value: str) -> bool:\n \"\"\"Return the boolean value of the value given as a str.\"\"\"\n if value.lower() in [\"true\", \"1\", \"t\", \"y\", \"yes\", \"yeah\"]:\n return True\n\n return False\n\n\ndef verify_file_permissions(path: Path) -> bool:\n \"\"\"Return (True|False) if the file system access rights are set to current user only.\"\"\"\n if path.is_file:\n file_stat = path.stat()\n if file_stat.st_uid != os.getuid():\n return False\n\n if re.match(r\"0o*100[0-6]00\", oct(file_stat.st_mode)):\n return True\n try:\n os.chmod(path, 0o600)\n except OSError:\n log.error(\n f\"Tried to change the permissions of {path} but failed. \"\n \"Please fix the permissions to max. 0600 yourself!\"\n )\n return False\n else:\n log.warning(\n \"The file {} didn't have secure file permissions {}. \"\n \"The permissions were changed to -rw------- for you. \".format(\n path, oct(file_stat.st_mode)\n )\n )\n return True\n return False\n\n\ndef write_data(*, data: List[Dict], file_path: Path) -> None:\n \"\"\"Write the gz-iped raw data to file_path.\"\"\"\n dst_dir = file_path.parent\n if not dst_dir.expanduser().is_dir():\n log.warning(f\"Creating the data destination directory {dst_dir}.\")\n os.makedirs(dst_dir.expanduser().as_posix())\n\n with gzip.open(file_path.expanduser().as_posix(), \"wb\") as fh:\n fh.write(json.dumps(data).encode(\"utf-8\"))\n\n\ndef write_data_frames(\n *, config: Box, data_frames: List[pd.DataFrame], name: str\n) -> None:\n \"\"\"Create or update the data as a Pandas DataFrame in hdf5 file.\"\"\"\n if not data_frames:\n log.debug(f\"Did not receive any data for {name}.\")\n return\n for df in data_frames:\n if not len(df):\n log.debug(f\"Did not find any data in {df}. Skipping...\")\n continue\n first_ts = min(df.index)\n file_name = f\"{name}_{first_ts.year}-{first_ts.month:02d}.hdf5\"\n file_path = Path(config.file_location.data_dir) / Path(file_name)\n file_path = file_path.expanduser()\n if file_path.is_file():\n df_prev = pd.read_hdf(file_path, name)\n df = df.combine_first(df_prev)\n df.to_hdf(file_path, key=name)\n\n\ndef write_data_to_pystore(\n *,\n config: Box,\n data_frames: List[pd.DataFrame],\n name: str,\n metadata: Optional[Dict] = None,\n) -> None:\n \"\"\"Create or update the pandas.DataFrames as Pystore collection.items.\n\n The DataFrames must contain time series data with the index of type datetime64.\n The lowest index (min(index)) will be converted as YYYY-MM string and set\n as the item name.\n Each dataframe must only contain data of one day! This function doesn't check max(df.index).\n\n Note, PyStore will make sure there is a unique index:\n\n ~/.../site-packages/pystore/collection.py in append(self, item, data, npartitions, epochdate, threaded, reload_items, **kwargs)\n 183 # combined = current.data.append(new)\n 184 combined = dd.concat([current.data, new]).drop_duplicates(keep=\"last\")\n\n PyStore:\n https://medium.com/@aroussi/fast-data-store-for-pandas-time-series-data-using-pystore-89d9caeef4e2\n \"\"\"\n\n if metadata is None:\n metadata = {}\n if not data_frames:\n log.debug(f\"Did not receive any data for {name}.\")\n return\n store = pystore.store(\"discovergy\")\n collection = store.collection(name)\n item_names = collection.list_items()\n for df in data_frames:\n if not len(df):\n log.debug(f\"Did not find any data in {df}. 
Skipping...\")\n continue\n first_ts = min(df.index)\n item_name = f\"{first_ts.year}-{first_ts.month:02d}\"\n if item_name in item_names:\n # FIXME (a8): Create one partition per day. There must be a better way. Issue is that\n # pandas loads the full pd.DataFrame into memory. That requires memory.\n npartitions = first_ts.day\n log.debug(f\"Appended to {item_name} {first_ts}.\")\n collection.append(item_name, df, npartitions=npartitions)\n else:\n log.debug(\"Created new Dask DF.\")\n collection.write(item_name, df, metadata=metadata, overwrite=False)\n" ]
[ [ "pandas.read_hdf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
hssinejihene/deepchem-1.1.0
[ "6efbe6b638b77bb2685ac617f4d6649755c01335", "6efbe6b638b77bb2685ac617f4d6649755c01335" ]
[ "deepchem/nn/model_ops.py", "examples/benchmark2.py" ]
[ "\"\"\"Ops for graph construction.\n\nLarge amounts of code borrowed from Keras. Will try to incorporate into\nDeepChem properly.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nimport traceback\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.training import moving_averages\nfrom collections import defaultdict\n# TODO(rbharath): What does this line do?\npy_all = all\n\n# TODO(rbharath): REMOVE GLOBAL VARS! BREAKS DEEPCHEM STYLE! \n_UID_PREFIXES = defaultdict(int)\n# This dictionary holds a mapping {graph: learning_phase}.\n# A learning phase is a bool tensor used to run Keras models in\n# either train mode (learning_phase == 1) or test mode (learning_phase == 0).\n_GRAPH_LEARNING_PHASES = {}\n\n\ndef _to_tensor(x, dtype):\n x = tf.convert_to_tensor(x)\n if x.dtype != dtype:\n x = tf.cast(x, dtype)\n return x\n\n\ndef learning_phase():\n \"\"\"Returns the learning phase flag.\n\n The learning phase flag is a bool tensor (0 = test, 1 = train)\n to be passed as input to any Keras function\n that uses a different behavior at train time and test time.\n \"\"\"\n graph = tf.get_default_graph()\n if graph not in _GRAPH_LEARNING_PHASES:\n phase = tf.placeholder(dtype='bool', name='keras_learning_phase')\n _GRAPH_LEARNING_PHASES[graph] = phase\n return _GRAPH_LEARNING_PHASES[graph]\n\n\ndef in_train_phase(x, alt):\n \"\"\"Selects `x` in train phase, and `alt` otherwise.\n Note that `alt` should have the *same shape* as `x`.\n\n Returns\n -------\n Either `x` or `alt` based on `K.learning_phase`.\n \"\"\"\n if learning_phase() is 1:\n return x\n elif learning_phase() is 0:\n return alt\n # else: assume learning phase is a placeholder tensor.\n x = switch(learning_phase(), x, alt)\n x._uses_learning_phase = True\n return x\n\n\ndef switch(condition, then_expression, else_expression):\n \"\"\"Switches between two operations\n depending on a scalar value (`int` or `bool`).\n Note that both `then_expression` and `else_expression`\n should be symbolic tensors of the *same shape*.\n\n Parameters\n ----------\n condition: scalar tensor.\n then_expression: either a tensor, or a callable that returns a tensor.\n else_expression: either a tensor, or a callable that returns a tensor.\n\n Returns\n -------\n The selected tensor.\n \"\"\"\n if condition.dtype != tf.bool:\n condition = tf.cast(condition, 'bool')\n if not callable(then_expression):\n\n def then_expression_fn():\n return then_expression\n else:\n then_expression_fn = then_expression\n if not callable(else_expression):\n\n def else_expression_fn():\n return else_expression\n else:\n else_expression_fn = else_expression\n x = tf.cond(condition, then_expression_fn, else_expression_fn)\n return x\n\n\ndef normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):\n \"\"\"Computes mean and std for batch then apply batch_normalization on batch.\n\n Returns\n -------\n A tuple length of 3, (normalized_tensor, mean, variance).\n \"\"\"\n mean, var = tf.nn.moments(\n x, reduction_axes, shift=None, name=None, keep_dims=False)\n if sorted(reduction_axes) == range(ndim(x))[:-1]:\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)\n else:\n # need broadcasting\n target_shape = []\n for axis in range(get_ndim(x)):\n if axis in reduction_axes:\n target_shape.append(1)\n else:\n target_shape.append(tf.shape(x)[axis])\n target_shape = stack(target_shape)\n\n broadcast_mean = tf.reshape(mean, target_shape)\n 
broadcast_var = tf.reshape(var, target_shape)\n broadcast_gamma = tf.reshape(gamma, target_shape)\n broadcast_beta = tf.reshape(beta, target_shape)\n normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var,\n broadcast_beta, broadcast_gamma, epsilon)\n return normed, mean, var\n\n\ndef ones(shape, dtype=None, name=None):\n \"\"\"Instantiates an all-ones tensor variable and returns it.\n\n Parameters\n ----------\n shape: Tuple of integers, shape of returned Keras variable.\n dtype: Tensorflow dtype \n name: String, name of returned Keras variable.\n\n Returns\n -------\n A Keras variable, filled with `1.0`.\n \"\"\"\n if dtype is None:\n dtype = tf.float32\n shape = tuple(map(int, shape))\n return tf.Variable(\n tf.constant_initializer(1., dtype=dtype)(shape), dtype, name)\n\n\ndef cast_to_floatx(x):\n \"\"\"Cast a Numpy array to the default Keras float type.\n\n Parameters\n ----------\n x: Numpy array.\n\n Returns\n -------\n The same Numpy array, cast to its new type.\n \"\"\"\n return np.asarray(x, dtype=tf.float32)\n\n\ndef moving_average_update(variable, value, momentum):\n try:\n return moving_averages.assign_moving_average(\n variable, value, momentum, zero_debias=False)\n except TypeError:\n return moving_averages.assign_moving_average(variable, value, momentum)\n\n\ndef int_shape(x):\n \"\"\"Returns the shape of a Keras tensor or a Keras variable as a tuple of\n integers or None entries.\n\n Arguments\n ---------\n x: Tensor or variable.\n\n Returns\n -------\n A tuple of integers (or None entries).\n \"\"\"\n shape = x.get_shape()\n return tuple([i.__int__() for i in shape])\n\n\ndef get_uid(prefix=''):\n \"\"\"Provides a unique UID given a string prefix.\n\n Parameters\n ----------\n prefix: string.\n\n Returns\n -------\n An integer.\n \"\"\"\n _UID_PREFIXES[prefix] += 1\n return _UID_PREFIXES[prefix]\n\n\ndef concatenate(tensors, axis=-1):\n \"\"\"Concatenates a list of tensors alongside the specified axis.\n\n Returns\n -------\n A tensor.\n \"\"\"\n if axis < 0:\n dims = get_ndim(tensors[0])\n if dims:\n axis = axis % dims\n else:\n axis = 0\n\n try:\n return tf.concat_v2([x for x in tensors], axis)\n except AttributeError:\n return tf.concat(axis=axis, values=[x for x in tensors])\n\n\ndef _normalize_axis(axis, ndim):\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list):\n for i, a in enumerate(axis):\n if a is not None and a < 0:\n axis[i] = a % ndim\n else:\n if axis is not None and axis < 0:\n axis = axis % ndim\n return axis\n\n\ndef mean(x, axis=None, keepdims=False):\n \"\"\"Mean of a tensor, alongside the specified axis.\n\n Parameters\n ----------\n x: A tensor or variable.\n axis: A list of integer. Axes to compute the mean.\n keepdims: A boolean, whether to keep the dimensions or not.\n If keepdims is False, the rank of the tensor is reduced\n by 1 for each entry in axis. If keep_dims is True,\n the reduced dimensions are retained with length 1.\n\n Returns\n -------\n A tensor with the mean of elements of x.\n \"\"\"\n axis = _normalize_axis(axis, get_ndim(x))\n if x.dtype.base_dtype == tf.bool:\n x = tf.cast(x, tf.float32)\n return tf.reduce_mean(x, axis=axis, keep_dims=keepdims)\n\n\ndef dot(x, y):\n \"\"\"Multiplies 2 tensors (and/or variables) and returns a *tensor*.\n When attempting to multiply a ND tensor\n with a ND tensor, it reproduces the Theano behavior.\n (e.g. 
(2, 3).(4, 3, 5) = (2, 4, 5))\n\n Parameters\n ----------\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns\n -------\n A tensor, dot product of x and y.\n \"\"\"\n if get_ndim(x) is not None and (get_ndim(x) > 2 or get_ndim(y) > 2):\n x_shape = []\n for i, s in zip(int_shape(x), tf.unstack(tf.shape(x))):\n if i is not None:\n x_shape.append(i)\n else:\n x_shape.append(s)\n x_shape = tuple(x_shape)\n y_shape = []\n for i, s in zip(int_shape(y), tf.unstack(tf.shape(y))):\n if i is not None:\n y_shape.append(i)\n else:\n y_shape.append(s)\n y_shape = tuple(y_shape)\n y_permute_dim = list(range(get_ndim(y)))\n y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim\n xt = tf.reshape(x, [-1, x_shape[-1]])\n yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])\n return tf.reshape(\n tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])\n out = tf.matmul(x, y)\n return out\n\n\ndef get_ndim(x):\n \"\"\"Returns the number of axes in a tensor, as an integer.\n\n Parameters\n ----------\n x: Tensor or variable.\n\n Returns\n -------\n Integer (scalar), number of axes.\n \"\"\"\n dims = x.get_shape()._dims\n if dims is not None:\n return len(dims)\n return None\n\n\ndef get_dtype(x):\n \"\"\"Returns the dtype of a Keras tensor or variable, as a string.\n\n Parameters\n ----------\n x: Tensor or variable.\n\n Returns\n -------\n String, dtype of `x`.\n \"\"\"\n return x.dtype.name\n\n\ndef clip(x, min_value, max_value):\n \"\"\"Element-wise value clipping.\n\n Returns\n -------\n A tensor.\n \"\"\"\n if max_value is not None and max_value < min_value:\n max_value = min_value\n min_value = _to_tensor(min_value, x.dtype.base_dtype)\n max_value = _to_tensor(max_value, x.dtype.base_dtype)\n return tf.clip_by_value(x, min_value, max_value)\n\n\ndef epsilon():\n \"\"\"Returns the value of the fuzz\n factor used in numeric expressions.\n\n Returns\n -------\n A float.\n \"\"\"\n return 1e-7\n\n\ndef random_uniform_variable(shape,\n low,\n high,\n dtype=tf.float32,\n name=None,\n seed=None):\n \"\"\"Instantiates an variable filled with\n samples drawn from a uniform distribution and returns it.\n\n Parameters\n ----------\n shape: Tuple of integers, shape of returned variable.\n low: Float, lower boundary of the output inteval.\n high: Float, upper boundary of the output interval.\n dtype: Tensorflow dtype\n name: String, name of returned variable.\n seed: Integer, random seed.\n\n Returns\n -------\n A tf.Variable, filled with drawn samples.\n \"\"\"\n shape = tuple(map(int, shape))\n if seed is None:\n # ensure that randomness is conditioned by the Numpy RNG\n seed = np.random.randint(10e8)\n value = tf.random_uniform_initializer(\n low, high, dtype=dtype, seed=seed)(shape)\n return tf.Variable(value, dtype=dtype, name=name)\n\n\ndef random_normal_variable(shape,\n mean,\n scale,\n dtype=tf.float32,\n name=None,\n seed=None):\n \"\"\"Instantiates an Keras variable filled with\n samples drawn from a normal distribution and returns it.\n\n Parameters\n ----------\n shape: Tuple of integers, shape of returned Keras variable.\n mean: Float, mean of the normal distribution.\n scale: Float, standard deviation of the normal distribution.\n dtype: Tensorflow dtype\n name: String, name of returned Keras variable.\n seed: Integer, random seed.\n\n Returns\n -------\n A tf.Variable, filled with drawn samples.\n \"\"\"\n shape = tuple(map(int, shape))\n if seed is None:\n # ensure that randomness is conditioned by the Numpy RNG\n seed = np.random.randint(10e8)\n value = 
tf.random_normal_initializer(\n mean, scale, dtype=dtype, seed=seed)(shape)\n return tf.Variable(value, dtype=dtype, name=name)\n\n\ndef max(x, axis=None, keepdims=False):\n \"\"\"Maximum value in a tensor.\n\n Parameters\n ----------\n x: A tensor or variable.\n axis: An integer, the axis to find maximum values.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n Returns\n -------\n A tensor with maximum values of `x`.\n \"\"\"\n axis = _normalize_axis(axis, get_ndim(x))\n return tf.reduce_max(x, axis=axis, keep_dims=keepdims)\n\n\ndef l2_normalize(x, axis):\n \"\"\"Normalizes a tensor wrt the L2 norm alongside the specified axis.\n\n Parameters\n ----------\n x: input tensor.\n axis: axis along which to perform normalization.\n\n Returns\n -------\n A tensor.\n \"\"\"\n if axis < 0:\n axis = axis % len(x.get_shape())\n return tf.nn.l2_normalize(x, dim=axis)\n\n\ndef categorical_crossentropy(output, target, from_logits=False):\n \"\"\"Categorical crossentropy between an output tensor\n and a target tensor, where the target is a tensor of the same\n shape as the output.\n\n # TODO(rbharath): Should probably swap this over to tf mode.\n \"\"\"\n # Note: tf.nn.softmax_cross_entropy_with_logits\n # expects logits, Keras expects probabilities.\n if not from_logits:\n # scale preds so that the class probas of each sample sum to 1\n output /= tf.reduce_sum(\n output, axis=len(output.get_shape()) - 1, keep_dims=True)\n # manual computation of crossentropy\n epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)\n output = tf.clip_by_value(output, epsilon, 1. - epsilon)\n return -tf.reduce_sum(\n target * tf.log(output), axis=len(output.get_shape()) - 1)\n else:\n try:\n return tf.nn.softmax_cross_entropy_with_logits(\n labels=target, logits=output)\n except TypeError:\n return tf.nn.softmax_cross_entropy_with_logits(\n logits=output, labels=target)\n\n\ndef sparse_categorical_crossentropy(output, target, from_logits=False):\n \"\"\"Categorical crossentropy between an output tensor\n and a target tensor, where the target is an integer tensor.\n \"\"\"\n # Note: tf.nn.softmax_cross_entropy_with_logits\n # expects logits, Keras expects probabilities.\n if not from_logits:\n epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)\n output = tf.clip_by_value(output, epsilon, 1 - epsilon)\n output = tf.log(output)\n\n output_shape = output.get_shape()\n targets = cast(flatten(target), 'int64')\n logits = tf.reshape(output, [-1, int(output_shape[-1])])\n try:\n res = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=targets, logits=logits)\n except TypeError:\n res = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=targets)\n if len(output_shape) == 3:\n # if our output includes timesteps we need to reshape\n return tf.reshape(res, tf.shape(output)[:-1])\n else:\n return res\n\n\ndef binary_crossentropy(output, target, from_logits=False):\n \"\"\"Binary crossentropy between an output tensor and a target tensor.\n\n # Arguments\n output: A tensor.\n target: A tensor with the same shape as `output`.\n from_logits: Whether `output` is expected to be a logits tensor.\n By default, we consider that `output`\n encodes a probability distribution.\n\n # Returns\n A tensor.\n \"\"\"\n # Note: tf.nn.softmax_cross_entropy_with_logits\n # expects logits, Keras expects probabilities.\n if not from_logits:\n # transform back to 
logits\n epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)\n output = tf.clip_by_value(output, epsilon, 1 - epsilon)\n output = tf.log(output / (1 - output))\n try:\n return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)\n except TypeError:\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=target)\n\n\ndef sum(x, axis=None, keepdims=False):\n \"\"\"Sum of the values in a tensor, alongside the specified axis.\n\n Parameters\n ----------\n x: A tensor or variable.\n axis: An integer, the axis to sum over.\n keepdims: A boolean, whether to keep the dimensions or not.\n If keepdims is False, the rank of the tensor is reduced\n by 1. If keepdims is True,\n the reduced dimension is retained with length 1.\n\n Returns\n -------\n A tensor with sum of x.\n \"\"\"\n axis = _normalize_axis(axis, get_ndim(x))\n return tf.reduce_sum(x, axis=axis, keep_dims=keepdims)\n\n\n# TODO(rbharath): Need to rename this. This makes a variable, not just creates\n# a tensor. Confusing with tf.zeros...\ndef zeros(shape, dtype=tf.float32, name=None):\n \"\"\"Instantiates an all-zeros variable and returns it.\n\n Parameters\n ----------\n shape: Tuple of integers, shape of returned Keras variable\n dtype: Tensorflow dtype \n name: String, name of returned Keras variable\n\n Returns\n -------\n A variable (including Keras metadata), filled with `0.0`.\n \"\"\"\n shape = tuple(map(int, shape))\n return tf.Variable(\n tf.constant_initializer(0., dtype=dtype)(shape), dtype, name)\n\n\ndef cosine_distances(test, support):\n \"\"\"Computes pairwise cosine distances between provided tensors\n\n Parameters\n ----------\n test: tf.Tensor\n Of shape (n_test, n_feat)\n support: tf.Tensor\n Of shape (n_support, n_feat)\n\n Returns\n -------\n tf.Tensor:\n Of shape (n_test, n_support)\n \"\"\"\n rnorm_test = tf.rsqrt(\n tf.reduce_sum(tf.square(test), 1, keep_dims=True)) + 1e-7\n rnorm_support = tf.rsqrt(\n tf.reduce_sum(tf.square(support), 1, keep_dims=True)) + 1e-7\n test_normalized = test * rnorm_test\n support_normalized = support * rnorm_support\n\n # Transpose for mul\n support_normalized_t = tf.transpose(support_normalized, perm=[1, 0])\n g = tf.matmul(test_normalized, support_normalized_t) # Gram matrix\n return g\n\n\ndef elu(x, alpha=1.):\n \"\"\"Exponential linear unit.\n\n Parameters\n ----------\n x: A tensor or variable to compute the activation function for.\n alpha: A scalar, slope of positive section.\n\n Returns\n -------\n A tensor.\n \"\"\"\n res = tf.nn.elu(x)\n if alpha == 1:\n return res\n else:\n return tf.where(x > 0, res, alpha * res)\n\n\ndef relu(x, alpha=0., max_value=None):\n \"\"\"Rectified linear unit.\n With default values, it returns element-wise `max(x, 0)`.\n\n Parameters\n ----------\n x: A tensor or variable.\n alpha: A scalar, slope of negative section (default=`0.`).\n max_value: Saturation threshold.\n\n Returns\n -------\n A tensor.\n \"\"\"\n if alpha != 0.:\n negative_part = tf.nn.relu(-x)\n x = tf.nn.relu(x)\n if max_value is not None:\n max_value = _to_tensor(max_value, x.dtype.base_dtype)\n zero = _to_tensor(0., x.dtype.base_dtype)\n x = tf.clip_by_value(x, zero, max_value)\n if alpha != 0.:\n alpha = _to_tensor(alpha, x.dtype.base_dtype)\n x -= alpha * negative_part\n return x\n\n\ndef hard_sigmoid(x):\n \"\"\"Segment-wise linear approximation of sigmoid.\n Faster than sigmoid.\n Returns 0. if x < -2.5, 1. 
if x > 2.5.\n In -2.5 <= x <= 2.5, returns 0.2 * x + 0.5.\n\n Parameters\n ----------\n x: A tensor or variable.\n\n Returns\n -------\n A tensor.\n \"\"\"\n x = (0.2 * x) + 0.5\n zero = _to_tensor(0., x.dtype.base_dtype)\n one = _to_tensor(1., x.dtype.base_dtype)\n x = tf.clip_by_value(x, zero, one)\n return x\n\n\ndef sqrt(x):\n \"\"\"Element-wise square root.\n\n Parameters\n ----------\n x: input tensor.\n\n Returns\n -------\n A tensor.\n \"\"\"\n zero = _to_tensor(0., x.dtype.base_dtype)\n inf = _to_tensor(np.inf, x.dtype.base_dtype)\n x = tf.clip_by_value(x, zero, inf)\n return tf.sqrt(x)\n\n\ndef var(x, axis=None, keepdims=False):\n \"\"\"Variance of a tensor, alongside the specified axis.\n\n Parameters\n ----------\n x: A tensor or variable.\n axis: An integer, the axis to compute the variance.\n keepdims: A boolean, whether to keep the dimensions or not.\n If keepdims is False, the rank of the tensor is reduced\n by 1. If keepdims is True,\n the reduced dimension is retained with length 1.\n\n Returns\n -------\n A tensor with the variance of elements of `x`.\n \"\"\"\n axis = _normalize_axis(axis, get_ndim(x))\n if x.dtype.base_dtype == tf.bool:\n x = tf.cast(x, tf.float32)\n m = tf.reduce_mean(x, axis=axis, keep_dims=True)\n devs_squared = tf.square(x - m)\n return tf.reduce_mean(devs_squared, axis=axis, keep_dims=keepdims)\n\n\ndef euclidean_distance(test, support, max_dist_sq=20):\n \"\"\"Computes pairwise euclidean distances between provided tensors\n\n TODO(rbharath): BROKEN! THIS DOESN'T WORK!\n\n Parameters\n ----------\n test: tf.Tensor\n Of shape (n_test, n_feat)\n support: tf.Tensor\n Of shape (n_support, n_feat)\n max_dist_sq: float, optional\n Maximum pairwise distance allowed.\n\n Returns\n -------\n tf.Tensor:\n Of shape (n_test, n_support)\n \"\"\"\n test = tf.expand_dims(test, 1)\n support = tf.expand_dims(support, 0)\n g = -tf.maximum(tf.reduce_sum(tf.square(test - support), 2), max_dist_sq)\n return g\n\n\ndef add_bias(tensor, init=None, name=None):\n \"\"\"Add a bias term to a tensor.\n\n Parameters\n ---------- \n tensor: tf.Tensor\n Variable tensor.\n init: float\n Bias initializer. Defaults to zero.\n name: str\n Name for this op. Defaults to tensor.op.name.\n\n Returns\n -------\n tf.Tensor\n A biased tensor with the same shape as the input tensor.\n \"\"\"\n if init is None:\n init = tf.zeros([tensor.get_shape()[-1].value])\n with tf.name_scope(name, tensor.op.name, [tensor]):\n b = tf.Variable(init, name='b')\n return tf.nn.bias_add(tensor, b)\n\n\ndef dropout(tensor, dropout_prob, training=True, training_only=True):\n \"\"\"Random dropout.\n\n This implementation supports \"always-on\" dropout (training_only=False), which\n can be used to calculate model uncertainty. See Gal and Ghahramani,\n http://arxiv.org/abs/1506.02142.\n\n NOTE(user): To simplify the implementation, I have chosen not to reverse\n the scaling that occurs in tf.nn.dropout when using dropout during\n inference. This shouldn't be an issue since the activations will be scaled\n by the same constant in both training and inference. This means that there\n are no training-time differences between networks that use dropout during\n inference and those that do not.\n\n Parameters\n ---------- \n tensor: tf.Tensor\n Input tensor.\n dropout_prob: float\n Float giving dropout probability for weights (NOT keep probability).\n training_only: bool\n Boolean. If True (standard dropout), apply dropout only\n during training. 
If False, apply dropout during inference as well.\n\n Returns\n -------\n tf.Tensor:\n A tensor with the same shape as the input tensor.\n \"\"\"\n if not dropout_prob:\n return tensor # do nothing\n keep_prob = 1.0 - dropout_prob\n if training or not training_only:\n tensor = tf.nn.dropout(tensor, keep_prob)\n return tensor\n\n\ndef fully_connected_layer(tensor,\n size=None,\n weight_init=None,\n bias_init=None,\n name=None):\n \"\"\"Fully connected layer.\n\n Parameters\n ----------\n tensor: tf.Tensor\n Input tensor.\n size: int\n Number of output nodes for this layer.\n weight_init: float\n Weight initializer.\n bias_init: float\n Bias initializer.\n name: str\n Name for this op. Defaults to 'fully_connected'.\n\n Returns\n -------\n tf.Tensor:\n A new tensor representing the output of the fully connected layer.\n\n Raises\n ------\n ValueError\n If input tensor is not 2D.\n \"\"\"\n if weight_init is None:\n num_features = tensor.get_shape()[-1].value\n weight_init = tf.truncated_normal([num_features, size], stddev=0.01)\n if bias_init is None:\n bias_init = tf.zeros([size])\n\n with tf.name_scope(name, 'fully_connected', [tensor]):\n w = tf.Variable(weight_init, name='w', dtype=tf.float32)\n b = tf.Variable(bias_init, name='b', dtype=tf.float32)\n return tf.nn.xw_plus_b(tensor, w, b)\n\n\ndef weight_decay(penalty_type, penalty):\n \"\"\"Add weight decay.\n\n Args:\n model: TensorflowGraph.\n\n Returns:\n A scalar tensor containing the weight decay cost.\n\n Raises:\n NotImplementedError: If an unsupported penalty type is requested.\n \"\"\"\n variables = []\n # exclude bias variables\n for v in tf.trainable_variables():\n if v.get_shape().ndims == 2:\n variables.append(v)\n\n with tf.name_scope('weight_decay'):\n if penalty_type == 'l1':\n cost = tf.add_n([tf.reduce_sum(tf.abs(v)) for v in variables])\n elif penalty_type == 'l2':\n cost = tf.add_n([tf.nn.l2_loss(v) for v in variables])\n else:\n raise NotImplementedError('Unsupported penalty_type %s' % penalty_type)\n cost *= penalty\n #tf.scalar_summary('Weight Decay Cost', cost)\n return cost\n\n\ndef multitask_logits(features,\n num_tasks,\n num_classes=2,\n weight_init=None,\n bias_init=None,\n dropout_prob=None,\n name=None):\n \"\"\"Create a logit tensor for each classification task.\n\n Args:\n features: A 2D tensor with dimensions batch_size x num_features.\n num_tasks: Number of classification tasks.\n num_classes: Number of classes for each task.\n weight_init: Weight initializer.\n bias_init: Bias initializer.\n dropout_prob: Float giving dropout probability for weights (NOT keep\n probability).\n name: Name for this op. 
Defaults to 'multitask_logits'.\n\n Returns:\n A list of logit tensors; one for each classification task.\n \"\"\"\n logits_list = []\n with tf.name_scope('multitask_logits'):\n for task_idx in range(num_tasks):\n with tf.name_scope(name,\n ('task' + str(task_idx).zfill(len(str(num_tasks)))),\n [features]):\n logits_list.append(\n logits(\n features,\n num_classes,\n weight_init=weight_init,\n bias_init=bias_init,\n dropout_prob=dropout_prob))\n return logits_list\n\n\ndef logits(features,\n num_classes=2,\n weight_init=None,\n bias_init=None,\n dropout_prob=None,\n name=None):\n \"\"\"Create a logits tensor for a single classification task.\n\n You almost certainly don't want dropout on there -- it's like randomly setting\n the (unscaled) probability of a target class to 0.5.\n\n Args:\n features: A 2D tensor with dimensions batch_size x num_features.\n num_classes: Number of classes for each task.\n weight_init: Weight initializer.\n bias_init: Bias initializer.\n dropout_prob: Float giving dropout probability for weights (NOT keep\n probability).\n name: Name for this op.\n\n Returns:\n A logits tensor with shape batch_size x num_classes.\n \"\"\"\n with tf.name_scope(name, 'logits', [features]) as name:\n return dropout(\n fully_connected_layer(\n features,\n num_classes,\n weight_init=weight_init,\n bias_init=bias_init,\n name=name), dropout_prob)\n\n\ndef softmax_N(tensor, name=None):\n \"\"\"Apply softmax across last dimension of a tensor.\n\n Args:\n tensor: Input tensor.\n name: Name for this op. If None, defaults to 'softmax_N'.\n\n Returns:\n A tensor with softmax-normalized values on the last dimension.\n \"\"\"\n with tf.name_scope(name, 'softmax_N', [tensor]):\n exp_tensor = tf.exp(tensor)\n reduction_indices = [tensor.get_shape().ndims - 1]\n return tf.div(exp_tensor,\n tf.reduce_sum(\n exp_tensor, axis=reduction_indices, keep_dims=True))\n\n\ndef optimizer(optimizer=\"adam\", learning_rate=.001, momentum=.9):\n \"\"\"Create model optimizer.\n\n Parameters\n ----------\n optimizer: str, optional\n Name of optimizer\n learning_rate: float, optional\n Learning rate for algorithm\n momentum: float, optional\n Momentum rate\n\n Returns\n -------\n A training Optimizer.\n\n Raises:\n NotImplementedError: If an unsupported optimizer is requested.\n \"\"\"\n # TODO(user): gradient clipping (see Minimize)\n if optimizer == 'adagrad':\n train_op = tf.train.AdagradOptimizer(learning_rate)\n elif optimizer == 'adam':\n train_op = tf.train.AdamOptimizer(learning_rate)\n elif optimizer == 'momentum':\n train_op = tf.train.MomentumOptimizer(learning_rate, momentum)\n elif optimizer == 'rmsprop':\n train_op = tf.train.RMSPropOptimizer(learning_rate, momentum)\n elif optimizer == 'sgd':\n train_op = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise NotImplementedError('Unsupported optimizer %s' % optimizer)\n return train_op\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 18 15:53:27 2016\n\n@author: Michael Wu\n\nBenchmark test:\n\nGiving classification performances of: \n Random forest(rf), MultitaskDNN(tf), \n RobustMultitaskDNN(tf_robust),\n Logistic regression(logreg), IRV(irv)\n Graph convolution(graphconv), xgboost(xgb),\n Directed acyclic graph(dag), Weave(weave) \non datasets: bace_c, bbbp, clintox, hiv, muv, pcba, sider, tox21, toxcast \n\nGiving regression performances of:\n MultitaskDNN(tf_regression),\n Fit Transformer MultitaskDNN(tf_regression_ft),\n Random forest(rf_regression),\n Graph convolution regression(graphconvreg),\n 
xgboost(xgb_regression), Deep tensor neural net(dtnn),\n Directed acyclic graph(dag_regression),\n Weave(weave_regression)\non datasets: bace_r, chembl, clearance, delaney(ESOL), hopv, kaggle, lipo,\n nci, pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl(FreeSolv)\n \n\ntime estimation listed in README file\n\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport os\nimport numpy as np\nimport deepchem as dc\nimport argparse\n\nnp.random.seed(123)\n\nparser = argparse.ArgumentParser(\n description='Deepchem benchmark: ' +\n 'giving performances of different learning models on datasets')\nparser.add_argument(\n '-s',\n action='append',\n dest='splitter_args',\n default=[],\n help='Choice of splitting function: index, random, scaffold, stratified')\nparser.add_argument(\n '-m',\n action='append',\n dest='model_args',\n default=[],\n help='Choice of model: tf, tf_robust, logreg, rf, irv, graphconv, xgb,' + \\\n ' dag, weave, tf_regression, tf_regression_ft, rf_regression, ' + \\\n 'graphconvreg, xgb_regression, dtnn, dag_regression, weave_regression')\nparser.add_argument(\n '-d',\n action='append',\n dest='dataset_args',\n default=[],\n help='Choice of dataset: bace_c, bace_r, bbbp, chembl, clearance, ' +\n 'clintox, delaney, hiv, hopv, kaggle, lipo, muv, nci, pcba, ' +\n 'pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl, sider, tox21, toxcast')\nparser.add_argument(\n '-t',\n action='store_true',\n dest='test',\n default=False,\n help='Evalute performance on test set')\nargs = parser.parse_args()\n#Datasets and models used in the benchmark test\nsplitters = args.splitter_args\nmodels = args.model_args\ndatasets = args.dataset_args\ntest = args.test\n\nif len(splitters) == 0:\n splitters = ['index', 'random', 'scaffold']\nif len(models) == 0:\n models = [\n 'tf', 'tf_robust', 'logreg', 'graphconv', 'tf_regression',\n 'tf_regression_ft', 'graphconvreg'\n ]\n #irv, rf, rf_regression should be assigned manually\nif len(datasets) == 0:\n datasets = [\n 'bace_c', 'bace_r', 'bbbp', 'clearance', 'clintox', 'delaney', 'hiv',\n 'hopv', 'lipo', 'muv', 'pdbbind', 'ppb', 'qm7b', 'qm8', 'qm9', 'sampl',\n 'sider', 'tox21', 'toxcast'\n ]\n\nfor dataset in datasets:\n for split in splitters:\n for model in models:\n dc.molnet.run_benchmark([dataset], str(model), split=split, test=test)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.cond", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.concat", "tensorflow.zeros", "numpy.asarray", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.nn.l2_loss", "tensorflow.where", "tensorflow.train.AdamOptimizer", "tensorflow.get_default_graph", "numpy.random.randint", "tensorflow.python.training.moving_averages.assign_moving_average", "tensorflow.Variable", "tensorflow.random_uniform_initializer", "tensorflow.nn.moments", "tensorflow.train.MomentumOptimizer", "tensorflow.name_scope", "tensorflow.square", "tensorflow.trainable_variables", "tensorflow.random_normal_initializer", "tensorflow.nn.dropout", "tensorflow.nn.l2_normalize", "tensorflow.nn.xw_plus_b", "tensorflow.matmul", "tensorflow.nn.elu", "tensorflow.nn.batch_normalization", "tensorflow.truncated_normal", "tensorflow.train.AdagradOptimizer", "tensorflow.shape", "tensorflow.train.RMSPropOptimizer", "tensorflow.placeholder", "tensorflow.exp", "tensorflow.train.GradientDescentOptimizer", "tensorflow.concat_v2", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.clip_by_value", "tensorflow.nn.relu", "tensorflow.reduce_max", "tensorflow.nn.bias_add", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.log", "tensorflow.sqrt", "tensorflow.abs" ], [ "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gzhu06/TDspkr-mismatch-study
[ "1106a988e15a111646981c5b6fb30219d1ff6e8a" ]
[ "backbones/aggregator/ECAPA-TDNN.py" ]
[ "'''\nReference: https://github.com/lawlict/ECAPA-TDNN\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n''' Res2Conv1d + BatchNorm1d + ReLU\n'''\nclass Res2Conv1dReluBn(nn.Module):\n '''\n in_channels == out_channels == channels\n '''\n def __init__(self, channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False, scale=4):\n super().__init__()\n assert channels % scale == 0, \"{} % {} != 0\".format(channels, scale)\n self.scale = scale\n self.width = channels // scale\n self.nums = scale if scale == 1 else scale - 1\n\n self.convs = []\n self.bns = []\n for i in range(self.nums):\n self.convs.append(nn.Conv1d(self.width, self.width, kernel_size, stride, padding, dilation, bias=bias))\n self.bns.append(nn.BatchNorm1d(self.width))\n self.convs = nn.ModuleList(self.convs)\n self.bns = nn.ModuleList(self.bns)\n\n def forward(self, x):\n out = []\n spx = torch.split(x, self.width, 1)\n for i in range(self.nums):\n if i == 0:\n sp = spx[i]\n else:\n sp = sp + spx[i]\n # Order: conv -> relu -> bn\n sp = self.convs[i](sp)\n sp = self.bns[i](F.relu(sp))\n out.append(sp)\n if self.scale != 1:\n out.append(spx[self.nums])\n out = torch.cat(out, dim=1)\n return out\n\n''' Conv1d + BatchNorm1d + ReLU\n'''\nclass Conv1dReluBn(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):\n super().__init__()\n self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)\n self.bn = nn.BatchNorm1d(out_channels)\n\n def forward(self, x):\n return self.bn(F.relu(self.conv(x)))\n\n''' The SE connection of 1D case.\n'''\nclass SE_Connect(nn.Module):\n def __init__(self, channels, s=2):\n super().__init__()\n assert channels % s == 0, \"{} % {} != 0\".format(channels, s)\n self.linear1 = nn.Linear(channels, channels // s)\n self.linear2 = nn.Linear(channels // s, channels)\n\n def forward(self, x):\n out = x.mean(dim=2)\n out = F.relu(self.linear1(out))\n out = torch.sigmoid(self.linear2(out))\n out = x * out.unsqueeze(2)\n return out\n\n''' SE-Res2Block.\n Note: residual connection is implemented in the ECAPA_TDNN model, not here.\n'''\ndef SE_Res2Block(channels, kernel_size, stride, padding, dilation, scale):\n return nn.Sequential(\n Conv1dReluBn(channels, channels, kernel_size=1, stride=1, padding=0),\n Res2Conv1dReluBn(channels, kernel_size, stride, padding, dilation, scale=scale),\n Conv1dReluBn(channels, channels, kernel_size=1, stride=1, padding=0),\n SE_Connect(channels)\n )\n\n''' Attentive weighted mean and standard deviation pooling.\n'''\nclass AttentiveStatsPool(nn.Module):\n def __init__(self, in_dim, bottleneck_dim):\n super().__init__()\n # Use Conv1d with stride == 1 rather than Linear, then we don't need to transpose inputs.\n self.linear1 = nn.Conv1d(in_dim, bottleneck_dim, kernel_size=1) # equals W and b in the paper\n self.linear2 = nn.Conv1d(bottleneck_dim, in_dim, kernel_size=1) # equals V and k in the paper\n\n def forward(self, x):\n # DON'T use ReLU here! 
In experiments, I find ReLU hard to converge.\n alpha = torch.tanh(self.linear1(x))\n alpha = torch.softmax(self.linear2(alpha), dim=2)\n mean = torch.sum(alpha * x, dim=2)\n residuals = torch.sum(alpha * x ** 2, dim=2) - mean ** 2\n std = torch.sqrt(residuals.clamp(min=1e-9))\n return torch.cat([mean, std], dim=1)\n\n''' Implementation of\n \"ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification\".\n\n Note that we DON'T concatenate the last frame-wise layer with non-weighted mean and standard deviation, \n because it brings little improvment but significantly increases model parameters. \n As a result, this implementation basically equals the A.2 of Table 2 in the paper.\n'''\nclass ECAPA_TDNN(nn.Module):\n def __init__(self, in_channels=80, channels=512, embd_dim=192):\n super().__init__()\n self.layer1 = Conv1dReluBn(in_channels, channels, kernel_size=5, padding=2)\n self.layer2 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=2, dilation=2, scale=8)\n self.layer3 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=3, dilation=3, scale=8)\n self.layer4 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=4, dilation=4, scale=8)\n\n cat_channels = channels * 3\n self.conv = nn.Conv1d(cat_channels, 1536, kernel_size=1)\n self.pooling = AttentiveStatsPool(1536, 128)\n self.bn1 = nn.BatchNorm1d(3072)\n self.linear = nn.Linear(3072, embd_dim)\n self.bn2 = nn.BatchNorm1d(embd_dim)\n\n def forward(self, x):\n# x = x.transpose(1, 2)\n out1 = self.layer1(x)\n out2 = self.layer2(out1) + out1\n out3 = self.layer3(out1 + out2) + out1 + out2\n out4 = self.layer4(out1 + out2 + out3) + out1 + out2 + out3\n\n out = torch.cat([out2, out3, out4], dim=1)\n out = F.relu(self.conv(out))\n out = self.bn1(self.pooling(out))\n out = self.bn2(self.linear(out))\n return out\n \nclass architecture(nn.Module):\n def __init__(self, embed_dim=512):\n super(architecture, self).__init__()\n \n self.tdnn_aggregator = ECAPA_TDNN(in_channels=512, channels=512, embd_dim=512)\n \n def forward(self, x):\n\n out = self.tdnn_aggregator(x)\n return out\n\nif __name__ == '__main__':\n # Input size: batch_size * seq_len * feat_dim\n x = torch.zeros(2, 200, 80)\n model = ECAPA_TDNN(in_channels=80, channels=512, embd_dim=192)\n out = model(x)\n print(model)\n print(out.shape) # should be [2, 192]" ]
[ [ "torch.nn.BatchNorm1d", "torch.cat", "torch.zeros", "torch.nn.ModuleList", "torch.sum", "torch.nn.Linear", "torch.nn.Conv1d", "torch.nn.functional.relu", "torch.split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Solid-Mechanics/matplotlib-4-abaqus
[ "96bdd150fcd92fa155dfc7b13d930bab394e8e47", "96bdd150fcd92fa155dfc7b13d930bab394e8e47" ]
[ "matplotlib/collections.py", "matplotlib/axis.py" ]
[ "\"\"\"\nClasses for the efficient drawing of large collections of objects that\nshare most properties, e.g., a large number of line segments or\npolygons.\n\nThe classes are not meant to be as flexible as their single element\ncounterparts (e.g., you may not be able to select all line styles) but\nthey are meant to be fast for common use cases (e.g., a large set of solid\nline segemnts)\n\"\"\"\nfrom __future__ import print_function\nimport warnings\nimport numpy as np\nimport numpy.ma as ma\nimport matplotlib as mpl\nimport matplotlib.cbook as cbook\nimport matplotlib.colors as mcolors\nimport matplotlib.cm as cm\nfrom matplotlib import docstring\nimport matplotlib.transforms as transforms\nimport matplotlib.artist as artist\nfrom matplotlib.artist import allow_rasterization\nimport matplotlib.backend_bases as backend_bases\nimport matplotlib.path as mpath\nfrom matplotlib import _path\nimport matplotlib.mlab as mlab\n\n\nclass Collection(artist.Artist, cm.ScalarMappable):\n \"\"\"\n Base class for Collections. Must be subclassed to be usable.\n\n All properties in a collection must be sequences or scalars;\n if scalars, they will be converted to sequences. The\n property of the ith element of the collection is::\n\n prop[i % len(props)]\n\n Keyword arguments and default values:\n\n * *edgecolors*: None\n * *facecolors*: None\n * *linewidths*: None\n * *antialiaseds*: None\n * *offsets*: None\n * *transOffset*: transforms.IdentityTransform()\n * *offset_position*: 'screen' (default) or 'data'\n * *norm*: None (optional for\n :class:`matplotlib.cm.ScalarMappable`)\n * *cmap*: None (optional for\n :class:`matplotlib.cm.ScalarMappable`)\n * *hatch*: None\n * *zorder*: 1\n\n\n *offsets* and *transOffset* are used to translate the patch after\n rendering (default no offsets). If offset_position is 'screen'\n (default) the offset is applied after the master transform has\n been applied, that is, the offsets are in screen coordinates. If\n offset_position is 'data', the offset is applied before the master\n transform, i.e., the offsets are in data coordinates.\n\n If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*\n are None, they default to their :data:`matplotlib.rcParams` patch\n setting, in sequence form.\n\n The use of :class:`~matplotlib.cm.ScalarMappable` is optional. 
If\n the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None\n (ie a call to set_array has been made), at draw time a call to\n scalar mappable will be made to set the face colors.\n \"\"\"\n _offsets = np.array([], np.float_)\n # _offsets must be a Nx2 array!\n _offsets.shape = (0, 2)\n _transOffset = transforms.IdentityTransform()\n _transforms = []\n\n\n\n def __init__(self,\n edgecolors=None,\n facecolors=None,\n linewidths=None,\n linestyles='solid',\n antialiaseds=None,\n offsets=None,\n transOffset=None,\n norm=None, # optional for ScalarMappable\n cmap=None, # ditto\n pickradius=5.0,\n hatch=None,\n urls=None,\n offset_position='screen',\n zorder=1,\n **kwargs\n ):\n \"\"\"\n Create a Collection\n\n %(Collection)s\n \"\"\"\n artist.Artist.__init__(self)\n cm.ScalarMappable.__init__(self, norm, cmap)\n\n self.set_edgecolor(edgecolors)\n self.set_facecolor(facecolors)\n self.set_linewidth(linewidths)\n self.set_linestyle(linestyles)\n self.set_antialiased(antialiaseds)\n self.set_pickradius(pickradius)\n self.set_urls(urls)\n self.set_hatch(hatch)\n self.set_offset_position(offset_position)\n self.set_zorder(zorder)\n\n self._uniform_offsets = None\n self._offsets = np.array([[0, 0]], np.float_)\n if offsets is not None:\n offsets = np.asanyarray(offsets)\n offsets.shape = (-1, 2) # Make it Nx2\n if transOffset is not None:\n self._offsets = offsets\n self._transOffset = transOffset\n else:\n self._uniform_offsets = offsets\n\n self._path_effects = None\n self.update(kwargs)\n self._paths = None\n\n @staticmethod\n def _get_value(val):\n try:\n return (float(val), )\n except TypeError:\n if cbook.iterable(val) and len(val):\n try:\n float(val[0])\n except (TypeError, ValueError):\n pass # raise below\n else:\n return val\n\n raise TypeError('val must be a float or nonzero sequence of floats')\n\n @staticmethod\n def _get_bool(val):\n if not cbook.iterable(val):\n val = (val,)\n try:\n bool(val[0])\n except (TypeError, IndexError):\n raise TypeError('val must be a bool or nonzero sequence of them')\n return val\n\n def get_paths(self):\n return self._paths\n\n def set_paths(self):\n raise NotImplementedError\n\n def get_transforms(self):\n return self._transforms\n\n def get_offset_transform(self):\n t = self._transOffset\n if (not isinstance(t, transforms.Transform)\n and hasattr(t, '_as_mpl_transform')):\n t = t._as_mpl_transform(self.axes)\n return t\n\n def get_datalim(self, transData):\n transform = self.get_transform()\n transOffset = self.get_offset_transform()\n offsets = self._offsets\n paths = self.get_paths()\n\n if not transform.is_affine:\n paths = [transform.transform_path_non_affine(p) for p in paths]\n transform = transform.get_affine()\n if not transOffset.is_affine:\n offsets = transOffset.transform_non_affine(offsets)\n transOffset = transOffset.get_affine()\n\n offsets = np.asanyarray(offsets, np.float_)\n if np.ma.isMaskedArray(offsets):\n offsets = offsets.filled(np.nan)\n # get_path_collection_extents handles nan but not masked arrays\n offsets.shape = (-1, 2) # Make it Nx2\n\n if paths:\n result = mpath.get_path_collection_extents(\n transform.frozen(), paths, self.get_transforms(),\n offsets, transOffset.frozen())\n result = result.inverse_transformed(transData)\n else:\n result = transforms.Bbox([[0, 0], [0, 0]])\n return result\n\n def get_window_extent(self, renderer):\n # TODO:check to ensure that this does not fail for\n # cases other than scatter plot legend\n return self.get_datalim(transforms.IdentityTransform())\n\n def 
_prepare_points(self):\n \"\"\"Point prep for drawing and hit testing\"\"\"\n\n transform = self.get_transform()\n transOffset = self.get_offset_transform()\n offsets = self._offsets\n paths = self.get_paths()\n\n if self.have_units():\n paths = []\n for path in self.get_paths():\n vertices = path.vertices\n xs, ys = vertices[:, 0], vertices[:, 1]\n xs = self.convert_xunits(xs)\n ys = self.convert_yunits(ys)\n paths.append(mpath.Path(zip(xs, ys), path.codes))\n\n if offsets.size > 0:\n xs = self.convert_xunits(offsets[:, 0])\n ys = self.convert_yunits(offsets[:, 1])\n offsets = zip(xs, ys)\n\n offsets = np.asanyarray(offsets, np.float_)\n offsets.shape = (-1, 2) # Make it Nx2\n\n if not transform.is_affine:\n paths = [transform.transform_path_non_affine(path)\n for path in paths]\n transform = transform.get_affine()\n if not transOffset.is_affine:\n offsets = transOffset.transform_non_affine(offsets)\n # This might have changed an ndarray into a masked array.\n transOffset = transOffset.get_affine()\n\n if np.ma.isMaskedArray(offsets):\n offsets = offsets.filled(np.nan)\n # Changing from a masked array to nan-filled ndarray\n # is probably most efficient at this point.\n\n return transform, transOffset, offsets, paths\n\n @allow_rasterization\n def draw(self, renderer):\n if not self.get_visible():\n return\n renderer.open_group(self.__class__.__name__, self.get_gid())\n\n self.update_scalarmappable()\n\n transform, transOffset, offsets, paths = self._prepare_points()\n\n gc = renderer.new_gc()\n self._set_gc_clip(gc)\n gc.set_snap(self.get_snap())\n\n if self._hatch:\n gc.set_hatch(self._hatch)\n\n if self.get_sketch_params() is not None:\n gc.set_sketch_params(*self.get_sketch_params())\n\n if self.get_path_effects():\n for pe in self.get_path_effects():\n pe.draw_path_collection(renderer,\n gc, transform.frozen(), paths, self.get_transforms(),\n offsets, transOffset, self.get_facecolor(), self.get_edgecolor(),\n self._linewidths, self._linestyles, self._antialiaseds, self._urls,\n self._offset_position)\n else:\n renderer.draw_path_collection(\n gc, transform.frozen(), paths, self.get_transforms(),\n offsets, transOffset, self.get_facecolor(), self.get_edgecolor(),\n self._linewidths, self._linestyles, self._antialiaseds, self._urls,\n self._offset_position)\n\n gc.restore()\n renderer.close_group(self.__class__.__name__)\n\n def set_pickradius(self, pr):\n self._pickradius = pr\n\n def get_pickradius(self):\n return self._pickradius\n\n def contains(self, mouseevent):\n \"\"\"\n Test whether the mouse event occurred in the collection.\n\n Returns True | False, ``dict(ind=itemlist)``, where every\n item in itemlist contains the event.\n \"\"\"\n if callable(self._contains):\n return self._contains(self, mouseevent)\n\n if not self.get_visible():\n return False, {}\n\n if self._picker is True: # the Boolean constant, not just nonzero or 1\n pickradius = self._pickradius\n else:\n try:\n pickradius = float(self._picker)\n except TypeError:\n # This should not happen if \"contains\" is called via\n # pick, the normal route; the check is here in case\n # it is called through some unanticipated route.\n warnings.warn(\n \"Collection picker %s could not be converted to float\"\n % self._picker)\n pickradius = self._pickradius\n\n transform, transOffset, offsets, paths = self._prepare_points()\n\n ind = _path.point_in_path_collection(\n mouseevent.x, mouseevent.y, pickradius,\n transform.frozen(), paths, self.get_transforms(),\n offsets, transOffset, pickradius <= 0,\n 
self.get_offset_position())\n\n return len(ind) > 0, dict(ind=ind)\n\n def set_urls(self, urls):\n if urls is None:\n self._urls = [None, ]\n else:\n self._urls = urls\n\n def get_urls(self):\n return self._urls\n\n def set_hatch(self, hatch):\n \"\"\"\n Set the hatching pattern\n\n *hatch* can be one of::\n\n / - diagonal hatching\n \\ - back diagonal\n | - vertical\n - - horizontal\n + - crossed\n x - crossed diagonal\n o - small circle\n O - large circle\n . - dots\n * - stars\n\n Letters can be combined, in which case all the specified\n hatchings are done. If same letter repeats, it increases the\n density of hatching of that pattern.\n\n Hatching is supported in the PostScript, PDF, SVG and Agg\n backends only.\n\n Unlike other properties such as linewidth and colors, hatching\n can only be specified for the collection as a whole, not separately\n for each member.\n\n ACCEPTS: [ '/' | '\\\\\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*' ]\n \"\"\"\n self._hatch = hatch\n\n def get_hatch(self):\n 'Return the current hatching pattern'\n return self._hatch\n\n def set_offsets(self, offsets):\n \"\"\"\n Set the offsets for the collection. *offsets* can be a scalar\n or a sequence.\n\n ACCEPTS: float or sequence of floats\n \"\"\"\n offsets = np.asanyarray(offsets, np.float_)\n offsets.shape = (-1, 2) # Make it Nx2\n #This decision is based on how they are initialized above\n if self._uniform_offsets is None:\n self._offsets = offsets\n else:\n self._uniform_offsets = offsets\n\n def get_offsets(self):\n \"\"\"\n Return the offsets for the collection.\n \"\"\"\n #This decision is based on how they are initialized above in __init__()\n if self._uniform_offsets is None:\n return self._offsets\n else:\n return self._uniform_offsets\n\n def set_offset_position(self, offset_position):\n \"\"\"\n Set how offsets are applied. If *offset_position* is 'screen'\n (default) the offset is applied after the master transform has\n been applied, that is, the offsets are in screen coordinates.\n If offset_position is 'data', the offset is applied before the\n master transform, i.e., the offsets are in data coordinates.\n \"\"\"\n if offset_position not in ('screen', 'data'):\n raise ValueError(\"offset_position must be 'screen' or 'data'\")\n self._offset_position = offset_position\n\n def get_offset_position(self):\n \"\"\"\n Returns how offsets are applied for the collection. If\n *offset_position* is 'screen', the offset is applied after the\n master transform has been applied, that is, the offsets are in\n screen coordinates. If offset_position is 'data', the offset\n is applied before the master transform, i.e., the offsets are\n in data coordinates.\n \"\"\"\n return self._offset_position\n\n def set_linewidth(self, lw):\n \"\"\"\n Set the linewidth(s) for the collection. 
*lw* can be a scalar\n or a sequence; if it is a sequence the patches will cycle\n through the sequence\n\n ACCEPTS: float or sequence of floats\n \"\"\"\n if lw is None:\n lw = mpl.rcParams['patch.linewidth']\n self._linewidths = self._get_value(lw)\n\n def set_linewidths(self, lw):\n \"\"\"alias for set_linewidth\"\"\"\n return self.set_linewidth(lw)\n\n def set_lw(self, lw):\n \"\"\"alias for set_linewidth\"\"\"\n return self.set_linewidth(lw)\n\n def set_linestyle(self, ls):\n \"\"\"\n Set the linestyle(s) for the collection.\n\n ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |\n (offset, on-off-dash-seq) ]\n \"\"\"\n try:\n dashd = backend_bases.GraphicsContextBase.dashd\n if cbook.is_string_like(ls):\n if ls in dashd:\n dashes = [dashd[ls]]\n elif ls in cbook.ls_mapper:\n dashes = [dashd[cbook.ls_mapper[ls]]]\n else:\n raise ValueError()\n elif cbook.iterable(ls):\n try:\n dashes = []\n for x in ls:\n if cbook.is_string_like(x):\n if x in dashd:\n dashes.append(dashd[x])\n elif x in cbook.ls_mapper:\n dashes.append(dashd[cbook.ls_mapper[x]])\n else:\n raise ValueError()\n elif cbook.iterable(x) and len(x) == 2:\n dashes.append(x)\n else:\n raise ValueError()\n except ValueError:\n if len(ls) == 2:\n dashes = ls\n else:\n raise ValueError()\n else:\n raise ValueError()\n except ValueError:\n raise ValueError('Do not know how to convert %s to dashes' % ls)\n self._linestyles = dashes\n\n def set_linestyles(self, ls):\n \"\"\"alias for set_linestyle\"\"\"\n return self.set_linestyle(ls)\n\n def set_dashes(self, ls):\n \"\"\"alias for set_linestyle\"\"\"\n return self.set_linestyle(ls)\n\n def set_antialiased(self, aa):\n \"\"\"\n Set the antialiasing state for rendering.\n\n ACCEPTS: Boolean or sequence of booleans\n \"\"\"\n if aa is None:\n aa = mpl.rcParams['patch.antialiased']\n self._antialiaseds = self._get_bool(aa)\n\n def set_antialiaseds(self, aa):\n \"\"\"alias for set_antialiased\"\"\"\n return self.set_antialiased(aa)\n\n def set_color(self, c):\n \"\"\"\n Set both the edgecolor and the facecolor.\n\n ACCEPTS: matplotlib color arg or sequence of rgba tuples\n\n .. seealso::\n\n :meth:`set_facecolor`, :meth:`set_edgecolor`\n For setting the edge or face color individually.\n \"\"\"\n self.set_facecolor(c)\n self.set_edgecolor(c)\n\n def set_facecolor(self, c):\n \"\"\"\n Set the facecolor(s) of the collection. *c* can be a\n matplotlib color arg (all patches have same color), or a\n sequence of rgba tuples; if it is a sequence the patches will\n cycle through the sequence.\n\n If *c* is 'none', the patch will not be filled.\n\n ACCEPTS: matplotlib color arg or sequence of rgba tuples\n \"\"\"\n self._is_filled = True\n try:\n if c.lower() == 'none':\n self._is_filled = False\n except AttributeError:\n pass\n if c is None:\n c = mpl.rcParams['patch.facecolor']\n self._facecolors_original = c\n self._facecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)\n\n def set_facecolors(self, c):\n \"\"\"alias for set_facecolor\"\"\"\n return self.set_facecolor(c)\n\n def get_facecolor(self):\n return self._facecolors\n get_facecolors = get_facecolor\n\n def get_edgecolor(self):\n if self._edgecolors == 'face':\n return self.get_facecolors()\n else:\n return self._edgecolors\n get_edgecolors = get_edgecolor\n\n def set_edgecolor(self, c):\n \"\"\"\n Set the edgecolor(s) of the collection. 
*c* can be a\n matplotlib color arg (all patches have same color), or a\n sequence of rgba tuples; if it is a sequence the patches will\n cycle through the sequence.\n\n If *c* is 'face', the edge color will always be the same as\n the face color. If it is 'none', the patch boundary will not\n be drawn.\n\n ACCEPTS: matplotlib color arg or sequence of rgba tuples\n \"\"\"\n self._is_stroked = True\n try:\n if c.lower() == 'none':\n self._is_stroked = False\n except AttributeError:\n pass\n try:\n if c.lower() == 'face':\n self._edgecolors = 'face'\n self._edgecolors_original = 'face'\n return\n except AttributeError:\n pass\n if c is None:\n c = mpl.rcParams['patch.edgecolor']\n self._edgecolors_original = c\n self._edgecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)\n\n def set_edgecolors(self, c):\n \"\"\"alias for set_edgecolor\"\"\"\n return self.set_edgecolor(c)\n\n def set_alpha(self, alpha):\n \"\"\"\n Set the alpha tranparencies of the collection. *alpha* must be\n a float or *None*.\n\n ACCEPTS: float or None\n \"\"\"\n if alpha is not None:\n try:\n float(alpha)\n except TypeError:\n raise TypeError('alpha must be a float or None')\n artist.Artist.set_alpha(self, alpha)\n try:\n self._facecolors = mcolors.colorConverter.to_rgba_array(\n self._facecolors_original, self._alpha)\n except (AttributeError, TypeError, IndexError):\n pass\n try:\n if self._edgecolors_original != 'face':\n self._edgecolors = mcolors.colorConverter.to_rgba_array(\n self._edgecolors_original, self._alpha)\n except (AttributeError, TypeError, IndexError):\n pass\n\n def get_linewidths(self):\n return self._linewidths\n get_linewidth = get_linewidths\n\n def get_linestyles(self):\n return self._linestyles\n get_dashes = get_linestyle = get_linestyles\n\n def update_scalarmappable(self):\n \"\"\"\n If the scalar mappable array is not none, update colors\n from scalar data\n \"\"\"\n if self._A is None:\n return\n if self._A.ndim > 1:\n raise ValueError('Collections can only map rank 1 arrays')\n if not self.check_update(\"array\"):\n return\n if self._is_filled:\n self._facecolors = self.to_rgba(self._A, self._alpha)\n elif self._is_stroked:\n self._edgecolors = self.to_rgba(self._A, self._alpha)\n\n def update_from(self, other):\n 'copy properties from other to self'\n\n artist.Artist.update_from(self, other)\n self._antialiaseds = other._antialiaseds\n self._edgecolors_original = other._edgecolors_original\n self._edgecolors = other._edgecolors\n self._facecolors_original = other._facecolors_original\n self._facecolors = other._facecolors\n self._linewidths = other._linewidths\n self._linestyles = other._linestyles\n self._pickradius = other._pickradius\n self._hatch = other._hatch\n\n # update_from for scalarmappable\n self._A = other._A\n self.norm = other.norm\n self.cmap = other.cmap\n # self.update_dict = other.update_dict # do we need to copy this? 
-JJL\n\n\n# these are not available for the object inspector until after the\n# class is built so we define an initial set here for the init\n# function and they will be overridden after object defn\ndocstring.interpd.update(Collection=\"\"\"\\\n Valid Collection keyword arguments:\n\n * *edgecolors*: None\n * *facecolors*: None\n * *linewidths*: None\n * *antialiaseds*: None\n * *offsets*: None\n * *transOffset*: transforms.IdentityTransform()\n * *norm*: None (optional for\n :class:`matplotlib.cm.ScalarMappable`)\n * *cmap*: None (optional for\n :class:`matplotlib.cm.ScalarMappable`)\n\n *offsets* and *transOffset* are used to translate the patch after\n rendering (default no offsets)\n\n If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*\n are None, they default to their :data:`matplotlib.rcParams` patch\n setting, in sequence form.\n\"\"\")\n\n\nclass PathCollection(Collection):\n \"\"\"\n This is the most basic :class:`Collection` subclass.\n \"\"\"\n @docstring.dedent_interpd\n def __init__(self, paths, sizes=None, **kwargs):\n \"\"\"\n *paths* is a sequence of :class:`matplotlib.path.Path`\n instances.\n\n %(Collection)s\n \"\"\"\n\n Collection.__init__(self, **kwargs)\n self.set_paths(paths)\n self._sizes = sizes\n\n def set_paths(self, paths):\n self._paths = paths\n\n def get_paths(self):\n return self._paths\n\n def get_sizes(self):\n return self._sizes\n\n @allow_rasterization\n def draw(self, renderer):\n if self._sizes is not None:\n self._transforms = [\n transforms.Affine2D().scale(\n (np.sqrt(x) * self.figure.dpi / 72.0))\n for x in self._sizes]\n return Collection.draw(self, renderer)\n\n\nclass PolyCollection(Collection):\n @docstring.dedent_interpd\n def __init__(self, verts, sizes=None, closed=True, **kwargs):\n \"\"\"\n *verts* is a sequence of ( *verts0*, *verts1*, ...) where\n *verts_i* is a sequence of *xy* tuples of vertices, or an\n equivalent :mod:`numpy` array of shape (*nv*, 2).\n\n *sizes* is *None* (default) or a sequence of floats that\n scale the corresponding *verts_i*. 
The scaling is applied\n before the Artist master transform; if the latter is an identity\n transform, then the overall scaling is such that if\n *verts_i* specify a unit square, then *sizes_i* is the area\n of that square in points^2.\n If len(*sizes*) < *nv*, the additional values will be\n taken cyclically from the array.\n\n *closed*, when *True*, will explicitly close the polygon.\n\n %(Collection)s\n \"\"\"\n Collection.__init__(self, **kwargs)\n self._sizes = sizes\n self.set_verts(verts, closed)\n\n def set_verts(self, verts, closed=True):\n '''This allows one to delay initialization of the vertices.'''\n if np.ma.isMaskedArray(verts):\n verts = verts.astype(np.float_).filled(np.nan)\n # This is much faster than having Path do it one at a time.\n if closed:\n self._paths = []\n for xy in verts:\n if len(xy):\n if np.ma.isMaskedArray(xy):\n xy = np.ma.concatenate([xy, np.zeros((1, 2))])\n else:\n xy = np.asarray(xy)\n xy = np.concatenate([xy, np.zeros((1, 2))])\n codes = np.empty(xy.shape[0], dtype=mpath.Path.code_type)\n codes[:] = mpath.Path.LINETO\n codes[0] = mpath.Path.MOVETO\n codes[-1] = mpath.Path.CLOSEPOLY\n self._paths.append(mpath.Path(xy, codes))\n else:\n self._paths.append(mpath.Path(xy))\n else:\n self._paths = [mpath.Path(xy) for xy in verts]\n\n set_paths = set_verts\n\n @allow_rasterization\n def draw(self, renderer):\n if self._sizes is not None:\n self._transforms = [\n transforms.Affine2D().scale(\n (np.sqrt(x) * self.figure.dpi / 72.0))\n for x in self._sizes]\n return Collection.draw(self, renderer)\n\n\nclass BrokenBarHCollection(PolyCollection):\n \"\"\"\n A collection of horizontal bars spanning *yrange* with a sequence of\n *xranges*.\n \"\"\"\n @docstring.dedent_interpd\n def __init__(self, xranges, yrange, **kwargs):\n \"\"\"\n *xranges*\n sequence of (*xmin*, *xwidth*)\n\n *yrange*\n *ymin*, *ywidth*\n\n %(Collection)s\n \"\"\"\n ymin, ywidth = yrange\n ymax = ymin + ywidth\n verts = [[(xmin, ymin),\n (xmin, ymax),\n (xmin + xwidth, ymax),\n (xmin + xwidth, ymin),\n (xmin, ymin)] for xmin, xwidth in xranges]\n PolyCollection.__init__(self, verts, **kwargs)\n\n @staticmethod\n def span_where(x, ymin, ymax, where, **kwargs):\n \"\"\"\n Create a BrokenBarHCollection to plot horizontal bars from\n over the regions in *x* where *where* is True. The bars range\n on the y-axis from *ymin* to *ymax*\n\n A :class:`BrokenBarHCollection` is returned. 
*kwargs* are\n passed on to the collection.\n \"\"\"\n xranges = []\n for ind0, ind1 in mlab.contiguous_regions(where):\n xslice = x[ind0:ind1]\n if not len(xslice):\n continue\n xranges.append((xslice[0], xslice[-1] - xslice[0]))\n\n collection = BrokenBarHCollection(\n xranges, [ymin, ymax - ymin], **kwargs)\n return collection\n\n\nclass RegularPolyCollection(Collection):\n \"\"\"Draw a collection of regular polygons with *numsides*.\"\"\"\n _path_generator = mpath.Path.unit_regular_polygon\n\n @docstring.dedent_interpd\n def __init__(self,\n numsides,\n rotation=0,\n sizes=(1,),\n **kwargs):\n \"\"\"\n *numsides*\n the number of sides of the polygon\n\n *rotation*\n the rotation of the polygon in radians\n\n *sizes*\n gives the area of the circle circumscribing the\n regular polygon in points^2\n\n %(Collection)s\n\n Example: see :file:`examples/dynamic_collection.py` for\n complete example::\n\n offsets = np.random.rand(20,2)\n facecolors = [cm.jet(x) for x in np.random.rand(20)]\n black = (0,0,0,1)\n\n collection = RegularPolyCollection(\n numsides=5, # a pentagon\n rotation=0, sizes=(50,),\n facecolors = facecolors,\n edgecolors = (black,),\n linewidths = (1,),\n offsets = offsets,\n transOffset = ax.transData,\n )\n \"\"\"\n Collection.__init__(self, **kwargs)\n self._sizes = sizes\n self._numsides = numsides\n self._paths = [self._path_generator(numsides)]\n self._rotation = rotation\n self.set_transform(transforms.IdentityTransform())\n\n @allow_rasterization\n def draw(self, renderer):\n self._transforms = [\n transforms.Affine2D().rotate(-self._rotation).scale(\n (np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))\n for x in self._sizes]\n return Collection.draw(self, renderer)\n\n def get_numsides(self):\n return self._numsides\n\n def get_rotation(self):\n return self._rotation\n\n def get_sizes(self):\n return self._sizes\n\n\nclass StarPolygonCollection(RegularPolyCollection):\n \"\"\"\n Draw a collection of regular stars with *numsides* points.\"\"\"\n\n _path_generator = mpath.Path.unit_regular_star\n\n\nclass AsteriskPolygonCollection(RegularPolyCollection):\n \"\"\"\n Draw a collection of regular asterisks with *numsides* points.\"\"\"\n\n _path_generator = mpath.Path.unit_regular_asterisk\n\n\nclass LineCollection(Collection):\n \"\"\"\n All parameters must be sequences or scalars; if scalars, they will\n be converted to sequences. The property of the ith line\n segment is::\n\n prop[i % len(props)]\n\n i.e., the properties cycle if the ``len`` of props is less than the\n number of segments.\n \"\"\"\n\n\n def __init__(self, segments, # Can be None.\n linewidths=None,\n colors=None,\n antialiaseds=None,\n linestyles='solid',\n offsets=None,\n transOffset=None,\n norm=None,\n cmap=None,\n pickradius=5,\n zorder=2,\n **kwargs\n ):\n \"\"\"\n *segments*\n a sequence of (*line0*, *line1*, *line2*), where::\n\n linen = (x0, y0), (x1, y1), ... (xm, ym)\n\n or the equivalent numpy array with two columns. Each line\n can be a different length.\n\n *colors*\n must be a sequence of RGBA tuples (eg arbitrary color\n strings, etc, not allowed).\n\n *antialiaseds*\n must be a sequence of ones or zeros\n\n *linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n a string or dash tuple. 
The dash tuple is::\n\n (offset, onoffseq),\n\n where *onoffseq* is an even length tuple of on and off ink\n in points.\n\n If *linewidths*, *colors*, or *antialiaseds* is None, they\n default to their rcParams setting, in sequence form.\n\n If *offsets* and *transOffset* are not None, then\n *offsets* are transformed by *transOffset* and applied after\n the segments have been transformed to display coordinates.\n\n If *offsets* is not None but *transOffset* is None, then the\n *offsets* are added to the segments before any transformation.\n In this case, a single offset can be specified as::\n\n offsets=(xo,yo)\n\n and this value will be added cumulatively to each successive\n segment, so as to produce a set of successively offset curves.\n\n *norm*\n None (optional for :class:`matplotlib.cm.ScalarMappable`)\n *cmap*\n None (optional for :class:`matplotlib.cm.ScalarMappable`)\n\n *pickradius* is the tolerance for mouse clicks picking a line.\n The default is 5 pt.\n\n *zorder*\n The zorder of the LineCollection. Default is 2\n\n The use of :class:`~matplotlib.cm.ScalarMappable` is optional.\n If the :class:`~matplotlib.cm.ScalarMappable` array\n :attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to\n :meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at\n draw time a call to scalar mappable will be made to set the colors.\n \"\"\"\n if colors is None:\n colors = mpl.rcParams['lines.color']\n if linewidths is None:\n linewidths = (mpl.rcParams['lines.linewidth'],)\n if antialiaseds is None:\n antialiaseds = (mpl.rcParams['lines.antialiased'],)\n self.set_linestyles(linestyles)\n\n colors = mcolors.colorConverter.to_rgba_array(colors)\n\n Collection.__init__(\n self,\n edgecolors=colors,\n facecolors='none',\n linewidths=linewidths,\n linestyles=linestyles,\n antialiaseds=antialiaseds,\n offsets=offsets,\n transOffset=transOffset,\n norm=norm,\n cmap=cmap,\n pickradius=pickradius,\n zorder=zorder,\n **kwargs)\n\n self.set_segments(segments)\n\n def set_segments(self, segments):\n if segments is None:\n return\n _segments = []\n\n for seg in segments:\n\n if not np.ma.isMaskedArray(seg):\n seg = np.asarray(seg, np.float_)\n _segments.append(seg)\n if self._uniform_offsets is not None:\n _segments = self._add_offsets(_segments)\n self._paths = [mpath.Path(seg) for seg in _segments]\n\n set_verts = set_segments # for compatibility with PolyCollection\n set_paths = set_segments\n\n def get_segments(self):\n segments = []\n\n for path in self._paths:\n vertices = [vertex for vertex, _ in path.iter_segments()]\n vertices = np.asarray(vertices)\n segments.append(vertices)\n\n return segments\n\n def _add_offsets(self, segs):\n offsets = self._uniform_offsets\n Nsegs = len(segs)\n Noffs = offsets.shape[0]\n if Noffs == 1:\n for i in range(Nsegs):\n segs[i] = segs[i] + i * offsets\n else:\n for i in range(Nsegs):\n io = i % Noffs\n segs[i] = segs[i] + offsets[io:io + 1]\n return segs\n\n def set_color(self, c):\n \"\"\"\n Set the color(s) of the line collection. *c* can be a\n matplotlib color arg (all patches have same color), or a\n sequence or rgba tuples; if it is a sequence the patches will\n cycle through the sequence.\n\n ACCEPTS: matplotlib color arg or sequence of rgba tuples\n \"\"\"\n self.set_edgecolor(c)\n\n def color(self, c):\n \"\"\"\n Set the color(s) of the line collection. 
*c* can be a\n        matplotlib color arg (all patches have same color), or a\n        sequence of rgba tuples; if it is a sequence the patches will\n        cycle through the sequence.\n\n        ACCEPTS: matplotlib color arg or sequence of rgba tuples\n        \"\"\"\n        warnings.warn('LineCollection.color deprecated; use set_color instead')\n        return self.set_color(c)\n\n    def get_color(self):\n        return self._edgecolors\n    get_colors = get_color  # for compatibility with old versions\n\n\nclass EventCollection(LineCollection):\n    '''\n    A collection of discrete events.\n\n    An event is a 1-dimensional value, usually the position of something along\n    an axis, such as time or length. Events do not have an amplitude. They\n    are displayed as parallel line segments, either vertical or horizontal.\n    '''
\n\n    def __init__(self,\n                 positions,     # Can be None.\n                 orientation=None,\n                 lineoffset=0,\n                 linelength=1,\n                 linewidth=None,\n                 color=None,\n                 linestyle='solid',\n                 antialiased=None,\n                 **kwargs\n                 ):\n        \"\"\"\n        *positions*\n            a sequence of numerical values or a 1D numpy array. Can be None\n\n        *orientation* [ 'horizontal' | 'vertical' | None ]\n            defaults to 'horizontal' if not specified or None\n\n        *lineoffset*\n            a single numerical value, corresponding to the offset of the center\n            of the markers from the origin\n\n        *linelength*\n            a single numerical value, corresponding to the total height of the\n            marker (i.e. the marker stretches from lineoffset+linelength/2 to\n            lineoffset-linelength/2). Defaults to 1\n\n        *linewidth*\n            a single numerical value\n\n        *color*\n            must be a sequence of RGBA tuples (eg arbitrary color\n            strings, etc, not allowed).\n\n        *linestyle* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n\n        *antialiased*\n            0 or 1\n\n        If *linewidth*, *color*, or *antialiased* is None, they\n        default to their rcParams setting, in sequence form.\n\n        *norm*\n            None (optional for :class:`matplotlib.cm.ScalarMappable`)\n        *cmap*\n            None (optional for :class:`matplotlib.cm.ScalarMappable`)\n\n        *pickradius* is the tolerance for mouse clicks picking a line.\n        The default is 5 pt.\n\n        The use of :class:`~matplotlib.cm.ScalarMappable` is optional.\n        If the :class:`~matplotlib.cm.ScalarMappable` array\n        :attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to\n        :meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at\n        draw time a call to scalar mappable will be made to set the colors.\n\n        **Example:**\n\n        .. plot:: mpl_examples/pylab_examples/eventcollection_demo.py\n        \"\"\"
\n\n        segment = (lineoffset + linelength / 2.,\n                   lineoffset - linelength / 2.)\n        if len(positions) == 0:\n            segments = []\n        elif hasattr(positions, 'ndim') and positions.ndim > 1:\n            raise ValueError('if positions is an ndarray it cannot have '\n                             'dimensionality greater than 1')\n        elif (orientation is None or orientation.lower() == 'none' or\n              orientation.lower() == 'horizontal'):\n            positions.sort()\n            segments = [[(coord1, coord2) for coord2 in segment] for\n                        coord1 in positions]\n            self._is_horizontal = True\n        elif orientation.lower() == 'vertical':\n            positions.sort()\n            segments = [[(coord2, coord1) for coord2 in segment] for\n                        coord1 in positions]\n            self._is_horizontal = False\n        else:\n            raise ValueError(\"orientation must be 'horizontal' or 'vertical'\")\n\n        LineCollection.__init__(self,\n                                segments,\n                                linewidths=linewidth,\n                                colors=color,\n                                antialiaseds=antialiased,\n                                linestyles=linestyle,\n                                **kwargs)\n\n        self._linelength = linelength\n        self._lineoffset = lineoffset
\n\n    def get_positions(self):\n        '''\n        return an array containing the floating-point values of the positions\n        '''\n        segments = self.get_segments()\n        pos = 0 if self.is_horizontal() else 1\n        positions = []\n        for segment in segments:\n            positions.append(segment[0, pos])\n        return positions\n\n    def set_positions(self, positions):\n        '''\n        set the positions of the events to the specified value\n        '''\n        # test for sized objects via '__len__' (a bare 'len' attribute\n        # almost never exists, which made this check a no-op)\n        if positions is None or (hasattr(positions, '__len__') and\n                                 len(positions) == 0):\n            self.set_segments([])\n            return\n\n        lineoffset = self.get_lineoffset()\n        linelength = self.get_linelength()\n        segment = (lineoffset + linelength / 2.,\n                   lineoffset - linelength / 2.)\n        positions = np.asanyarray(positions)\n        positions.sort()\n        if self.is_horizontal():\n            segments = [[(coord1, coord2) for coord2 in segment] for\n                        coord1 in positions]\n        else:\n            segments = [[(coord2, coord1) for coord2 in segment] for\n                        coord1 in positions]\n        self.set_segments(segments)\n\n    def add_positions(self, position):\n        '''\n        add one or more events at the specified positions\n        '''\n        if position is None or (hasattr(position, '__len__') and\n                                len(position) == 0):\n            return\n        positions = self.get_positions()\n        positions = np.hstack([positions, np.asanyarray(position)])\n        self.set_positions(positions)\n    extend_positions = append_positions = add_positions
\n\n    def is_horizontal(self):\n        '''\n        True if the eventcollection is horizontal, False if vertical\n        '''\n        return self._is_horizontal\n\n    def get_orientation(self):\n        '''\n        get the orientation of the event line, may be:\n        [ 'horizontal' | 'vertical' ]\n        '''\n        return 'horizontal' if self.is_horizontal() else 'vertical'\n\n    def switch_orientation(self):\n        '''\n        switch the orientation of the event line, either from vertical to\n        horizontal or vice versa\n        '''\n        segments = self.get_segments()\n        for i, segment in enumerate(segments):\n            segments[i] = np.fliplr(segment)\n        self.set_segments(segments)\n        self._is_horizontal = not self.is_horizontal()\n\n    def set_orientation(self, orientation=None):\n        '''\n        set the orientation of the event line\n        [ 'horizontal' | 'vertical' | None ]\n        defaults to 'horizontal' if not specified or None\n        '''\n        if (orientation is None or orientation.lower() == 'none' or\n                orientation.lower() == 'horizontal'):\n            is_horizontal = True\n        elif orientation.lower() == 'vertical':\n            is_horizontal = False\n        else:\n            raise ValueError(\"orientation must be 'horizontal' or 'vertical'\")\n\n        if is_horizontal == self.is_horizontal():\n            return\n
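        # the requested orientation differs from the current one, so flip
        # the existing segments in place via switch_orientation()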
self.switch_orientation()\n\n def get_linelength(self):\n '''\n get the length of the lines used to mark each event\n '''\n return self._linelength\n\n def set_linelength(self, linelength):\n '''\n set the length of the lines used to mark each event\n '''\n if linelength == self.get_linelength():\n return\n lineoffset = self.get_lineoffset()\n segments = self.get_segments()\n pos = 1 if self.is_horizontal() else 0\n for segment in segments:\n segment[0, pos] = lineoffset + linelength / 2.\n segment[1, pos] = lineoffset - linelength / 2.\n self.set_segments(segments)\n self._linelength = linelength\n\n def get_lineoffset(self):\n '''\n get the offset of the lines used to mark each event\n '''\n return self._lineoffset\n\n def set_lineoffset(self, lineoffset):\n '''\n set the offset of the lines used to mark each event\n '''\n if lineoffset == self.get_lineoffset():\n return\n linelength = self.get_linelength()\n segments = self.get_segments()\n pos = 1 if self.is_horizontal() else 0\n for segment in segments:\n segment[0, pos] = lineoffset + linelength / 2.\n segment[1, pos] = lineoffset - linelength / 2.\n self.set_segments(segments)\n self._lineoffset = lineoffset\n\n def get_linewidth(self):\n '''\n get the width of the lines used to mark each event\n '''\n return self.get_linewidths()[0]\n\n def get_linestyle(self):\n '''\n get the style of the lines used to mark each event\n [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n '''\n return self.get_linestyles()\n\n def get_color(self):\n '''\n get the color of the lines used to mark each event\n '''\n return self.get_colors()[0]\n\n\nclass CircleCollection(Collection):\n \"\"\"\n A collection of circles, drawn using splines.\n \"\"\"\n @docstring.dedent_interpd\n def __init__(self, sizes, **kwargs):\n \"\"\"\n *sizes*\n Gives the area of the circle in points^2\n\n %(Collection)s\n \"\"\"\n Collection.__init__(self, **kwargs)\n self._sizes = sizes\n self.set_transform(transforms.IdentityTransform())\n self._paths = [mpath.Path.unit_circle()]\n\n def get_sizes(self):\n \"return sizes of circles\"\n return self._sizes\n\n @allow_rasterization\n def draw(self, renderer):\n # sizes is the area of the circle circumscribing the polygon\n # in points^2\n self._transforms = [\n transforms.Affine2D().scale(\n (np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))\n for x in self._sizes]\n return Collection.draw(self, renderer)\n\n\nclass EllipseCollection(Collection):\n \"\"\"\n A collection of ellipses, drawn using splines.\n \"\"\"\n @docstring.dedent_interpd\n def __init__(self, widths, heights, angles, units='points', **kwargs):\n \"\"\"\n *widths*: sequence\n lengths of first axes (e.g., major axis lengths)\n\n *heights*: sequence\n lengths of second axes\n\n *angles*: sequence\n angles of first axes, degrees CCW from the X-axis\n\n *units*: ['points' | 'inches' | 'dots' | 'width' | 'height'\n | 'x' | 'y' | 'xy']\n\n units in which majors and minors are given; 'width' and\n 'height' refer to the dimensions of the axes, while 'x'\n and 'y' refer to the *offsets* data units. 'xy' differs\n from all others in that the angle as plotted varies with\n the aspect ratio, and equals the specified angle only when\n the aspect ratio is unity. 
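            For example, with ``units='xy'`` the sizes are read in data
            units (a minimal sketch; the sizes and offsets are
            illustrative)::

                import numpy as np
                import matplotlib.pyplot as plt
                from matplotlib.collections import EllipseCollection

                fig, ax = plt.subplots()
                offsets = np.column_stack([np.arange(5.), np.zeros(5)])
                ec = EllipseCollection(0.6 * np.ones(5),   # widths
                                       0.3 * np.ones(5),   # heights
                                       np.zeros(5),        # angles
                                       units='xy', offsets=offsets,
                                       transOffset=ax.transData)
                ax.add_collection(ec)
                ax.set_xlim(-1, 5)
                ax.set_ylim(-1, 1)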
Hence it behaves the same as\n the :class:`~matplotlib.patches.Ellipse` with\n axes.transData as its transform.\n\n Additional kwargs inherited from the base :class:`Collection`:\n\n %(Collection)s\n \"\"\"\n Collection.__init__(self, **kwargs)\n self._widths = 0.5 * np.asarray(widths).ravel()\n self._heights = 0.5 * np.asarray(heights).ravel()\n self._angles = np.asarray(angles).ravel() * (np.pi / 180.0)\n self._units = units\n self.set_transform(transforms.IdentityTransform())\n self._transforms = []\n self._paths = [mpath.Path.unit_circle()]\n\n def _set_transforms(self):\n \"\"\"\n Calculate transforms immediately before drawing.\n \"\"\"\n self._transforms = []\n ax = self.axes\n fig = self.figure\n\n if self._units == 'xy':\n sc = 1\n elif self._units == 'x':\n sc = ax.bbox.width / ax.viewLim.width\n elif self._units == 'y':\n sc = ax.bbox.height / ax.viewLim.height\n elif self._units == 'inches':\n sc = fig.dpi\n elif self._units == 'points':\n sc = fig.dpi / 72.0\n elif self._units == 'width':\n sc = ax.bbox.width\n elif self._units == 'height':\n sc = ax.bbox.height\n elif self._units == 'dots':\n sc = 1.0\n else:\n raise ValueError('unrecognized units: %s' % self._units)\n\n _affine = transforms.Affine2D\n for x, y, a in zip(self._widths, self._heights, self._angles):\n trans = _affine().scale(x * sc, y * sc).rotate(a)\n self._transforms.append(trans)\n\n if self._units == 'xy':\n m = ax.transData.get_affine().get_matrix().copy()\n m[:2, 2:] = 0\n self.set_transform(_affine(m))\n\n @allow_rasterization\n def draw(self, renderer):\n self._set_transforms()\n Collection.draw(self, renderer)\n\n\nclass PatchCollection(Collection):\n \"\"\"\n A generic collection of patches.\n\n This makes it easier to assign a color map to a heterogeneous\n collection of patches.\n\n This also may improve plotting speed, since PatchCollection will\n draw faster than a large number of patches.\n \"\"\"\n\n def __init__(self, patches, match_original=False, **kwargs):\n \"\"\"\n *patches*\n a sequence of Patch objects. This list may include\n a heterogeneous assortment of different patch types.\n\n *match_original*\n If True, use the colors and linewidths of the original\n patches. 
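            For example, a minimal sketch (``ax`` is assumed to be an
            existing Axes instance)::

                from matplotlib.patches import Circle

                patches = [Circle((i, 0), 0.3) for i in range(4)]
                pc = PatchCollection(patches, match_original=True)
                ax.add_collection(pc)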
If False, new colors may be assigned by\n providing the standard collection arguments, facecolor,\n edgecolor, linewidths, norm or cmap.\n\n If any of *edgecolors*, *facecolors*, *linewidths*,\n *antialiaseds* are None, they default to their\n :data:`matplotlib.rcParams` patch setting, in sequence form.\n\n The use of :class:`~matplotlib.cm.ScalarMappable` is optional.\n If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not\n None (ie a call to set_array has been made), at draw time a\n call to scalar mappable will be made to set the face colors.\n \"\"\"\n\n if match_original:\n def determine_facecolor(patch):\n if patch.get_fill():\n return patch.get_facecolor()\n return [0, 0, 0, 0]\n\n facecolors = [determine_facecolor(p) for p in patches]\n edgecolors = [p.get_edgecolor() for p in patches]\n linewidths = [p.get_linewidth() for p in patches]\n linestyles = [p.get_linestyle() for p in patches]\n antialiaseds = [p.get_antialiased() for p in patches]\n\n Collection.__init__(\n self,\n edgecolors=edgecolors,\n facecolors=facecolors,\n linewidths=linewidths,\n linestyles=linestyles,\n antialiaseds=antialiaseds)\n else:\n Collection.__init__(self, **kwargs)\n\n self.set_paths(patches)\n\n def set_paths(self, patches):\n paths = [p.get_transform().transform_path(p.get_path())\n for p in patches]\n self._paths = paths\n\n\nclass TriMesh(Collection):\n \"\"\"\n Class for the efficient drawing of a triangular mesh using\n Gouraud shading.\n\n A triangular mesh is a :class:`~matplotlib.tri.Triangulation`\n object.\n \"\"\"\n def __init__(self, triangulation, **kwargs):\n Collection.__init__(self, **kwargs)\n self._triangulation = triangulation\n self._shading = 'gouraud'\n self._is_filled = True\n\n self._bbox = transforms.Bbox.unit()\n\n # Unfortunately this requires a copy, unless Triangulation\n # was rewritten.\n xy = np.hstack((triangulation.x.reshape(-1, 1),\n triangulation.y.reshape(-1, 1)))\n self._bbox.update_from_data_xy(xy)\n\n def get_paths(self):\n if self._paths is None:\n self.set_paths()\n return self._paths\n\n def set_paths(self):\n self._paths = self.convert_mesh_to_paths(self._triangulation)\n\n @staticmethod\n def convert_mesh_to_paths(tri):\n \"\"\"\n Converts a given mesh into a sequence of\n :class:`matplotlib.path.Path` objects for easier rendering by\n backends that do not directly support meshes.\n\n This function is primarily of use to backend implementers.\n \"\"\"\n Path = mpath.Path\n triangles = tri.get_masked_triangles()\n verts = np.concatenate((tri.x[triangles][..., np.newaxis],\n tri.y[triangles][..., np.newaxis]), axis=2)\n return [Path(x) for x in verts]\n\n @allow_rasterization\n def draw(self, renderer):\n if not self.get_visible():\n return\n renderer.open_group(self.__class__.__name__)\n transform = self.get_transform()\n\n # Get a list of triangles and the color at each vertex.\n tri = self._triangulation\n triangles = tri.get_masked_triangles()\n\n verts = np.concatenate((tri.x[triangles][..., np.newaxis],\n tri.y[triangles][..., np.newaxis]), axis=2)\n\n self.update_scalarmappable()\n colors = self._facecolors[triangles]\n\n gc = renderer.new_gc()\n self._set_gc_clip(gc)\n gc.set_linewidth(self.get_linewidth()[0])\n renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())\n gc.restore()\n renderer.close_group(self.__class__.__name__)\n\n\nclass QuadMesh(Collection):\n \"\"\"\n Class for the efficient drawing of a quadrilateral mesh.\n\n A quadrilateral mesh consists of a grid of vertices. 
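    For example, a minimal sketch of a 2x2-cell mesh built from a 3x3
    grid of vertices::

        import numpy as np
        from matplotlib.collections import QuadMesh

        xx, yy = np.meshgrid(np.arange(3.), np.arange(3.))
        coords = np.column_stack([xx.ravel(), yy.ravel()])
        qm = QuadMesh(2, 2, coords)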
The\n dimensions of this array are (*meshWidth* + 1, *meshHeight* +\n 1). Each vertex in the mesh has a different set of \"mesh\n coordinates\" representing its position in the topology of the\n mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*\n and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates\n (*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)\n form one of the quadrilaterals in the mesh. There are thus\n (*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh\n need not be regular and the polygons need not be convex.\n\n A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *\n (*meshHeight* + 1))) numpy array *coordinates*, where each row is\n the *x* and *y* coordinates of one of the vertices. To define the\n function that maps from a data point to its corresponding color,\n use the :meth:`set_cmap` method. Each of these arrays is indexed in\n row-major order by the mesh coordinates of the vertex (or the mesh\n coordinates of the lower left vertex, in the case of the\n colors).\n\n For example, the first entry in *coordinates* is the\n coordinates of the vertex at mesh coordinates (0, 0), then the one\n at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and\n so on.\n\n *shading* may be 'flat', or 'gouraud'\n \"\"\"\n def __init__(self, meshWidth, meshHeight, coordinates,\n antialiased=True, shading='flat', **kwargs):\n Collection.__init__(self, **kwargs)\n self._meshWidth = meshWidth\n self._meshHeight = meshHeight\n self._coordinates = coordinates\n self._antialiased = antialiased\n self._shading = shading\n\n self._bbox = transforms.Bbox.unit()\n self._bbox.update_from_data_xy(coordinates.reshape(\n ((meshWidth + 1) * (meshHeight + 1), 2)))\n\n # By converting to floats now, we can avoid that on every draw.\n self._coordinates = self._coordinates.reshape(\n (meshHeight + 1, meshWidth + 1, 2))\n self._coordinates = np.array(self._coordinates, np.float_)\n\n def get_paths(self):\n if self._paths is None:\n self.set_paths()\n return self._paths\n\n def set_paths(self):\n self._paths = self.convert_mesh_to_paths(\n self._meshWidth, self._meshHeight, self._coordinates)\n\n @staticmethod\n def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):\n \"\"\"\n Converts a given mesh into a sequence of\n :class:`matplotlib.path.Path` objects for easier rendering by\n backends that do not directly support quadmeshes.\n\n This function is primarily of use to backend implementers.\n \"\"\"\n Path = mpath.Path\n\n if ma.isMaskedArray(coordinates):\n c = coordinates.data\n else:\n c = coordinates\n\n points = np.concatenate((\n c[0:-1, 0:-1],\n c[0:-1, 1:],\n c[1:, 1:],\n c[1:, 0:-1],\n c[0:-1, 0:-1]\n ), axis=2)\n points = points.reshape((meshWidth * meshHeight, 5, 2))\n return [Path(x) for x in points]\n\n def convert_mesh_to_triangles(self, meshWidth, meshHeight, coordinates):\n \"\"\"\n Converts a given mesh into a sequence of triangles, each point\n with its own color. 
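        Each quadrilateral is split into four triangles that meet at the
        quad's center, and the center color is the average of the four
        corner colors.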
This is useful for experiments using\n `draw_qouraud_triangle`.\n \"\"\"\n if ma.isMaskedArray(coordinates):\n p = coordinates.data\n else:\n p = coordinates\n\n p_a = p[:-1, :-1]\n p_b = p[:-1, 1:]\n p_c = p[1:, 1:]\n p_d = p[1:, :-1]\n p_center = (p_a + p_b + p_c + p_d) / 4.0\n\n triangles = np.concatenate((\n p_a, p_b, p_center,\n p_b, p_c, p_center,\n p_c, p_d, p_center,\n p_d, p_a, p_center,\n ), axis=2)\n triangles = triangles.reshape((meshWidth * meshHeight * 4, 3, 2))\n\n c = self.get_facecolor().reshape((meshHeight + 1, meshWidth + 1, 4))\n c_a = c[:-1, :-1]\n c_b = c[:-1, 1:]\n c_c = c[1:, 1:]\n c_d = c[1:, :-1]\n c_center = (c_a + c_b + c_c + c_d) / 4.0\n\n colors = np.concatenate((\n c_a, c_b, c_center,\n c_b, c_c, c_center,\n c_c, c_d, c_center,\n c_d, c_a, c_center,\n ), axis=2)\n colors = colors.reshape((meshWidth * meshHeight * 4, 3, 4))\n\n return triangles, colors\n\n def get_datalim(self, transData):\n return self._bbox\n\n @allow_rasterization\n def draw(self, renderer):\n if not self.get_visible():\n return\n renderer.open_group(self.__class__.__name__, self.get_gid())\n transform = self.get_transform()\n transOffset = self.get_offset_transform()\n offsets = self._offsets\n\n if self.have_units():\n if len(self._offsets):\n xs = self.convert_xunits(self._offsets[:, 0])\n ys = self.convert_yunits(self._offsets[:, 1])\n offsets = zip(xs, ys)\n\n offsets = np.asarray(offsets, np.float_)\n offsets.shape = (-1, 2) # Make it Nx2\n\n self.update_scalarmappable()\n\n if not transform.is_affine:\n coordinates = self._coordinates.reshape(\n (self._coordinates.shape[0] *\n self._coordinates.shape[1],\n 2))\n coordinates = transform.transform(coordinates)\n coordinates = coordinates.reshape(self._coordinates.shape)\n transform = transforms.IdentityTransform()\n else:\n coordinates = self._coordinates\n\n if not transOffset.is_affine:\n offsets = transOffset.transform_non_affine(offsets)\n transOffset = transOffset.get_affine()\n\n gc = renderer.new_gc()\n self._set_gc_clip(gc)\n gc.set_linewidth(self.get_linewidth()[0])\n\n if self._shading == 'gouraud':\n triangles, colors = self.convert_mesh_to_triangles(\n self._meshWidth, self._meshHeight, coordinates)\n renderer.draw_gouraud_triangles(\n gc, triangles, colors, transform.frozen())\n else:\n renderer.draw_quad_mesh(\n gc, transform.frozen(), self._meshWidth, self._meshHeight,\n coordinates, offsets, transOffset, self.get_facecolor(),\n self._antialiased, self.get_edgecolors())\n gc.restore()\n renderer.close_group(self.__class__.__name__)\n\n\npatchstr = artist.kwdoc(Collection)\nfor k in ('QuadMesh', 'TriMesh', 'PolyCollection', 'BrokenBarHCollection',\n 'RegularPolyCollection', 'PathCollection',\n 'StarPolygonCollection', 'PatchCollection',\n 'CircleCollection', 'Collection',):\n docstring.interpd.update({k: patchstr})\ndocstring.interpd.update(LineCollection=artist.kwdoc(LineCollection))\n", "\"\"\"\nClasses for the ticks and x and y axis\n\"\"\"\nfrom __future__ import division, print_function\n\nfrom matplotlib import rcParams\nimport matplotlib.artist as artist\nfrom matplotlib.artist import allow_rasterization\nimport matplotlib.cbook as cbook\nimport matplotlib.font_manager as font_manager\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nimport matplotlib.scale as mscale\nimport matplotlib.text as mtext\nimport matplotlib.ticker as mticker\nimport matplotlib.transforms as mtransforms\nimport matplotlib.units as munits\nimport numpy as np\nimport warnings\n\nGRIDLINE_INTERPOLATION_STEPS = 
180\n\n\nclass Tick(artist.Artist):\n \"\"\"\n Abstract base class for the axis ticks, grid lines and labels\n\n 1 refers to the bottom of the plot for xticks and the left for yticks\n 2 refers to the top of the plot for xticks and the right for yticks\n\n Publicly accessible attributes:\n\n :attr:`tick1line`\n a Line2D instance\n\n :attr:`tick2line`\n a Line2D instance\n\n :attr:`gridline`\n a Line2D instance\n\n :attr:`label1`\n a Text instance\n\n :attr:`label2`\n a Text instance\n\n :attr:`gridOn`\n a boolean which determines whether to draw the tickline\n\n :attr:`tick1On`\n a boolean which determines whether to draw the 1st tickline\n\n :attr:`tick2On`\n a boolean which determines whether to draw the 2nd tickline\n\n :attr:`label1On`\n a boolean which determines whether to draw tick label\n\n :attr:`label2On`\n a boolean which determines whether to draw tick label\n\n \"\"\"\n def __init__(self, axes, loc, label,\n size=None, # points\n width=None,\n color=None,\n tickdir=None,\n pad=None,\n labelsize=None,\n labelcolor=None,\n zorder=None,\n gridOn=None, # defaults to axes.grid\n tick1On=True,\n tick2On=True,\n label1On=True,\n label2On=False,\n major=True,\n ):\n \"\"\"\n bbox is the Bound2D bounding box in display coords of the Axes\n loc is the tick location in data coords\n size is the tick size in points\n \"\"\"\n artist.Artist.__init__(self)\n\n if gridOn is None:\n gridOn = rcParams['axes.grid']\n\n self.set_figure(axes.figure)\n self.axes = axes\n\n name = self.__name__.lower()\n self._name = name\n\n self._loc = loc\n\n if size is None:\n if major:\n size = rcParams['%s.major.size' % name]\n else:\n size = rcParams['%s.minor.size' % name]\n self._size = size\n\n if width is None:\n if major:\n width = rcParams['%s.major.width' % name]\n else:\n width = rcParams['%s.minor.width' % name]\n self._width = width\n\n if color is None:\n color = rcParams['%s.color' % name]\n self._color = color\n\n if pad is None:\n if major:\n pad = rcParams['%s.major.pad' % name]\n else:\n pad = rcParams['%s.minor.pad' % name]\n self._base_pad = pad\n\n if labelcolor is None:\n labelcolor = rcParams['%s.color' % name]\n self._labelcolor = labelcolor\n\n if labelsize is None:\n labelsize = rcParams['%s.labelsize' % name]\n self._labelsize = labelsize\n\n if zorder is None:\n if major:\n zorder = mlines.Line2D.zorder + 0.01\n else:\n zorder = mlines.Line2D.zorder\n self._zorder = zorder\n\n self.apply_tickdir(tickdir)\n\n self.tick1line = self._get_tick1line()\n self.tick2line = self._get_tick2line()\n self.gridline = self._get_gridline()\n\n self.label1 = self._get_text1()\n self.label = self.label1 # legacy name\n self.label2 = self._get_text2()\n\n self.gridOn = gridOn\n self.tick1On = tick1On\n self.tick2On = tick2On\n self.label1On = label1On\n self.label2On = label2On\n\n self.update_position(loc)\n\n def apply_tickdir(self, tickdir):\n \"\"\"\n Calculate self._pad and self._tickmarkers\n \"\"\"\n pass\n\n def get_children(self):\n children = [self.tick1line, self.tick2line,\n self.gridline, self.label1, self.label2]\n return children\n\n def set_clip_path(self, clippath, transform=None):\n artist.Artist.set_clip_path(self, clippath, transform)\n #self.tick1line.set_clip_path(clippath, transform)\n #self.tick2line.set_clip_path(clippath, transform)\n self.gridline.set_clip_path(clippath, transform)\n set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__\n\n def get_pad_pixels(self):\n return self.figure.dpi * self._base_pad / 72.0\n\n def contains(self, mouseevent):\n \"\"\"\n Test 
whether the mouse event occurred in the Tick marks.\n\n This function always returns false. It is more useful to test if the\n axis as a whole contains the mouse rather than the set of tick marks.\n \"\"\"\n if callable(self._contains):\n return self._contains(self, mouseevent)\n return False, {}\n\n def set_pad(self, val):\n \"\"\"\n Set the tick label pad in points\n\n ACCEPTS: float\n \"\"\"\n self._apply_params(pad=val)\n\n def get_pad(self):\n 'Get the value of the tick label pad in points'\n return self._base_pad\n\n def _get_text1(self):\n 'Get the default Text 1 instance'\n pass\n\n def _get_text2(self):\n 'Get the default Text 2 instance'\n pass\n\n def _get_tick1line(self):\n 'Get the default line2D instance for tick1'\n pass\n\n def _get_tick2line(self):\n 'Get the default line2D instance for tick2'\n pass\n\n def _get_gridline(self):\n 'Get the default grid Line2d instance for this tick'\n pass\n\n def get_loc(self):\n 'Return the tick location (data coords) as a scalar'\n return self._loc\n\n @allow_rasterization\n def draw(self, renderer):\n if not self.get_visible():\n return\n renderer.open_group(self.__name__)\n midPoint = mtransforms.interval_contains(self.get_view_interval(),\n self.get_loc())\n\n if midPoint:\n if self.gridOn:\n self.gridline.draw(renderer)\n if self.tick1On:\n self.tick1line.draw(renderer)\n if self.tick2On:\n self.tick2line.draw(renderer)\n\n if self.label1On:\n self.label1.draw(renderer)\n if self.label2On:\n self.label2.draw(renderer)\n\n renderer.close_group(self.__name__)\n\n def set_label1(self, s):\n \"\"\"\n Set the text of ticklabel\n\n ACCEPTS: str\n \"\"\"\n self.label1.set_text(s)\n set_label = set_label1\n\n def set_label2(self, s):\n \"\"\"\n Set the text of ticklabel2\n\n ACCEPTS: str\n \"\"\"\n self.label2.set_text(s)\n\n def _set_artist_props(self, a):\n a.set_figure(self.figure)\n #if isinstance(a, mlines.Line2D): a.set_clip_box(self.axes.bbox)\n\n def get_view_interval(self):\n 'return the view Interval instance for the axis this tick is ticking'\n raise NotImplementedError('Derived must override')\n\n def _apply_params(self, **kw):\n switchkw = ['gridOn', 'tick1On', 'tick2On', 'label1On', 'label2On']\n switches = [k for k in kw if k in switchkw]\n for k in switches:\n setattr(self, k, kw.pop(k))\n dirpad = [k for k in kw if k in ['pad', 'tickdir']]\n if dirpad:\n self._base_pad = kw.pop('pad', self._base_pad)\n self.apply_tickdir(kw.pop('tickdir', self._tickdir))\n trans = self._get_text1_transform()[0]\n self.label1.set_transform(trans)\n trans = self._get_text2_transform()[0]\n self.label2.set_transform(trans)\n self.tick1line.set_marker(self._tickmarkers[0])\n self.tick2line.set_marker(self._tickmarkers[1])\n tick_kw = dict([kv for kv in kw.iteritems()\n if kv[0] in ['color', 'zorder']])\n if tick_kw:\n self.tick1line.set(**tick_kw)\n self.tick2line.set(**tick_kw)\n for k, v in tick_kw.iteritems():\n setattr(self, '_' + k, v)\n tick_list = [kv for kv\n in kw.iteritems() if kv[0] in ['size', 'width']]\n for k, v in tick_list:\n setattr(self, '_' + k, v)\n if k == 'size':\n self.tick1line.set_markersize(v)\n self.tick2line.set_markersize(v)\n else:\n self.tick1line.set_markeredgewidth(v)\n self.tick2line.set_markeredgewidth(v)\n label_list = [k for k in kw.iteritems()\n if k[0] in ['labelsize', 'labelcolor']]\n if label_list:\n label_kw = dict([(k[5:], v) for (k, v) in label_list])\n self.label1.set(**label_kw)\n self.label2.set(**label_kw)\n for k, v in label_kw.iteritems():\n setattr(self, '_' + k, v)\n\n\nclass XTick(Tick):\n 
\"\"\"\n Contains all the Artists needed to make an x tick - the tick line,\n the label text and the grid line\n \"\"\"\n __name__ = 'xtick'\n\n def _get_text1_transform(self):\n return self.axes.get_xaxis_text1_transform(self._pad)\n\n def _get_text2_transform(self):\n return self.axes.get_xaxis_text2_transform(self._pad)\n\n def apply_tickdir(self, tickdir):\n if tickdir is None:\n tickdir = rcParams['%s.direction' % self._name]\n self._tickdir = tickdir\n\n if self._tickdir == 'in':\n self._tickmarkers = (mlines.TICKUP, mlines.TICKDOWN)\n self._pad = self._base_pad\n elif self._tickdir == 'inout':\n self._tickmarkers = ('|', '|')\n self._pad = self._base_pad + self._size / 2.\n else:\n self._tickmarkers = (mlines.TICKDOWN, mlines.TICKUP)\n self._pad = self._base_pad + self._size\n\n def _get_text1(self):\n 'Get the default Text instance'\n # the y loc is 3 points below the min of y axis\n # get the affine as an a,b,c,d,tx,ty list\n # x in data coords, y in axes coords\n #t = mtext.Text(\n trans, vert, horiz = self._get_text1_transform()\n t = mtext.Text(\n x=0, y=0,\n fontproperties=font_manager.FontProperties(size=self._labelsize),\n color=self._labelcolor,\n verticalalignment=vert,\n horizontalalignment=horiz,\n )\n t.set_transform(trans)\n self._set_artist_props(t)\n return t\n\n def _get_text2(self):\n\n 'Get the default Text 2 instance'\n # x in data coords, y in axes coords\n #t = mtext.Text(\n trans, vert, horiz = self._get_text2_transform()\n t = mtext.Text(\n x=0, y=1,\n fontproperties=font_manager.FontProperties(size=self._labelsize),\n color=self._labelcolor,\n verticalalignment=vert,\n horizontalalignment=horiz,\n )\n t.set_transform(trans)\n self._set_artist_props(t)\n return t\n\n def _get_tick1line(self):\n 'Get the default line2D instance'\n # x in data coords, y in axes coords\n l = mlines.Line2D(xdata=(0,), ydata=(0,),\n color=self._color,\n linestyle='None',\n marker=self._tickmarkers[0],\n markersize=self._size,\n markeredgewidth=self._width,\n zorder=self._zorder,\n )\n l.set_transform(self.axes.get_xaxis_transform(which='tick1'))\n self._set_artist_props(l)\n return l\n\n def _get_tick2line(self):\n 'Get the default line2D instance'\n # x in data coords, y in axes coords\n l = mlines.Line2D(xdata=(0,), ydata=(1,),\n color=self._color,\n linestyle='None',\n marker=self._tickmarkers[1],\n markersize=self._size,\n markeredgewidth=self._width,\n zorder=self._zorder,\n )\n\n l.set_transform(self.axes.get_xaxis_transform(which='tick2'))\n self._set_artist_props(l)\n return l\n\n def _get_gridline(self):\n 'Get the default line2D instance'\n # x in data coords, y in axes coords\n l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),\n color=rcParams['grid.color'],\n linestyle=rcParams['grid.linestyle'],\n linewidth=rcParams['grid.linewidth'],\n alpha=rcParams['grid.alpha'],\n markersize=0\n )\n l.set_transform(self.axes.get_xaxis_transform(which='grid'))\n l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS\n self._set_artist_props(l)\n\n return l\n\n def update_position(self, loc):\n 'Set the location of tick in data coords with scalar *loc*'\n x = loc\n\n nonlinear = (hasattr(self.axes, 'yaxis') and\n self.axes.yaxis.get_scale() != 'linear' or\n hasattr(self.axes, 'xaxis') and\n self.axes.xaxis.get_scale() != 'linear')\n\n if self.tick1On:\n self.tick1line.set_xdata((x,))\n if self.tick2On:\n self.tick2line.set_xdata((x,))\n if self.gridOn:\n self.gridline.set_xdata((x,))\n if self.label1On:\n self.label1.set_x(x)\n if self.label2On:\n 
self.label2.set_x(x)\n\n if nonlinear:\n self.tick1line._invalid = True\n self.tick2line._invalid = True\n self.gridline._invalid = True\n\n self._loc = loc\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n return self.axes.viewLim.intervalx\n\n\nclass YTick(Tick):\n \"\"\"\n Contains all the Artists needed to make a Y tick - the tick line,\n the label text and the grid line\n \"\"\"\n __name__ = 'ytick'\n\n def _get_text1_transform(self):\n return self.axes.get_yaxis_text1_transform(self._pad)\n\n def _get_text2_transform(self):\n return self.axes.get_yaxis_text2_transform(self._pad)\n\n def apply_tickdir(self, tickdir):\n if tickdir is None:\n tickdir = rcParams['%s.direction' % self._name]\n self._tickdir = tickdir\n\n if self._tickdir == 'in':\n self._tickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)\n self._pad = self._base_pad\n elif self._tickdir == 'inout':\n self._tickmarkers = ('_', '_')\n self._pad = self._base_pad + self._size / 2.\n else:\n self._tickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)\n self._pad = self._base_pad + self._size\n\n # how far from the y axis line the right of the ticklabel are\n def _get_text1(self):\n 'Get the default Text instance'\n # x in axes coords, y in data coords\n trans, vert, horiz = self._get_text1_transform()\n t = mtext.Text(\n x=0, y=0,\n fontproperties=font_manager.FontProperties(size=self._labelsize),\n color=self._labelcolor,\n verticalalignment=vert,\n horizontalalignment=horiz,\n )\n t.set_transform(trans)\n #t.set_transform( self.axes.transData )\n self._set_artist_props(t)\n return t\n\n def _get_text2(self):\n 'Get the default Text instance'\n # x in axes coords, y in data coords\n trans, vert, horiz = self._get_text2_transform()\n t = mtext.Text(\n x=1, y=0,\n fontproperties=font_manager.FontProperties(size=self._labelsize),\n color=self._labelcolor,\n verticalalignment=vert,\n horizontalalignment=horiz,\n )\n t.set_transform(trans)\n self._set_artist_props(t)\n return t\n\n def _get_tick1line(self):\n 'Get the default line2D instance'\n # x in axes coords, y in data coords\n\n l = mlines.Line2D((0,), (0,),\n color=self._color,\n marker=self._tickmarkers[0],\n linestyle='None',\n markersize=self._size,\n markeredgewidth=self._width,\n zorder=self._zorder,\n )\n l.set_transform(self.axes.get_yaxis_transform(which='tick1'))\n self._set_artist_props(l)\n return l\n\n def _get_tick2line(self):\n 'Get the default line2D instance'\n # x in axes coords, y in data coords\n l = mlines.Line2D((1,), (0,),\n color=self._color,\n marker=self._tickmarkers[1],\n linestyle='None',\n markersize=self._size,\n markeredgewidth=self._width,\n zorder=self._zorder,\n )\n l.set_transform(self.axes.get_yaxis_transform(which='tick2'))\n self._set_artist_props(l)\n return l\n\n def _get_gridline(self):\n 'Get the default line2D instance'\n # x in axes coords, y in data coords\n l = mlines.Line2D(xdata=(0, 1), ydata=(0, 0),\n color=rcParams['grid.color'],\n linestyle=rcParams['grid.linestyle'],\n linewidth=rcParams['grid.linewidth'],\n alpha=rcParams['grid.alpha'],\n markersize=0\n )\n\n l.set_transform(self.axes.get_yaxis_transform(which='grid'))\n l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS\n self._set_artist_props(l)\n return l\n\n def update_position(self, loc):\n 'Set the location of tick in data coords with scalar loc'\n y = loc\n\n nonlinear = (hasattr(self.axes, 'yaxis') and\n self.axes.yaxis.get_scale() != 'linear' or\n hasattr(self.axes, 'xaxis') and\n self.axes.xaxis.get_scale() != 
'linear')\n\n if self.tick1On:\n self.tick1line.set_ydata((y,))\n if self.tick2On:\n self.tick2line.set_ydata((y,))\n if self.gridOn:\n self.gridline.set_ydata((y, ))\n if self.label1On:\n self.label1.set_y(y)\n if self.label2On:\n self.label2.set_y(y)\n if nonlinear:\n self.tick1line._invalid = True\n self.tick2line._invalid = True\n self.gridline._invalid = True\n\n self._loc = loc\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n return self.axes.viewLim.intervaly\n\n\nclass Ticker:\n locator = None\n formatter = None\n\n\nclass Axis(artist.Artist):\n \"\"\"\n Public attributes\n\n * :attr:`axes.transData` - transform data coords to display coords\n * :attr:`axes.transAxes` - transform axis coords to display coords\n * :attr:`labelpad` - number of points between the axis and its label\n \"\"\"\n OFFSETTEXTPAD = 3\n\n def __str__(self):\n return self.__class__.__name__ \\\n + \"(%f,%f)\" % tuple(self.axes.transAxes.transform_point((0, 0)))\n\n def __init__(self, axes, pickradius=15):\n \"\"\"\n Init the axis with the parent Axes instance\n \"\"\"\n artist.Artist.__init__(self)\n self.set_figure(axes.figure)\n\n # Keep track of setting to the default value, this allows use to know\n # if any of the following values is explicitly set by the user, so as\n # to not overwrite their settings with any of our 'auto' settings.\n self.isDefault_majloc = True\n self.isDefault_minloc = True\n self.isDefault_majfmt = True\n self.isDefault_minfmt = True\n self.isDefault_label = True\n\n self.axes = axes\n self.major = Ticker()\n self.minor = Ticker()\n self.callbacks = cbook.CallbackRegistry()\n\n #class dummy:\n # locator = None\n # formatter = None\n #self.major = dummy()\n #self.minor = dummy()\n\n self._autolabelpos = True\n self._smart_bounds = False\n\n self.label = self._get_label()\n self.labelpad = 5\n self.offsetText = self._get_offset_text()\n self.majorTicks = []\n self.minorTicks = []\n self.pickradius = pickradius\n\n # Initialize here for testing; later add API\n self._major_tick_kw = dict()\n self._minor_tick_kw = dict()\n\n self.cla()\n self._set_scale('linear')\n\n def set_label_coords(self, x, y, transform=None):\n \"\"\"\n Set the coordinates of the label. By default, the x\n coordinate of the y label is determined by the tick label\n bounding boxes, but this can lead to poor alignment of\n multiple ylabels if there are multiple axes. Ditto for the y\n coodinate of the x label.\n\n You can also specify the coordinate system of the label with\n the transform. 
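        For example, a minimal sketch that aligns the y-labels of two
        existing Axes::

            for a in (ax1, ax2):
                a.yaxis.set_label_coords(-0.1, 0.5)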
If None, the default coordinate system will be\n the axes coordinate system (0,0) is (left,bottom), (0.5, 0.5)\n is middle, etc\n\n \"\"\"\n\n self._autolabelpos = False\n if transform is None:\n transform = self.axes.transAxes\n\n self.label.set_transform(transform)\n self.label.set_position((x, y))\n\n def get_transform(self):\n return self._scale.get_transform()\n\n def get_scale(self):\n return self._scale.name\n\n @cbook.deprecated('1.3')\n def set_scale(self, value, **kwargs):\n \"\"\"\n This should be a private function (moved to _set_scale)\n \"\"\"\n self._set_scale(value, **kwargs)\n\n def _set_scale(self, value, **kwargs):\n self._scale = mscale.scale_factory(value, self, **kwargs)\n self._scale.set_default_locators_and_formatters(self)\n\n self.isDefault_majloc = True\n self.isDefault_minloc = True\n self.isDefault_majfmt = True\n self.isDefault_minfmt = True\n\n def limit_range_for_scale(self, vmin, vmax):\n return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())\n\n def get_children(self):\n children = [self.label, self.offsetText]\n majorticks = self.get_major_ticks()\n minorticks = self.get_minor_ticks()\n\n children.extend(majorticks)\n children.extend(minorticks)\n return children\n\n def cla(self):\n 'clear the current axis'\n self.set_major_locator(mticker.AutoLocator())\n self.set_major_formatter(mticker.ScalarFormatter())\n self.set_minor_locator(mticker.NullLocator())\n self.set_minor_formatter(mticker.NullFormatter())\n\n self.set_label_text('')\n self._set_artist_props(self.label)\n\n # Keep track of setting to the default value, this allows use to know\n # if any of the following values is explicitly set by the user, so as\n # to not overwrite their settings with any of our 'auto' settings.\n self.isDefault_majloc = True\n self.isDefault_minloc = True\n self.isDefault_majfmt = True\n self.isDefault_minfmt = True\n self.isDefault_label = True\n\n # Clear the callback registry for this axis, or it may \"leak\"\n self.callbacks = cbook.CallbackRegistry()\n\n # whether the grids are on\n self._gridOnMajor = rcParams['axes.grid']\n self._gridOnMinor = False\n\n self.label.set_text('')\n self._set_artist_props(self.label)\n\n self.reset_ticks()\n\n self.converter = None\n self.units = None\n self.set_units(None)\n\n def reset_ticks(self):\n # build a few default ticks; grow as necessary later; only\n # define 1 so properties set on ticks will be copied as they\n # grow\n cbook.popall(self.majorTicks)\n cbook.popall(self.minorTicks)\n\n self.majorTicks.extend([self._get_tick(major=True)])\n self.minorTicks.extend([self._get_tick(major=False)])\n self._lastNumMajorTicks = 1\n self._lastNumMinorTicks = 1\n\n def set_tick_params(self, which='major', reset=False, **kw):\n \"\"\"\n Set appearance parameters for ticks and ticklabels.\n\n For documentation of keyword arguments, see\n :meth:`matplotlib.axes.Axes.tick_params`.\n \"\"\"\n dicts = []\n if which == 'major' or which == 'both':\n dicts.append(self._major_tick_kw)\n if which == 'minor' or which == 'both':\n dicts.append(self._minor_tick_kw)\n kwtrans = self._translate_tick_kw(kw, to_init_kw=True)\n for d in dicts:\n if reset:\n d.clear()\n d.update(kwtrans)\n if reset:\n self.reset_ticks()\n else:\n if which == 'major' or which == 'both':\n for tick in self.majorTicks:\n tick._apply_params(**self._major_tick_kw)\n if which == 'minor' or which == 'both':\n for tick in self.minorTicks:\n tick._apply_params(**self._minor_tick_kw)\n\n @staticmethod\n def _translate_tick_kw(kw, to_init_kw=True):\n # We may 
want to move the following function to\n # a more visible location; or maybe there already\n # is something like this.\n def _bool(arg):\n if cbook.is_string_like(arg):\n if arg.lower() == 'on':\n return True\n if arg.lower() == 'off':\n return False\n raise ValueError('String \"%s\" should be \"on\" or \"off\"' % arg)\n return bool(arg)\n # The following lists may be moved to a more\n # accessible location.\n kwkeys0 = ['size', 'width', 'color', 'tickdir', 'pad',\n 'labelsize', 'labelcolor', 'zorder', 'gridOn',\n 'tick1On', 'tick2On', 'label1On', 'label2On']\n kwkeys1 = ['length', 'direction', 'left', 'bottom', 'right', 'top',\n 'labelleft', 'labelbottom', 'labelright', 'labeltop']\n kwkeys = kwkeys0 + kwkeys1\n kwtrans = dict()\n if to_init_kw:\n if 'length' in kw:\n kwtrans['size'] = kw.pop('length')\n if 'direction' in kw:\n kwtrans['tickdir'] = kw.pop('direction')\n if 'left' in kw:\n kwtrans['tick1On'] = _bool(kw.pop('left'))\n if 'bottom' in kw:\n kwtrans['tick1On'] = _bool(kw.pop('bottom'))\n if 'right' in kw:\n kwtrans['tick2On'] = _bool(kw.pop('right'))\n if 'top' in kw:\n kwtrans['tick2On'] = _bool(kw.pop('top'))\n\n if 'labelleft' in kw:\n kwtrans['label1On'] = _bool(kw.pop('labelleft'))\n if 'labelbottom' in kw:\n kwtrans['label1On'] = _bool(kw.pop('labelbottom'))\n if 'labelright' in kw:\n kwtrans['label2On'] = _bool(kw.pop('labelright'))\n if 'labeltop' in kw:\n kwtrans['label2On'] = _bool(kw.pop('labeltop'))\n if 'colors' in kw:\n c = kw.pop('colors')\n kwtrans['color'] = c\n kwtrans['labelcolor'] = c\n # Maybe move the checking up to the caller of this method.\n for key in kw:\n if key not in kwkeys:\n raise ValueError(\n \"keyword %s is not recognized; valid keywords are %s\"\n % (key, kwkeys))\n kwtrans.update(kw)\n else:\n raise NotImplementedError(\"Inverse translation is deferred\")\n return kwtrans\n\n def set_clip_path(self, clippath, transform=None):\n artist.Artist.set_clip_path(self, clippath, transform)\n for child in self.majorTicks + self.minorTicks:\n child.set_clip_path(clippath, transform)\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n raise NotImplementedError('Derived must override')\n\n def set_view_interval(self, vmin, vmax, ignore=False):\n raise NotImplementedError('Derived must override')\n\n def get_data_interval(self):\n 'return the Interval instance for this axis data limits'\n raise NotImplementedError('Derived must override')\n\n def set_data_interval(self):\n '''set the axis data limits'''\n raise NotImplementedError('Derived must override')\n\n def set_default_intervals(self):\n '''set the default limits for the axis data and view interval if they\n are not mutated'''\n\n # this is mainly in support of custom object plotting. For\n # example, if someone passes in a datetime object, we do not\n # know automagically how to set the default min/max of the\n # data and view limits. 
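        # The AxisInfo.default_limits hook (explained next) lets a custom
        # converter supply such defaults; a hedged sketch follows, using a
        # hypothetical ``Foo`` data type (illustrative, not part of this
        # module):
        #
        #     import matplotlib.units as munits
        #
        #     class FooConverter(munits.ConversionInterface):
        #         @staticmethod
        #         def axisinfo(unit, axis):
        #             # supply default view limits for Foo-typed data
        #             return munits.AxisInfo(default_limits=(0, 10))
        #
        #     munits.registry[Foo] = FooConverter()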
The unit conversion AxisInfo\n # interface provides a hook for custom types to register\n # default limits through the AxisInfo.default_limits\n # attribute, and the derived code below will check for that\n # and use it if is available (else just use 0..1)\n pass\n\n def _set_artist_props(self, a):\n if a is None:\n return\n a.set_figure(self.figure)\n\n def iter_ticks(self):\n \"\"\"\n Iterate through all of the major and minor ticks.\n \"\"\"\n majorLocs = self.major.locator()\n majorTicks = self.get_major_ticks(len(majorLocs))\n self.major.formatter.set_locs(majorLocs)\n majorLabels = [self.major.formatter(val, i)\n for i, val in enumerate(majorLocs)]\n\n minorLocs = self.minor.locator()\n minorTicks = self.get_minor_ticks(len(minorLocs))\n self.minor.formatter.set_locs(minorLocs)\n minorLabels = [self.minor.formatter(val, i)\n for i, val in enumerate(minorLocs)]\n\n major_minor = [\n (majorTicks, majorLocs, majorLabels),\n (minorTicks, minorLocs, minorLabels)]\n\n for group in major_minor:\n for tick in zip(*group):\n yield tick\n\n def get_ticklabel_extents(self, renderer):\n \"\"\"\n Get the extents of the tick labels on either side\n of the axes.\n \"\"\"\n\n ticks_to_draw = self._update_ticks(renderer)\n ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,\n renderer)\n\n if len(ticklabelBoxes):\n bbox = mtransforms.Bbox.union(ticklabelBoxes)\n else:\n bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)\n if len(ticklabelBoxes2):\n bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)\n else:\n bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)\n return bbox, bbox2\n\n def set_smart_bounds(self, value):\n \"\"\"set the axis to have smart bounds\"\"\"\n self._smart_bounds = value\n\n def get_smart_bounds(self):\n \"\"\"get whether the axis has smart bounds\"\"\"\n return self._smart_bounds\n\n def _update_ticks(self, renderer):\n \"\"\"\n Update ticks (position and labels) using the current data\n interval of the axes. Returns a list of ticks that will be\n drawn.\n \"\"\"\n\n interval = self.get_view_interval()\n tick_tups = [t for t in self.iter_ticks()]\n if self._smart_bounds:\n # handle inverted limits\n view_low, view_high = min(*interval), max(*interval)\n data_low, data_high = self.get_data_interval()\n if data_low > data_high:\n data_low, data_high = data_high, data_low\n locs = [ti[1] for ti in tick_tups]\n locs.sort()\n locs = np.array(locs)\n if len(locs):\n if data_low <= view_low:\n # data extends beyond view, take view as limit\n ilow = view_low\n else:\n # data stops within view, take best tick\n cond = locs <= data_low\n good_locs = locs[cond]\n if len(good_locs) > 0:\n # last tick prior or equal to first data point\n ilow = good_locs[-1]\n else:\n # No ticks (why not?), take first tick\n ilow = locs[0]\n if data_high >= view_high:\n # data extends beyond view, take view as limit\n ihigh = view_high\n else:\n # data stops within view, take best tick\n cond = locs >= data_high\n good_locs = locs[cond]\n if len(good_locs) > 0:\n # first tick after or equal to last data point\n ihigh = good_locs[0]\n else:\n # No ticks (why not?), take last tick\n ihigh = locs[-1]\n tick_tups = [ti for ti in tick_tups\n if (ti[1] >= ilow) and (ti[1] <= ihigh)]\n\n # so that we don't lose ticks on the end, expand out the interval ever so slightly. The\n # \"ever so slightly\" is defined to be the width of a half of a pixel. 
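        # (illustration) if the view interval is (0., 10.) and half a pixel
        # spans 0.01 data units, ticks within [-0.01, 10.01] are kept, so a
        # tick sitting exactly on 0 or 10 still gets drawn.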
We don't want to draw\n # a tick that even one pixel outside of the defined axis interval.\n if interval[0] <= interval[1]:\n interval_expanded = interval\n else:\n interval_expanded = interval[1], interval[0]\n\n if hasattr(self, '_get_pixel_distance_along_axis'):\n # normally, one does not want to catch all exceptions that\n # could possibly happen, but it is not clear exactly what\n # exceptions might arise from a user's projection (their\n # rendition of the Axis object). So, we catch all, with\n # the idea that one would rather potentially lose a tick\n # from one side of the axis or another, rather than see a\n # stack trace.\n try:\n ds1 = self._get_pixel_distance_along_axis(interval_expanded[0], -0.5)\n except:\n warnings.warn(\"Unable to find pixel distance along axis for interval padding; assuming no interval padding needed.\")\n ds1 = 0.0\n if np.isnan(ds1):\n ds1 = 0.0\n try:\n ds2 = self._get_pixel_distance_along_axis(interval_expanded[1], +0.5)\n except:\n warnings.warn(\"Unable to find pixel distance along axis for interval padding; assuming no interval padding needed.\")\n ds2 = 0.0\n if np.isnan(ds2):\n ds2 = 0.0\n interval_expanded = (interval_expanded[0] - ds1,\n interval_expanded[1] + ds2)\n\n ticks_to_draw = []\n for tick, loc, label in tick_tups:\n if tick is None:\n continue\n if not mtransforms.interval_contains(interval_expanded, loc):\n continue\n tick.update_position(loc)\n tick.set_label1(label)\n tick.set_label2(label)\n ticks_to_draw.append(tick)\n\n return ticks_to_draw\n\n def _get_tick_bboxes(self, ticks, renderer):\n \"\"\"\n Given the list of ticks, return two lists of bboxes. One for\n tick lable1's and another for tick label2's.\n \"\"\"\n\n ticklabelBoxes = []\n ticklabelBoxes2 = []\n\n for tick in ticks:\n if tick.label1On and tick.label1.get_visible():\n extent = tick.label1.get_window_extent(renderer)\n ticklabelBoxes.append(extent)\n if tick.label2On and tick.label2.get_visible():\n extent = tick.label2.get_window_extent(renderer)\n ticklabelBoxes2.append(extent)\n return ticklabelBoxes, ticklabelBoxes2\n\n def get_tightbbox(self, renderer):\n \"\"\"\n Return a bounding box that encloses the axis. 
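        For example, a minimal sketch (``get_renderer`` is available on
        Agg-style canvases; other backends may differ)::

            fig.canvas.draw()
            renderer = fig.canvas.get_renderer()
            bbox = ax.xaxis.get_tightbbox(renderer)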
It only accounts\n tick labels, axis label, and offsetText.\n \"\"\"\n if not self.get_visible():\n return\n\n ticks_to_draw = self._update_ticks(renderer)\n ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,\n renderer)\n\n self._update_label_position(ticklabelBoxes, ticklabelBoxes2)\n\n self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)\n self.offsetText.set_text(self.major.formatter.get_offset())\n\n bb = []\n\n for a in [self.label, self.offsetText]:\n if a.get_visible():\n bb.append(a.get_window_extent(renderer))\n\n bb.extend(ticklabelBoxes)\n bb.extend(ticklabelBoxes2)\n\n #self.offsetText\n bb = [b for b in bb if b.width != 0 or b.height != 0]\n if bb:\n _bbox = mtransforms.Bbox.union(bb)\n return _bbox\n else:\n return None\n\n @allow_rasterization\n def draw(self, renderer, *args, **kwargs):\n 'Draw the axis lines, grid lines, tick lines and labels'\n\n if not self.get_visible():\n return\n renderer.open_group(__name__)\n\n ticks_to_draw = self._update_ticks(renderer)\n ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,\n renderer)\n\n for tick in ticks_to_draw:\n tick.draw(renderer)\n\n # scale up the axis label box to also find the neighbors, not\n # just the tick labels that actually overlap note we need a\n # *copy* of the axis label box because we don't wan't to scale\n # the actual bbox\n\n self._update_label_position(ticklabelBoxes, ticklabelBoxes2)\n\n self.label.draw(renderer)\n\n self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)\n self.offsetText.set_text(self.major.formatter.get_offset())\n self.offsetText.draw(renderer)\n\n if 0: # draw the bounding boxes around the text for debug\n for tick in self.majorTicks:\n label = tick.label1\n mpatches.bbox_artist(label, renderer)\n mpatches.bbox_artist(self.label, renderer)\n\n renderer.close_group(__name__)\n\n def _get_label(self):\n raise NotImplementedError('Derived must override')\n\n def _get_offset_text(self):\n raise NotImplementedError('Derived must override')\n\n def get_gridlines(self):\n 'Return the grid lines as a list of Line2D instance'\n ticks = self.get_major_ticks()\n return cbook.silent_list('Line2D gridline',\n [tick.gridline for tick in ticks])\n\n def get_label(self):\n 'Return the axis label as a Text instance'\n return self.label\n\n def get_offset_text(self):\n 'Return the axis offsetText as a Text instance'\n return self.offsetText\n\n def get_pickradius(self):\n 'Return the depth of the axis used by the picker'\n return self.pickradius\n\n def get_majorticklabels(self):\n 'Return a list of Text instances for the major ticklabels'\n ticks = self.get_major_ticks()\n labels1 = [tick.label1 for tick in ticks if tick.label1On]\n labels2 = [tick.label2 for tick in ticks if tick.label2On]\n return cbook.silent_list('Text major ticklabel', labels1 + labels2)\n\n def get_minorticklabels(self):\n 'Return a list of Text instances for the minor ticklabels'\n ticks = self.get_minor_ticks()\n labels1 = [tick.label1 for tick in ticks if tick.label1On]\n labels2 = [tick.label2 for tick in ticks if tick.label2On]\n return cbook.silent_list('Text minor ticklabel', labels1 + labels2)\n\n def get_ticklabels(self, minor=False):\n 'Return a list of Text instances for ticklabels'\n if minor:\n return self.get_minorticklabels()\n return self.get_majorticklabels()\n\n def get_majorticklines(self):\n 'Return the major tick lines as a list of Line2D instances'\n lines = []\n ticks = self.get_major_ticks()\n for tick in ticks:\n 
lines.append(tick.tick1line)\n lines.append(tick.tick2line)\n return cbook.silent_list('Line2D ticklines', lines)\n\n def get_minorticklines(self):\n 'Return the minor tick lines as a list of Line2D instances'\n lines = []\n ticks = self.get_minor_ticks()\n for tick in ticks:\n lines.append(tick.tick1line)\n lines.append(tick.tick2line)\n return cbook.silent_list('Line2D ticklines', lines)\n\n def get_ticklines(self, minor=False):\n 'Return the tick lines as a list of Line2D instances'\n if minor:\n return self.get_minorticklines()\n return self.get_majorticklines()\n\n def get_majorticklocs(self):\n \"Get the major tick locations in data coordinates as a numpy array\"\n return self.major.locator()\n\n def get_minorticklocs(self):\n \"Get the minor tick locations in data coordinates as a numpy array\"\n return self.minor.locator()\n\n def get_ticklocs(self, minor=False):\n \"Get the tick locations in data coordinates as a numpy array\"\n if minor:\n return self.minor.locator()\n return self.major.locator()\n\n def _get_tick(self, major):\n 'return the default tick instance'\n raise NotImplementedError('derived must override')\n\n def _copy_tick_props(self, src, dest):\n 'Copy the props from src tick to dest tick'\n if src is None or dest is None:\n return\n dest.label1.update_from(src.label1)\n dest.label2.update_from(src.label2)\n\n dest.tick1line.update_from(src.tick1line)\n dest.tick2line.update_from(src.tick2line)\n dest.gridline.update_from(src.gridline)\n\n dest.tick1On = src.tick1On\n dest.tick2On = src.tick2On\n dest.label1On = src.label1On\n dest.label2On = src.label2On\n\n def get_label_text(self):\n 'Get the text of the label'\n return self.label.get_text()\n\n def get_major_locator(self):\n 'Get the locator of the major ticker'\n return self.major.locator\n\n def get_minor_locator(self):\n 'Get the locator of the minor ticker'\n return self.minor.locator\n\n def get_major_formatter(self):\n 'Get the formatter of the major ticker'\n return self.major.formatter\n\n def get_minor_formatter(self):\n 'Get the formatter of the minor ticker'\n return self.minor.formatter\n\n def get_major_ticks(self, numticks=None):\n 'get the tick instances; grow as necessary'\n if numticks is None:\n numticks = len(self.get_major_locator()())\n if len(self.majorTicks) < numticks:\n # update the new tick label properties from the old\n for i in range(numticks - len(self.majorTicks)):\n tick = self._get_tick(major=True)\n self.majorTicks.append(tick)\n\n if self._lastNumMajorTicks < numticks:\n protoTick = self.majorTicks[0]\n for i in range(self._lastNumMajorTicks, len(self.majorTicks)):\n tick = self.majorTicks[i]\n if self._gridOnMajor:\n tick.gridOn = True\n self._copy_tick_props(protoTick, tick)\n\n self._lastNumMajorTicks = numticks\n ticks = self.majorTicks[:numticks]\n\n return ticks\n\n def get_minor_ticks(self, numticks=None):\n 'get the minor tick instances; grow as necessary'\n if numticks is None:\n numticks = len(self.get_minor_locator()())\n\n if len(self.minorTicks) < numticks:\n # update the new tick label properties from the old\n for i in range(numticks - len(self.minorTicks)):\n tick = self._get_tick(major=False)\n self.minorTicks.append(tick)\n\n if self._lastNumMinorTicks < numticks:\n protoTick = self.minorTicks[0]\n for i in range(self._lastNumMinorTicks, len(self.minorTicks)):\n tick = self.minorTicks[i]\n if self._gridOnMinor:\n tick.gridOn = True\n self._copy_tick_props(protoTick, tick)\n\n self._lastNumMinorTicks = numticks\n ticks = self.minorTicks[:numticks]\n\n return 
ticks\n\n def grid(self, b=None, which='major', **kwargs):\n \"\"\"\n Set the axis grid on or off; b is a boolean. Use *which* =\n 'major' | 'minor' | 'both' to set the grid for major or minor ticks.\n\n If *b* is *None* and len(kwargs)==0, toggle the grid state. If\n *kwargs* are supplied, it is assumed you want the grid on and *b*\n will be set to True.\n\n *kwargs* are used to set the line properties of the grids, eg,\n\n xax.grid(color='r', linestyle='-', linewidth=2)\n \"\"\"\n if len(kwargs):\n b = True\n which = which.lower()\n if which in ['minor', 'both']:\n if b is None:\n self._gridOnMinor = not self._gridOnMinor\n else:\n self._gridOnMinor = b\n for tick in self.minorTicks: # don't use get_ticks here!\n if tick is None:\n continue\n tick.gridOn = self._gridOnMinor\n if len(kwargs):\n artist.setp(tick.gridline, **kwargs)\n self._minor_tick_kw['gridOn'] = self._gridOnMinor\n if which in ['major', 'both']:\n if b is None:\n self._gridOnMajor = not self._gridOnMajor\n else:\n self._gridOnMajor = b\n for tick in self.majorTicks: # don't use get_ticks here!\n if tick is None:\n continue\n tick.gridOn = self._gridOnMajor\n if len(kwargs):\n artist.setp(tick.gridline, **kwargs)\n self._major_tick_kw['gridOn'] = self._gridOnMajor\n\n def update_units(self, data):\n \"\"\"\n introspect *data* for units converter and update the\n axis.converter instance if necessary. Return *True*\n if *data* is registered for unit conversion.\n \"\"\"\n\n converter = munits.registry.get_converter(data)\n if converter is None:\n return False\n\n neednew = self.converter != converter\n self.converter = converter\n default = self.converter.default_units(data, self)\n #print 'update units: default=%s, units=%s'%(default, self.units)\n if default is not None and self.units is None:\n self.set_units(default)\n\n if neednew:\n self._update_axisinfo()\n return True\n\n def _update_axisinfo(self):\n \"\"\"\n check the axis converter for the stored units to see if the\n axis info needs to be updated\n \"\"\"\n\n if self.converter is None:\n return\n\n info = self.converter.axisinfo(self.units, self)\n if info is None:\n return\n if info.majloc is not None and \\\n self.major.locator != info.majloc and self.isDefault_majloc:\n self.set_major_locator(info.majloc)\n self.isDefault_majloc = True\n if info.minloc is not None and \\\n self.minor.locator != info.minloc and self.isDefault_minloc:\n self.set_minor_locator(info.minloc)\n self.isDefault_minloc = True\n if info.majfmt is not None and \\\n self.major.formatter != info.majfmt and self.isDefault_majfmt:\n self.set_major_formatter(info.majfmt)\n self.isDefault_majfmt = True\n if info.minfmt is not None and \\\n self.minor.formatter != info.minfmt and self.isDefault_minfmt:\n self.set_minor_formatter(info.minfmt)\n self.isDefault_minfmt = True\n if info.label is not None and self.isDefault_label:\n self.set_label_text(info.label)\n self.isDefault_label = True\n\n self.set_default_intervals()\n\n def have_units(self):\n return self.converter is not None or self.units is not None\n\n def convert_units(self, x):\n if self.converter is None:\n self.converter = munits.registry.get_converter(x)\n\n if self.converter is None:\n return x\n\n ret = self.converter.convert(x, self.units, self)\n return ret\n\n def set_units(self, u):\n \"\"\"\n set the units for axis\n\n ACCEPTS: a units tag\n \"\"\"\n pchanged = False\n if u is None:\n self.units = None\n pchanged = True\n else:\n if u != self.units:\n self.units = u\n pchanged = True\n if pchanged:\n 
self._update_axisinfo()\n self.callbacks.process('units')\n self.callbacks.process('units finalize')\n\n def get_units(self):\n 'return the units for axis'\n return self.units\n\n def set_label_text(self, label, fontdict=None, **kwargs):\n \"\"\" Sets the text value of the axis label\n\n ACCEPTS: A string value for the label\n \"\"\"\n self.isDefault_label = False\n self.label.set_text(label)\n if fontdict is not None:\n self.label.update(fontdict)\n self.label.update(kwargs)\n return self.label\n\n def set_major_formatter(self, formatter):\n \"\"\"\n Set the formatter of the major ticker\n\n ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance\n \"\"\"\n self.isDefault_majfmt = False\n self.major.formatter = formatter\n formatter.set_axis(self)\n\n def set_minor_formatter(self, formatter):\n \"\"\"\n Set the formatter of the minor ticker\n\n ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance\n \"\"\"\n self.isDefault_minfmt = False\n self.minor.formatter = formatter\n formatter.set_axis(self)\n\n def set_major_locator(self, locator):\n \"\"\"\n Set the locator of the major ticker\n\n ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance\n \"\"\"\n self.isDefault_majloc = False\n self.major.locator = locator\n locator.set_axis(self)\n\n def set_minor_locator(self, locator):\n \"\"\"\n Set the locator of the minor ticker\n\n ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance\n \"\"\"\n self.isDefault_minloc = False\n self.minor.locator = locator\n locator.set_axis(self)\n\n def set_pickradius(self, pickradius):\n \"\"\"\n Set the depth of the axis used by the picker\n\n ACCEPTS: a distance in points\n \"\"\"\n self.pickradius = pickradius\n\n def set_ticklabels(self, ticklabels, *args, **kwargs):\n \"\"\"\n Set the text values of the tick labels. Return a list of Text\n instances. Use *kwarg* *minor=True* to select minor ticks.\n All other kwargs are used to update the text object properties.\n As for get_ticklabels, label1 (left or bottom) is\n affected for a given tick only if its label1On attribute\n is True, and similarly for label2. 
The list of returned\n        label text objects consists of all such label1 objects followed\n        by all such label2 objects.\n\n        The input *ticklabels* is assumed to match the set of\n        tick locations, regardless of the state of label1On and\n        label2On.\n\n        ACCEPTS: sequence of strings\n        \"\"\"\n        #ticklabels = [str(l) for l in ticklabels]\n        minor = kwargs.pop('minor', False)\n        if minor:\n            self.set_minor_formatter(mticker.FixedFormatter(ticklabels))\n            ticks = self.get_minor_ticks()\n        else:\n            self.set_major_formatter(mticker.FixedFormatter(ticklabels))\n            ticks = self.get_major_ticks()\n\n        ret1 = []\n        ret2 = []\n        for i, tick in enumerate(ticks):\n            if i < len(ticklabels):\n                if tick.label1On:\n                    tick.label1.set_text(ticklabels[i])\n                    tick.label1.update(kwargs)\n                    ret1.append(tick.label1)\n                if tick.label2On:\n                    tick.label2.set_text(ticklabels[i])\n                    ret2.append(tick.label2)\n                    tick.label2.update(kwargs)\n        return ret1 + ret2\n\n    def set_ticks(self, ticks, minor=False):\n        \"\"\"\n        Set the locations of the tick marks from sequence ticks\n\n        ACCEPTS: sequence of floats\n        \"\"\"\n        ### XXX if the user changes units, the information will be lost here\n        ticks = self.convert_units(ticks)\n        if len(ticks) > 1:\n            xleft, xright = self.get_view_interval()\n            if xright > xleft:\n                self.set_view_interval(min(ticks), max(ticks))\n            else:\n                self.set_view_interval(max(ticks), min(ticks))\n        if minor:\n            self.set_minor_locator(mticker.FixedLocator(ticks))\n            return self.get_minor_ticks(len(ticks))\n        else:\n            self.set_major_locator(mticker.FixedLocator(ticks))\n            return self.get_major_ticks(len(ticks))\n\n    def _update_label_position(self, bboxes, bboxes2):\n        \"\"\"\n        Update the label position based on the sequence of bounding\n        boxes of all the ticklabels\n        \"\"\"\n        raise NotImplementedError('Derived must override')\n\n    def _update_offset_text_position(self, bboxes, bboxes2):\n        \"\"\"\n        Update the offset_text position based on the sequence of bounding\n        boxes of all the ticklabels\n        \"\"\"\n        raise NotImplementedError('Derived must override')\n\n    def pan(self, numsteps):\n        'Pan *numsteps* (can be positive or negative)'\n        self.major.locator.pan(numsteps)\n\n    def zoom(self, direction):\n        \"Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out\"\n        self.major.locator.zoom(direction)\n\n    def axis_date(self, tz=None):\n        \"\"\"\n        Sets up x-axis ticks and labels that treat the x data as dates.\n        *tz* is a :class:`tzinfo` instance or a timezone string.\n        This timezone is used to create date labels.\n        \"\"\"\n        # By providing a sample datetime instance with the desired\n        # timezone, the registered converter can be selected,\n        # and the \"units\" attribute, which is the timezone, can\n        # be set.\n        import datetime\n        if isinstance(tz, (str, unicode)):\n            import pytz\n            tz = pytz.timezone(tz)\n        self.update_units(datetime.datetime(2009, 1, 1, 0, 0, 0, 0, tz))\n\n\nclass XAxis(Axis):\n    __name__ = 'xaxis'\n    axis_name = 'x'\n\n    def contains(self, mouseevent):\n        \"\"\"Test whether the mouse event occurred in the x axis.\n        \"\"\"\n        if callable(self._contains):\n            return self._contains(self, mouseevent)\n\n        x, y = mouseevent.x, mouseevent.y\n        try:\n            trans = self.axes.transAxes.inverted()\n            xaxes, yaxes = trans.transform_point((x, y))\n        except ValueError:\n            return False, {}\n        l, b = self.axes.transAxes.transform_point((0, 0))\n        r, t = self.axes.transAxes.transform_point((1, 1))\n        inaxis = xaxes >= 0 and xaxes <= 1 and (\n            (y < b and y > b - self.pickradius) or\n            (y > t and y < t + self.pickradius))\n        return inaxis, {}\n\n    def _get_tick(self, major):\n        if 
major:\n tick_kw = self._major_tick_kw\n else:\n tick_kw = self._minor_tick_kw\n return XTick(self.axes, 0, '', major=major, **tick_kw)\n\n def _get_label(self):\n # x in axes coords, y in display coords (to be updated at draw\n # time by _update_label_positions)\n label = mtext.Text(x=0.5, y=0,\n fontproperties=font_manager.FontProperties(\n size=rcParams['axes.labelsize'],\n weight=rcParams['axes.labelweight']),\n color=rcParams['axes.labelcolor'],\n verticalalignment='top',\n horizontalalignment='center',\n )\n\n label.set_transform(mtransforms.blended_transform_factory(\n self.axes.transAxes, mtransforms.IdentityTransform()))\n\n self._set_artist_props(label)\n self.label_position = 'bottom'\n return label\n\n def _get_offset_text(self):\n # x in axes coords, y in display coords (to be updated at draw time)\n offsetText = mtext.Text(x=1, y=0,\n fontproperties=font_manager.FontProperties(\n size=rcParams['xtick.labelsize']),\n color=rcParams['xtick.color'],\n verticalalignment='top',\n horizontalalignment='right',\n )\n offsetText.set_transform(mtransforms.blended_transform_factory(\n self.axes.transAxes, mtransforms.IdentityTransform()))\n self._set_artist_props(offsetText)\n self.offset_text_position = 'bottom'\n return offsetText\n\n def _get_pixel_distance_along_axis(self, where, perturb):\n \"\"\"\n Returns the amount, in data coordinates, that a single pixel corresponds to in the\n locality given by \"where\", which is also given in data coordinates, and is an x coordinate.\n \"perturb\" is the amount to perturb the pixel. Usually +0.5 or -0.5.\n\n Implementing this routine for an axis is optional; if present, it will ensure that no\n ticks are lost due to round-off at the extreme ends of an axis.\n \"\"\"\n\n # Note that this routine does not work for a polar axis, because of the 1e-10 below. To\n # do things correctly, we need to use rmax instead of 1e-10 for a polar axis. But\n # since we do not have that kind of information at this point, we just don't try to\n # pad anything for the theta axis of a polar plot.\n if self.axes.name == 'polar':\n return 0.0\n\n #\n # first figure out the pixel location of the \"where\" point. 
We use 1e-10 for the\n # y point, so that we remain compatible with log axes.\n #\n trans = self.axes.transData # transformation from data coords to display coords\n transinv = trans.inverted() # transformation from display coords to data coords\n pix = trans.transform_point((where, 1e-10))\n ptp = transinv.transform_point((pix[0] + perturb, pix[1])) # perturb the pixel.\n dx = abs(ptp[0] - where)\n\n return dx\n\n def get_label_position(self):\n \"\"\"\n Return the label position (top or bottom)\n \"\"\"\n return self.label_position\n\n def set_label_position(self, position):\n \"\"\"\n Set the label position (top or bottom)\n\n ACCEPTS: [ 'top' | 'bottom' ]\n \"\"\"\n assert position == 'top' or position == 'bottom'\n if position == 'top':\n self.label.set_verticalalignment('baseline')\n else:\n self.label.set_verticalalignment('top')\n self.label_position = position\n\n def _update_label_position(self, bboxes, bboxes2):\n \"\"\"\n Update the label position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n if not self._autolabelpos:\n return\n x, y = self.label.get_position()\n if self.label_position == 'bottom':\n if not len(bboxes):\n bottom = self.axes.bbox.ymin\n else:\n bbox = mtransforms.Bbox.union(bboxes)\n bottom = bbox.y0\n self.label.set_position((x,\n bottom - \\\n self.labelpad * self.figure.dpi / 72.0))\n\n else:\n if not len(bboxes2):\n top = self.axes.bbox.ymax\n else:\n bbox = mtransforms.Bbox.union(bboxes2)\n top = bbox.y1\n self.label.set_position((x,\n top + self.labelpad * self.figure.dpi / 72.0))\n\n def _update_offset_text_position(self, bboxes, bboxes2):\n \"\"\"\n Update the offset_text position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n x, y = self.offsetText.get_position()\n if not len(bboxes):\n bottom = self.axes.bbox.ymin\n else:\n bbox = mtransforms.Bbox.union(bboxes)\n bottom = bbox.y0\n self.offsetText.set_position((x,\n bottom - self.OFFSETTEXTPAD * self.figure.dpi / 72.0))\n\n def get_text_heights(self, renderer):\n \"\"\"\n Returns the amount of space one should reserve for text\n above and below the axes. Returns a tuple (above, below)\n \"\"\"\n bbox, bbox2 = self.get_ticklabel_extents(renderer)\n # MGDTODO: Need a better way to get the pad\n padPixels = self.majorTicks[0].get_pad_pixels()\n\n above = 0.0\n if bbox2.height:\n above += bbox2.height + padPixels\n below = 0.0\n if bbox.height:\n below += bbox.height + padPixels\n\n if self.get_label_position() == 'top':\n above += self.label.get_window_extent(renderer).height + padPixels\n else:\n below += self.label.get_window_extent(renderer).height + padPixels\n return above, below\n\n def set_ticks_position(self, position):\n \"\"\"\n Set the ticks position (top, bottom, both, default or none)\n both sets the ticks to appear on both positions, but does not\n change the tick labels. 'default' resets the tick positions to\n the default: ticks on both positions, labels at bottom. 'none'\n can be used if you don't want any ticks. 
'none' and 'both'\n affect only the ticks, not the labels.\n\n ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]\n \"\"\"\n if position == 'top':\n self.set_tick_params(which='both', top=True, labeltop=True,\n bottom=False, labelbottom=False)\n elif position == 'bottom':\n self.set_tick_params(which='both', top=False, labeltop=False,\n bottom=True, labelbottom=True)\n elif position == 'both':\n self.set_tick_params(which='both', top=True,\n bottom=True)\n elif position == 'none':\n self.set_tick_params(which='both', top=False,\n bottom=False)\n elif position == 'default':\n self.set_tick_params(which='both', top=True, labeltop=False,\n bottom=True, labelbottom=True)\n else:\n raise ValueError(\"invalid position: %s\" % position)\n\n def tick_top(self):\n 'use ticks only on top'\n self.set_ticks_position('top')\n\n def tick_bottom(self):\n 'use ticks only on bottom'\n self.set_ticks_position('bottom')\n\n def get_ticks_position(self):\n \"\"\"\n Return the ticks position (top, bottom, default or unknown)\n \"\"\"\n majt = self.majorTicks[0]\n mT = self.minorTicks[0]\n\n majorTop = (not majt.tick1On) and \\\n majt.tick2On and (not majt.label1On) and majt.label2On\n minorTop = (not mT.tick1On) and \\\n mT.tick2On and (not mT.label1On) and mT.label2On\n if majorTop and minorTop:\n return 'top'\n\n MajorBottom = majt.tick1On and (not majt.tick2On) and \\\n majt.label1On and (not majt.label2On)\n MinorBottom = mT.tick1On and (not mT.tick2On) and \\\n mT.label1On and (not mT.label2On)\n if MajorBottom and MinorBottom:\n return 'bottom'\n\n majorDefault = majt.tick1On and majt.tick2On and \\\n majt.label1On and (not majt.label2On)\n minorDefault = mT.tick1On and mT.tick2On and \\\n mT.label1On and (not mT.label2On)\n if majorDefault and minorDefault:\n return 'default'\n\n return 'unknown'\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n return self.axes.viewLim.intervalx\n\n def set_view_interval(self, vmin, vmax, ignore=False):\n \"\"\"\n If *ignore* is *False*, the order of vmin, vmax\n does not matter; the original axis orientation will\n be preserved. In addition, the view limits can be\n expanded, but will not be reduced. 
This method is\n for mpl internal use; for normal use, see\n :meth:`~matplotlib.axes.Axes.set_xlim`.\n\n \"\"\"\n if ignore:\n self.axes.viewLim.intervalx = vmin, vmax\n else:\n Vmin, Vmax = self.get_view_interval()\n if Vmin < Vmax:\n self.axes.viewLim.intervalx = (min(vmin, vmax, Vmin),\n max(vmin, vmax, Vmax))\n else:\n self.axes.viewLim.intervalx = (max(vmin, vmax, Vmin),\n min(vmin, vmax, Vmax))\n\n def get_minpos(self):\n return self.axes.dataLim.minposx\n\n def get_data_interval(self):\n 'return the Interval instance for this axis data limits'\n return self.axes.dataLim.intervalx\n\n def set_data_interval(self, vmin, vmax, ignore=False):\n 'set the axis data limits'\n if ignore:\n self.axes.dataLim.intervalx = vmin, vmax\n else:\n Vmin, Vmax = self.get_data_interval()\n self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)\n\n def set_default_intervals(self):\n 'set the default limits for the axis interval if they are not mutated'\n xmin, xmax = 0., 1.\n dataMutated = self.axes.dataLim.mutatedx()\n viewMutated = self.axes.viewLim.mutatedx()\n if not dataMutated or not viewMutated:\n if self.converter is not None:\n info = self.converter.axisinfo(self.units, self)\n if info.default_limits is not None:\n valmin, valmax = info.default_limits\n xmin = self.converter.convert(valmin, self.units, self)\n xmax = self.converter.convert(valmax, self.units, self)\n if not dataMutated:\n self.axes.dataLim.intervalx = xmin, xmax\n if not viewMutated:\n self.axes.viewLim.intervalx = xmin, xmax\n\n\nclass YAxis(Axis):\n __name__ = 'yaxis'\n axis_name = 'y'\n\n def contains(self, mouseevent):\n \"\"\"Test whether the mouse event occurred in the y axis.\n\n Returns *True* | *False*\n \"\"\"\n if callable(self._contains):\n return self._contains(self, mouseevent)\n\n x, y = mouseevent.x, mouseevent.y\n try:\n trans = self.axes.transAxes.inverted()\n xaxes, yaxes = trans.transform_point((x, y))\n except ValueError:\n return False, {}\n l, b = self.axes.transAxes.transform_point((0, 0))\n r, t = self.axes.transAxes.transform_point((1, 1))\n inaxis = yaxes >= 0 and yaxes <= 1 and (\n (x < l and x > l - self.pickradius) or\n (x > r and x < r + self.pickradius))\n return inaxis, {}\n\n def _get_tick(self, major):\n if major:\n tick_kw = self._major_tick_kw\n else:\n tick_kw = self._minor_tick_kw\n return YTick(self.axes, 0, '', major=major, **tick_kw)\n\n def _get_label(self):\n # x in display coords (updated by _update_label_position)\n # y in axes coords\n label = mtext.Text(x=0, y=0.5,\n # todo: get the label position\n fontproperties=font_manager.FontProperties(\n size=rcParams['axes.labelsize'],\n weight=rcParams['axes.labelweight']),\n color=rcParams['axes.labelcolor'],\n verticalalignment='bottom',\n horizontalalignment='center',\n rotation='vertical',\n rotation_mode='anchor',\n )\n label.set_transform(mtransforms.blended_transform_factory(\n mtransforms.IdentityTransform(), self.axes.transAxes))\n\n self._set_artist_props(label)\n self.label_position = 'left'\n return label\n\n def _get_offset_text(self):\n # x in display coords, y in axes coords (to be updated at draw time)\n offsetText = mtext.Text(x=0, y=0.5,\n fontproperties=font_manager.FontProperties(\n size=rcParams['ytick.labelsize']),\n color=rcParams['ytick.color'],\n verticalalignment='baseline',\n horizontalalignment='left',\n )\n offsetText.set_transform(mtransforms.blended_transform_factory(\n self.axes.transAxes, mtransforms.IdentityTransform()))\n self._set_artist_props(offsetText)\n self.offset_text_position = 'left'\n 
return offsetText\n\n def _get_pixel_distance_along_axis(self, where, perturb):\n \"\"\"\n Returns the amount, in data coordinates, that a single pixel corresponds to in the\n locality given by \"where\", which is also given in data coordinates, and is an y coordinate.\n \"perturb\" is the amount to perturb the pixel. Usually +0.5 or -0.5.\n\n Implementing this routine for an axis is optional; if present, it will ensure that no\n ticks are lost due to round-off at the extreme ends of an axis.\n \"\"\"\n\n #\n # first figure out the pixel location of the \"where\" point. We use 1e-10 for the\n # x point, so that we remain compatible with log axes.\n #\n trans = self.axes.transData # transformation from data coords to display coords\n transinv = trans.inverted() # transformation from display coords to data coords\n pix = trans.transform_point((1e-10, where))\n ptp = transinv.transform_point((pix[0], pix[1] + perturb)) # perturb the pixel.\n dy = abs(ptp[1] - where)\n return dy\n\n def get_label_position(self):\n \"\"\"\n Return the label position (left or right)\n \"\"\"\n return self.label_position\n\n def set_label_position(self, position):\n \"\"\"\n Set the label position (left or right)\n\n ACCEPTS: [ 'left' | 'right' ]\n \"\"\"\n assert position == 'left' or position == 'right'\n self.label.set_rotation_mode('anchor')\n self.label.set_horizontalalignment('center')\n if position == 'left':\n self.label.set_verticalalignment('bottom')\n else:\n self.label.set_verticalalignment('top')\n self.label_position = position\n\n def _update_label_position(self, bboxes, bboxes2):\n \"\"\"\n Update the label position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n if not self._autolabelpos:\n return\n x, y = self.label.get_position()\n if self.label_position == 'left':\n if not len(bboxes):\n left = self.axes.bbox.xmin\n else:\n bbox = mtransforms.Bbox.union(bboxes)\n left = bbox.x0\n\n self.label.set_position((left - \\\n self.labelpad * self.figure.dpi / 72.0, y))\n\n else:\n if not len(bboxes2):\n right = self.axes.bbox.xmax\n else:\n bbox = mtransforms.Bbox.union(bboxes2)\n right = bbox.x1\n\n self.label.set_position((right + \\\n self.labelpad * self.figure.dpi / 72.0, y))\n\n def _update_offset_text_position(self, bboxes, bboxes2):\n \"\"\"\n Update the offset_text position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n x, y = self.offsetText.get_position()\n top = self.axes.bbox.ymax\n self.offsetText.set_position((x,\n top + self.OFFSETTEXTPAD * self.figure.dpi / 72.0))\n\n def set_offset_position(self, position):\n assert position == 'left' or position == 'right'\n\n x, y = self.offsetText.get_position()\n if position == 'left':\n x = 0\n else:\n x = 1\n\n self.offsetText.set_ha(position)\n self.offsetText.set_position((x, y))\n\n def get_text_widths(self, renderer):\n bbox, bbox2 = self.get_ticklabel_extents(renderer)\n # MGDTODO: Need a better way to get the pad\n padPixels = self.majorTicks[0].get_pad_pixels()\n\n left = 0.0\n if bbox.width:\n left += bbox.width + padPixels\n right = 0.0\n if bbox2.width:\n right += bbox2.width + padPixels\n\n if self.get_label_position() == 'left':\n left += self.label.get_window_extent(renderer).width + padPixels\n else:\n right += self.label.get_window_extent(renderer).width + padPixels\n return left, right\n\n def set_ticks_position(self, position):\n \"\"\"\n Set the ticks position (left, right, both, default or none)\n 'both' sets the ticks to appear on both positions, but does not\n change 
the tick labels. 'default' resets the tick positions to\n the default: ticks on both positions, labels at left. 'none'\n can be used if you don't want any ticks. 'none' and 'both'\n affect only the ticks, not the labels.\n\n ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]\n \"\"\"\n if position == 'right':\n self.set_tick_params(which='both', right=True, labelright=True,\n left=False, labelleft=False)\n elif position == 'left':\n self.set_tick_params(which='both', right=False, labelright=False,\n left=True, labelleft=True)\n elif position == 'both':\n self.set_tick_params(which='both', right=True,\n left=True)\n elif position == 'none':\n self.set_tick_params(which='both', right=False,\n left=False)\n elif position == 'default':\n self.set_tick_params(which='both', right=True, labelright=False,\n left=True, labelleft=True)\n else:\n raise ValueError(\"invalid position: %s\" % position)\n\n def tick_right(self):\n 'use ticks only on right'\n self.set_ticks_position('right')\n\n def tick_left(self):\n 'use ticks only on left'\n self.set_ticks_position('left')\n\n def get_ticks_position(self):\n \"\"\"\n Return the ticks position (left, right, both or unknown)\n \"\"\"\n majt = self.majorTicks[0]\n mT = self.minorTicks[0]\n\n majorRight = (not majt.tick1On) and majt.tick2On \\\n and (not majt.label1On) and majt.label2On\n minorRight = (not mT.tick1On) and mT.tick2On and \\\n (not mT.label1On) and mT.label2On\n if majorRight and minorRight:\n return 'right'\n\n majorLeft = majt.tick1On and (not majt.tick2On) and \\\n majt.label1On and (not majt.label2On)\n minorLeft = mT.tick1On and (not mT.tick2On) and \\\n mT.label1On and (not mT.label2On)\n if majorLeft and minorLeft:\n return 'left'\n\n majorDefault = majt.tick1On and majt.tick2On and \\\n majt.label1On and (not majt.label2On)\n minorDefault = mT.tick1On and mT.tick2On and \\\n mT.label1On and (not mT.label2On)\n if majorDefault and minorDefault:\n return 'default'\n\n return 'unknown'\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n return self.axes.viewLim.intervaly\n\n def set_view_interval(self, vmin, vmax, ignore=False):\n \"\"\"\n If *ignore* is *False*, the order of vmin, vmax\n does not matter; the original axis orientation will\n be preserved. In addition, the view limits can be\n expanded, but will not be reduced. 
This method is\n for mpl internal use; for normal use, see\n :meth:`~matplotlib.axes.Axes.set_ylim`.\n\n \"\"\"\n if ignore:\n self.axes.viewLim.intervaly = vmin, vmax\n else:\n Vmin, Vmax = self.get_view_interval()\n if Vmin < Vmax:\n self.axes.viewLim.intervaly = (min(vmin, vmax, Vmin),\n max(vmin, vmax, Vmax))\n else:\n self.axes.viewLim.intervaly = (max(vmin, vmax, Vmin),\n min(vmin, vmax, Vmax))\n\n def get_minpos(self):\n return self.axes.dataLim.minposy\n\n def get_data_interval(self):\n 'return the Interval instance for this axis data limits'\n return self.axes.dataLim.intervaly\n\n def set_data_interval(self, vmin, vmax, ignore=False):\n 'set the axis data limits'\n if ignore:\n self.axes.dataLim.intervaly = vmin, vmax\n else:\n Vmin, Vmax = self.get_data_interval()\n self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)\n\n def set_default_intervals(self):\n 'set the default limits for the axis interval if they are not mutated'\n ymin, ymax = 0., 1.\n dataMutated = self.axes.dataLim.mutatedy()\n viewMutated = self.axes.viewLim.mutatedy()\n if not dataMutated or not viewMutated:\n if self.converter is not None:\n info = self.converter.axisinfo(self.units, self)\n if info.default_limits is not None:\n valmin, valmax = info.default_limits\n ymin = self.converter.convert(valmin, self.units, self)\n ymax = self.converter.convert(valmax, self.units, self)\n if not dataMutated:\n self.axes.dataLim.intervaly = ymin, ymax\n if not viewMutated:\n self.axes.viewLim.intervaly = ymin, ymax\n" ]
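A minimal usage sketch for the Axis API implemented in the matplotlib row above; the figure, plotted data, and styling values are illustrative assumptions, not part of the original file:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])

# XAxis.set_ticks_position routes through set_tick_params, as in the code above
ax.xaxis.set_ticks_position('top')

# Axis.grid flips gridOn on each tick and forwards the line properties to it
ax.yaxis.grid(True, color='r', linestyle='-', linewidth=2)

# Axis.set_ticks installs a FixedLocator; set_ticklabels installs a FixedFormatter
ax.xaxis.set_ticks([0, 1, 2])
ax.xaxis.set_ticklabels(['low', 'mid', 'high'])

plt.show()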
[ [ "matplotlib.transforms.Bbox", "matplotlib.transforms.Bbox.unit", "numpy.sqrt", "numpy.asarray", "matplotlib.transforms.Affine2D", "numpy.concatenate", "matplotlib.artist.Artist.update_from", "matplotlib.cbook.iterable", "numpy.fliplr", "matplotlib.path.Path.unit_circle", "numpy.asanyarray", "numpy.zeros", "matplotlib.artist.kwdoc", "numpy.ma.isMaskedArray", "matplotlib.artist.Artist.set_alpha", "matplotlib.artist.Artist.__init__", "matplotlib.path.Path", "matplotlib.cm.ScalarMappable.__init__", "numpy.array", "matplotlib.colors.colorConverter.to_rgba_array", "matplotlib.transforms.IdentityTransform", "numpy.empty", "matplotlib.docstring.interpd.update", "matplotlib.mlab.contiguous_regions", "matplotlib.cbook.is_string_like" ], [ "matplotlib.transforms.Bbox.union", "matplotlib.ticker.FixedFormatter", "matplotlib.cbook.CallbackRegistry", "matplotlib.ticker.AutoLocator", "matplotlib.artist.setp", "matplotlib.ticker.FixedLocator", "matplotlib.cbook.popall", "matplotlib.transforms.Bbox.from_extents", "matplotlib.ticker.ScalarFormatter", "matplotlib.artist.Artist.set_clip_path", "matplotlib.transforms.interval_contains", "matplotlib.artist.Artist.__init__", "numpy.isnan", "matplotlib.cbook.deprecated", "matplotlib.font_manager.FontProperties", "matplotlib.ticker.NullFormatter", "matplotlib.cbook.silent_list", "matplotlib.patches.bbox_artist", "numpy.array", "matplotlib.ticker.NullLocator", "matplotlib.lines.Line2D", "matplotlib.transforms.IdentityTransform", "matplotlib.scale.scale_factory", "matplotlib.units.registry.get_converter", "matplotlib.cbook.is_string_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
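One hypothetical way to consume records like the ones in this dump, assuming they are exported as JSON lines with the column names shown here (the filename and export format are invented for the example):

import json

with open('code_apis.jsonl') as f:  # hypothetical export of this dataset
    for line in f:
        row = json.loads(line)
        # each row pairs repo metadata with per-file code and detected API calls
        for path, apis in zip(row['file_path'], row['apis']):
            print(row['repo_name'], path, len(apis), 'detected API calls')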
waterfallhyb/portfolio-examples
[ "46ff40f593c88979495f0987f857d056f8addaf7", "46ff40f593c88979495f0987f857d056f8addaf7" ]
[ "tensorflow2/ABC_COVID-19/ABC_IPU.py", "pytorch/gpt2/utils.py" ]
[ "# Copyright 2020 Graphcore Ltd.\r\n\"\"\"\r\nABC algorithm for COVID-19 modelling, replicated across multiple IPUs.\r\n\r\nSee README for model background.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport os\r\nimport time as time\r\nfrom tensorflow.python import ipu\r\nimport tensorflow as tf\r\nimport tensorflow_probability as tfp\r\n\r\nimport covid_data\r\nfrom argparser import get_argparser\r\n\r\ntfd = tfp.distributions\r\n\r\n# Parse the CLI args\r\nap = get_argparser()\r\nargs = ap.parse_args()\r\n\r\nassert (not args.enqueue_chunk_size or\r\n args.n_samples_per_batch % args.enqueue_chunk_size == 0), \\\r\n \"--enqueue-chunk-size must divide into --n-samples-per-batch exactly\"\r\nif args.samples_filepath:\r\n assert os.path.exists(os.path.dirname(os.path.abspath(args.samples_filepath))), \\\r\n \"Path to save samples (--samples-fn) does not exist.\"\r\n\r\n\r\n# Mapping to tf constants to avoid graph recompilation.\r\nargs.tolerance = tf.constant(args.tolerance, dtype=tf.float32)\r\nargs.n_samples_target = tf.constant(args.n_samples_target, dtype=tf.int32)\r\nargs.max_n_runs = tf.constant(args.max_n_runs, dtype=tf.int32)\r\n# The parameters args.enqueue_chunk_size and n_samples_per_batch are not mapped\r\n# to constants since they change the data structure and respective\r\n# layout of convolutions on the IPU.\r\n\r\n# Modelling constants\r\nCOUNTRY_DATA_TRAIN, POPULATION = covid_data.get_data(args.country)\r\n# Casting population to tf.constant avoids recompilation but increases\r\n# processing time by around 15%\r\n# POPULATION = tf.constant(POPULATION, dtype=tf.float32)\r\n\r\nMIXING_MATRIX = tf.constant([[-1, 1, 0, 0, 0, 0],\r\n [0, -1, 1, 0, 0, 0],\r\n [0, 0, -1, 1, 0, 0],\r\n [0, 0, -1, 0, 1, 0],\r\n [0, -1, 0, 0, 0, 1]],\r\n dtype=tf.float32)\r\n\r\nUNIFORM_PRIOR_UPPER_LIMIT = tf.constant(\r\n [1.0, 100.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0])\r\n\r\n# Run args\r\nMAX_REPORT_SIZE = int(5e9)\r\n\r\nif args.n_days is None:\r\n country_data_train = COUNTRY_DATA_TRAIN\r\nelse:\r\n country_data_train = COUNTRY_DATA_TRAIN[:, :args.n_days]\r\n\r\n\r\ndef configure_ipu():\r\n \"\"\"Reserve IPUs and setup profiling.\"\"\"\r\n if args.profile:\r\n print(f'Writing profile to {args.profile_dir}.')\r\n\r\n cfg = ipu.utils.create_ipu_config(\r\n profiling=args.profile,\r\n use_poplar_cbor_report=args.profile,\r\n profile_execution=ipu.utils.ExecutionProfileType.TILE_PROFILE\r\n if args.profile else False,\r\n report_directory=args.profile_dir if args.profile else '',\r\n max_report_size=MAX_REPORT_SIZE\r\n )\r\n cfg = ipu.utils.auto_select_ipus(cfg, args.replication_factor)\r\n ipu.utils.configure_ipu_system(cfg)\r\n\r\n\r\nconfigure_ipu()\r\n\r\n# Create an IPU distribution strategy.\r\nstrategy = ipu.ipu_strategy.IPUStrategy()\r\n\r\n# Create outfeed for streaming data to host\r\noutfeed_data = ipu.ipu_outfeed_queue.IPUOutfeedQueue(\r\n 'outfeed_data', replication_factor=args.replication_factor)\r\n\r\n\r\ndef conditional_enqueue_op(params, n_accs, dists, gain):\r\n \"\"\"Enqueue only if relevant samples are included.\"\"\"\r\n def _enq_fn(to_enq):\r\n return tf.no_op() if args.no_outfeed_ops \\\r\n else outfeed_data.enqueue(to_enq)\r\n\r\n if args.outfeed_num_samples:\r\n maybe_enqueue_op = tf.cond(\r\n tf.math.greater(gain, 0),\r\n lambda: _enq_fn([params, dists, n_accs]),\r\n lambda: tf.no_op()\r\n )\r\n else:\r\n maybe_enqueue_op = tf.cond(\r\n tf.math.greater(gain, 0),\r\n lambda: _enq_fn([params, dists]),\r\n lambda: tf.no_op()\r\n )\r\n return maybe_enqueue_op\r\n\r\n\r\ndef 
chunked_outfeed_enqueue(chunk_id, total_gain, p_vec, d_vec, acc_mask):\r\n \"\"\"Enqueue only relevant chunks.\r\n\r\n Iterate over chunks of param vector samples,\r\n only enqueue the host to outfeed if it has an\r\n accepted sample in it\r\n \"\"\"\r\n # sync between replicas\r\n g = ipu.cross_replica_ops.cross_replica_sum(\r\n acc_mask[chunk_id], name=\"accumulated_sum\")\r\n maybe_enqueue = \\\r\n conditional_enqueue_op(params=tf.gather(p_vec, chunk_id, axis=1),\r\n dists=tf.gather(d_vec, chunk_id),\r\n n_accs=acc_mask[chunk_id],\r\n gain=g)\r\n\r\n with tf.control_dependencies([maybe_enqueue]):\r\n g = tf.identity(g)\r\n return chunk_id + 1, total_gain + g, p_vec, d_vec, acc_mask\r\n\r\n\r\[email protected](experimental_compile=True)\r\ndef build_graph(accumulated_number_of_samples, run_number, local_tolerance):\r\n \"\"\"Run full simulation over all days.\"\"\"\r\n # init of the simulation\r\n n_days = tf.cast(country_data_train.shape[1], tf.int32)\r\n P = tf.ones(args.n_samples_per_batch) * POPULATION\r\n A_0 = tf.ones(args.n_samples_per_batch) * country_data_train[0, 0]\r\n R_0 = tf.ones(args.n_samples_per_batch) * country_data_train[1, 0]\r\n D_0 = tf.ones(args.n_samples_per_batch) * country_data_train[2, 0]\r\n # param_vector elements are\r\n # [alpha_0, alpha, n, beta, gamma, delta, eta, kappa]\r\n param_vector = tf.transpose(tfd.Uniform(\r\n tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),\r\n UNIFORM_PRIOR_UPPER_LIMIT,\r\n ).sample(args.n_samples_per_batch))\r\n\r\n summary = tf.zeros([n_days, 3, args.n_samples_per_batch])\r\n\r\n S_store = P - param_vector[7] * A_0 - (A_0 + R_0 + D_0)\r\n I_store = param_vector[7] * A_0\r\n A_store = A_0\r\n R_store = R_0\r\n D_store = D_0\r\n Ru_store = tf.zeros(args.n_samples_per_batch)\r\n\r\n summary = tf.tensor_scatter_nd_add(\r\n tensor=summary,\r\n indices=[[0, 0], [0, 1], [0, 2]],\r\n updates=tf.stack([A_store, R_store, D_store]))\r\n\r\n init_idx = tf.zeros([], dtype=tf.int32) + 1\r\n init_vars = \\\r\n [init_idx, summary, S_store, I_store,\r\n A_store, R_store, D_store, Ru_store]\r\n\r\n def body(i, s, S, I, A, R, D, Ru):\r\n \"\"\"Single update for one day.\"\"\"\r\n U = A + R + D\r\n alpha_t = param_vector[0] + (\r\n param_vector[1] / (tf.constant(1.0) + tf.pow(U, param_vector[2])))\r\n h_1 = (S * I / P) * alpha_t\r\n h_2 = I * param_vector[4]\r\n h_3 = A * param_vector[3]\r\n h_4 = A * param_vector[5]\r\n h_5 = I * param_vector[6] * param_vector[3]\r\n h = tf.stack([h_1, h_2, h_3, h_4, h_5])\r\n normal_sample = tfd.Normal(loc=h, scale=tf.sqrt(h)).sample()\r\n Y_store = tf.clip_by_value(tf.math.floor(normal_sample), 0.0, P)\r\n\r\n m = tf.matmul(tf.transpose(MIXING_MATRIX), Y_store)\r\n\r\n # Note: Simple vectorisation suppresses parameter update in loop.\r\n S = tf.clip_by_value(S + m[0, :], 0.0, P)\r\n I = tf.clip_by_value(I + m[1, :], 0.0, P)\r\n A = tf.clip_by_value(A + m[2, :], 0.0, P)\r\n R = tf.clip_by_value(R + m[3, :], 0.0, P)\r\n D = tf.clip_by_value(D + m[4, :], 0.0, P)\r\n Ru = tf.clip_by_value(Ru + m[5, :], 0.0, P)\r\n\r\n s = tf.tensor_scatter_nd_add(tensor=s,\r\n indices=[[i, 0], [i, 1], [i, 2]],\r\n updates=tf.stack([A, R, D]))\r\n\r\n return i+1, s, S, I, A, R, D, Ru\r\n\r\n # populate summary with data from different days\r\n k, summary, *_ = tf.while_loop(\r\n cond=lambda k, *_: k < n_days,\r\n body=body,\r\n loop_vars=init_vars\r\n )\r\n\r\n # calculate Euclid distances between real and simulated data\r\n t_summary = tf.transpose(summary, perm=[2, 1, 0])\r\n distances = 
tf.norm(tf.broadcast_to(country_data_train, tf.constant(\r\n [args.n_samples_per_batch,\r\n country_data_train.shape[0], country_data_train.shape[1]],\r\n dtype=tf.int32)) - t_summary, axis=2)\r\n reduced_distances = tf.reduce_sum(distances, axis=1)\r\n # calculate which simulations were successful\r\n acceptance_vector = tf.cast(\r\n reduced_distances <= local_tolerance, dtype=tf.int32)\r\n\r\n if args.enqueue_chunk_size:\r\n # split simulations into chunks, iterate over each chunk, counting\r\n # num. accepted and enqueueing chunk to outfeed if any accepted\r\n n_chunk = tf.constant(args.n_samples_per_batch // int(args.enqueue_chunk_size))\r\n acc_chunk_shp = [n_chunk, int(args.enqueue_chunk_size)]\r\n acc_chunk = \\\r\n tf.reduce_sum(tf.reshape(acceptance_vector, acc_chunk_shp), axis=1)\r\n param_chunk_shp = [param_vector.shape[0]] + acc_chunk_shp\r\n init_vars = [tf.constant(0),\r\n tf.constant(0),\r\n tf.reshape(param_vector, param_chunk_shp),\r\n tf.reshape(reduced_distances, acc_chunk_shp),\r\n acc_chunk]\r\n _, gain, _, _, _ = tf.while_loop(cond=lambda n, *_: tf.less(n, n_chunk),\r\n body=chunked_outfeed_enqueue,\r\n loop_vars=init_vars)\r\n else:\r\n num_accepted_samples = tf.reduce_sum(\r\n acceptance_vector, name=\"num_accepted_samples\")\r\n\r\n # sync between replicas\r\n gain = ipu.cross_replica_ops.cross_replica_sum(\r\n num_accepted_samples, name=\"accumulated_sum\")\r\n\r\n # transfer stats for simulations with at least once success\r\n maybe_enq = conditional_enqueue_op(params=param_vector,\r\n dists=reduced_distances,\r\n n_accs=num_accepted_samples,\r\n gain=gain)\r\n\r\n total_number_of_samples = accumulated_number_of_samples + gain\r\n return total_number_of_samples, run_number + 1, local_tolerance\r\n\r\n\r\[email protected](experimental_compile=True)\r\ndef loop_collect_samples(local_samples_target, local_max_num_runs, local_tolerance):\r\n \"\"\"Repeat batch simulations until target condition is reached.\"\"\"\r\n a = tf.zeros([], dtype=tf.int32) # Number of accepted samples\r\n n = tf.zeros([], dtype=tf.int32) # Number of runs\r\n a, n, *_ = tf.while_loop(\r\n lambda a, n, *_:\r\n tf.logical_and(\r\n tf.less(a, local_samples_target),\r\n tf.less(n, local_max_num_runs)),\r\n build_graph, [a, n, local_tolerance])\r\n\r\n return a, n\r\n\r\n\r\ndef dequeue_and_postproc(time_it=False):\r\n \"\"\"Dequeue the outfeed data stream and filter out the relevant data.\"\"\"\r\n if time_it and not args.sparse_output:\r\n start_time = time.time()\r\n\r\n deq_out = outfeed_data.dequeue()\r\n deq_end_time = time.time()\r\n\r\n if deq_out[0].shape[0] > 0: # Only process if something dequeued\r\n if args.outfeed_num_samples:\r\n (param_vector, reduced_distances, num_accepted_samples) = \\\r\n deq_out\r\n print(f\"Samples per IPU = {np.sum(num_accepted_samples, axis=0)}\")\r\n else:\r\n (param_vector, reduced_distances) = deq_out\r\n if time_it and not args.sparse_output:\r\n print(f'Dequeue-only time: {deq_end_time - start_time}')\r\n\r\n # Filtering relevant samples\r\n if args.replication_factor > 1:\r\n s = tf.shape(param_vector)\r\n pv = param_vector\r\n param_vector = tf.reshape(\r\n pv, tf.concat([[s[0] * s[1]], s[2:]], axis=0))\r\n t = reduced_distances.shape\r\n rd = reduced_distances\r\n reduced_distances = tf.reshape(\r\n rd, tf.concat([[t[0] * t[1]], [t[2]]], axis=0))\r\n\r\n acceptance_vector = tf.cast(\r\n reduced_distances <= args.tolerance, dtype=tf.bool)\r\n\r\n t_param_vector = tf.transpose(param_vector, perm=[1, 0, 2])\r\n eval_param_vector = tf.boolean_mask(\r\n 
t_param_vector, acceptance_vector, axis=1)\r\n if time_it and not args.sparse_output:\r\n proc_end_time = time.time()\r\n print(f'Process dequeued samples time: {proc_end_time - deq_end_time}')\r\n return param_vector, reduced_distances, eval_param_vector\r\n else:\r\n return None, None, None\r\n\r\n\r\ndef main():\r\n \"\"\"Warmup, timing, and stats output handling.\"\"\"\r\n with strategy.scope():\r\n # Warm-up\r\n if not args.sparse_output:\r\n print(\"Warming up...\")\r\n strategy.experimental_run_v2(\r\n loop_collect_samples,\r\n [args.n_samples_target,\r\n tf.constant(1, dtype=tf.int32),\r\n args.tolerance])\r\n if not args.no_outfeed_ops:\r\n outfeed_data.dequeue()\r\n\r\n # Time the compute\r\n if not args.sparse_output:\r\n print(\"Running...\")\r\n start_time = time.time()\r\n num_accepted_samples, num_runs = strategy.experimental_run_v2(\r\n loop_collect_samples,\r\n [args.n_samples_target,\r\n 10 if args.profile else args.max_n_runs,\r\n args.tolerance])\r\n end_time = time.time()\r\n samples_collected = np.int(num_accepted_samples)\r\n num_runs = np.int(num_runs)\r\n run_duration = end_time - start_time\r\n\r\n # Dequeue the data\r\n if args.no_outfeed_ops:\r\n start_time = end_time = time.time()\r\n else:\r\n start_time = time.time()\r\n param_vector, reduced_distances, eval_param_vector = \\\r\n dequeue_and_postproc(time_it=True)\r\n end_time = time.time()\r\n deq_proc_duration = end_time - start_time\r\n duration = run_duration + deq_proc_duration\r\n if args.sparse_output:\r\n print(f\"{duration:.3f} \\t {1e3*duration/num_runs:.3f} \\t \"\r\n f\"{run_duration:.3f} \\t {1e3*run_duration/num_runs:.3f}\")\r\n else:\r\n print(f\"Running ABC inference for {args.country}\\n\"\r\n f\"\\tBatch size: {args.n_samples_per_batch}\\n\"\r\n f\"\\tTolerance: {args.tolerance}\"\r\n f\"\\tTarget number of samples: {args.n_samples_target}\"\r\n f\"\\tEnqueue chunk size: {args.enqueue_chunk_size}\")\r\n print(\"=========================================\")\r\n print(\"IPU runs completed in {0:.3f} seconds\\n\".format(\r\n run_duration))\r\n print(f\"Samples collected: {samples_collected:.0f}\")\r\n print(f\"Number of runs: {num_runs:.0f} \"\r\n f\"with {args.replication_factor} replica(s)\")\r\n print(\"Time per run: {0:.3f} milliseconds\\n\".format(\r\n 1e3*run_duration/num_runs))\r\n\r\n print(\"Debug: Time for dequeue and processing: \"\r\n \"{0:.3f} second\\n\".format(deq_proc_duration))\r\n print(\"Debug: Total Time (inc dequeue): {0:.3f} second\\n\".format(\r\n duration))\r\n print(\"Debug: Time per run (inc dequeue): \"\r\n \"{0:.3f} milliseconds\\n\".format(1e3*duration/num_runs))\r\n if not args.no_outfeed_ops:\r\n print(f\"param_vector.shape = {param_vector.shape}\")\r\n print(f\"reduced_distances.shape = {reduced_distances.shape}\")\r\n print(f\"eval_param_vector.shape = {eval_param_vector.shape}\")\r\n\r\n if samples_collected < args.n_samples_target and not args.profile:\r\n raise NotImplementedError(\r\n \"Too few iterations. Increase max_num_runs parameter.\")\r\n\r\n if args.samples_filepath:\r\n # Save the accepted samples if filepath given\r\n np.savetxt(args.samples_filepath,\r\n eval_param_vector.numpy(),\r\n delimiter=\",\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "# Copyright (c) 2021 Graphcore Ltd. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport pickle\nimport random\nimport logging\nimport argparse\nimport multiprocessing\n\nimport numpy as np\nimport poptorch\nimport popdist\nimport popdist.poptorch\nimport torch\nimport torch.nn as nn\nimport horovod.torch as hvd\nimport torch.nn.utils.rnn as rnn_utils\n\nfrom torch import float16, float32\nfrom torch.utils.data import Dataset, IterableDataset\nfrom poptorch.optim import LAMB, AdamW, Adam\nfrom tfrecord.reader import tfrecord_loader\nfrom transformers import (get_constant_schedule,\n get_cosine_schedule_with_warmup,\n get_linear_schedule_with_warmup)\n\nTFRECORD_KEYS = ['input_ids'] # Torch Model Keys\n\n\ndef str_to_bool(value):\n if isinstance(value, bool) or value is None:\n return value\n if value.lower() in {'false', 'f', '0', 'no', 'n'}:\n return False\n elif value.lower() in {'true', 't', '1', 'yes', 'y'}:\n return True\n raise argparse.ArgumentTypeError(f'{value} is not a valid boolean value')\n\n\ndef expand_glob_files(files):\n result = []\n for filepath in files:\n expanded = glob.glob(filepath)\n if len(expanded) < 1:\n raise FileNotFoundError(f\"Could not find file: {filepath}\")\n result += expanded\n return result\n\n\nclass TFRecordPretrainingDataset(IterableDataset):\n \"\"\"\n Preprocessed GPT2 pretraining dataset read from TFRecord files.\n\n\n This Dataset is compatible with multiprocessing. Each Dataloader worker\n will only read a shard of each TFRecord file, which will speed up the Dataloader\n and ensure no worker loads the same data as another worker. You are strongly\n advised to use a large number (e.g. 
64) of dataloader workers because firstly,\n more workers could support high throughput, and secondly, more workers could\n give us more stochasticity and thus better convergence.\n\n\n Parameters\n ----------\n files: List of TFRecord files containing the preprocessed pretraining data\n shuffle: Shuffle the data?\n \"\"\"\n\n def __init__(self,\n input_files,\n shuffle=True):\n self.files = expand_glob_files(input_files)\n self.shuffle = shuffle\n self.reset()\n\n def reset(self):\n self.file_index = 0\n self.reader = iter([])\n\n def samples_per_file(self, filename):\n index_filename = filename.replace(\".tfrecord\", \".index\")\n count = sum(1 for _ in open(index_filename))\n return count\n\n def __len__(self):\n if getattr(self, \"_len\", None) is None:\n pool = multiprocessing.Pool(\n min(multiprocessing.cpu_count(), len(self.files)))\n num_samples = pool.map(self.samples_per_file, self.files)\n pool.close()\n pool.join()\n self._len = sum(num_samples)\n return self._len\n\n def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n if popdist.isPopdistEnvSet():\n self.worker_id = worker_info.id + worker_info.num_workers * popdist.getInstanceIndex()\n self.shard = worker_info.id + worker_info.num_workers * popdist.getInstanceIndex(), worker_info.num_workers * popdist.getNumInstances()\n else:\n self.worker_id = worker_info.id\n self.shard = worker_info.id, worker_info.num_workers\n else:\n self.shard = None\n self.reset()\n if self.shuffle:\n np.random.shuffle(self.files)\n return self\n\n def __next__(self):\n try:\n datum = next(self.reader)\n except StopIteration:\n if self.file_index >= len(self.files):\n raise StopIteration\n self.reader = tfrecord_loader(self.files[self.file_index],\n self.files[self.file_index].replace(\".tfrecord\", \".index\"),\n list(TFRECORD_KEYS),\n self.shard)\n self.file_index += 1\n datum = next(self.reader)\n input_ids = torch.tensor(datum[TFRECORD_KEYS[0]], dtype=torch.long)\n return input_ids\n\n\nclass MyDataset(Dataset):\n def __init__(self, input_list, max_len):\n self.input_list = input_list\n self.max_len = max_len\n\n def __getitem__(self, index):\n input_ids = self.input_list[index]\n if len(input_ids) > self.max_len:\n input_ids = input_ids[:self.max_len]\n input_ids = torch.tensor(input_ids, dtype=torch.long)\n return input_ids\n\n def __len__(self):\n return len(self.input_list)\n\n\ndef load_dataset(logger, args, vocab_size):\n \"\"\"\n load train and valid dataset\n \"\"\"\n logger(\"loading training dataset and validating dataset\")\n train_path = args.train_path\n\n if train_path == 'generated':\n num_instances = args.popdist_size if args.use_popdist else 1\n generated = np.random.randint(low=1, high=vocab_size,\n size=(4 * num_instances * args.replication_factor *\n args.batches_per_step * args.batch_size * args.gradient_accumulation,\n args.max_len + 1))\n train_dataset = MyDataset(generated, args.max_len + 1)\n val_dataset = MyDataset(generated, args.max_len + 1)\n elif 'tfrecord' in args.train_path:\n train_dataset = TFRecordPretrainingDataset(args.tfrecord_path[:])\n val_dataset = TFRecordPretrainingDataset(args.tfrecord_path[-1:])\n elif 'dynamic' in args.train_path:\n from data.indexed_dataset import make_indexed_dataset, GPTDataset\n data_prefix = args.data_prefix\n indexed_dataset = make_indexed_dataset(data_prefix)\n total_num_of_documents = indexed_dataset.sizes.shape[0]\n documents = np.arange(start=0, stop=total_num_of_documents, step=1, dtype=np.int32)\n train_dataset = 
GPTDataset(args, data_prefix, documents[:int(total_num_of_documents*0.997)], indexed_dataset)\n val_dataset = GPTDataset(args, data_prefix, documents[int(total_num_of_documents*0.997):], indexed_dataset, num_epochs=1)\n else:\n try:\n with open(train_path, \"rb\") as f:\n input_list = pickle.load(f)\n\n samples = []\n for article in input_list:\n start_point = 0\n while start_point < len(article) - args.max_len:\n samples.append(article[start_point: start_point + args.max_len])\n start_point += args.stride\n if start_point < len(article) and len(article) >= (args.max_len // 2):\n samples.append(article[len(article) - args.max_len:])\n random.shuffle(samples)\n\n # split train and valid dataset\n val_num = args.val_num\n input_list_train = samples[val_num:]\n input_list_val = samples[:val_num]\n\n train_dataset = MyDataset(input_list_train, args.max_len)\n val_dataset = MyDataset(input_list_val, args.max_len)\n except:\n raise RuntimeError(f\"Unknown dataset '{train_path}', you can try \\'generated\\'.\")\n\n return train_dataset, val_dataset\n\n\nclass GeneratedPretrainingDataset(Dataset):\n def __init__(self, vocab_size, sequence_length, seed=42):\n self.vocab_size = vocab_size\n self.sequence_length = sequence_length\n self.seed = seed\n self.data = self.generate_data()\n\n def generate_data(self):\n with torch.random.fork_rng():\n torch.manual_seed(self.seed)\n input_ids = torch.randint(0, self.vocab_size, [self.sequence_length],dtype=torch.long)\n label = input_ids\n return input_ids, label\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, __):\n return self.data\n\n\ndef get_generated_datum(config, vocab_size):\n samples_per_step = config.replication_factor * config.gradient_accumulation * config.batch_size * config.batches_per_step\n result = []\n dataset = GeneratedPretrainingDataset(vocab_size, config.max_len)\n data = (dataset[i] for i in range(samples_per_step))\n for batches in zip(*data):\n result.append(torch.stack(batches))\n return result\n\n\ndef calculate_acc(logit, labels, ignore_index=-100, reduction='mean'):\n mask = (labels != ignore_index).float()\n non_pad_mask = mask.sum(-1).unsqueeze(-1)\n if reduction == 'sum':\n return (logit.argmax(dim=-1) == labels).float().mul(mask).sum(-1)\n return (logit.argmax(dim=-1) == labels).float().mul(mask).div(non_pad_mask).sum(-1).mean()\n\n\ndef collate_fn(batch):\n input_ids = rnn_utils.pad_sequence(batch, batch_first=True, padding_value=0)\n labels = rnn_utils.pad_sequence(batch, batch_first=True, padding_value=-100)\n return input_ids, labels\n\nclass _WorkerInit:\n def __init__(self, seed):\n self.seed = seed\n\n def __call__(self, worker_id):\n np.random.seed((self.seed + worker_id) % np.iinfo(np.uint32).max)\n\n\ndef logger(msg):\n if not popdist.isPopdistEnvSet() or popdist.getInstanceIndex() == 0:\n logging.info(msg)\n\n\ndef cycle(iterator):\n \"\"\"\n Loop `iterator` forever\n \"\"\"\n while True:\n for item in iterator:\n yield item\n\n\ndef get_lr_scheduler(optimizer,\n scheduler_type,\n lr_warmup=None,\n num_steps=None):\n if scheduler_type == \"linear\":\n scheduler = get_linear_schedule_with_warmup(optimizer, lr_warmup, num_steps)\n elif scheduler_type == \"constant\":\n scheduler = get_constant_schedule(optimizer)\n elif scheduler_type == \"cosine\":\n scheduler = get_cosine_schedule_with_warmup(optimizer, lr_warmup, num_steps)\n else:\n raise ValueError(\"Unknown scheduler_type:\", scheduler_type)\n\n # Initialize step as Poptorch does not call optimizer.step() explicitly\n optimizer._step_count 
= 1\n\n return scheduler\n\n\ndef get_optimizer(optimizer, weight_decay, learning_rate, loss_scaling, model, use_popdist=False,\n enable_half_first_order_momentum=True):\n # Do not apply weight_decay for one-dimensional parameters\n regularized_params = []\n non_regularized_params = []\n for param in model.parameters():\n if param.requires_grad:\n if len(param.shape) == 1:\n non_regularized_params.append(param)\n else:\n regularized_params.append(param)\n\n params = [\n {\"params\": regularized_params, \"weight_decay\": weight_decay},\n {\"params\": non_regularized_params, \"weight_decay\": 0}\n ]\n\n first_order_type = float16 if enable_half_first_order_momentum else float32\n\n if optimizer == \"AdamW\":\n optimizer = AdamW(params,\n lr=learning_rate,\n weight_decay=0.01,\n eps=1e-6,\n bias_correction=False,\n loss_scaling=loss_scaling,\n accum_type=float16,\n first_order_momentum_accum_type=first_order_type,\n second_order_momentum_accum_type=float32)\n elif optimizer == \"Adam\":\n optimizer = Adam(params,\n lr=learning_rate,\n weight_decay=0.01,\n eps=1e-6,\n loss_scaling=loss_scaling,\n accum_type=float16,\n first_order_momentum_accum_type=first_order_type,\n second_order_momentum_accum_type=float32)\n elif optimizer == \"LAMBNoBiasCorrection\":\n optimizer = LAMB(params,\n lr=learning_rate,\n weight_decay=0,\n eps=1e-6,\n loss_scaling=loss_scaling,\n max_weight_norm=None,\n accum_type=float16,\n first_order_momentum_accum_type=first_order_type,\n second_order_momentum_accum_type=float32,\n bias_correction=False)\n elif optimizer == \"LAMB\":\n optimizer = LAMB(params,\n lr=learning_rate,\n weight_decay=0,\n eps=1e-6,\n loss_scaling=loss_scaling,\n max_weight_norm=None,\n accum_type=float16,\n first_order_momentum_accum_type=first_order_type,\n second_order_momentum_accum_type=float32,\n bias_correction=True)\n else:\n raise ValueError(\"Unknown Optimizer:\", optimizer)\n\n # Make optimizers distributed\n if use_popdist:\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n\n return optimizer\n\n\ndef sync_metrics(outputs, factor=1, average=True):\n if popdist.isPopdistEnvSet():\n if isinstance(outputs, float):\n return float(hvd.allreduce(torch.Tensor([outputs]), average=average).item())\n else:\n return [hvd.allreduce(output.div(factor), average=average).mean().item() for output in outputs]\n else:\n if isinstance(outputs, float):\n return outputs\n else:\n return [output.div(factor).mean().item() for output in outputs]\n\n\ndef outline_attribute(module: nn.Module, value: str):\n \"\"\"Adds an attribute to a module. This attribute will be used\n when comparing operation equivalence in outlining. 
For example:\n\n layer1 = nn.Linear(...)\n layer2 = nn.Linear(...)\n layer3 = nn.Linear(...)\n layer4 = nn.Linear(...)\n outline_attribute(layer1, \"A\")\n outline_attribute(layer2, \"A\")\n outline_attribute(layer3, \"B\")\n\n The code for layer1 can be reused for layer2.\n But it can't be used for layer3 or layer4.\n \"\"\"\n context = poptorch.Attribute(__outline={\"layer\": value})\n\n def enable(*args):\n context.__enter__()\n\n def disable(*args):\n context.__exit__(None, None, None)\n\n module.register_forward_pre_hook(enable)\n module.register_forward_hook(disable)\n\n\ndef _get_layer_ipu(layers_per_ipu):\n # List of the IPU Id for each encoder layer\n layer_ipu = []\n for ipu, n_layers in enumerate(layers_per_ipu):\n layer_ipu += [ipu] * n_layers\n return layer_ipu\n\n\ndef recomputation_checkpoint(module: nn.Module):\n \"\"\"Annotates the output of a module to be checkpointed instead of\n recomputed\"\"\"\n\n def recompute_outputs(module, inputs, outputs):\n return tuple(poptorch.recomputationCheckpoint(y) for y in outputs)\n\n module.register_forward_hook(recompute_outputs)\n\n\nclass SerializedLinear(nn.Linear):\n def __init__(self, in_features, out_features, factor, bias=False,\n mode=poptorch.MatMulSerializationMode.OutputChannels):\n super().__init__(in_features, out_features, bias)\n self.mode = mode\n self.factor = factor\n\n def forward(self, x):\n size_out = x.size()[:-1] + (self.out_features,)\n output = poptorch.serializedMatMul(x, self.weight.t(), self.mode, self.factor)\n if self.bias is not None:\n output += self.bias\n return output.view(*size_out)\n\n\nclass SerializedEmbedding(nn.Module):\n \"\"\"\n Wrapper for `nn.Embedding` layer that performs the embedding look-up into\n smaller serialized steps in order to reduce memory in the embedding gradient\n calculation.\n\n Args:\n embedding: A `nn.Embedding` to wrap\n serialization_factor: The number of serialized embedding look-ups\n \"\"\"\n\n def __init__(self, embedding: nn.Embedding, serialization_factor: int):\n super().__init__()\n self.serialization_factor = serialization_factor\n self.num_embeddings = embedding.num_embeddings\n\n # Num embeddings should be divisible by the serialization factor\n assert self.num_embeddings % self.serialization_factor == 0\n self.split_size = self.num_embeddings // self.serialization_factor\n self.split_embeddings = nn.ModuleList(\n [nn.Embedding.from_pretrained(embedding.weight[i * self.split_size:(i + 1) * self.split_size, :].detach(),\n freeze=False,\n padding_idx=embedding.padding_idx if i == 0 else None)\n for i in range(self.serialization_factor)])\n\n def deserialize(self):\n \"\"\"\n Deserialize the internal wrapped embedding layer and return it as a\n `nn.Embedding` object.\n\n Returns:\n `nn.Embedding` layer\n \"\"\"\n return nn.Embedding.from_pretrained(torch.vstack([l.weight for l in self.split_embeddings]), padding_idx=0)\n\n def forward(self, indices):\n # iterate through the splits\n x_sum = None\n for i in range(self.serialization_factor):\n # mask out the indices not in this split\n split_indices = indices - i * self.split_size\n mask = (split_indices >= 0) * (split_indices < self.split_size)\n mask = mask.detach()\n split_indices *= mask\n\n # do the embedding lookup\n x = self.split_embeddings[i](split_indices)\n\n # multiply the output by mask\n x *= mask.unsqueeze(-1)\n\n # add to partial\n if x_sum is not None:\n x_sum += x\n else:\n x_sum = x\n return x_sum\n" ]
[ [ "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.cast", "tensorflow.python.ipu.ipu_strategy.IPUStrategy", "numpy.int", "tensorflow.python.ipu.utils.auto_select_ipus", "tensorflow.boolean_mask", "tensorflow.while_loop", "tensorflow.python.ipu.cross_replica_ops.cross_replica_sum", "tensorflow.gather", "tensorflow.python.ipu.utils.create_ipu_config", "tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue", "tensorflow.python.ipu.utils.configure_ipu_system", "tensorflow.shape", "tensorflow.less", "tensorflow.pow", "tensorflow.identity", "tensorflow.function", "tensorflow.no_op", "tensorflow.math.floor", "numpy.sum", "tensorflow.clip_by_value", "tensorflow.constant", "tensorflow.transpose", "tensorflow.reshape", "tensorflow.ones", "tensorflow.math.greater", "tensorflow.sqrt" ], [ "torch.randint", "torch.Tensor", "torch.random.fork_rng", "torch.manual_seed", "torch.nn.utils.rnn.pad_sequence", "numpy.arange", "torch.utils.data.get_worker_info", "numpy.random.shuffle", "torch.tensor", "torch.vstack", "numpy.iinfo", "torch.stack", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
oesteban/dipy
[ "57f7ec926f914d72f7f2f8feb8ccb51ab827895d", "57f7ec926f914d72f7f2f8feb8ccb51ab827895d" ]
[ "dipy/reconst/tests/test_shore_odf.py", "dipy/reconst/shore.py" ]
[ "import numpy as np\r\nfrom dipy.data import get_sphere, get_3shell_gtab, get_isbi2013_2shell_gtab\r\nfrom dipy.reconst.shore import ShoreModel\r\nfrom dipy.reconst.shm import QballModel, sh_to_sf\r\nfrom dipy.reconst.peaks import gfa, peak_directions\r\nfrom numpy.testing import (assert_equal,\r\n assert_almost_equal,\r\n run_module_suite,\r\n assert_array_equal,\r\n assert_raises)\r\nfrom dipy.sims.voxel import SticksAndBall\r\nfrom dipy.core.subdivide_octahedron import create_unit_sphere\r\nfrom dipy.core.sphere_stats import angular_similarity\r\nfrom dipy.reconst.tests.test_dsi import sticks_and_ball_dummies\r\n\r\n\r\ndef test_shore_odf():\r\n gtab = get_isbi2013_2shell_gtab()\r\n\r\n # load symmetric 724 sphere\r\n sphere = get_sphere('symmetric724')\r\n\r\n # load icosahedron sphere\r\n sphere2 = create_unit_sphere(5) \r\n data, golden_directions = SticksAndBall(gtab, d=0.0015,\r\n S0=100, angles=[(0, 0), (90, 0)],\r\n fractions=[50, 50], snr=None)\r\n asm = ShoreModel(gtab,radial_order=6, zeta=700, lambdaN=1e-8, lambdaL=1e-8)\r\n # symmetric724\r\n asmfit = asm.fit(data)\r\n odf = asmfit.odf(sphere)\r\n odf_sh = asmfit.odf_sh()\r\n odf_from_sh = sh_to_sf(odf_sh, sphere, 6, basis_type=None)\r\n assert_almost_equal(odf, odf_from_sh, 10)\r\n\r\n\r\n directions, _ , _ = peak_directions(odf, sphere, .35, 25)\r\n assert_equal(len(directions), 2)\r\n assert_almost_equal(angular_similarity(directions, golden_directions), 2, 1)\r\n\r\n # 5 subdivisions\r\n odf = asmfit.odf(sphere2)\r\n directions, _ , _ = peak_directions(odf, sphere2, .35, 25)\r\n assert_equal(len(directions), 2)\r\n assert_almost_equal(angular_similarity(directions, golden_directions), 2, 1)\r\n\r\n sb_dummies = sticks_and_ball_dummies(gtab)\r\n for sbd in sb_dummies:\r\n data, golden_directions = sb_dummies[sbd]\r\n asmfit = asm.fit(data)\r\n odf = asmfit.odf(sphere2)\r\n directions, _ , _ = peak_directions(odf, sphere2, .35, 25)\r\n if len(directions) <= 3:\r\n assert_equal(len(directions), len(golden_directions))\r\n if len(directions) > 3:\r\n assert_equal(gfa(odf) < 0.1, True)\r\n\r\n\r\ndef test_multivox_shore(): \r\n gtab = get_3shell_gtab()\r\n\r\n data = np.random.random([20, 30, 1, gtab.gradients.shape[0]])\r\n radial_order = 4\r\n zeta = 700\r\n asm = ShoreModel(gtab, radial_order=radial_order, zeta=zeta, lambdaN=1e-8, lambdaL=1e-8)\r\n asmfit = asm.fit(data)\r\n c_shore=asmfit.shore_coeff\r\n assert_equal(c_shore.shape[0:3], data.shape[0:3])\r\n assert_equal(np.alltrue(np.isreal(c_shore)), True)\r\n\r\n\r\nif __name__ == '__main__':\r\n run_module_suite()\r\n\r\n \r\n", "from __future__ import division\n\nfrom warnings import warn\nfrom math import factorial\n\nimport numpy as np\n\nfrom scipy.special import genlaguerre, gamma, hyp2f1\n\nfrom .cache import Cache\nfrom .multi_voxel import multi_voxel_fit\nfrom .shm import real_sph_harm\nfrom ..core.geometry import cart2sphere\n\nfrom ..utils.optpkg import optional_package\n\ncvxopt, have_cvxopt, _ = optional_package(\"cvxopt\")\n\n\nclass ShoreModel(Cache):\n\n r\"\"\"Simple Harmonic Oscillator based Reconstruction and Estimation\n (SHORE) [1]_ of the diffusion signal.\n\n The main idea is to model the diffusion signal as a linear combination of\n continuous functions $\\phi_i$,\n\n ..math::\n :nowrap:\n \\begin{equation}\n S(\\mathbf{q})= \\sum_{i=0}^I c_{i} \\phi_{i}(\\mathbf{q}).\n \\end{equation}\n\n where $\\mathbf{q}$ is the wavector which corresponds to different gradient\n directions. Numerous continuous functions $\\phi_i$ can be used to model\n $S$. 
Some are presented in [2,3,4]_.\n\n From the $c_i$ coefficients, there exist analytical formulae to estimate\n the ODF, the return to the origin probability (RTOP), the mean square\n displacement (MSD), amongst others [5]_.\n\n References\n ----------\n .. [1] Ozarslan E. et. al, \"Simple harmonic oscillator based reconstruction\n and estimation for one-dimensional q-space magnetic resonance\n (1D-SHORE)\", Proc Intl Soc Mag Reson Med, vol. 16, p. 35., 2008.\n\n .. [2] Merlet S. et. al, \"Continuous diffusion signal, EAP and ODF\n estimation via Compressive Sensing in diffusion MRI\", Medical\n Image Analysis, 2013.\n\n .. [3] Rathi Y. et. al, \"Sparse multi-shell diffusion imaging\", MICCAI,\n 2011.\n\n .. [4] Cheng J. et. al, \"Theoretical Analysis and Practical Insights on\n EAP Estimation via a Unified HARDI Framework\", MICCAI workshop on\n Computational Diffusion MRI, 2011.\n\n .. [5] Ozarslan E. et. al, \"Mean apparent propagator (MAP) MRI: A novel\n diffusion imaging method for mapping tissue microstructure\",\n NeuroImage, 2013.\n\n Notes\n -----\n The implementation of SHORE depends on CVXOPT (http://cvxopt.org/). This\n software is licensed under the GPL (see:\n http://cvxopt.org/copyright.html) and you may be subject to this license\n when using SHORE.\n \"\"\"\n\n def __init__(self,\n gtab,\n radial_order=6,\n zeta=700,\n lambdaN=1e-8,\n lambdaL=1e-8,\n tau=1. / (4 * np.pi ** 2),\n constrain_e0=False,\n positive_constraint=False,\n pos_grid=11,\n pos_radius=20e-03\n ):\n r\"\"\" Analytical and continuous modeling of the diffusion signal with\n respect to the SHORE basis [1,2]_.\n This implementation is a modification of SHORE presented in [1]_.\n The modification was made to obtain the same ordering of the basis\n presented in [2,3]_.\n\n The main idea is to model the diffusion signal as a linear\n combination of continuous functions $\\phi_i$,\n\n ..math::\n :nowrap:\n \\begin{equation}\n S(\\mathbf{q})= \\sum_{i=0}^I c_{i} \\phi_{i}(\\mathbf{q}).\n \\end{equation}\n\n where $\\mathbf{q}$ is the wavevector which corresponds to different\n gradient directions.\n\n From the $c_i$ coefficients, there exists an analytical formula to\n estimate the ODF.\n\n\n Parameters\n ----------\n gtab : GradientTable,\n gradient directions and bvalues container class\n radial_order : unsigned int,\n an even integer that represents the order of the basis\n zeta : unsigned int,\n scale factor\n lambdaN : float,\n radial regularisation constant\n lambdaL : float,\n angular regularisation constant\n tau : float,\n diffusion time. By default the value that makes q equal to the\n square root of the b-value.\n constrain_e0 : bool,\n Constrain the optimization such that E(0) = 1.\n positive_constraint : bool,\n Constrain the propagator to be positive.\n pos_grid : int,\n Grid that defines the points of the EAP in which we want to enforce\n positivity.\n pos_radius : float,\n Radius of the grid of the EAP in which to enforce positivity in\n millimeters. By default 20e-03 mm.\n\n References\n ----------\n .. [1] Merlet S. et al., \"Continuous diffusion signal, EAP and\n ODF estimation via Compressive Sensing in diffusion MRI\", Medical\n Image Analysis, 2013.\n\n .. [2] Cheng J. et al., \"Theoretical Analysis and Practical Insights\n on EAP Estimation via a Unified HARDI Framework\", MICCAI workshop on\n Computational Diffusion MRI, 2011.\n\n .. [3] Ozarslan E.
et al., \"Mean apparent propagator (MAP) MRI: A novel\n diffusion imaging method for mapping tissue microstructure\",\n NeuroImage, 2013.\n\n Examples\n --------\n In this example, where the data, gradient table and sphere tessellation\n used for reconstruction are provided, we model the diffusion signal\n with respect to the SHORE basis and compute the real and analytical\n ODF.\n\n from dipy.data import get_data,get_sphere\n sphere = get_sphere('symmetric724')\n fimg, fbvals, fbvecs = get_data('ISBI_testing_2shells_table')\n bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)\n gtab = gradient_table(bvals, bvecs)\n from dipy.sims.voxel import SticksAndBall\n data, golden_directions = SticksAndBall(gtab, d=0.0015,\n S0=1, angles=[(0, 0), (90, 0)],\n fractions=[50, 50], snr=None)\n from dipy.reconst.canal import ShoreModel\n radial_order = 4\n zeta = 700\n asm = ShoreModel(gtab, radial_order=radial_order, zeta=zeta,\n lambdaN=1e-8, lambdaL=1e-8)\n asmfit = asm.fit(data)\n odf= asmfit.odf(sphere)\n \"\"\"\n\n self.bvals = gtab.bvals\n self.bvecs = gtab.bvecs\n self.gtab = gtab\n self.constrain_e0 = constrain_e0\n if radial_order > 0 and not(bool(radial_order % 2)):\n self.radial_order = radial_order\n else:\n msg = \"radial_order must be a non-zero even positive number.\"\n raise ValueError(msg)\n self.zeta = zeta\n self.lambdaL = lambdaL\n self.lambdaN = lambdaN\n if (gtab.big_delta is None) or (gtab.small_delta is None):\n self.tau = tau\n else:\n self.tau = gtab.big_delta - gtab.small_delta / 3.0\n\n if positive_constraint and not(constrain_e0):\n msg = \"Constrain_e0 must be True to enfore positivity.\"\n raise ValueError(msg)\n self.positive_constraint = positive_constraint\n self.pos_grid = pos_grid\n self.pos_radius = pos_radius\n\n @multi_voxel_fit\n def fit(self, data):\n\n Lshore = l_shore(self.radial_order)\n Nshore = n_shore(self.radial_order)\n # Generate the SHORE basis\n M = self.cache_get('shore_matrix', key=self.gtab)\n if M is None:\n M = shore_matrix(\n self.radial_order, self.zeta, self.gtab, self.tau)\n self.cache_set('shore_matrix', self.gtab, M)\n\n MpseudoInv = self.cache_get('shore_matrix_reg_pinv', key=self.gtab)\n if MpseudoInv is None:\n MpseudoInv = np.dot(\n np.linalg.inv(np.dot(M.T, M) + self.lambdaN * Nshore + self.lambdaL * Lshore), M.T)\n self.cache_set('shore_matrix_reg_pinv', self.gtab, MpseudoInv)\n\n # Compute the signal coefficients in SHORE basis\n if not self.constrain_e0:\n coef = np.dot(MpseudoInv, data)\n\n signal_0 = 0\n\n for n in range(int(self.radial_order / 2) + 1):\n signal_0 += (\n coef[n] * (genlaguerre(n, 0.5)(0) * (\n (factorial(n)) /\n (2 * np.pi * (self.zeta ** 1.5) * gamma(n + 1.5))\n ) ** 0.5)\n )\n\n coef = coef / signal_0\n else:\n data = data / data[self.gtab.b0s_mask].mean()\n\n # If cvxopt is not available, bail (scipy is ~100 times slower)\n if not have_cvxopt:\n raise ValueError(\n 'CVXOPT package needed to enforce constraints')\n w_s = \"The implementation of SHORE depends on CVXOPT \"\n w_s += \" (http://cvxopt.org/). 
This software is licensed \"\n w_s += \"under the GPL (see: http://cvxopt.org/copyright.html) \"\n w_s += \" and you may be subject to this license when using SHORE.\"\n warn(w_s)\n import cvxopt.solvers\n M0 = M[self.gtab.b0s_mask, :]\n M0_mean = M0.mean(0)[None, :]\n Mprime = np.r_[M0_mean, M[~self.gtab.b0s_mask, :]]\n Q = cvxopt.matrix(np.ascontiguousarray(\n np.dot(Mprime.T, Mprime)\n + self.lambdaN * Nshore + self.lambdaL * Lshore\n ))\n\n data_b0 = data[self.gtab.b0s_mask].mean()\n data_single_b0 = np.r_[\n data_b0, data[~self.gtab.b0s_mask]] / data_b0\n p = cvxopt.matrix(np.ascontiguousarray(\n -1 * np.dot(Mprime.T, data_single_b0))\n )\n\n cvxopt.solvers.options['show_progress'] = False\n\n if not(self.positive_constraint):\n G = None\n h = None\n else:\n lg = int(np.floor(self.pos_grid ** 3 / 2))\n G = self.cache_get(\n 'shore_matrix_positive_constraint', key=(self.pos_grid, self.pos_radius))\n if G is None:\n v, t = create_rspace(self.pos_grid, self.pos_radius)\n\n psi = shore_matrix_pdf(\n self.radial_order, self.zeta, t[:lg])\n G = cvxopt.matrix(-1 * psi)\n self.cache_set(\n 'shore_matrix_positive_constraint', (self.pos_grid, self.pos_radius), G)\n h = cvxopt.matrix((1e-10) * np.ones((lg)), (lg, 1))\n\n A = cvxopt.matrix(np.ascontiguousarray(M0_mean))\n b = cvxopt.matrix(np.array([1.]))\n sol = cvxopt.solvers.qp(Q, p, G, h, A, b)\n\n if sol['status'] != 'optimal':\n warn('Optimization did not find a solution')\n\n coef = np.array(sol['x'])[:, 0]\n\n return ShoreFit(self, coef)\n\n\nclass ShoreFit():\n\n def __init__(self, model, shore_coef):\n \"\"\" Calculates diffusion properties for a single voxel\n\n Parameters\n ----------\n model : object,\n AnalyticalModel\n shore_coef : 1d ndarray,\n shore coefficients\n \"\"\"\n\n self.model = model\n self._shore_coef = shore_coef\n self.gtab = model.gtab\n self.radial_order = model.radial_order\n self.zeta = model.zeta\n\n def pdf_grid(self, gridsize, radius_max):\n r\"\"\" Applies the analytical FFT on $S$ to generate the diffusion\n propagator. 
This is calculated on a discrete 3D grid in order to\n obtain an EAP similar to that which is obtained with DSI.\n\n Parameters\n ----------\n gridsize : unsigned int\n dimension of the propagator grid\n radius_max : float\n maximal radius in which to compute the propagator\n\n Returns\n -------\n eap : ndarray\n the ensemble average propagator in the 3D grid\n\n \"\"\"\n # Create the grid in which to compute the pdf\n rgrid_rtab = self.model.cache_get(\n 'pdf_grid', key=(gridsize, radius_max))\n if rgrid_rtab is None:\n rgrid_rtab = create_rspace(gridsize, radius_max)\n self.model.cache_set(\n 'pdf_grid', (gridsize, radius_max), rgrid_rtab)\n rgrid, rtab = rgrid_rtab\n\n psi = self.model.cache_get(\n 'shore_matrix_pdf', key=(gridsize, radius_max))\n if psi is None:\n psi = shore_matrix_pdf(self.radial_order, self.zeta, rtab)\n self.model.cache_set(\n 'shore_matrix_pdf', (gridsize, radius_max), psi)\n\n propagator = np.dot(psi, self._shore_coef)\n eap = np.empty((gridsize, gridsize, gridsize), dtype=float)\n eap[tuple(rgrid.astype(int).T)] = propagator\n eap *= (2 * radius_max / (gridsize - 1)) ** 3\n\n return eap\n\n def pdf(self, r_points):\n \"\"\" Diffusion propagator on a given set of real points.\n if the array r_points is non writeable, then intermediate\n results are cached for faster recalculation\n \"\"\"\n if not r_points.flags.writeable:\n psi = self.model.cache_get(\n 'shore_matrix_pdf', key=hash(r_points.data))\n else:\n psi = None\n if psi is None:\n psi = shore_matrix_pdf(self.radial_order, self.zeta, r_points)\n if not r_points.flags.writeable:\n self.model.cache_set(\n 'shore_matrix_pdf', hash(r_points.data), psi)\n\n eap = np.dot(psi, self._shore_coef)\n\n return np.clip(eap, 0, eap.max())\n\n def odf_sh(self):\n r\"\"\" Calculates the real analytical ODF in terms of Spherical Harmonics.\n \"\"\"\n # Number of Spherical Harmonics involved in the estimation\n J = (self.radial_order + 1) * (self.radial_order + 2) / 2\n\n # Compute the Spherical Harmonics Coefficients\n c_sh = np.zeros(J)\n counter = 0\n\n for l in range(0, self.radial_order + 1, 2):\n for n in range(l, int((self.radial_order + l) / 2) + 1):\n for m in range(-l, l + 1):\n\n j = int(l + m + (2 * np.array(range(0, l, 2)) + 1).sum())\n\n Cnl = ((-1) ** (n - l / 2)) / (2.0 * (4.0 * np.pi ** 2 * self.zeta) ** (3.0 / 2.0)) * ((2.0 * (\n 4.0 * np.pi ** 2 * self.zeta) ** (3.0 / 2.0) * factorial(n - l)) / (gamma(n + 3.0 / 2.0))) ** (1.0 / 2.0)\n Gnl = (gamma(l / 2 + 3.0 / 2.0) * gamma(3.0 / 2.0 + n)) / (gamma(\n l + 3.0 / 2.0) * factorial(n - l)) * (1.0 / 2.0) ** (-l / 2 - 3.0 / 2.0)\n Fnl = hyp2f1(-n + l, l / 2 + 3.0 / 2.0, l + 3.0 / 2.0, 2.0)\n\n c_sh[j] += self._shore_coef[counter] * Cnl * Gnl * Fnl\n counter += 1\n\n return c_sh\n\n def odf(self, sphere):\n r\"\"\" Calculates the ODF for a given discrete sphere.\n \"\"\"\n upsilon = self.model.cache_get('shore_matrix_odf', key=sphere)\n if upsilon is None:\n upsilon = shore_matrix_odf(\n self.radial_order, self.zeta, sphere.vertices)\n self.model.cache_set('shore_matrix_odf', sphere, upsilon)\n\n odf = np.dot(upsilon, self._shore_coef)\n return odf\n\n def rtop_signal(self):\n r\"\"\" Calculates the analytical return to origin probability (RTOP)\n from the signal [1]_.\n\n References\n ----------\n .. [1] Ozarslan E. et. 
al, \"Mean apparent propagator (MAP) MRI: A novel\n diffusion imaging method for mapping tissue microstructure\",\n NeuroImage, 2013.\n \"\"\"\n rtop = 0\n c = self._shore_coef\n\n for n in range(int(self.radial_order / 2) + 1):\n rtop += c[n] * (-1) ** n * \\\n ((16 * np.pi * self.zeta ** 1.5 * gamma(n + 1.5)) / (\n factorial(n))) ** 0.5\n\n return np.clip(rtop, 0, rtop.max())\n\n def rtop_pdf(self):\n r\"\"\" Calculates the analytical return to origin probability (RTOP)\n from the pdf [1]_.\n\n References\n ----------\n .. [1] Ozarslan E. et. al, \"Mean apparent propagator (MAP) MRI: A novel\n diffusion imaging method for mapping tissue microstructure\",\n NeuroImage, 2013.\n \"\"\"\n rtop = 0\n c = self._shore_coef\n for n in range(int(self.radial_order / 2) + 1):\n rtop += c[n] * (-1) ** n * \\\n ((4 * np.pi ** 2 * self.zeta ** 1.5 * factorial(n)) / (gamma(n + 1.5))) ** 0.5 * \\\n genlaguerre(n, 0.5)(0)\n\n return np.clip(rtop, 0, rtop.max())\n\n def msd(self):\n r\"\"\" Calculates the analytical mean squared displacement (MSD) [1]_\n\n ..math::\n :nowrap:\n \\begin{equation}\n MSD:{DSI}=\\int_{-\\infty}^{\\infty}\\int_{-\\infty}^{\\infty}\\int_{-\\infty}^{\\infty} P(\\hat{\\mathbf{r}}) \\cdot \\hat{\\mathbf{r}}^{2} \\ dr_x \\ dr_y \\ dr_z\n \\end{equation}\n\n where $\\hat{\\mathbf{r}}$ is a point in the 3D propagator space (see Wu et. al [1]_).\n\n References\n ----------\n .. [1] Wu Y. et. al, \"Hybrid diffusion imaging\", NeuroImage, vol 36,\n p. 617-629, 2007.\n \"\"\"\n msd = 0\n c = self._shore_coef\n\n for n in range(int(self.radial_order / 2) + 1):\n msd += c[n] * (-1) ** n *\\\n (9 * (gamma(n + 1.5)) / (8 * np.pi ** 6 * self.zeta ** 3.5 * factorial(n))) ** 0.5 *\\\n hyp2f1(-n, 2.5, 1.5, 2)\n\n return np.clip(msd, 0, msd.max())\n\n def fitted_signal(self):\n \"\"\" The fitted signal.\n \"\"\"\n phi = self.model.cache_get('shore_matrix', key=self.model.gtab)\n return np.dot(phi, self._shore_coef)\n\n @property\n def shore_coeff(self):\n \"\"\"The SHORE coefficients\n \"\"\"\n return self._shore_coef\n\n\ndef shore_matrix(radial_order, zeta, gtab, tau=1 / (4 * np.pi ** 2)):\n r\"\"\"Compute the SHORE matrix for modified Merlet's 3D-SHORE [1]_\n\n ..math::\n :nowrap:\n \\begin{equation}\n \\textbf{E}(q\\textbf{u})=\\sum_{l=0, even}^{N_{max}}\n \\sum_{n=l}^{(N_{max}+l)/2}\n \\sum_{m=-l}^l c_{nlm}\n \\phi_{nlm}(q\\textbf{u})\n \\end{equation}\n\n where $\\phi_{nlm}$ is\n ..math::\n :nowrap:\n \\begin{equation}\n \\phi_{nlm}^{SHORE}(q\\textbf{u})=\\Biggl[\\dfrac{2(n-l)!}\n {\\zeta^{3/2} \\Gamma(n+3/2)} \\Biggr]^{1/2}\n \\Biggl(\\dfrac{q^2}{\\zeta}\\Biggr)^{l/2}\n exp\\Biggl(\\dfrac{-q^2}{2\\zeta}\\Biggr)\n L^{l+1/2}_{n-l} \\Biggl(\\dfrac{q^2}{\\zeta}\\Biggr)\n Y_l^m(\\textbf{u}).\n \\end{equation}\n\n Parameters\n ----------\n radial_order : unsigned int,\n an even integer that represent the order of the basis\n zeta : unsigned int,\n scale factor\n gtab : GradientTable,\n gradient directions and bvalues container class\n tau : float,\n diffusion time. By default the value that makes q=sqrt(b).\n\n References\n ----------\n .. [1] Merlet S. et. 
al, \"Continuous diffusion signal, EAP and\n ODF estimation via Compressive Sensing in diffusion MRI\", Medical\n Image Analysis, 2013.\n\n \"\"\"\n\n qvals = np.sqrt(gtab.bvals / (4 * np.pi ** 2 * tau))\n qvals[gtab.b0s_mask] = 0\n bvecs = gtab.bvecs\n\n qgradients = qvals[:, None] * bvecs\n\n r, theta, phi = cart2sphere(qgradients[:, 0], qgradients[:, 1],\n qgradients[:, 2])\n theta[np.isnan(theta)] = 0\n F = radial_order / 2\n n_c = np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))\n M = np.zeros((r.shape[0], n_c))\n\n counter = 0\n for l in range(0, radial_order + 1, 2):\n for n in range(l, int((radial_order + l) / 2) + 1):\n for m in range(-l, l + 1):\n M[:, counter] = real_sph_harm(m, l, theta, phi) * \\\n genlaguerre(n - l, l + 0.5)(r ** 2 / zeta) * \\\n np.exp(- r ** 2 / (2.0 * zeta)) * \\\n _kappa(zeta, n, l) * \\\n (r ** 2 / zeta) ** (l / 2)\n counter += 1\n return M\n\n\ndef _kappa(zeta, n, l):\n return np.sqrt((2 * factorial(n - l)) / (zeta ** 1.5 * gamma(n + 1.5)))\n\n\ndef shore_matrix_pdf(radial_order, zeta, rtab):\n r\"\"\"Compute the SHORE propagator matrix [1]_\"\n\n Parameters\n ----------\n radial_order : unsigned int,\n an even integer that represent the order of the basis\n zeta : unsigned int,\n scale factor\n rtab : array, shape (N,3)\n real space points in which calculates the pdf\n\n References\n ----------\n .. [1] Merlet S. et. al, \"Continuous diffusion signal, EAP and\n ODF estimation via Compressive Sensing in diffusion MRI\", Medical\n Image Analysis, 2013.\n \"\"\"\n\n r, theta, phi = cart2sphere(rtab[:, 0], rtab[:, 1], rtab[:, 2])\n theta[np.isnan(theta)] = 0\n F = radial_order / 2\n n_c = np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))\n psi = np.zeros((r.shape[0], n_c))\n counter = 0\n for l in range(0, radial_order + 1, 2):\n for n in range(l, int((radial_order + l) / 2) + 1):\n for m in range(-l, l + 1):\n psi[:, counter] = real_sph_harm(m, l, theta, phi) * \\\n genlaguerre(n - l, l + 0.5)(4 * np.pi ** 2 * zeta * r ** 2 ) *\\\n np.exp(-2 * np.pi ** 2 * zeta * r ** 2) *\\\n _kappa_pdf(zeta, n, l) *\\\n (4 * np.pi ** 2 * zeta * r ** 2) ** (l / 2) * \\\n (-1) ** (n - l / 2)\n counter += 1\n return psi\n\n\ndef _kappa_pdf(zeta, n, l):\n return np.sqrt((16 * np.pi ** 3 * zeta ** 1.5 * factorial(n - l)) / gamma(n + 1.5))\n\n\ndef shore_matrix_odf(radial_order, zeta, sphere_vertices):\n r\"\"\"Compute the SHORE ODF matrix [1]_\"\n\n Parameters\n ----------\n radial_order : unsigned int,\n an even integer that represent the order of the basis\n zeta : unsigned int,\n scale factor\n sphere_vertices : array, shape (N,3)\n vertices of the odf sphere\n\n References\n ----------\n .. [1] Merlet S. et. 
al, \"Continuous diffusion signal, EAP and\n ODF estimation via Compressive Sensing in diffusion MRI\", Medical\n Image Analysis, 2013.\n \"\"\"\n\n r, theta, phi = cart2sphere(sphere_vertices[:, 0], sphere_vertices[:, 1],\n sphere_vertices[:, 2])\n theta[np.isnan(theta)] = 0\n F = radial_order / 2\n n_c = np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))\n upsilon = np.zeros((len(sphere_vertices), n_c))\n counter = 0\n for l in range(0, radial_order + 1, 2):\n for n in range(l, int((radial_order + l) / 2) + 1):\n for m in range(-l, l + 1):\n upsilon[:, counter] = (-1) ** (n - l / 2.0) * _kappa_odf(zeta, n, l) * \\\n hyp2f1(l - n, l / 2.0 + 1.5, l + 1.5, 2.0) * \\\n real_sph_harm(m, l, theta, phi)\n counter += 1\n\n return upsilon\n\n\ndef _kappa_odf(zeta, n, l):\n return np.sqrt((gamma(l / 2.0 + 1.5) ** 2 * gamma(n + 1.5) * 2 ** (l + 3)) /\n (16 * np.pi ** 3 * (zeta) ** 1.5 * factorial(n - l) * gamma(l + 1.5) ** 2))\n\n\ndef l_shore(radial_order):\n \"Returns the angular regularisation matrix for SHORE basis\"\n F = radial_order / 2\n n_c = np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))\n diagL = np.zeros(n_c)\n counter = 0\n for l in range(0, radial_order + 1, 2):\n for n in range(l, int((radial_order + l) / 2) + 1):\n for m in range(-l, l + 1):\n diagL[counter] = (l * (l + 1)) ** 2\n counter += 1\n\n return np.diag(diagL)\n\n\ndef n_shore(radial_order):\n \"Returns the angular regularisation matrix for SHORE basis\"\n F = radial_order / 2\n n_c = np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))\n diagN = np.zeros(n_c)\n counter = 0\n for l in range(0, radial_order + 1, 2):\n for n in range(l, int((radial_order + l) / 2) + 1):\n for m in range(-l, l + 1):\n diagN[counter] = (n * (n + 1)) ** 2\n counter += 1\n\n return np.diag(diagN)\n\n\ndef create_rspace(gridsize, radius_max):\n \"\"\" Create the real space table, that contains the points in which\n to compute the pdf.\n\n Parameters\n ----------\n gridsize : unsigned int\n dimension of the propagator grid\n radius_max : float\n maximal radius in which compute the propagator\n\n Returns\n -------\n vecs : array, shape (N,3)\n positions of the pdf points in a 3D matrix\n\n tab : array, shape (N,3)\n real space points in which calculates the pdf\n \"\"\"\n\n radius = gridsize // 2\n vecs = []\n for i in range(-radius, radius + 1):\n for j in range(-radius, radius + 1):\n for k in range(-radius, radius + 1):\n vecs.append([i, j, k])\n\n vecs = np.array(vecs, dtype=np.float32)\n tab = vecs / radius\n tab = tab * radius_max\n vecs = vecs + radius\n\n return vecs, tab\n\n\ndef shore_indices(radial_order, index):\n r\"\"\"Given the basis order and the index, return the shore indices n, l, m\n for modified Merlet's 3D-SHORE\n ..math::\n :nowrap:\n \\begin{equation}\n \\textbf{E}(q\\textbf{u})=\\sum_{l=0, even}^{N_{max}}\n \\sum_{n=l}^{(N_{max}+l)/2}\n \\sum_{m=-l}^l c_{nlm}\n \\phi_{nlm}(q\\textbf{u})\n \\end{equation}\n\n where $\\phi_{nlm}$ is\n ..math::\n :nowrap:\n \\begin{equation}\n \\phi_{nlm}^{SHORE}(q\\textbf{u})=\\Biggl[\\dfrac{2(n-l)!}\n {\\zeta^{3/2} \\Gamma(n+3/2)} \\Biggr]^{1/2}\n \\Biggl(\\dfrac{q^2}{\\zeta}\\Biggr)^{l/2}\n exp\\Biggl(\\dfrac{-q^2}{2\\zeta}\\Biggr)\n L^{l+1/2}_{n-l} \\Biggl(\\dfrac{q^2}{\\zeta}\\Biggr)\n Y_l^m(\\textbf{u}).\n \\end{equation}\n\n Parameters\n ----------\n radial_order : unsigned int\n an even integer that represent the maximal order of the basis\n index : unsigned int\n index of the coefficients, start from 0\n\n Returns\n -------\n n : unsigned int\n the index n of the modified shore 
basis\n l : unsigned int\n the index l of the modified shore basis\n m : int\n the index m of the modified shore basis\n \"\"\"\n\n F = radial_order / 2\n n_c = np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))\n n_i = 0\n l_i = 0\n m_i = 0\n\n if n_c < (index + 1):\n msg = \"The index is higher than the number of coefficients of the truncated basis.\"\n raise ValueError(msg)\n else:\n counter = 0\n for l in range(0, radial_order + 1, 2):\n for n in range(l, int((radial_order + l) / 2) + 1):\n for m in range(-l, l + 1):\n if counter == index:\n n_i = n\n l_i = l\n m_i = m\n counter += 1\n return n_i, l_i, m_i\n\n\ndef shore_order(n, l, m):\n r\"\"\"Given the indices (n,l,m) of the basis, return the minimum order\n for those indices and their index for modified Merlet's 3D-SHORE.\n\n Parameters\n ----------\n n : unsigned int\n the index n of the modified shore basis\n l : unsigned int\n the index l of the modified shore basis\n m : int\n the index m of the modified shore basis\n\n Returns\n -------\n radial_order : unsigned int\n an even integer that represents the maximal order of the basis\n index : unsigned int\n index of the coefficient corresponding to (n,l,m), starting from 0\n\n \"\"\"\n if l % 2 == 1 or l > n or l < 0 or n < 0 or np.abs(m) > l:\n msg = \"The index l must be even and 0 <= l <= n, the index m must be -l <= m <= l.\"\n raise ValueError(msg)\n else:\n if n % 2 == 1:\n radial_order = n + 1\n else:\n radial_order = n\n\n counter_i = 0\n\n counter = 0\n for l_i in range(0, radial_order + 1, 2):\n for n_i in range(l_i, int((radial_order + l_i) / 2) + 1):\n for m_i in range(-l_i, l_i + 1):\n if n == n_i and l == l_i and m == m_i:\n counter_i = counter\n counter += 1\n\n return radial_order, counter_i\n" ]
[ [ "numpy.testing.assert_equal", "numpy.testing.run_module_suite", "numpy.random.random", "numpy.testing.assert_almost_equal", "numpy.isreal" ], [ "numpy.diag", "numpy.dot", "scipy.special.gamma", "numpy.sqrt", "numpy.abs", "numpy.isnan", "numpy.ascontiguousarray", "scipy.special.genlaguerre", "numpy.ones", "numpy.round", "scipy.special.hyp2f1", "numpy.floor", "numpy.exp", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
andmikey/kedro
[ "9b4e4135720609d44ffdf5248246fe805f0b5469", "9b4e4135720609d44ffdf5248246fe805f0b5469" ]
[ "kedro/extras/datasets/pandas/sql_dataset.py", "kedro/extras/datasets/pandas/excel_dataset.py" ]
[ "# Copyright 2018-2019 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"``SQLDataSet`` to load and save data to a SQL backend.\"\"\"\n\nimport copy\nimport re\nfrom typing import Any, Dict, Optional\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import NoSuchModuleError\n\nfrom kedro.io.core import AbstractDataSet, DataSetError\n\n__all__ = [\"SQLTableDataSet\", \"SQLQueryDataSet\"]\n\nKNOWN_PIP_INSTALL = {\n \"psycopg2\": \"psycopg2\",\n \"mysqldb\": \"mysqlclient\",\n \"cx_Oracle\": \"cx_Oracle\",\n}\n\nDRIVER_ERROR_MESSAGE = \"\"\"\nA module/driver is missing when connecting to your SQL server. SQLDataSet\n supports SQLAlchemy drivers. Please refer to\n https://docs.sqlalchemy.org/en/13/core/engines.html#supported-databases\n for more information.\n\\n\\n\n\"\"\"\n\n\ndef _find_known_drivers(module_import_error: ImportError) -> Optional[str]:\n \"\"\"Looks up known keywords in a ``ModuleNotFoundError`` so that it can\n provide better guideline for the user.\n\n Args:\n module_import_error: Error raised while connecting to a SQL server.\n\n Returns:\n Instructions for installing missing driver. 
An empty string is\n returned in case error is related to an unknown driver.\n\n \"\"\"\n\n # module errors contain string \"No module name 'module_name'\"\n # we are trying to extract module_name surrounded by quotes here\n res = re.findall(r\"'(.*?)'\", str(module_import_error.args[0]).lower())\n\n # in case module import error does not match our expected pattern\n # we have no recommendation\n if not res:\n return None\n\n missing_module = res[0]\n\n if KNOWN_PIP_INSTALL.get(missing_module):\n return (\n \"You can also try installing missing driver with\\n\"\n \"\\npip install {}\".format(KNOWN_PIP_INSTALL.get(missing_module))\n )\n\n return None\n\n\ndef _get_missing_module_error(import_error: ImportError) -> DataSetError:\n missing_module_instruction = _find_known_drivers(import_error)\n\n if missing_module_instruction is None:\n return DataSetError(\n \"{}Loading failed with error:\\n\\n{}\".format(\n DRIVER_ERROR_MESSAGE, str(import_error)\n )\n )\n\n return DataSetError(\"{}{}\".format(DRIVER_ERROR_MESSAGE, missing_module_instruction))\n\n\ndef _get_sql_alchemy_missing_error() -> DataSetError:\n return DataSetError(\n \"The SQL dialect in your connection is not supported by \"\n \"SQLAlchemy. Please refer to \"\n \"https://docs.sqlalchemy.org/en/13/core/engines.html#supported-databases \"\n \"for more information.\"\n )\n\n\nclass SQLTableDataSet(AbstractDataSet):\n \"\"\"``SQLTableDataSet`` loads data from a SQL table and saves a pandas\n dataframe to a table. It uses ``pandas.DataFrame`` internally,\n so it supports all allowed pandas options on ``read_sql_table`` and\n ``to_sql`` methods. Since Pandas uses SQLAlchemy behind the scenes, when\n instantiating ``SQLTableDataSet`` one needs to pass a compatible connection\n string either in ``credentials`` (see the example code snippet below) or in\n ``load_args`` and ``save_args``. Connection string formats supported by\n SQLAlchemy can be found here:\n https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls\n\n ``SQLTableDataSet`` modifies the save parameters and stores\n the data with no index. This is designed to make load and save methods\n symmetric.\n\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.pandas import SQLTableDataSet\n >>> import pandas as pd\n >>>\n >>> data = pd.DataFrame({\"col1\": [1, 2], \"col2\": [4, 5],\n >>> \"col3\": [5, 6]})\n >>> table_name = \"table_a\"\n >>> credentials = {\n >>> \"con\": \"postgresql://scott:tiger@localhost/test\"\n >>> }\n >>> data_set = SQLTableDataSet(table_name=table_name,\n >>> credentials=credentials)\n >>>\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>>\n >>> assert data.equals(reloaded)\n\n \"\"\"\n\n DEFAULT_LOAD_ARGS = {} # type: Dict[str, Any]\n DEFAULT_SAVE_ARGS = {\"index\": False} # type: Dict[str, Any]\n\n def _describe(self) -> Dict[str, Any]:\n load_args = self._load_args.copy()\n save_args = self._save_args.copy()\n del load_args[\"table_name\"]\n del load_args[\"con\"]\n del save_args[\"name\"]\n del save_args[\"con\"]\n return dict(\n table_name=self._load_args[\"table_name\"],\n load_args=load_args,\n save_args=save_args,\n )\n\n def __init__(\n self,\n table_name: str,\n credentials: Dict[str, Any],\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n ) -> None:\n \"\"\"Creates a new ``SQLTableDataSet``.\n\n Args:\n table_name: The table name to load or save data to. 
It\n overwrites name in ``save_args`` and ``table_name``\n parameters in ``load_args``.\n credentials: A dictionary with a ``SQLAlchemy`` connection string.\n Users are supposed to provide the connection string 'con'\n through credentials. It overwrites `con` parameter in\n ``load_args`` and ``save_args`` in case it is provided. To find\n all supported connection string formats, see here:\n https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls\n load_args: Provided to underlying pandas ``read_sql_table``\n function along with the connection string.\n To find all supported arguments, see here:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html\n To find all supported connection string formats, see here:\n https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls\n save_args: Provided to underlying pandas ``to_sql`` function along\n with the connection string.\n To find all supported arguments, see here:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html\n To find all supported connection string formats, see here:\n https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls\n It has ``index=False`` in the default parameters.\n\n Raises:\n DataSetError: When either ``table_name`` or ``con`` is empty.\n\n \"\"\"\n\n if not table_name:\n raise DataSetError(\"`table_name` argument cannot be empty.\")\n\n if not (credentials and \"con\" in credentials and credentials[\"con\"]):\n raise DataSetError(\n \"`con` argument cannot be empty. Please \"\n \"provide a SQLAlchemy connection string.\"\n )\n\n # Handle default load and save arguments\n self._load_args = copy.deepcopy(self.DEFAULT_LOAD_ARGS)\n if load_args is not None:\n self._load_args.update(load_args)\n self._save_args = copy.deepcopy(self.DEFAULT_SAVE_ARGS)\n if save_args is not None:\n self._save_args.update(save_args)\n\n self._load_args[\"table_name\"] = table_name\n self._save_args[\"name\"] = table_name\n\n self._load_args[\"con\"] = self._save_args[\"con\"] = credentials[\"con\"]\n\n def _load(self) -> pd.DataFrame:\n try:\n return pd.read_sql_table(**self._load_args)\n except ImportError as import_error:\n raise _get_missing_module_error(import_error)\n except NoSuchModuleError:\n raise _get_sql_alchemy_missing_error()\n\n def _save(self, data: pd.DataFrame) -> None:\n try:\n data.to_sql(**self._save_args)\n except ImportError as import_error:\n raise _get_missing_module_error(import_error)\n except NoSuchModuleError:\n raise _get_sql_alchemy_missing_error()\n\n def _exists(self) -> bool:\n eng = create_engine(self._load_args[\"con\"])\n schema = self._load_args.get(\"schema\", None)\n exists = self._load_args[\"table_name\"] in eng.table_names(schema)\n eng.dispose()\n return exists\n\n\nclass SQLQueryDataSet(AbstractDataSet):\n \"\"\"``SQLQueryDataSet`` loads data from a provided SQL query. It\n uses ``pandas.DataFrame`` internally, so it supports all allowed\n pandas options on ``read_sql_query``. Since Pandas uses SQLAlchemy behind\n the scenes, when instantiating ``SQLQueryDataSet`` one needs to pass\n a compatible connection string either in ``credentials`` (see the example\n code snippet below) or in ``load_args``. 
Connection string formats supported\n by SQLAlchemy can be found here:\n https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls\n\n It does not support the save method, so it is a read-only data set.\n To save data to a SQL server use ``SQLTableDataSet``.\n\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.pandas import SQLQueryDataSet\n >>> import pandas as pd\n >>>\n >>> data = pd.DataFrame({\"col1\": [1, 2], \"col2\": [4, 5],\n >>> \"col3\": [5, 6]})\n >>> sql = \"SELECT * FROM table_a\"\n >>> credentials = {\n >>> \"con\": \"postgresql://scott:tiger@localhost/test\"\n >>> }\n >>> data_set = SQLQueryDataSet(sql=sql,\n >>> credentials=credentials)\n >>>\n >>> sql_data = data_set.load()\n >>>\n\n \"\"\"\n\n def _describe(self) -> Dict[str, Any]:\n load_args = self._load_args.copy()\n del load_args[\"sql\"]\n del load_args[\"con\"]\n return dict(sql=self._load_args[\"sql\"], load_args=load_args)\n\n def __init__(\n self, sql: str, credentials: Dict[str, Any], load_args: Dict[str, Any] = None\n ) -> None:\n \"\"\"Creates a new ``SQLQueryDataSet``.\n\n Args:\n sql: The sql query statement.\n credentials: A dictionary with a ``SQLAlchemy`` connection string.\n Users are supposed to provide the connection string 'con'\n through credentials. It overwrites the `con` parameter in\n ``load_args`` in case it is provided. To find\n all supported connection string formats, see here:\n https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls\n load_args: Provided to underlying pandas ``read_sql_query``\n function along with the connection string.\n To find all supported arguments, see here:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_query.html\n To find all supported connection string formats, see here:\n https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls\n\n Raises:\n DataSetError: When either the ``sql`` or ``con`` parameter is empty.\n\n \"\"\"\n\n if not sql:\n raise DataSetError(\n \"`sql` argument cannot be empty. Please provide a sql query\"\n )\n\n if not (credentials and \"con\" in credentials and credentials[\"con\"]):\n raise DataSetError(\n \"`con` argument cannot be empty. Please \"\n \"provide a SQLAlchemy connection string.\"\n )\n\n default_load_args = {} # type: Dict[str, Any]\n\n self._load_args = (\n {**default_load_args, **load_args}\n if load_args is not None\n else default_load_args\n )\n\n self._load_args[\"sql\"] = sql\n\n self._load_args[\"con\"] = credentials[\"con\"]\n\n def _load(self) -> pd.DataFrame:\n try:\n return pd.read_sql_query(**self._load_args)\n except ImportError as import_error:\n raise _get_missing_module_error(import_error)\n except NoSuchModuleError:\n raise _get_sql_alchemy_missing_error()\n\n def _save(self, data: pd.DataFrame) -> None:\n raise DataSetError(\"`save` is not supported on SQLQueryDataSet\")\n", "# Copyright 2018-2019 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. 
IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"``ExcelDataSet`` loads/saves data from/to a Excel file using an underlying\nfilesystem (e.g.: local, S3, GCS). It uses pandas to handle the Excel file.\n\"\"\"\nfrom copy import deepcopy\nfrom io import BytesIO\nfrom pathlib import PurePosixPath\nfrom typing import Any, Dict\n\nimport fsspec\nimport pandas as pd\n\nfrom kedro.io.core import (\n AbstractVersionedDataSet,\n DataSetError,\n Version,\n get_filepath_str,\n get_protocol_and_path,\n)\n\n\nclass ExcelDataSet(AbstractVersionedDataSet):\n \"\"\"``ExcelDataSet`` loads/saves data from/to a Excel file using an underlying\n filesystem (e.g.: local, S3, GCS). It uses pandas to handle the Excel file.\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.pandas import ExcelDataSet\n >>> import pandas as pd\n >>>\n >>> data = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5],\n >>> 'col3': [5, 6]})\n >>>\n >>> # data_set = ExcelDataSet(filepath=\"gcs://bucket/test.xlsx\")\n >>> data_set = ExcelDataSet(filepath=\"test.xlsx\")\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data.equals(reloaded)\n\n \"\"\"\n\n DEFAULT_LOAD_ARGS = {\"engine\": \"xlrd\"}\n DEFAULT_SAVE_ARGS = {\"index\": False}\n\n # pylint: disable=too-many-arguments\n def __init__(\n self,\n filepath: str,\n engine: str = \"xlsxwriter\",\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n credentials: Dict[str, Any] = None,\n fs_args: Dict[str, Any] = None,\n ) -> None:\n \"\"\"Creates a new instance of ``ExcelDataSet`` pointing to a concrete Excel file\n on a specific filesystem.\n\n Args:\n filepath: Filepath to a Excel file prefixed with a protocol like `s3://`.\n If prefix is not provided, `file` protocol (local filesystem) will be used.\n The prefix should be any protocol supported by ``fsspec``.\n Note: `http(s)` doesn't support versioning.\n engine: The engine used to write to excel files. The default\n engine is 'xlsxwriter'.\n load_args: Pandas options for loading Excel files.\n Here you can find all available arguments:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_excel.html\n All defaults are preserved, but \"engine\", which is set to \"xlrd\".\n save_args: Pandas options for saving Excel files.\n Here you can find all available arguments:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_excel.html\n All defaults are preserved, but \"index\", which is set to False.\n If you would like to specify options for the `ExcelWriter`,\n you can include them under \"writer\" key. 
Here you can\n find all available arguments:\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.ExcelWriter.html\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n credentials: Credentials required to get access to the underlying filesystem.\n E.g. for ``GCSFileSystem`` it should look like `{\"token\": None}`.\n fs_args: Extra arguments to pass into underlying filesystem class.\n E.g. for ``GCSFileSystem`` class: `{\"project\": \"my-project\", ...}`.\n \"\"\"\n _fs_args = deepcopy(fs_args) or {}\n _credentials = deepcopy(credentials) or {}\n\n protocol, path = get_protocol_and_path(filepath, version)\n\n self._protocol = protocol\n self._fs = fsspec.filesystem(self._protocol, **_credentials, **_fs_args)\n\n super().__init__(\n filepath=PurePosixPath(path),\n version=version,\n exists_function=self._fs.exists,\n glob_function=self._fs.glob,\n )\n\n # Handle default load and save arguments\n self._load_args = deepcopy(self.DEFAULT_LOAD_ARGS)\n if load_args is not None:\n self._load_args.update(load_args)\n\n self._save_args = deepcopy(self.DEFAULT_SAVE_ARGS)\n self._writer_args = {\"engine\": engine} # type: Dict[str, Any]\n if save_args is not None:\n writer_args = save_args.pop(\"writer\", {}) # type: Dict[str, Any]\n self._writer_args.update(writer_args)\n self._save_args.update(save_args)\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n protocol=self._protocol,\n load_args=self._load_args,\n save_args=self._save_args,\n writer_args=self._writer_args,\n version=self._version,\n )\n\n def _load(self) -> pd.DataFrame:\n load_path = get_filepath_str(self._get_load_path(), self._protocol)\n\n with self._fs.open(load_path, mode=\"rb\") as fs_file:\n return pd.read_excel(fs_file, **self._load_args)\n\n def _save(self, data: pd.DataFrame) -> None:\n output = BytesIO()\n save_path = get_filepath_str(self._get_save_path(), self._protocol)\n\n # pylint: disable=abstract-class-instantiated\n with pd.ExcelWriter(output, **self._writer_args) as writer:\n data.to_excel(writer, **self._save_args)\n\n with self._fs.open(save_path, mode=\"wb\") as fs_file:\n fs_file.write(output.getvalue())\n\n self.invalidate_cache()\n\n def _exists(self) -> bool:\n try:\n load_path = get_filepath_str(self._get_load_path(), self._protocol)\n except DataSetError:\n return False\n\n return self._fs.exists(load_path)\n\n def _release(self) -> None:\n self.invalidate_cache()\n\n def invalidate_cache(self) -> None:\n \"\"\"Invalidate underlying filesystem caches.\"\"\"\n filepath = get_filepath_str(self._filepath, self._protocol)\n self._fs.invalidate_cache(filepath)\n" ]
[ [ "pandas.read_sql_table", "pandas.read_sql_query" ], [ "pandas.read_excel", "pandas.ExcelWriter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
jensenjhwang/TensorNetwork
[ "35d1247cc3fb80768965f7429ac9b8b914a144a8", "35d1247cc3fb80768965f7429ac9b8b914a144a8", "35d1247cc3fb80768965f7429ac9b8b914a144a8" ]
[ "tensornetwork/contractors/opt_einsum_paths/path_contractors_node_test.py", "tensornetwork/backends/numpy/decompositions_test.py", "tensornetwork/block_sparse/linalg_test.py" ]
[ "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\nfrom tensornetwork import Node\nfrom tensornetwork.contractors import auto\nfrom tensornetwork.contractors.opt_einsum_paths import path_contractors\n\n\[email protected](\n name=\"path_algorithm\", params=[\"optimal\", \"branch\", \"greedy\", \"auto\"])\ndef path_algorithm_fixture(request):\n return getattr(path_contractors, request.param)\n\n\ndef test_sanity_check(backend, path_algorithm):\n a = Node(np.eye(2), backend=backend)\n b = Node(np.ones((2, 7, 11)), backend=backend)\n c = Node(np.ones((7, 11, 13, 2)), backend=backend)\n d = Node(np.eye(13), backend=backend)\n\n # pylint: disable=pointless-statement\n a[0] ^ b[0]\n b[1] ^ c[0]\n b[2] ^ c[1]\n c[2] ^ d[1]\n c[3] ^ a[1]\n nodes = [a, b, c, d]\n final_node = path_algorithm(nodes)\n assert final_node.shape == (13,)\n\n\ndef test_trace_edge(backend, path_algorithm):\n a = Node(np.ones((2, 2, 2, 2, 2)), backend=backend)\n b = Node(np.ones((2, 2, 2)), backend=backend)\n c = Node(np.ones((2, 2, 2)), backend=backend)\n\n # pylint: disable=pointless-statement\n a[0] ^ a[1]\n a[2] ^ b[0]\n a[3] ^ c[0]\n b[1] ^ c[1]\n b[2] ^ c[2]\n nodes = [a, b, c]\n node = path_algorithm(nodes)\n np.testing.assert_allclose(node.tensor, np.ones(2) * 32.0)\n\n\ndef test_single_node(backend, path_algorithm):\n a = Node(np.ones((2, 2, 2)), backend=backend)\n # pylint: disable=pointless-statement\n a[0] ^ a[1]\n nodes = [a]\n node = path_algorithm(nodes)\n np.testing.assert_allclose(node.tensor, np.ones(2) * 2.0)\n\n\ndef test_custom_sanity_check(backend):\n a = Node(np.ones(2), backend=backend)\n b = Node(np.ones((2, 5)), backend=backend)\n\n # pylint: disable=pointless-statement\n a[0] ^ b[0]\n nodes = [a, b]\n\n class PathOptimizer:\n\n def __call__(self, inputs, output, size_dict, memory_limit=None):\n return [(0, 1)]\n\n optimizer = PathOptimizer()\n final_node = path_contractors.custom(nodes, optimizer)\n np.testing.assert_allclose(final_node.tensor, np.ones(5) * 2.0)\n\n\ndef test_subgraph_contraction(backend, path_algorithm):\n a_tensor = np.arange(4).reshape((2, 2))\n b_tensor = np.arange(4).reshape((2, 2)) + 10\n c_tensor = np.arange(4).reshape((2, 2)) + 20\n a = Node(a_tensor, backend=backend)\n b = Node(b_tensor, backend=backend)\n c = Node(c_tensor, backend=backend)\n a[0] ^ b[1]\n c[1] ^ b[0]\n remaining_edges = [c[0], a[1]]\n result = path_algorithm({a, b}, [b[0], a[1]])\n np.testing.assert_allclose(result.tensor, b_tensor @ a_tensor)\n final = (c @ result).reorder_edges(remaining_edges)\n np.testing.assert_allclose(final.tensor, c_tensor @ b_tensor @ a_tensor)\n\n\ndef test_multiple_partial_contractions(backend, path_algorithm):\n a_tensor = np.arange(4).reshape((2, 2))\n b_tensor = np.arange(4).reshape((2, 2)) + 10\n c_tensor = np.arange(4).reshape((2, 2)) + 20\n d_tensor = np.arange(4).reshape((2, 2)) + 30\n a = Node(a_tensor, backend=backend)\n b = Node(b_tensor, backend=backend)\n c = Node(c_tensor, 
backend=backend)\n d = Node(d_tensor, backend=backend)\n a[1] ^ b[0]\n b[1] ^ c[0]\n c[1] ^ d[0]\n d[1] ^ a[0]\n ab = path_algorithm({a, b}, [a[0], b[1]])\n np.testing.assert_allclose(ab.tensor, a_tensor @ b_tensor)\n cd = path_algorithm({c, d}, [c[0], d[1]])\n np.testing.assert_allclose(cd.tensor, c_tensor @ d_tensor)\n result = path_algorithm({ab, cd})\n np.testing.assert_allclose(\n result.tensor, np.trace(a_tensor @ b_tensor @ c_tensor @ d_tensor))\n\n\ndef test_single_node_reorder(backend, path_algorithm):\n a = Node(np.arange(4).reshape((2, 2)), backend=backend)\n expected_edge_order = [a[1], a[0]]\n result = path_algorithm({a}, expected_edge_order)\n assert result.edges == expected_edge_order\n np.testing.assert_allclose(result.tensor, np.arange(4).reshape((2, 2)).T)\n\n\ndef test_ignore_edge_order(backend, path_algorithm):\n a = Node(np.ones((1, 1, 1)), backend=backend)\n b = Node(np.ones((1, 1, 1, 2, 3)), backend=backend)\n\n a[0] ^ b[0]\n a[1] ^ b[1]\n a[2] ^ b[2]\n\n e0 = b[3]\n e1 = b[4]\n\n final_node = path_algorithm({a, b},\n ignore_edge_order=True)\n\n assert set(final_node.edges) == {e0, e1}\n\n\ndef test_ignore_edge_order_with_order(backend, path_algorithm):\n a = Node(np.ones((1, 1, 1)), backend=backend)\n b = Node(np.ones((1, 1, 1, 2, 3)), backend=backend)\n\n a[0] ^ b[0]\n a[1] ^ b[1]\n a[2] ^ b[2]\n\n e0 = b[3]\n e1 = b[4]\n\n final_node = path_algorithm({a, b},\n [e1, e0],\n ignore_edge_order=True)\n\n assert set(final_node.edges) == {e0, e1}\n\ndef test_disconnected_network(backend, path_algorithm):\n a = Node(np.eye(2), backend=backend)\n b = Node(np.eye(2), backend=backend)\n c = Node(np.eye(2), backend=backend)\n d = Node(np.eye(2), backend=backend)\n e = Node(np.eye(2), backend=backend)\n f = Node(np.eye(2), backend=backend)\n g = Node(np.eye(2), backend=backend)\n a[1] ^ b[0]\n c[0] ^ d[1]\n e[0] ^ f[0]\n g[0] ^ f[1]\n final_edges = [a[0], b[1], c[1], d[0], e[1], g[1]]\n result = path_algorithm(\n {a, b, c, d, e, f, g},\n final_edges)\n assert result.edges == final_edges\n\ndef test_passes_ignore_edge_order_from_auto(backend):\n a = Node(np.eye(2), backend=backend)\n b = Node(np.eye(2), backend=backend)\n c = Node(np.eye(2), backend=backend)\n d = Node(np.eye(2), backend=backend)\n e = Node(np.eye(2), backend=backend)\n # pylint: disable=pointless-statement\n a[1] ^ b[0]\n c[0] ^ d[1]\n c[1] ^ e[0]\n nodes = [a, b, c, d, e]\n try:\n auto(nodes, ignore_edge_order=True)\n except ValueError:\n pytest.fail(\"auto should pass ignore_edge_order when n >= 5 && n < 7\")\n", "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport numpy as np\nimport tensorflow as tf\nfrom tensornetwork.backends.numpy import decompositions\n\n\nclass DecompositionsTest(tf.test.TestCase):\n\n def test_expected_shapes(self):\n val = np.zeros((2, 3, 4, 5))\n u, s, vh, _ = decompositions.svd_decomposition(np, val, 2)\n self.assertEqual(u.shape, (2, 3, 6))\n self.assertEqual(s.shape, (6,))\n self.assertAllClose(s, np.zeros(6))\n 
self.assertEqual(vh.shape, (6, 4, 5))\n\n def test_expected_shapes_qr(self):\n val = np.zeros((2, 3, 4, 5))\n q, r = decompositions.qr_decomposition(np, val, 2)\n self.assertEqual(q.shape, (2, 3, 6))\n self.assertEqual(r.shape, (6, 4, 5))\n\n def test_expected_shapes_rq(self):\n val = np.zeros((2, 3, 4, 5))\n r, q = decompositions.rq_decomposition(np, val, 2)\n self.assertEqual(r.shape, (2, 3, 6))\n self.assertEqual(q.shape, (6, 4, 5))\n\n def test_rq_decomposition(self):\n random_matrix = np.random.rand(10, 10)\n r, q = decompositions.rq_decomposition(np, random_matrix, 1)\n self.assertAllClose(r.dot(q), random_matrix)\n\n def test_qr_decomposition(self):\n random_matrix = np.random.rand(10, 10)\n q, r = decompositions.qr_decomposition(np, random_matrix, 1)\n self.assertAllClose(q.dot(r), random_matrix)\n\n def test_max_singular_values(self):\n random_matrix = np.random.rand(10, 10)\n unitary1, _, unitary2 = np.linalg.svd(random_matrix)\n singular_values = np.array(range(10))\n val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))\n u, s, vh, trun = decompositions.svd_decomposition(\n np, val, 1, max_singular_values=7)\n self.assertEqual(u.shape, (10, 7))\n self.assertEqual(s.shape, (7,))\n self.assertAllClose(s, np.arange(9, 2, -1))\n self.assertEqual(vh.shape, (7, 10))\n self.assertAllClose(trun, np.arange(2, -1, -1))\n\n def test_max_singular_values_larger_than_bond_dimension(self):\n random_matrix = np.random.rand(10, 6)\n unitary1, _, unitary2 = np.linalg.svd(random_matrix, full_matrices=False)\n singular_values = np.array(range(6))\n val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))\n u, s, vh, _ = decompositions.svd_decomposition(\n np, val, 1, max_singular_values=30)\n self.assertEqual(u.shape, (10, 6))\n self.assertEqual(s.shape, (6,))\n self.assertEqual(vh.shape, (6, 6))\n\n\n def test_max_truncation_error(self):\n random_matrix = np.random.rand(10, 10)\n unitary1, _, unitary2 = np.linalg.svd(random_matrix)\n singular_values = np.array(range(10))\n val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))\n u, s, vh, trun = decompositions.svd_decomposition(\n np, val, 1, max_truncation_error=math.sqrt(5.1))\n self.assertEqual(u.shape, (10, 7))\n self.assertEqual(s.shape, (7,))\n self.assertAllClose(s, np.arange(9, 2, -1))\n self.assertEqual(vh.shape, (7, 10))\n self.assertAllClose(trun, np.arange(2, -1, -1))\n\n\n def test_max_truncation_error_relative(self):\n absolute = np.diag([2.0, 1.0, 0.2, 0.1])\n relative = np.diag([2.0, 1.0, 0.2, 0.1])\n max_truncation_err = 0.2\n _, _, _, trunc_sv_absolute = decompositions.svd_decomposition(\n np, absolute, 1,\n max_truncation_error=max_truncation_err,\n relative=False)\n _, _, _, trunc_sv_relative = decompositions.svd_decomposition(\n np, relative, 1,\n max_truncation_error=max_truncation_err,\n relative=True)\n np.testing.assert_almost_equal(trunc_sv_absolute, [0.1])\n np.testing.assert_almost_equal(trunc_sv_relative, [0.2, 0.1])\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "import numpy as np\nimport pytest\n# pylint: disable=line-too-long\nfrom tensornetwork.block_sparse.charge import U1Charge, fuse_charges, charge_equal, BaseCharge\nfrom tensornetwork.block_sparse.index import Index\nfrom tensornetwork.block_sparse.blocksparsetensor import ChargeArray, BlockSparseTensor\nfrom tensornetwork.block_sparse.utils import _find_diagonal_sparse_blocks\nfrom tensornetwork import ncon\n# pylint: disable=line-too-long\nfrom tensornetwork.block_sparse.linalg import norm, diag, reshape, transpose, conj, svd, qr, 
eigh, eig, inv, sqrt, trace, eye, pinv, zeros, ones, randn, random\n\nnp_dtypes = [np.float64, np.complex128]\nnp_tensordot_dtypes = [np.float64, np.complex128]\n\n\[email protected]('dtype', np_dtypes)\ndef test_norm(dtype):\n np.random.seed(10)\n Ds = np.asarray([8, 9, 10, 11])\n rank = Ds.shape[0]\n flows = np.random.choice([True, False], size=rank, replace=True)\n indices = [Index(U1Charge.random(-5, 5, Ds[n]), flows[n]) for n in range(4)]\n arr = BlockSparseTensor.random(indices, dtype=dtype)\n dense_norm = np.linalg.norm(arr.todense())\n np.testing.assert_allclose(norm(arr), dense_norm)\n\n\[email protected]('dtype', np_dtypes)\[email protected]('num_charges', [1, 2, 3])\[email protected]('Ds', [[200, 100], [100, 200]])\[email protected]('flow', [False, True])\ndef test_get_diag(dtype, num_charges, Ds, flow):\n np.random.seed(10)\n np_flow = -np.int((np.int(flow) - 0.5) * 2)\n indices = [\n Index(\n BaseCharge(\n np.random.randint(-2, 3, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges), flow) for n in range(2)\n ]\n arr = BlockSparseTensor.random(indices, dtype=dtype)\n fused = fuse_charges(arr.flat_charges, arr.flat_flows)\n inds = np.nonzero(fused == np.zeros((num_charges, 1), dtype=np.int16))[0]\n # pylint: disable=no-member\n left, _ = np.divmod(inds, Ds[1])\n unique = np.unique(\n np_flow * (indices[0]._charges[0].charges[:, left]), axis=1)\n diagonal = diag(arr)\n sparse_blocks, _, block_shapes = _find_diagonal_sparse_blocks(\n arr.flat_charges, arr.flat_flows, 1)\n data = np.concatenate([\n np.diag(np.reshape(arr.data[sparse_blocks[n]], block_shapes[:, n]))\n for n in range(len(sparse_blocks))\n ])\n np.testing.assert_allclose(data, diagonal.data)\n np.testing.assert_allclose(unique, diagonal.flat_charges[0].unique_charges)\n\n\[email protected]('dtype', np_dtypes)\[email protected]('num_charges', [1, 2, 3])\[email protected]('Ds', [[0, 100], [100, 0]])\ndef test_get_empty_diag(dtype, num_charges, Ds):\n np.random.seed(10)\n indices = [\n Index(\n BaseCharge(\n np.random.randint(-2, 3, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges), False) for n in range(2)\n ]\n arr = BlockSparseTensor.random(indices, dtype=dtype)\n diagonal = diag(arr)\n np.testing.assert_allclose([], diagonal.data)\n for c in diagonal.flat_charges:\n assert len(c) == 0\n\n\[email protected]('dtype', np_dtypes)\[email protected]('num_charges', [1, 2, 3])\[email protected]('flow', [False, True])\ndef test_create_diag(dtype, num_charges, flow):\n np.random.seed(10)\n D = 200\n index = Index(\n BaseCharge(\n np.random.randint(-2, 3, (num_charges, D)),\n charge_types=[U1Charge] * num_charges), flow)\n\n arr = ChargeArray.random([index], dtype=dtype)\n diagarr = diag(arr)\n dense = np.ravel(diagarr.todense())\n np.testing.assert_allclose(\n np.sort(dense[dense != 0.0]), np.sort(diagarr.data[diagarr.data != 0.0]))\n\n sparse_blocks, charges, block_shapes = _find_diagonal_sparse_blocks(\n diagarr.flat_charges, diagarr.flat_flows, 1)\n\n for n, block in enumerate(sparse_blocks):\n shape = block_shapes[:, n]\n block_diag = np.diag(np.reshape(diagarr.data[block], shape))\n np.testing.assert_allclose(\n arr.data[np.squeeze((index._charges[0] * flow) == charges[n])],\n block_diag)\n\n\ndef test_diag_raises():\n np.random.seed(10)\n Ds = [8, 9, 10]\n rank = len(Ds)\n indices = [\n Index(\n BaseCharge(\n np.random.randint(-2, 3, (1, Ds[n])), charge_types=[U1Charge]),\n False) for n in range(rank)\n ]\n arr = BlockSparseTensor.random(indices)\n chargearr = ChargeArray.random([indices[0], 
indices[1]])\n with pytest.raises(ValueError):\n diag(arr)\n with pytest.raises(ValueError):\n diag(chargearr)\n\n\[email protected]('dtype', np_dtypes)\ndef test_tn_reshape(dtype):\n np.random.seed(10)\n Ds = [8, 9, 10, 11]\n indices = [Index(U1Charge.random(-5, 5, Ds[n]), False) for n in range(4)]\n arr = BlockSparseTensor.random(indices, dtype=dtype)\n arr2 = reshape(arr, [72, 110])\n for n in range(2):\n for m in range(2):\n assert charge_equal(arr2.charges[n][m], indices[n * 2 + m].charges)\n np.testing.assert_allclose(arr2.shape, [72, 110])\n np.testing.assert_allclose(arr2._order, [[0, 1], [2, 3]])\n np.testing.assert_allclose(arr2.flows, [[False, False], [False, False]])\n assert arr2.ndim == 2\n arr3 = reshape(arr, Ds)\n for n in range(4):\n assert charge_equal(arr3.charges[n][0], indices[n].charges)\n\n np.testing.assert_allclose(arr3.shape, Ds)\n np.testing.assert_allclose(arr3._order, [[0], [1], [2], [3]])\n np.testing.assert_allclose(arr3.flows, [[False], [False], [False], [False]])\n assert arr3.ndim == 4\n\n\ndef test_tn_transpose():\n np.random.seed(10)\n Ds = np.array([8, 9, 10, 11])\n flows = [True, False, True, False]\n indices = [Index(U1Charge.random(-5, 5, Ds[n]), flows[n]) for n in range(4)]\n arr = BlockSparseTensor.random(indices)\n order = [2, 1, 0, 3]\n arr2 = transpose(arr, order)\n np.testing.assert_allclose(Ds[order], arr2.shape)\n np.testing.assert_allclose(arr2._order, [[2], [1], [0], [3]])\n np.testing.assert_allclose(arr2.flows, [[True], [False], [True], [False]])\n\n\ndef test_tn_transpose_reshape():\n np.random.seed(10)\n Ds = np.array([8, 9, 10, 11])\n flows = [True, False, True, False]\n indices = [Index(U1Charge.random(-5, 5, Ds[n]), flows[n]) for n in range(4)]\n arr = BlockSparseTensor.random(indices)\n arr2 = transpose(arr, [2, 0, 1, 3])\n arr3 = reshape(arr2, [80, 99])\n np.testing.assert_allclose(arr3.shape, [80, 99])\n np.testing.assert_allclose(arr3._order, [[2, 0], [1, 3]])\n np.testing.assert_allclose(arr3.flows, [[True, True], [False, False]])\n\n arr4 = transpose(arr3, [1, 0])\n np.testing.assert_allclose(arr4.shape, [99, 80])\n np.testing.assert_allclose(arr4._order, [[1, 3], [2, 0]])\n np.testing.assert_allclose(arr4.flows, [[False, False], [True, True]])\n\n arr5 = reshape(arr4, [9, 11, 10, 8])\n np.testing.assert_allclose(arr5.shape, [9, 11, 10, 8])\n np.testing.assert_allclose(arr5._order, [[1], [3], [2], [0]])\n np.testing.assert_allclose(arr5.flows, [[False], [False], [True], [True]])\n\n\[email protected]('dtype', np_dtypes)\ndef test_tn_conj(dtype):\n np.random.seed(10)\n indices = [Index(U1Charge.random(-5, 5, 10), False) for _ in range(4)]\n a = BlockSparseTensor.randn(indices, dtype=dtype)\n b = conj(a)\n np.testing.assert_allclose(b.data, np.conj(a.data))\n\n\[email protected](\"dtype\", np_dtypes)\[email protected](\"Ds, R1\", [([20, 21], 1), ([18, 19, 20], 2),\n ([18, 19, 20], 1), ([0, 10], 1),\n ([10, 0], 1)])\[email protected]('num_charges', [1, 2, 3])\ndef test_svd_prod(dtype, Ds, R1, num_charges):\n np.random.seed(10)\n R = len(Ds)\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = [True] * R\n A = BlockSparseTensor.random([Index(charges[n], flows[n]) for n in range(R)],\n dtype=dtype)\n d1 = np.prod(Ds[:R1])\n d2 = np.prod(Ds[R1:])\n A = A.reshape([d1, d2])\n\n U, S, V = svd(A, full_matrices=False)\n A_ = U @ diag(S) @ V\n assert A_.dtype == A.dtype\n np.testing.assert_allclose(A.data, A_.data)\n for n in 
range(len(A._charges)):\n assert charge_equal(A_._charges[n], A._charges[n])\n\n\[email protected](\"dtype\", np_dtypes)\[email protected](\"Ds, R1\", [([20, 21], 1), ([18, 19, 20], 2),\n ([18, 19, 20], 1), ([0, 10], 1),\n ([10, 0], 1)])\[email protected]('num_charges', [1, 2, 3])\ndef test_svd_singvals(dtype, Ds, R1, num_charges):\n np.random.seed(10)\n R = len(Ds)\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = [True] * R\n A = BlockSparseTensor.random([Index(charges[n], flows[n]) for n in range(R)],\n dtype=dtype)\n\n d1 = np.prod(Ds[:R1])\n d2 = np.prod(Ds[R1:])\n A = A.reshape([d1, d2])\n _, S1, _ = svd(A, full_matrices=False)\n S2 = svd(A, full_matrices=False, compute_uv=False)\n np.testing.assert_allclose(S1.data, S2.data)\n Sdense = np.linalg.svd(A.todense(), compute_uv=False)\n np.testing.assert_allclose(\n np.sort(Sdense[Sdense > 1E-15]), np.sort(S2.data[S2.data > 0.0]))\n\n\[email protected](\"mode\", ['complete', 'reduced'])\[email protected](\"dtype\", np_dtypes)\[email protected](\"Ds, R1\", [([20, 21], 1), ([18, 19, 20], 2),\n ([18, 19, 20], 1), ([10, 0], 1),\n ([0, 10], 1)])\[email protected]('num_charges', [1, 2, 3])\ndef test_qr_prod(dtype, Ds, R1, mode, num_charges):\n np.random.seed(10)\n R = len(Ds)\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = [True] * R\n A = BlockSparseTensor.random([Index(charges[n], flows[n]) for n in range(R)],\n dtype=dtype)\n d1 = np.prod(Ds[:R1])\n d2 = np.prod(Ds[R1:])\n A = A.reshape([d1, d2])\n Q, R = qr(A, mode=mode)\n A_ = Q @ R\n assert A_.dtype == A.dtype\n np.testing.assert_allclose(A.data, A_.data)\n for n in range(len(A._charges)):\n assert charge_equal(A_._charges[n], A._charges[n])\n\n\ndef test_qr_raises():\n np.random.seed(10)\n dtype = np.float64\n num_charges = 1\n Ds = [20, 21]\n R1 = 1\n R = len(Ds)\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = [True] * R\n A = BlockSparseTensor.random([Index(charges[n], flows[n]) for n in range(R)],\n dtype=dtype)\n d1 = np.prod(Ds[:R1])\n d2 = np.prod(Ds[R1:])\n A = A.reshape([d1, d2])\n with pytest.raises(ValueError):\n qr(A, mode='fake_mode')\n\n\[email protected](\"dtype\", np_dtypes)\[email protected](\"Ds\", [[20], [9, 10], [6, 7, 8], [0]])\[email protected]('num_charges', [1, 2, 3])\ndef test_eigh_prod(dtype, Ds, num_charges):\n np.random.seed(10)\n R = len(Ds)\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n]), dtype=np.int16),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = [False] * R\n inds = [Index(charges[n], flows[n]) for n in range(R)]\n A = BlockSparseTensor.random(\n inds + [i.copy().flip_flow() for i in inds], dtype=dtype)\n dims = np.prod(Ds)\n A = A.reshape([dims, dims])\n B = A + A.T.conj()\n E, V = eigh(B)\n B_ = V @ diag(E) @ V.conj().T\n np.testing.assert_allclose(B.data, B_.data)\n for n in range(len(B._charges)):\n assert charge_equal(B_._charges[n], B._charges[n])\n\n\ndef test_eigh_raises():\n np.random.seed(10)\n num_charges = 1\n D = 20\n R = 3\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = [False] * R\n inds = [Index(charges[n], flows[n]) for n in range(R)]\n A = 
BlockSparseTensor.random(inds)\n with pytest.raises(NotImplementedError):\n eigh(A)\n\n\[email protected](\"dtype\", [np.float64, np.complex128])\[email protected]('num_charges', [1, 2, 3])\ndef test_inv(dtype, num_charges):\n np.random.seed(10)\n R = 2\n D = 10\n charge = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n flows = [True, False]\n A = BlockSparseTensor.random([Index(charge, flows[n]) for n in range(R)],\n (-0.5, 0.5),\n dtype=dtype)\n invA = inv(A)\n left_eye = invA @ A\n\n blocks, _, shapes = _find_diagonal_sparse_blocks(left_eye.flat_charges,\n left_eye.flat_flows, 1)\n for n, block in enumerate(blocks):\n t = np.reshape(left_eye.data[block], shapes[:, n])\n assert np.linalg.norm(t - np.eye(t.shape[0], t.shape[1])) < 1E-12\n\n right_eye = A @ invA\n blocks, _, shapes = _find_diagonal_sparse_blocks(right_eye.flat_charges,\n right_eye.flat_flows, 1)\n for n, block in enumerate(blocks):\n t = np.reshape(right_eye.data[block], shapes[:, n])\n assert np.linalg.norm(t - np.eye(t.shape[0], t.shape[1])) < 1E-12\n\n\ndef test_inv_raises():\n num_charges = 1\n np.random.seed(10)\n R = 3\n D = 10\n charge = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n A = BlockSparseTensor.random([Index(charge, False) for n in range(R)],\n (-0.5, 0.5))\n with pytest.raises(ValueError):\n inv(A)\n\n\[email protected](\"dtype\", np_dtypes)\[email protected](\"Ds\", [[20], [9, 10], [6, 7, 8], [0]])\[email protected]('num_charges', [1, 2, 3])\ndef test_eig_prod(dtype, Ds, num_charges):\n np.random.seed(10)\n R = len(Ds)\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n]), dtype=np.int16),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = [False] * R\n inds = [Index(charges[n], flows[n]) for n in range(R)]\n\n A = BlockSparseTensor.random(\n inds + [i.copy().flip_flow() for i in inds], dtype=dtype)\n dims = np.prod(Ds)\n A = A.reshape([dims, dims])\n E, V = eig(A)\n A_ = V @ diag(E) @ inv(V)\n np.testing.assert_allclose(A.data, A_.data)\n\n\ndef test_eig_raises():\n np.random.seed(10)\n num_charges = 1\n D = 20\n R = 3\n charges = [\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),\n charge_types=[U1Charge] * num_charges) for n in range(R)\n ]\n flows = [False] * R\n inds = [Index(charges[n], flows[n]) for n in range(R)]\n A = BlockSparseTensor.random(inds)\n with pytest.raises(NotImplementedError):\n eig(A)\n\n\n#Note the case num_charges=4 is most likely testing empty tensors\[email protected](\"dtype\", np_tensordot_dtypes)\[email protected]('num_charges', [1, 2, 3])\[email protected](\"Ds\", [[20], [9, 10], [6, 7, 8], [9, 8, 0, 10]])\ndef test_sqrt(dtype, num_charges, Ds):\n np.random.seed(10)\n R = len(Ds)\n flows = np.random.choice([True, False], replace=True, size=R)\n indices = [\n Index(\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n]), dtype=np.int16),\n charge_types=[U1Charge] * num_charges), flows[n])\n for n in range(R)\n ]\n arr = BlockSparseTensor.random(indices, dtype=dtype)\n sqrtarr = sqrt(arr)\n np.testing.assert_allclose(sqrtarr.data, np.sqrt(arr.data))\n\n\[email protected](\"dtype\", np_dtypes)\[email protected]('num_charges', [1, 2, 3])\[email protected]('D', [0, 10])\ndef test_eye(dtype, num_charges, D):\n charge = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n flow = False\n index = 
Index(charge, flow)\n A = eye(index, dtype=dtype)\n blocks, _, shapes = _find_diagonal_sparse_blocks(A.flat_charges, A.flat_flows,\n 1)\n for n, block in enumerate(blocks):\n t = np.reshape(A.data[block], shapes[:, n])\n np.testing.assert_almost_equal(t, np.eye(t.shape[0], t.shape[1]))\n\n\[email protected](\"dtype\", [np.float64, np.complex128])\[email protected]('num_charges', [1, 2, 3])\[email protected]('D', [0, 100])\ndef test_trace_matrix(dtype, num_charges, D):\n np.random.seed(10)\n R = 2\n charge = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n flows = [True, False]\n matrix = BlockSparseTensor.random([Index(charge, flows[n]) for n in range(R)],\n dtype=dtype)\n res = trace(matrix)\n res_dense = np.trace(matrix.todense())\n np.testing.assert_allclose(res.data, res_dense)\n\n\[email protected](\"dtype\", [np.float64, np.complex128])\[email protected]('num_charges', [1, 2, 3])\[email protected]('D1, D2', [(10, 12), (0, 10)])\ndef test_trace_tensor(dtype, num_charges, D1, D2):\n np.random.seed(10)\n charge1 = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D1), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n charge2 = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D2), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n indices = [Index(charge1, False), Index(charge2, False), Index(charge1, True)]\n tensor = BlockSparseTensor.random(indices, dtype=dtype)\n res = trace(tensor, (0, 2))\n assert res.sparse_shape[0] == indices[1]\n res_dense = np.trace(tensor.todense(), axis1=0, axis2=2)\n np.testing.assert_allclose(res.todense(), res_dense)\n\n\[email protected]('num_charges', [1, 2, 3])\ndef test_trace_raises(num_charges):\n np.random.seed(10)\n D = 20\n charge1 = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n A1 = BlockSparseTensor.random([Index(charge1, False)])\n with pytest.raises(ValueError):\n trace(A1)\n\n charge2 = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D + 1), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n indices = [\n Index(charge1, False),\n Index(charge2, False),\n Index(charge1, False)\n ]\n A2 = BlockSparseTensor.random(indices)\n with pytest.raises(ValueError):\n trace(A2, axes=(0, 1))\n with pytest.raises(ValueError):\n trace(A2, axes=(0, 2))\n with pytest.raises(ValueError):\n trace(A2, axes=(0, 1, 2))\n\n\[email protected](\"dtype\", [np.float64, np.complex128])\[email protected]('num_charges', [1, 2, 3])\ndef test_pinv(dtype, num_charges):\n np.random.seed(10)\n R = 2\n D = 10\n charge = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n flows = [True, False]\n A = BlockSparseTensor.random([Index(charge, flows[n]) for n in range(R)],\n (-0.5, 0.5),\n dtype=dtype)\n invA = pinv(A)\n left_eye = invA @ A\n\n blocks, _, shapes = _find_diagonal_sparse_blocks(left_eye.flat_charges,\n left_eye.flat_flows, 1)\n for n, block in enumerate(blocks):\n t = np.reshape(left_eye.data[block], shapes[:, n])\n assert np.linalg.norm(t - np.eye(t.shape[0], t.shape[1])) < 1E-12\n\n right_eye = A @ invA\n blocks, _, shapes = _find_diagonal_sparse_blocks(right_eye.flat_charges,\n right_eye.flat_flows, 1)\n for n, block in enumerate(blocks):\n t = np.reshape(right_eye.data[block], shapes[:, n])\n assert np.linalg.norm(t - np.eye(t.shape[0], t.shape[1])) < 1E-12\n\n\ndef test_pinv_raises():\n num_charges = 1\n np.random.seed(10)\n R 
= 3\n D = 10\n charge = BaseCharge(\n np.random.randint(-5, 6, (num_charges, D), dtype=np.int16),\n charge_types=[U1Charge] * num_charges)\n A = BlockSparseTensor.random([Index(charge, False) for n in range(R)],\n (-0.5, 0.5))\n with pytest.raises(ValueError):\n pinv(A)\n\n\[email protected]('dtype', np_dtypes)\[email protected]('num_charges', [1, 2, 3])\ndef test_tn_zeros(dtype, num_charges):\n np.random.seed(10)\n Ds = [8, 9, 10, 11]\n rank = 4\n flows = np.random.choice([True, False], size=rank, replace=True)\n indices = [\n Index(\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges), flows[n])\n for n in range(rank)\n ]\n arr = zeros(indices, dtype=dtype)\n np.testing.assert_allclose(arr.data, 0)\n np.testing.assert_allclose(Ds, arr.shape)\n np.testing.assert_allclose(arr.flat_flows, flows)\n for n in range(4):\n assert charge_equal(arr.charges[n][0], indices[n].flat_charges[0])\n\n\[email protected]('dtype', np_dtypes)\[email protected]('num_charges', [1, 2, 3])\ndef test_tn_ones(dtype, num_charges):\n np.random.seed(10)\n Ds = [8, 9, 10, 11]\n rank = 4\n flows = np.random.choice([True, False], size=rank, replace=True)\n indices = [\n Index(\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges), flows[n])\n for n in range(rank)\n ]\n\n arr = ones(indices, dtype=dtype)\n np.testing.assert_allclose(arr.data, 1)\n np.testing.assert_allclose(Ds, arr.shape)\n np.testing.assert_allclose(arr.flat_flows, flows)\n for n in range(4):\n assert charge_equal(arr.charges[n][0], indices[n].flat_charges[0])\n\n\[email protected]('dtype', np_dtypes)\[email protected]('num_charges', [1, 2, 3])\ndef test_tn_random(dtype, num_charges):\n np.random.seed(10)\n Ds = [8, 9, 10, 11]\n rank = 4\n flows = np.random.choice([True, False], size=rank, replace=True)\n indices = [\n Index(\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges), flows[n])\n for n in range(rank)\n ]\n arr = random(indices, dtype=dtype)\n\n np.testing.assert_allclose(Ds, arr.shape)\n np.testing.assert_allclose(arr.flat_flows, flows)\n for n in range(4):\n assert charge_equal(arr.charges[n][0], indices[n].flat_charges[0])\n\n\[email protected]('dtype', np_dtypes)\[email protected]('num_charges', [1, 2, 3])\ndef test_tn_randn(dtype, num_charges):\n np.random.seed(10)\n Ds = [8, 9, 10, 11]\n rank = 4\n flows = np.random.choice([True, False], size=rank, replace=True)\n indices = [\n Index(\n BaseCharge(\n np.random.randint(-5, 6, (num_charges, Ds[n])),\n charge_types=[U1Charge] * num_charges), flows[n])\n for n in range(rank)\n ]\n arr = randn(indices, dtype=dtype)\n\n np.testing.assert_allclose(Ds, arr.shape)\n np.testing.assert_allclose(arr.flat_flows, flows)\n for n in range(4):\n assert charge_equal(arr.charges[n][0], indices[n].flat_charges[0])\n" ]
[ [ "numpy.arange", "numpy.eye", "numpy.ones", "numpy.testing.assert_allclose", "numpy.trace" ], [ "numpy.diag", "numpy.linalg.svd", "numpy.arange", "tensorflow.test.main", "numpy.testing.assert_almost_equal", "numpy.random.rand", "numpy.zeros" ], [ "numpy.conj", "numpy.random.seed", "numpy.unique", "numpy.asarray", "numpy.random.choice", "numpy.reshape", "numpy.sqrt", "numpy.eye", "numpy.squeeze", "numpy.sort", "numpy.int", "numpy.prod", "numpy.testing.assert_allclose", "numpy.divmod", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yiheng-wang-nv/MONAI
[ "885d5b947aeafc1a9bee2899cfd48fff9036e68a", "2fef7ff5c064a9ff6b6d6b4f2323180afed99934" ]
[ "monai/networks/utils.py", "tests/test_occlusion_sensitivity.py" ]
[ "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUtilities and types for defining networks, these depend on PyTorch.\n\"\"\"\nimport re\nimport warnings\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom typing import Any, Callable, Dict, Mapping, Optional, Sequence, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom monai.utils.deprecate_utils import deprecated_arg\nfrom monai.utils.misc import ensure_tuple, set_determinism\nfrom monai.utils.module import pytorch_after\n\n__all__ = [\n \"one_hot\",\n \"slice_channels\",\n \"predict_segmentation\",\n \"normalize_transform\",\n \"to_norm_affine\",\n \"normal_init\",\n \"icnr_init\",\n \"pixelshuffle\",\n \"eval_mode\",\n \"train_mode\",\n \"copy_model_state\",\n \"convert_to_torchscript\",\n]\n\n\ndef one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype = torch.float, dim: int = 1) -> torch.Tensor:\n \"\"\"\n For every value v in `labels`, the value in the output will be either 1 or 0. Each vector along the `dim`-th\n dimension has the \"one-hot\" format, i.e., it has a total length of `num_classes`,\n with a one and `num_class-1` zeros.\n Note that this will include the background label, thus a binary mask should be treated as having two classes.\n\n Args:\n labels: input tensor of integers to be converted into the 'one-hot' format. Internally `labels` will be\n converted into integers `labels.long()`.\n num_classes: number of output channels, the corresponding length of `labels[dim]` will be converted to\n `num_classes` from `1`.\n dtype: the data type of the output one_hot label.\n dim: the dimension to be converted to `num_classes` channels from `1` channel, should be non-negative number.\n\n Example:\n\n For a tensor `labels` of dimensions [B]1[spatial_dims], return a tensor of dimensions `[B]N[spatial_dims]`\n when `num_classes=N` number of classes and `dim=1`.\n\n .. 
code-block:: python\n\n from monai.networks.utils import one_hot\n import torch\n\n a = torch.randint(0, 2, size=(1, 2, 2, 2))\n out = one_hot(a, num_classes=2, dim=0)\n print(out.shape) # torch.Size([2, 2, 2, 2])\n\n a = torch.randint(0, 2, size=(2, 1, 2, 2, 2))\n out = one_hot(a, num_classes=2, dim=1)\n print(out.shape) # torch.Size([2, 2, 2, 2, 2])\n\n \"\"\"\n\n # if `dim` is bigger, add singleton dim at the end\n if labels.ndim < dim + 1:\n shape = list(labels.shape) + [1] * (dim + 1 - len(labels.shape))\n labels = torch.reshape(labels, shape)\n\n sh = list(labels.shape)\n\n if sh[dim] != 1:\n raise AssertionError(\"labels should have a channel with length equal to one.\")\n\n sh[dim] = num_classes\n\n o = torch.zeros(size=sh, dtype=dtype, device=labels.device)\n labels = o.scatter_(dim=dim, index=labels.long(), value=1)\n\n return labels\n\n\ndef slice_channels(tensor: torch.Tensor, *slicevals: Optional[int]) -> torch.Tensor:\n slices = [slice(None)] * len(tensor.shape)\n slices[1] = slice(*slicevals)\n\n return tensor[slices]\n\n\ndef predict_segmentation(logits: torch.Tensor, mutually_exclusive: bool = False, threshold: float = 0.0) -> Any:\n \"\"\"\n Given the logits from a network, computing the segmentation by thresholding all values above 0\n if multi-labels task, computing the `argmax` along the channel axis if multi-classes task,\n logits has shape `BCHW[D]`.\n\n Args:\n logits: raw data of model output.\n mutually_exclusive: if True, `logits` will be converted into a binary matrix using\n a combination of argmax, which is suitable for multi-classes task. Defaults to False.\n threshold: thresholding the prediction values if multi-labels task.\n \"\"\"\n if not mutually_exclusive:\n return (logits >= threshold).int()\n if logits.shape[1] == 1:\n warnings.warn(\"single channel prediction, `mutually_exclusive=True` ignored, use threshold instead.\")\n return (logits >= threshold).int()\n return logits.argmax(1, keepdim=True)\n\n\ndef normalize_transform(\n shape: Sequence[int],\n device: Optional[torch.device] = None,\n dtype: Optional[torch.dtype] = None,\n align_corners: bool = False,\n) -> torch.Tensor:\n \"\"\"\n Compute an affine matrix according to the input shape.\n The transform normalizes the homogeneous image coordinates to the\n range of `[-1, 1]`.\n\n Args:\n shape: input spatial shape\n device: device on which the returned affine will be allocated.\n dtype: data type of the returned affine\n align_corners: if True, consider -1 and 1 to refer to the centers of the\n corner pixels rather than the image corners.\n See also: https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample\n \"\"\"\n norm = torch.tensor(shape, dtype=torch.float64, device=device) # no in-place change\n if align_corners:\n norm[norm <= 1.0] = 2.0\n norm = 2.0 / (norm - 1.0)\n norm = torch.diag(torch.cat((norm, torch.ones((1,), dtype=torch.float64, device=device))))\n norm[:-1, -1] = -1.0\n else:\n norm[norm <= 0.0] = 2.0\n norm = 2.0 / norm\n norm = torch.diag(torch.cat((norm, torch.ones((1,), dtype=torch.float64, device=device))))\n norm[:-1, -1] = 1.0 / torch.tensor(shape, dtype=torch.float64, device=device) - 1.0\n norm = norm.unsqueeze(0).to(dtype=dtype)\n norm.requires_grad = False\n return norm\n\n\ndef to_norm_affine(\n affine: torch.Tensor, src_size: Sequence[int], dst_size: Sequence[int], align_corners: bool = False\n) -> torch.Tensor:\n \"\"\"\n Given ``affine`` defined for coordinates in the pixel space, compute the corresponding affine\n for the normalized 
coordinates.\n\n Args:\n affine: Nxdxd batched square matrix\n src_size: source image spatial shape\n dst_size: target image spatial shape\n align_corners: if True, consider -1 and 1 to refer to the centers of the\n corner pixels rather than the image corners.\n See also: https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample\n\n Raises:\n TypeError: When ``affine`` is not a ``torch.Tensor``.\n ValueError: When ``affine`` is not Nxdxd.\n ValueError: When ``src_size`` or ``dst_size`` dimensions differ from ``affine``.\n\n \"\"\"\n if not isinstance(affine, torch.Tensor):\n raise TypeError(f\"affine must be a torch.Tensor but is {type(affine).__name__}.\")\n if affine.ndimension() != 3 or affine.shape[1] != affine.shape[2]:\n raise ValueError(f\"affine must be Nxdxd, got {tuple(affine.shape)}.\")\n sr = affine.shape[1] - 1\n if sr != len(src_size) or sr != len(dst_size):\n raise ValueError(f\"affine suggests {sr}D, got src={len(src_size)}D, dst={len(dst_size)}D.\")\n\n src_xform = normalize_transform(src_size, affine.device, affine.dtype, align_corners)\n dst_xform = normalize_transform(dst_size, affine.device, affine.dtype, align_corners)\n return src_xform @ affine @ torch.inverse(dst_xform)\n\n\ndef normal_init(\n m, std: float = 0.02, normal_func: Callable[[torch.Tensor, float, float], Any] = torch.nn.init.normal_\n) -> None:\n \"\"\"\n Initialize the weight and bias tensors of `m' and its submodules to values from a normal distribution with a\n stddev of `std'. Weight tensors of convolution and linear modules are initialized with a mean of 0, batch\n norm modules with a mean of 1. The callable `normal_func', used to assign values, should have the same arguments\n as its default normal_(). This can be used with `nn.Module.apply` to visit submodules of a network.\n \"\"\"\n cname = m.__class__.__name__\n\n if getattr(m, \"weight\", None) is not None and (cname.find(\"Conv\") != -1 or cname.find(\"Linear\") != -1):\n normal_func(m.weight.data, 0.0, std)\n if getattr(m, \"bias\", None) is not None:\n nn.init.constant_(m.bias.data, 0.0)\n\n elif cname.find(\"BatchNorm\") != -1:\n normal_func(m.weight.data, 1.0, std)\n nn.init.constant_(m.bias.data, 0)\n\n\ndef icnr_init(conv, upsample_factor, init=nn.init.kaiming_normal_):\n \"\"\"\n ICNR initialization for 2D/3D kernels adapted from Aitken et al.,2017 , \"Checkerboard artifact free\n sub-pixel convolution\".\n \"\"\"\n out_channels, in_channels, *dims = conv.weight.shape\n scale_factor = upsample_factor ** len(dims)\n\n oc2 = int(out_channels / scale_factor)\n\n kernel = torch.zeros([oc2, in_channels] + dims)\n kernel = init(kernel)\n kernel = kernel.transpose(0, 1)\n kernel = kernel.reshape(oc2, in_channels, -1)\n kernel = kernel.repeat(1, 1, scale_factor)\n kernel = kernel.reshape([in_channels, out_channels] + dims)\n kernel = kernel.transpose(0, 1)\n conv.weight.data.copy_(kernel)\n\n\n@deprecated_arg(\n name=\"dimensions\", new_name=\"spatial_dims\", since=\"0.6\", msg_suffix=\"Please use `spatial_dims` instead.\"\n)\ndef pixelshuffle(\n x: torch.Tensor, spatial_dims: int, scale_factor: int, dimensions: Optional[int] = None\n) -> torch.Tensor:\n \"\"\"\n Apply pixel shuffle to the tensor `x` with spatial dimensions `spatial_dims` and scaling factor `scale_factor`.\n\n See: Shi et al., 2016, \"Real-Time Single Image and Video Super-Resolution\n Using a nEfficient Sub-Pixel Convolutional Neural Network.\"\n\n See: Aitken et al., 2017, \"Checkerboard artifact free sub-pixel convolution\".\n\n Args:\n x: Input 
tensor\n spatial_dims: number of spatial dimensions, typically 2 or 3 for 2D or 3D\n scale_factor: factor to rescale the spatial dimensions by, must be >=1\n\n .. deprecated:: 0.6.0\n ``dimensions`` is deprecated, use ``spatial_dims`` instead.\n\n Returns:\n Reshuffled version of `x`.\n\n Raises:\n ValueError: When input channels of `x` are not divisible by (scale_factor ** spatial_dims)\n \"\"\"\n if dimensions is not None:\n spatial_dims = dimensions\n dim, factor = spatial_dims, scale_factor\n input_size = list(x.size())\n batch_size, channels = input_size[:2]\n scale_divisor = factor ** dim\n\n if channels % scale_divisor != 0:\n raise ValueError(\n f\"Number of input channels ({channels}) must be evenly \"\n f\"divisible by scale_factor ** dimensions ({factor}**{dim}={scale_divisor}).\"\n )\n\n org_channels = channels // scale_divisor\n output_size = [batch_size, org_channels] + [d * factor for d in input_size[2:]]\n\n indices = tuple(range(2, 2 + 2 * dim))\n indices_factor, indices_dim = indices[:dim], indices[dim:]\n permute_indices = (0, 1) + sum(zip(indices_dim, indices_factor), ())\n\n x = x.reshape(batch_size, org_channels, *([factor] * dim + input_size[2:]))\n x = x.permute(permute_indices).reshape(output_size)\n return x\n\n\n@contextmanager\ndef eval_mode(*nets: nn.Module):\n \"\"\"\n Set network(s) to eval mode and then return to original state at the end.\n\n Args:\n nets: Input network(s)\n\n Examples\n\n .. code-block:: python\n\n t=torch.rand(1,1,16,16)\n p=torch.nn.Conv2d(1,1,3)\n print(p.training) # True\n with eval_mode(p):\n print(p.training) # False\n print(p(t).sum().backward()) # will correctly raise an exception as gradients are calculated\n \"\"\"\n\n # Get original state of network(s)\n training = [n for n in nets if n.training]\n\n try:\n # set to eval mode\n with torch.no_grad():\n yield [n.eval() for n in nets]\n finally:\n # Return required networks to training\n for n in training:\n n.train()\n\n\n@contextmanager\ndef train_mode(*nets: nn.Module):\n \"\"\"\n Set network(s) to train mode and then return to original state at the end.\n\n Args:\n nets: Input network(s)\n\n Examples\n\n .. code-block:: python\n\n t=torch.rand(1,1,16,16)\n p=torch.nn.Conv2d(1,1,3)\n p.eval()\n print(p.training) # False\n with train_mode(p):\n print(p.training) # True\n print(p(t).sum().backward()) # No exception\n \"\"\"\n\n # Get original state of network(s)\n eval_list = [n for n in nets if not n.training]\n\n try:\n # set to train mode\n with torch.set_grad_enabled(True):\n yield [n.train() for n in nets]\n finally:\n # Return required networks to eval_list\n for n in eval_list:\n n.eval()\n\n\ndef copy_model_state(\n dst: Union[torch.nn.Module, Mapping],\n src: Union[torch.nn.Module, Mapping],\n dst_prefix=\"\",\n mapping=None,\n exclude_vars=None,\n inplace=True,\n):\n \"\"\"\n Compute a module state_dict, of which the keys are the same as `dst`. The values of `dst` are overwritten\n by the ones from `src` whenever their keys match. The method provides additional `dst_prefix` for\n the `dst` key when matching them. 
`mapping` can be a `{\"src_key\": \"dst_key\"}` dict, indicating\n    `dst[dst_prefix + dst_key] = src[src_key]`.\n    This function is mainly to return a model state dict\n    for loading the `src` model state into the `dst` model, `src` and `dst` can have different dict keys, but\n    their corresponding values normally have the same shape.\n\n    Args:\n        dst: a pytorch module or state dict to be updated.\n        src: a pytorch module or state dict used to get the values used for the update.\n        dst_prefix: `dst` key prefix, so that `dst[dst_prefix + src_key]`\n            will be assigned to the value of `src[src_key]`.\n        mapping: a `{\"src_key\": \"dst_key\"}` dict, indicating that `dst[dst_prefix + dst_key]`\n            is to be assigned the value of `src[src_key]`.\n        exclude_vars: a regular expression to match the `dst` variable names,\n            so that their values are not overwritten by `src`.\n        inplace: whether to set the `dst` module with the updated `state_dict` via `load_state_dict`.\n            This option is only available when `dst` is a `torch.nn.Module`.\n\n    Examples:\n        .. code-block:: python\n\n            from monai.networks.nets import BasicUNet\n            from monai.networks.utils import copy_model_state\n\n            model_a = BasicUNet(in_channels=1, out_channels=4)\n            model_b = BasicUNet(in_channels=1, out_channels=2)\n            model_a_b, changed, unchanged = copy_model_state(\n                model_a, model_b, exclude_vars=\"conv_0.conv_0\", inplace=False)\n            # dst model updated: 76 of 82 variables.\n            model_a.load_state_dict(model_a_b)\n            # <All keys matched successfully>\n\n    Returns: an OrderedDict of the updated `dst` state, the changed, and unchanged keys.\n    \"\"\"\n\n    if isinstance(src, (nn.DataParallel, nn.parallel.DistributedDataParallel)):\n        src = src.module\n    if isinstance(dst, (nn.DataParallel, nn.parallel.DistributedDataParallel)):\n        dst = dst.module\n    src_dict = src.state_dict() if isinstance(src, torch.nn.Module) else src\n    dst_dict = dst.state_dict() if isinstance(dst, torch.nn.Module) else dst\n    dst_dict = OrderedDict(dst_dict)\n\n    to_skip = {s_key for s_key in src_dict if exclude_vars and re.compile(exclude_vars).search(s_key)}\n\n    # update dst with items from src\n    all_keys, updated_keys = list(dst_dict), list()\n    for s, val in src_dict.items():\n        dst_key = f\"{dst_prefix}{s}\"\n        if dst_key in dst_dict and dst_key not in to_skip and dst_dict[dst_key].shape == val.shape:\n            dst_dict[dst_key] = val\n            updated_keys.append(dst_key)\n    for s in mapping if mapping else {}:\n        dst_key = f\"{dst_prefix}{mapping[s]}\"\n        if dst_key in dst_dict and dst_key not in to_skip:\n            if dst_dict[dst_key].shape != src_dict[s].shape:\n                warnings.warn(f\"Param. 
shape changed from {dst_dict[dst_key].shape} to {src_dict[s].shape}.\")\n            dst_dict[dst_key] = src_dict[s]\n            updated_keys.append(dst_key)\n\n    updated_keys = sorted(set(updated_keys))\n    unchanged_keys = sorted(set(all_keys).difference(updated_keys))\n    print(f\"'dst' model updated: {len(updated_keys)} of {len(dst_dict)} variables.\")\n    if inplace and isinstance(dst, torch.nn.Module):\n        dst.load_state_dict(dst_dict)\n    return dst_dict, updated_keys, unchanged_keys\n\n\ndef convert_to_torchscript(\n    model: nn.Module,\n    filename_or_obj: Optional[Any] = None,\n    extra_files: Optional[Dict] = None,\n    verify: bool = False,\n    inputs: Optional[Sequence[Any]] = None,\n    device: Optional[torch.device] = None,\n    rtol: float = 1e-4,\n    atol: float = 0.0,\n    **kwargs,\n):\n    \"\"\"\n    Utility to convert a model into a TorchScript model and save to file,\n    with optional input / output data verification.\n\n    Args:\n        model: source PyTorch model to save.\n        filename_or_obj: if not None, specify a file-like object (has to implement write and flush)\n            or a string containing a file path name to save the TorchScript model.\n        extra_files: map from filename to contents which will be stored as part of the save model file.\n            works for PyTorch 1.7 or later.\n            for more details: https://pytorch.org/docs/stable/generated/torch.jit.save.html.\n        verify: whether to verify the input and output of TorchScript model.\n            if `filename_or_obj` is not None, load the saved TorchScript model and verify.\n        inputs: input test data to verify model, should be a sequence of data, every item maps to an argument\n            of the `model()` function.\n        device: target device to verify the model, if None, use CUDA if available.\n        rtol: the relative tolerance when comparing the outputs of PyTorch model and TorchScript model.\n        atol: the absolute tolerance when comparing the outputs of PyTorch model and TorchScript model.\n        kwargs: other arguments except `obj` for `torch.jit.script()` to convert model, for more details:\n            https://pytorch.org/docs/master/generated/torch.jit.script.html.\n\n    \"\"\"\n    model.eval()\n    with torch.no_grad():\n        script_module = torch.jit.script(model, **kwargs)\n        if filename_or_obj is not None:\n            if not pytorch_after(1, 7):\n                torch.jit.save(m=script_module, f=filename_or_obj)\n            else:\n                torch.jit.save(m=script_module, f=filename_or_obj, _extra_files=extra_files)\n\n    if verify:\n        if device is None:\n            device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        if inputs is None:\n            raise ValueError(\"missing input data for verification.\")\n\n        inputs = [i.to(device) if isinstance(i, torch.Tensor) else i for i in inputs]\n        ts_model = torch.jit.load(filename_or_obj) if filename_or_obj is not None else script_module\n        ts_model.eval().to(device)\n        model = model.to(device)\n\n        with torch.no_grad():\n            set_determinism(seed=0)\n            torch_out = ensure_tuple(model(*inputs))\n            set_determinism(seed=0)\n            torchscript_out = ensure_tuple(ts_model(*inputs))\n            set_determinism(seed=None)\n        # compare TorchScript and PyTorch results\n        for r1, r2 in zip(torch_out, torchscript_out):\n            if isinstance(r1, torch.Tensor) or isinstance(r2, torch.Tensor):\n                torch.testing.assert_allclose(r1, r2, rtol=rtol, atol=atol)\n\n    return script_module\n", "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.networks.nets import DenseNet, DenseNet121\nfrom monai.visualize import OcclusionSensitivity\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nout_channels_2d = 4\nout_channels_3d = 3\nmodel_2d = DenseNet121(spatial_dims=2, in_channels=1, out_channels=out_channels_2d).to(device)\nmodel_3d = DenseNet(\n spatial_dims=3, in_channels=1, out_channels=out_channels_3d, init_features=2, growth_rate=2, block_config=(6,)\n).to(device)\nmodel_2d.eval()\nmodel_3d.eval()\n\n# 2D w/ bounding box\nTEST_CASE_0 = [\n {\"nn_module\": model_2d},\n {\"x\": torch.rand(1, 1, 48, 64).to(device), \"b_box\": [-1, -1, 2, 40, 1, 62]},\n (1, 1, 39, 62, out_channels_2d),\n (1, 1, 39, 62),\n]\n# 3D w/ bounding box and stride\nTEST_CASE_1 = [\n {\"nn_module\": model_3d, \"n_batch\": 10, \"stride\": (2, 1, 2), \"mask_size\": (16, 15, 14)},\n {\"x\": torch.rand(1, 1, 6, 6, 6).to(device), \"b_box\": [-1, -1, 2, 3, -1, -1, -1, -1]},\n (1, 1, 2, 6, 6, out_channels_3d),\n (1, 1, 2, 6, 6),\n]\n\nTEST_CASE_FAIL_0 = [ # 2D should fail, since 3 stride values given\n {\"nn_module\": model_2d, \"n_batch\": 10, \"stride\": (2, 2, 2)},\n {\"x\": torch.rand(1, 1, 48, 64).to(device), \"b_box\": [-1, -1, 2, 3, -1, -1]},\n]\n\nTEST_CASE_FAIL_1 = [ # 2D should fail, since stride is not a factor of image size\n {\"nn_module\": model_2d, \"stride\": 3},\n {\"x\": torch.rand(1, 1, 48, 64).to(device)},\n]\n\n\nclass TestComputeOcclusionSensitivity(unittest.TestCase):\n @parameterized.expand([TEST_CASE_0, TEST_CASE_1])\n def test_shape(self, init_data, call_data, map_expected_shape, most_prob_expected_shape):\n occ_sens = OcclusionSensitivity(**init_data)\n m, most_prob = occ_sens(**call_data)\n self.assertTupleEqual(m.shape, map_expected_shape)\n self.assertTupleEqual(most_prob.shape, most_prob_expected_shape)\n # most probable class should be of type int, and should have min>=0, max<num_classes\n self.assertEqual(most_prob.dtype, torch.int64)\n self.assertGreaterEqual(most_prob.min(), 0)\n self.assertLess(most_prob.max(), m.shape[-1])\n\n @parameterized.expand([TEST_CASE_FAIL_0, TEST_CASE_FAIL_1])\n def test_fail(self, init_data, call_data):\n occ_sens = OcclusionSensitivity(**init_data)\n with self.assertRaises(ValueError):\n occ_sens(**call_data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.jit.script", "torch.jit.save", "torch.jit.load", "torch.ones", "torch.testing.assert_allclose", "torch.zeros", "torch.nn.init.constant_", "torch.reshape", "torch.tensor", "torch.inverse", "torch.set_grad_enabled", "torch.no_grad", "torch.cuda.is_available" ], [ "torch.rand", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jytime/Deep-SfM-Revisited
[ "7645c7d524df8c8798ccc1902c1368b4ed59708a" ]
[ "models/inverse_warp.py" ]
[ "from __future__ import division\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\npixel_coords = None\n\n\ndef set_id_grid(depth):\n global pixel_coords\n b, h, w = depth.size()\n i_range = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w)).type_as(depth) # [1, H, W]\n j_range = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w)).type_as(depth) # [1, H, W]\n ones = Variable(torch.ones(1,h,w)).type_as(depth)\n\n pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]\n\n\ndef check_sizes(input, input_name, expected):\n condition = [input.ndimension() == len(expected)]\n for i,size in enumerate(expected):\n if size.isdigit():\n condition.append(input.size(i) == int(size))\n assert(all(condition)), \"wrong size for {}, expected {}, got {}\".format(input_name, 'x'.join(expected), list(input.size()))\n\n\ndef pixel2cam(depth, intrinsics_inv):\n global pixel_coords\n \"\"\"Transform coordinates in the pixel frame to the camera frame.\n Args:\n depth: depth maps -- [B, H, W]\n intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3]\n Returns:\n array of (u,v,1) cam coordinates -- [B, 3, H, W]\n \"\"\"\n b, h, w = depth.size()\n if (pixel_coords is None) or (pixel_coords.shape[-2:]!=depth.shape[-2:]):\n set_id_grid(depth)\n current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).contiguous().view(b, 3, -1).cuda() # [B, 3, H*W]\n cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b, 3, h, w)\n return cam_coords * depth.unsqueeze(1)\n\n\ndef cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):\n \"\"\"Transform coordinates in the camera frame to the pixel frame.\n Args:\n cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W]\n proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4]\n proj_c2p_tr: translation vectors of cameras -- [B, 3, 1]\n Returns:\n array of [-1,1] coordinates -- [B, 2, H, W]\n \"\"\"\n b, _, h, w = cam_coords.size()\n cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]\n if proj_c2p_rot is not None:\n pcoords = proj_c2p_rot.bmm(cam_coords_flat)\n else:\n pcoords = cam_coords_flat\n\n if proj_c2p_tr is not None:\n pcoords = pcoords + proj_c2p_tr # [B, 3, H*W]\n X = pcoords[:, 0]\n Y = pcoords[:, 1]\n Z = pcoords[:, 2].clamp(min=1e-3)\n\n X_norm = 2*(X / Z)/(w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n Y_norm = 2*(Y / Z)/(h-1) - 1 # Idem [B, H*W]\n if padding_mode == 'zeros':\n X_mask = ((X_norm > 1)+(X_norm < -1)).detach()\n X_norm[X_mask] = 2 # make sure that no point in warped image is a combinaison of im and gray\n Y_mask = ((Y_norm > 1)+(Y_norm < -1)).detach()\n Y_norm[Y_mask] = 2\n\n pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]\n return pixel_coords.view(b,h,w,2)\n\ndef pose_vec2mat(vec, rotation_mode='euler'):\n \"\"\"\n Convert 6DoF parameters to transformation matrix.\n Args:s\n vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]\n Returns:\n A transformation matrix -- [B, 3, 4]\n \"\"\"\n translation = vec[:, :3].unsqueeze(-1) # [B, 3, 1]\n rot = vec[:,3:]\n if rotation_mode == 'euler':\n rot_mat = euler2mat(rot) # [B, 3, 3]\n elif rotation_mode == 'quat':\n rot_mat = quat2mat(rot) # [B, 3, 3]\n transform_mat = torch.cat([rot_mat, translation], dim=2) # [B, 3, 4]\n return transform_mat\n\ndef pose2flow(depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode=None):\n \"\"\"\n Converts pose parameters to rigid 
optical flow\n \"\"\"\n check_sizes(depth, 'depth', 'BHW')\n check_sizes(pose, 'pose', 'B34')\n check_sizes(intrinsics, 'intrinsics', 'B33')\n check_sizes(intrinsics_inv, 'intrinsics', 'B33')\n assert(intrinsics_inv.size() == intrinsics.size())\n\n bs, h, w = depth.size()\n\n grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth) # [bs, H, W]\n grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth) # [bs, H, W]\n\n cam_coords = pixel2cam(depth, intrinsics_inv) # [B,3,H,W]\n pose_mat = pose.cuda()\n\n # Get projection matrix for tgt camera frame to source pixel frame\n proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]\n src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,H,W,2]\n\n X = (w-1)*(src_pixel_coords[:,:,:,0]/2.0 + 0.5) - grid_x\n Y = (h-1)*(src_pixel_coords[:,:,:,1]/2.0 + 0.5) - grid_y\n\n return torch.stack((X,Y), dim=1)\n\ndef inverse_warp(feat, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):\n \"\"\"\n Inverse warp a source image to the target image plane.\n\n Args:\n feat: the source feature (where to sample pixels) -- [B, CH, H, W]\n depth: depth map of the target image -- [B, H, W]\n pose: 6DoF pose parameters from target to source -- [B, 6]\n intrinsics: camera intrinsic matrix -- [B, 3, 3]\n intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]\n Returns:\n Source image warped to the target image plane\n \"\"\"\n check_sizes(depth, 'depth', 'BHW')\n check_sizes(pose, 'pose', 'B34')\n check_sizes(intrinsics, 'intrinsics', 'B33')\n check_sizes(intrinsics_inv, 'intrinsics', 'B33')\n\n assert(intrinsics_inv.size() == intrinsics.size())\n\n batch_size, _, feat_height, feat_width = feat.size()\n\n cam_coords = pixel2cam(depth, intrinsics_inv) \n\n pose_mat = pose.cuda()\n\n # Get projection matrix for tgt camera frame to source pixel frame\n proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]\n\n src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,H,W,2]\n projected_feat = torch.nn.functional.grid_sample(feat, src_pixel_coords, padding_mode=padding_mode,align_corners=True)\n\n return projected_feat\n\n\ndef inverse_warp_map(depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):\n \"\"\"\n Inverse warp a source image to the target image plane.\n\n Args:\n feat: the source feature (where to sample pixels) -- [B, CH, H, W]\n depth: depth map of the target image -- [B, H, W]\n pose: 6DoF pose parameters from target to source -- [B, 6]\n intrinsics: camera intrinsic matrix -- [B, 3, 3]\n intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]\n Returns:\n Source image warped to the target image plane\n \"\"\"\n check_sizes(depth, 'depth', 'BHW')\n check_sizes(pose, 'pose', 'B34')\n check_sizes(intrinsics, 'intrinsics', 'B33')\n check_sizes(intrinsics_inv, 'intrinsics', 'B33')\n\n assert(intrinsics_inv.size() == intrinsics.size())\n\n batch_size, feat_height, feat_width = depth.size()\n\n cam_coords = pixel2cam(depth, intrinsics_inv) \n\n pose_mat = pose.cuda()\n # pose_mat = pose_mat\n\n # Get projection matrix for tgt camera frame to source pixel frame\n proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]\n\n src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,H,W,2]\n # 
projected_feat = torch.nn.functional.grid_sample(feat, src_pixel_coords, padding_mode=padding_mode,align_corners=True)\n\n bs, h, w = depth.size()\n\n X = (w-1)*(src_pixel_coords[:,:,:,0]/2.0 + 0.5) #- grid_x\n Y = (h-1)*(src_pixel_coords[:,:,:,1]/2.0 + 0.5) #- grid_y\n\n return torch.stack((X,Y), dim=1)\n\ndef inverse_warp_im(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros'):\n \"\"\"\n Inverse warp a source image to the target image plane.\n Args:\n img: the source image (where to sample pixels) -- [B, 3, H, W]\n depth: depth map of the target image -- [B, H, W]\n pose: 6DoF pose parameters from target to source -- [B, 6]\n intrinsics: camera intrinsic matrix -- [B, 3, 3]\n Returns:\n projected_img: Source image warped to the target image plane\n valid_points: Boolean array indicating point validity\n \"\"\"\n check_sizes(img, 'img', 'B3HW')\n check_sizes(depth, 'depth', 'BHW')\n check_sizes(pose, 'pose', 'B34')\n check_sizes(intrinsics, 'intrinsics', 'B33')\n\n batch_size, _, img_height, img_width = img.size()\n\n cam_coords = pixel2cam(depth, intrinsics_inv) # [B,3,H,W]\n\n pose_mat = pose\n pose_mat = pose_mat.cuda() # [B,3,4]\n\n # Get projection matrix for tgt camera frame to source pixel frame\n proj_cam_to_src_pixel = intrinsics @ pose_mat # [B, 3, 4]\n\n rot, tr = proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:]\n src_pixel_coords = cam2pixel(cam_coords, rot, tr, padding_mode) # [B,H,W,2]\n projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode,align_corners=True)\n\n return projected_img" ]
[ [ "torch.ones", "torch.cat", "torch.nn.functional.grid_sample", "torch.arange", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
embracesource-cv-com/ruler_detection
[ "89318b46b213ffb7774119d502b5aa520d34b50a" ]
[ "train.py" ]
[ "import os\nimport argparse\nimport collections\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import transforms\nimport model\nfrom utils import _transfer_pretrained_weights\nfrom dataloader import CSVDataset, collater, Resizer, AspectRatioBasedSampler, Normalizer\nfrom augment import Augmentation\nfrom torch.utils.data import DataLoader\nimport csv_eval\nfrom tensorboardX import SummaryWriter\nfrom datetime import datetime\n\ntorch.cuda.empty_cache()\n\n\n# torch.cuda.set_device(1)\n# device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef main(args=None):\n parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')\n parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default='csv')\n parser.add_argument('--coco_path', help='Path to COCO directory')\n parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)',\n default='./csv/train_annots_div.csv')\n parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)',\n default='./csv/class_list_div.csv')\n parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)',\n default='./csv/val_annots_div.csv')\n parser.add_argument('--weights', help='ckpt', default='./csv/coco_resnet_50_map_0_335_state_dict.pt')\n parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)\n parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)\n parser = parser.parse_args(args)\n\n # Create the data loaders\n dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,\n transform=transforms.Compose([Normalizer(), Augmentation(), Resizer()]))\n dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,\n transform=transforms.Compose([Normalizer(), Resizer()]))\n print('Num training images: {}'.format(len(dataset_train)))\n print('Num validation images: {}'.format(len(dataset_val)))\n sampler = AspectRatioBasedSampler(dataset_train, batch_size=4, drop_last=False)\n dataloader_train = DataLoader(dataset_train, num_workers=4, collate_fn=collater, batch_sampler=sampler)\n # sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=3, drop_last=False)\n # dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val)\n\n # Create the model\n if parser.depth == 18:\n retinanet = model.resnet18(num_classes=dataset_train.num_classes(), )\n elif parser.depth == 34:\n retinanet = model.resnet34(num_classes=dataset_train.num_classes(), )\n elif parser.depth == 50:\n retinanet = model.resnet50(num_classes=dataset_train.num_classes(), )\n elif parser.depth == 101:\n retinanet = model.resnet101(num_classes=dataset_train.num_classes(), )\n elif parser.depth == 152:\n retinanet = model.resnet152(num_classes=dataset_train.num_classes(), )\n else:\n raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')\n retinanet = _transfer_pretrained_weights(retinanet, parser.weights)\n # PATH = '/home/github/ruler_detection/logs/Dec30_15-57-21/csv_retinanet_alldiv_best.pth'\n # retinanet = torch.load(PATH)\n\n # retinanet = retinanet.cuda()\n retinanet = torch.nn.DataParallel(retinanet).cuda()\n retinanet.training = True\n optimizer = optim.Adam(retinanet.parameters(), lr=1e-4)\n scheduler = 
optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, verbose=True)\n    loss_hist = collections.deque(maxlen=500)\n    log_dir = os.path.join('./logs', datetime.now().strftime('%b%d_%H-%M-%S'))\n    mAP_best = 0\n\n    for epoch_num in range(parser.epochs):\n        retinanet.train()\n        retinanet.module.freeze_bn()\n        epoch_loss = []\n        for iter_num, data in enumerate(dataloader_train):\n            optimizer.zero_grad()\n            cls_loss, regr_loss = retinanet([data['img'].cuda().float(), data['annot'].cuda()])\n            cls_loss = cls_loss.mean()\n            regr_loss = regr_loss.mean()\n            loss = cls_loss + regr_loss\n            if bool(loss == 0):\n                continue\n            loss.backward()\n            torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)\n            optimizer.step()\n            loss_hist.append(float(loss))\n            epoch_loss.append(float(loss))\n            print('Epoch: {} | Iteration: {} | cls loss: {:1.5f} | regr loss: {:1.5f} | Running loss: {:1.5f}'.format(\n                epoch_num, iter_num, float(cls_loss), float(regr_loss), np.mean(loss_hist)))\n\n        print('Evaluating dataset')\n        retinanet.eval()\n        APs, mAP = csv_eval.evaluate(dataset_val, retinanet)\n        with SummaryWriter(log_dir=log_dir, comment='train') as writer:  # the with statement closes the SummaryWriter automatically\n            writer.add_scalar('loss/classification', cls_loss, epoch_num)\n            writer.add_scalar('loss/regression', regr_loss, epoch_num)\n            writer.add_scalar('loss/total loss', loss, epoch_num)\n            writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch_num)\n            writer.add_scalar('acc/mAP', mAP, epoch_num)\n            writer.add_scalars('acc/AP', {'AP_0': APs[0][0], 'AP_1': APs[1][0], 'AP_2': APs[2][0], 'AP_3': APs[3][0],\n                                          'AP_4': APs[4][0], 'AP_5': APs[5][0], 'AP_6': APs[6][0], 'AP_7': APs[7][0],\n                                          'AP_8': APs[8][0], 'AP_9': APs[9][0], 'AP_10': APs[10][0]}, epoch_num)\n\n        scheduler.step(np.mean(epoch_loss))\n        if mAP > mAP_best:\n            mAP_best = mAP\n            torch.save(retinanet.module, os.path.join(log_dir, '{}_retinanet_alldiv_best.pth'.format(parser.dataset)))\n\n\nif __name__ == '__main__':\n    main()\n" ]
[ [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "numpy.mean", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
romepeng/jaqsplus
[ "2923c01213d1e2f90ddcd95eb722f8120aa74426" ]
[ "tests/test_dataservice.py" ]
[ "# encoding: UTF-8\r\n\r\nimport pytest\r\nfrom jaqs.data import RemoteDataService\r\n\r\nimport jaqs.util as jutil\r\n\r\nfrom config_path import DATA_CONFIG_PATH\r\ndata_config = jutil.read_json(DATA_CONFIG_PATH)\r\n\r\n\r\ndef test_remote_data_service_daily():\r\n # test daily\r\n res, msg = ds.daily('002422.SZ,601607.SH', fields=\"\",\r\n start_date=20180101, end_date=20180512,\r\n adjust_mode=None)\r\n assert msg == '0,'\r\n \r\n stk1 = res.loc[res.loc[:, 'symbol'] == '002422.SZ', :]\r\n stk2 = res.loc[res.loc[:, 'symbol'] == '601607.SH', :]\r\n #print(stk1.shape)\r\n assert set(stk1.columns) == {'close', 'code', 'freq', 'high', 'low', 'oi', 'open', 'preclose',\r\n 'presettle', 'settle', 'symbol', 'trade_date', 'trade_status',\r\n 'turnover', 'volume', 'vwap'}\r\n assert stk1.shape == (85, 16)\r\n # print(stk1.loc[:, 'volume'].values[0])\r\n assert stk1.loc[:, 'volume'].values[0] == 10828204.0\r\n # print(stk2.loc[:, 'volume'].values[0])\r\n assert stk2.loc[:, 'volume'].values[0] == 9171414.0\r\n\r\n\r\ndef test_remote_data_service_daily_quited():\r\n # test daily\r\n res, msg = ds.daily('601607.SH', fields=\"\",\r\n start_date=20140828, end_date=20170831,\r\n adjust_mode=None)\r\n assert msg == '0,'\r\n # print(res.shape)\r\n assert res.shape == (735, 16)\r\n\r\n\r\ndef test_remote_data_service_bar():\r\n # test bar\r\n res2, msg2 = ds.bar('rb1710.SHF,600662.SH', start_time=200000, end_time=160000, trade_date=20170831, fields=\"\")\r\n assert msg2 == '0,'\r\n \r\n rb2 = res2.loc[res2.loc[:, 'symbol'] == 'rb1710.SHF', :]\r\n stk2 = res2.loc[res2.loc[:, 'symbol'] == '600662.SH', :]\r\n assert set(rb2.columns) == {u'close', u'code', u'date', u'freq', u'high', u'low', u'oi', u'open',\r\n u'settle', u'symbol', u'time', u'trade_date', u'turnover', u'volume',\r\n u'vwap'}\r\n assert abs(rb2.loc[:, 'settle'].values[0] - 0.0) < 1e-3\r\n assert rb2.shape == (345, 15)\r\n assert stk2.shape == (240, 15)\r\n assert rb2.loc[:, 'volume'].values[344] == 3366\r\n\r\n\r\ndef test_remote_data_serviece_quote():\r\n res, msg = ds.quote('000001.SH')\r\n assert msg == '0,'\r\n \r\n\r\ndef test_remote_data_service_lb():\r\n # test lb.secDailyIndicator\r\n fields = \"pb,pe,free_share,net_assets,limit_status\"\r\n for res3, msg3 in [ds.query(\"lb.secDailyIndicator\", fields=fields,\r\n filter=\"symbol=600030.SH&start_date=20170907&end_date=20170907\",\r\n orderby=\"trade_date\"),\r\n ds.query_lb_dailyindicator('600030.SH', 20170907, 20170907, fields)]:\r\n assert msg3 == '0,'\r\n assert abs(res3.loc[0, 'pb'] - 1.5135) < 1e-4\r\n assert abs(res3.loc[0, 'free_share'] - 781496.5954) < 1e-4\r\n assert abs(res3.loc[0, 'net_assets'] - 1.437e11) < 1e8\r\n assert res3.loc[0, 'limit_status'] == 0\r\n \r\n # test lb.income\r\n for res4, msg4 in [ds.query(\"lb.income\", fields=\"\",\r\n filter=\"symbol=600000.SH&start_date=20150101&end_date=20170101&report_type=408001000\",\r\n order_by=\"report_date\"),\r\n ds.query_lb_fin_stat('income', '600000.SH', 20150101, 20170101, fields=\"\")]:\r\n assert msg4 == '0,'\r\n assert res4.shape == (8, 12)\r\n assert abs(res4.loc[4, 'oper_rev'] - 120928000000) < 1\r\n\r\n\r\ndef test_remote_data_service_daily_ind_performance():\r\n hs300 = ds.query_index_member('000300.SH', 20151001, 20170101)\r\n hs300_str = ','.join(hs300)\r\n \r\n fields = \"pb,pe,share_float_free,net_assets,limit_status\"\r\n res, msg = ds.query(\"lb.secDailyIndicator\", fields=fields,\r\n filter=(\"symbol=\" + hs300_str\r\n + \"&start_date=20160907&end_date=20170907\"),\r\n orderby=\"trade_date\")\r\n assert 
msg == '0,'\r\n\r\n\r\ndef test_remote_data_service_components():\r\n res = ds.query_index_member_daily(index='000300.SH', start_date=20140101, end_date=20170505)\r\n assert res.shape == (814, 430)\r\n \r\n arr = ds.query_index_member(index='000300.SH', start_date=20140101, end_date=20170505)\r\n assert len(arr) == 430\r\n\r\n\r\ndef test_remote_data_service_industry():\r\n from jaqs.data.align import align\r\n import pandas as pd\r\n \r\n arr = ds.query_index_member(index='000300.SH', start_date=20130101, end_date=20170505)\r\n df = ds.query_industry_raw(symbol=','.join(arr), type_='SW')\r\n df = ds.query_industry_raw(symbol=','.join(arr), type_='ZZ')\r\n \r\n # errors\r\n try:\r\n ds.query_industry_raw(symbol=','.join(arr), type_='ZZ', level=5)\r\n except ValueError:\r\n pass\r\n try:\r\n ds.query_industry_raw(symbol=','.join(arr), type_='blabla')\r\n except ValueError:\r\n pass\r\n \r\n # df_ann = df.loc[:, ['in_date', 'symbol']]\r\n # df_ann = df_ann.set_index(['symbol', 'in_date'])\r\n # df_ann = df_ann.unstack(level='symbol')\r\n \r\n from jaqs.data import DataView\r\n dic_sec = jutil.group_df_to_dict(df, by='symbol')\r\n dic_sec = {sec: df.reset_index() for sec, df in dic_sec.items()}\r\n \r\n df_ann = pd.concat([df.loc[:, 'in_date'].rename(sec) for sec, df in dic_sec.items()], axis=1)\r\n df_value = pd.concat([df.loc[:, 'industry1_code'].rename(sec) for sec, df in dic_sec.items()], axis=1)\r\n \r\n dates_arr = ds.query_trade_dates(20140101, 20170505)\r\n res = align(df_value, df_ann, dates_arr)\r\n # df_ann = df.pivot(index='in_date', columns='symbol', values='in_date')\r\n # df_value = df.pivot(index=None, columns='symbol', values='industry1_code')\r\n \r\n def align_single_df(df_one_sec):\r\n df_value = df_one_sec.loc[:, ['industry1_code']]\r\n df_ann = df_one_sec.loc[:, ['in_date']]\r\n res = align(df_value, df_ann, dates_arr)\r\n return res\r\n # res_list = [align_single_df(df) for sec, df in dic_sec.items()]\r\n res_list = [align_single_df(df) for df in list(dic_sec.values())[:10]]\r\n res = pd.concat(res_list, axis=1)\r\n \r\n \r\ndef test_remote_data_service_industry_df():\r\n # from jaqs.data import Calendar\r\n \r\n arr = ds.query_index_member(index='000300.SH', start_date=20130101, end_date=20170505)\r\n symbol_arr = ','.join(arr)\r\n \r\n sec = '000008.SZ'\r\n type_ = 'ZZ'\r\n df_raw = ds.query_industry_raw(symbol=sec, type_=type_)\r\n df = ds.query_industry_daily(symbol=symbol_arr,\r\n start_date=df_raw['in_date'].min(), end_date=20170505,\r\n type_=type_, level=1)\r\n \r\n for idx, row in df_raw.iterrows():\r\n in_date = row['in_date']\r\n value = row['industry1_code']\r\n if in_date in df.index:\r\n assert df.loc[in_date, sec] == value\r\n else:\r\n idx = ds.query_next_trade_date(in_date)\r\n assert df.loc[idx, sec] == value\r\n \r\n\r\ndef test_remote_data_service_fin_indicator():\r\n symbol = '000008.SZ'\r\n filter_argument = ds._dic2url({'symbol': symbol})\r\n \r\n df_raw, msg = ds.query(\"lb.finIndicator\", fields=\"\",\r\n filter=filter_argument, orderby=\"symbol\")\r\n\r\n\r\ndef test_remote_data_service_adj_factor():\r\n arr = ds.query_index_member(index='000300.SH', start_date=20160101, end_date=20170505)\r\n symbol_arr = ','.join(arr)\r\n \r\n res = ds.query_adj_factor_daily(symbol_arr, start_date=20160101, end_date=20170101, div=False)\r\n assert abs(res.loc[20160408, '300024.SZ'] - 10.735) < 1e-3\r\n assert abs(res.loc[20160412, '300024.SZ'] - 23.658) < 1e-3\r\n \r\n res = ds.query_adj_factor_daily(symbol_arr, start_date=20160101, end_date=20170101, 
div=True)\r\n\r\n\r\ndef test_remote_data_service_dividend():\r\n arr = ds.query_index_member(index='000300.SH', start_date=20160101, end_date=20170505)\r\n symbol_arr = ','.join(arr)\r\n \r\n df, msg = ds.query_dividend(symbol_arr, start_date=20160101, end_date=20170101)\r\n df2 = df.pivot(index='exdiv_date', columns='symbol', values='share_ratio')\r\n assert abs(df.loc[(df['exdiv_date'] == 20160504) & (df['symbol'] == '002085.SZ'), 'share_ratio'] - 0.20).iat[0] < 1e-2\r\n\r\n\r\ndef test_remote_data_service_inst_info():\r\n sec = '000001.SZ'\r\n res = ds.query_inst_info(sec, fields='status,selllot,buylot,pricetick,multiplier,product')\r\n assert res.at[sec, 'multiplier'] == 1\r\n assert abs(res.at[sec, 'pricetick'] - 0.01) < 1e-2\r\n assert res.at[sec, 'buylot'] == 100\r\n\r\n res = ds.query_inst_info('000001.SH')\r\n assert not res.empty\r\n\r\n\r\ndef test_remote_data_service_index_weight():\r\n df = ds.query_index_weights_raw(index='000300.SH', trade_date=20140101)\r\n assert df.shape[0] == 300\r\n assert abs(df['weight'].sum() - 1.0) < 1.0\r\n\r\n df = ds.query_index_weights_range(index='000300.SH', start_date=20140101, end_date=20140305)\r\n\r\n df = ds.query_index_weights_raw(index='000016.SH', trade_date=20140101)\r\n assert df.shape[0] == 50\r\n assert abs(df['weight'].sum() - 1.0) < 1.0\r\n \r\n df = ds.query_index_weights_daily(index='000300.SH', start_date=20150101, end_date=20151221)\r\n assert abs(df.at[20150120, '000001.SZ'] - 1.07e-2) < 1e-2\r\n assert df.shape == (236, 321)\r\n\r\n\r\ndef test_remote_data_service_initialize():\r\n import jaqs.data.dataservice as jads\r\n data_config2 = {k: v for k, v in data_config.items()}\r\n \r\n data_config2['remote.data.password'] = ''\r\n try:\r\n ds.init_from_config(data_config2)\r\n except jads.InitializeError:\r\n pass\r\n \r\n data_config2['remote.data.password'] = '123'\r\n msg = ds.init_from_config(data_config2)\r\n assert msg.split(',')[0] == '-1000'\r\n try:\r\n ds.daily('000001.SH', start_date=20170101, end_date=20170109)\r\n except jads.NotLoginError:\r\n pass\r\n \r\n msg = ds.init_from_config(data_config)\r\n assert msg.split(',')[0] == '0'\r\n msg = ds.init_from_config(data_config)\r\n assert msg.split(',')[0] == '0'\r\n \r\n\r\ndef test_remote_data_service_subscribe():\r\n ds.subscribe('000001.SH')\r\n\r\n\r\ndef test_remote_data_bar_quote():\r\n df, msg = ds.bar_quote('000001.SZ', trade_date=20171009, freq='1M')\r\n assert msg == '0,'\r\n assert df['askvolume1'].all()\r\n assert abs(df['bidprice1'].iat[1] - 11.52) < 1e-2\r\n \r\n\r\ndef test_remote_data_service_mkt_data_callback():\r\n from jaqs.data.basic import Quote\r\n q = Quote()\r\n ds.mkt_data_callback(key='quote', quote=q)\r\n\r\n\r\ndef test_calendar():\r\n ds = RemoteDataService()\r\n ds.init_from_config(data_config)\r\n \r\n res1 = ds.query_trade_dates(20121224, 20130201)\r\n assert len(res1) == 27\r\n \r\n day_zero = 20170102\r\n res2 = ds.query_next_trade_date(day_zero)\r\n assert res2 == 20170103\r\n res2_last = ds.query_last_trade_date(res2)\r\n assert res2_last == 20161230\r\n \r\n res3 = ds.query_next_trade_date(20170104)\r\n assert res3 == 20170105\r\n res4 = ds.query_last_trade_date(res3)\r\n assert res4 == 20170104\r\n \r\n res11 = ds.query_trade_dates(20161224, 20170201)\r\n assert len(res11) == 23\r\n \r\n assert not ds.is_trade_date(20150101)\r\n assert not ds.is_trade_date(20130501)\r\n\r\n\r\n'''\r\ndef test_remote_data_service_exception():\r\n from jaqs.data.dataservice import NotLoginError, InitializeError\r\n \r\n del ds\r\n ds2 = 
RemoteDataService()\r\n try:\r\n ds2.daily('000001.SH', 20170101, 20170109)\r\n except NotLoginError:\r\n pass\r\n except Exception as exc:\r\n raise exc\r\n \r\n try:\r\n ds2.init_from_config({'remote.data.address': 'blabla'})\r\n except InitializeError:\r\n pass\r\n except Exception as exc:\r\n raise exc\r\n'''\r\n\r\n\r\[email protected](autouse=True)\r\ndef my_globals(request):\r\n ds = RemoteDataService()\r\n ds.init_from_config(data_config)\r\n \r\n request.function.__globals__.update({'ds': ds})\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import time\r\n t_start = time.time()\r\n\r\n ds = RemoteDataService()\r\n ds.init_from_config(data_config)\r\n \r\n g = globals()\r\n #print(g)\r\n g = {k: v for k, v in g.items() if k.startswith('test_') and callable(v)}\r\n #print(g)\r\n for test_name, test_func in g.items():\r\n print(\"\\n==========\\nTesting {:s}...\".format(test_name))\r\n test_func()\r\n print(\"Test Complete.\")\r\n \r\n t3 = time.time() - t_start\r\n print(\"\\n\\n\\nTime lapsed in total: {:.1f}\".format(t3))" ]
[ [ "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
DANancy/Coding-Playground
[ "b82e3689ccc4771ee59c3472db78333ba17671b9" ]
[ "Omdena Projects/hourly_merge.py" ]
[ "import pandas as pd\r\nimport numpy as np\r\nfrom datetime import datetime\r\n\r\ndef main(consumption_data, weather_data, public_holidays_data, service_location_id):\r\n # Process consumption data\r\n df = consumption_data.astype({'date':'datetime64[ns]'}).rename(columns={'date':'datetime'}).set_index('datetime')\r\n df = pd.DataFrame(df['consumption'])\r\n df = df.asfreq('1H')\r\n\r\n # Convert consumption column to kWH (its a more common metric than Wh)\r\n df['consumption'] = df['consumption']/1000\r\n df.rename(columns={'consumption':'consumption_kWh'}, inplace=True)\r\n\r\n # Add season column\r\n df['date'] = df.index.strftime('%Y-%m-%d')\r\n df['year'] = df.index.year\r\n df['dayOfYear'] = df.index.dayofyear\r\n df['month'] = df.index.month\r\n df['monthName'] = df.index.month_name()\r\n df['week'] = df.index.isocalendar().week\r\n df['day'] = df.index.day\r\n df['dayName'] = df.index.day_name()\r\n df['hour'] = df.index.hour\r\n df['minute'] = df.index.minute\r\n df['dayOfWeek'] = df.index.dayofweek\r\n df['weekend'] = df['dayOfWeek'].apply(lambda x: 1 if x >= 5 else 0)\r\n df['time'] = df.index.time\r\n df['dayOfMonth'] = df.index.strftime('%m-%d')\r\n df['hourMinute'] = df.index.strftime('%H:%M')\r\n\r\n bins = [0,4,8,12,16,20,24]\r\n #labels = ['Late Night', 'Early Morning','Morning','Noon','Eve','Night']\r\n labels = [1, 2,3,4,5,6]\r\n df['session'] = pd.cut(df['hour'], bins=bins, labels=labels, include_lowest=True)\r\n\r\n def season_df(df):\r\n if df['month'] == 12 | df['month'] == 1 | df['month'] == 2:\r\n return 2 #'Summer'\r\n elif df['month'] == 3 | df['month'] == 4 | df['month'] == 5:\r\n return 3 #'Autumn'\r\n elif df['month'] == 6 | df['month'] == 7 | df['month'] == 8:\r\n return 4 #'Winter'\r\n else:\r\n return 1 #'Spring'\r\n\r\n df['season'] = df.apply(season_df, axis = 1)\r\n\r\n # Process weather data\r\n weather_df = weather_data.astype({'datetime':'datetime64[ns]'})\r\n weather_df = weather_df[['temp', 'humidity', 'clouds','datetime']].set_index('datetime')\r\n weather_df = weather_df.asfreq('1H')\r\n\r\n # Rename and divide by 100 to make it more ML friendly\r\n weather_df['clouds'] = weather_df['clouds']/100\r\n weather_df.rename(columns={'clouds':'cloud_cover'}, inplace=True)\r\n \r\n # Temperature in degrees C, rename with units\r\n weather_df.rename(columns={'temp':'temp_degreeC'}, inplace=True)\r\n\r\n # Humidity is relative humidity as a %\r\n # Rename and divide by 100 to make it more ML friendly\r\n weather_df['humidity'] = weather_df['humidity']/100\r\n weather_df.rename(columns={'humidity':'rel_humidity'}, inplace=True)\r\n\r\n # Process holiday data\r\n holiday_df = public_holidays_data\r\n holiday_df = holiday_df[['day','holiday','holidayName']]\r\n holiday_df.rename(columns = {'day':'date'},inplace=True)\r\n \r\n # Merge all datasets\r\n combined_df = df.join(weather_df)\r\n\r\n combined_df['date'] = pd.to_datetime(combined_df['date'], utc = False)\r\n holiday_df['date'] = pd.to_datetime(holiday_df['date'], utc = False)\r\n combined_df = pd.merge(combined_df.reset_index(), holiday_df)\r\n combined_df = combined_df.rename(columns={'index':'datetime'}).set_index('datetime')\r\n\r\n # Replace Holiday 'Y' with 1\r\n # Replace Holiday NaN with 0\r\n combined_df['holiday'] = np.where(combined_df['holiday']=='Y',1,0)\r\n # Add workingday or non-working day column\r\n combined_df['workingDay'] = np.where(np.logical_and(combined_df['weekend']==0, combined_df['holiday']==0),1,0)\r\n\r\n today = datetime.now()\r\n new_time = 
str(int((today).timestamp()))\r\n file_name = f'merged_{service_location_id}_timestamp_{new_time}.csv'\r\n return file_name, combined_df\r\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.cut", "numpy.logical_and", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
peterzheng98/fuzzy-system
[ "6ae14714c73d9a70b4d4c0a27e9da0d54a0fe5a8" ]
[ "ProcessScripts/DevsetTransform.py" ]
[ "import pandas as pd\nimport sys\nfrom collections import Counter\nfrom tqdm import tqdm\nimport json\n\n\nif __name__ == '__main__':\n filepath = '../datasets/tokenized/in_domain_dev.tsv'\n output_word_cab = '../datasets/tokenized/wordlist.txt'\n df = pd.read_csv(filepath, sep='\\t', header=0)\n\n word_list_cnt = open(output_word_cab, 'r').readlines()\n word_list_dict = {d.split('\\t')[0]: i for i, d in enumerate(word_list_cnt)}\n\n bar1 = tqdm(desc='Transform sentences', total=len(df))\n sentence_List = []\n label_List = []\n for i in range(len(df)):\n label1, verdict, human, sentences = df.iloc[i]\n label_List.append(human * 2 + verdict)\n word_sentence_list = sentences.split(' ')\n word_ap = []\n for word in word_sentence_list:\n if word in word_list_dict.keys():\n word_ap.append(word_list_dict[word])\n else:\n word_ap.append(len(word_list_dict))\n sentence_List.append(json.dumps(word_ap))\n bar1.update()\n\n df = pd.DataFrame({'data': sentence_List, 'label': label_List})\n df.to_csv('../datasets/tokenized/in_domain_dev.reformed.csv')\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
kamieen03/style-transfer-server
[ "91727ec62080215a0b870ce043faf0657137b84b" ]
[ "libs/parametric_models.py" ]
[ "import torch\nimport torch.nn as nn\n\nclass encoder3(nn.Module):\n def __init__(self, W, v2):\n super(encoder3,self).__init__() # W - width\n # vgg\n # 224 x 224\n self.conv1 = nn.Conv2d(3,3,1,1,0)\n self.reflecPad1 = nn.ZeroPad2d((1,1,1,1))\n # 226 x 226\n\n self.conv2 = nn.Conv2d(3,32 if v2 else int(64*W),3,1,0)\n self.relu2 = nn.ReLU(inplace=True)\n # 224 x 224\n\n self.reflecPad3 = nn.ZeroPad2d((1,1,1,1))\n self.conv3 = nn.Conv2d(32 if v2 else int(64*W),int(64*W),3,1,0)\n self.relu3 = nn.ReLU(inplace=True)\n # 224 x 224\n\n self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = False)\n # 112 x 112\n\n self.reflecPad4 = nn.ZeroPad2d((1,1,1,1))\n self.conv4 = nn.Conv2d(int(64*W),int(128*W),3,1,0)\n self.relu4 = nn.ReLU(inplace=True)\n # 112 x 112\n\n self.reflecPad5 = nn.ZeroPad2d((1,1,1,1))\n self.conv5 = nn.Conv2d(int(128*W),int(128*W),3,1,0)\n self.relu5 = nn.ReLU(inplace=True)\n # 112 x 112\n\n self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = False)\n # 56 x 56\n\n self.reflecPad6 = nn.ZeroPad2d((1,1,1,1))\n self.conv6 = nn.Conv2d(int(128*W),int(256*W),3,1,0)\n self.relu6 = nn.ReLU(inplace=True)\n # 56 x 56\n def forward(self,x):\n x = x / 255.0\n out = self.conv1(x)\n out = self.reflecPad1(out)\n out = self.conv2(out)\n out = self.relu2(out)\n out = self.reflecPad3(out)\n out = self.conv3(out)\n pool1 = self.relu3(out)\n out = self.maxPool(pool1)\n out = self.reflecPad4(out)\n out = self.conv4(out)\n out = self.relu4(out)\n out = self.reflecPad5(out)\n out = self.conv5(out)\n pool2 = self.relu5(out)\n out = self.maxPool2(pool2)\n out = self.reflecPad6(out)\n out = self.conv6(out)\n out = self.relu6(out)\n return out\n\nclass decoder3(nn.Module):\n def __init__(self, W, v2):\n super(decoder3,self).__init__()\n # decoder\n self.reflecPad7 = nn.ZeroPad2d((1,1,1,1))\n self.conv7 = nn.Conv2d(int(256*W),int(128*W),3,1,0)\n self.relu7 = nn.ReLU(inplace=True)\n # 56 x 56\n\n self.unpool = nn.UpsamplingNearest2d(scale_factor=2)\n # 112 x 112\n\n self.reflecPad8 = nn.ZeroPad2d((1,1,1,1))\n self.conv8 = nn.Conv2d(int(128*W),int(128*W),3,1,0)\n self.relu8 = nn.ReLU(inplace=True)\n # 112 x 112\n\n self.reflecPad9 = nn.ZeroPad2d((1,1,1,1))\n self.conv9 = nn.Conv2d(int(128*W),int(64*W),3,1,0)\n self.relu9 = nn.ReLU(inplace=True)\n\n self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)\n # 224 x 224\n\n self.reflecPad10 = nn.ZeroPad2d((1,1,1,1))\n self.conv10 = nn.Conv2d(int(64*W),32 if v2 else int(64*W),3,1,0)\n self.relu10 = nn.ReLU(inplace=True)\n\n self.reflecPad11 = nn.ZeroPad2d((1,1,1,1))\n self.conv11 = nn.Conv2d(32 if v2 else int(64*W),3,3,1,0)\n\n def forward(self,x):\n output = {}\n out = self.reflecPad7(x)\n out = self.conv7(out)\n out = self.relu7(out)\n out = self.unpool(out)\n out = self.reflecPad8(out)\n out = self.conv8(out)\n out = self.relu8(out)\n out = self.reflecPad9(out)\n out = self.conv9(out)\n out = self.relu9(out)\n out = self.unpool2(out)\n out = self.reflecPad10(out)\n out = self.conv10(out)\n out = self.relu10(out)\n out = self.reflecPad11(out)\n out = self.conv11(out)\n out = out.clamp(0,1)*255\n return out\n\nclass CNN(nn.Module):\n def __init__(self,W,matrixSize=32):\n super(CNN,self).__init__()\n # 256x64x64\n self.convs = nn.Sequential(nn.Conv2d(int(256*W),int(128*W),3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(int(128*W),int(64*W),3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(int(64*W),matrixSize,3,1,1))\n\n # 32x8x8\n self.fc = nn.Linear(matrixSize*matrixSize,matrixSize*matrixSize)\n\n def forward(self,x):\n out = 
self.convs(x)\n # 32x8x8\n #b,c,h,w = out.size()\n #print(1, b,c,h,w)\n out = out.view(1,32, -1)\n # 32x64\n out = torch.bmm(out,out.transpose(1,2)).div(144*256)\n #print(2,out.size())\n # 32x32\n out = out.view(1,-1)\n return self.fc(out)\n\nclass MulLayer(nn.Module):\n def __init__(self,W,matrixSize=32):\n super(MulLayer,self).__init__()\n self.snet = CNN(W,matrixSize)\n self.cnet = CNN(W,matrixSize)\n self.matrixSize = matrixSize\n\n self.compress = nn.Conv2d(int(256*W),matrixSize,1,1,0)\n self.unzip = nn.Conv2d(matrixSize,int(256*W),1,1,0)\n self.transmatrix = None\n\n def forward(self, cF, sF, alpha=1.0, trans=True):\n\n #cFBK = cF.clone()\n #cb, cc, ch, cw = cF.size()\n cFF = cF.view(1, 64, -1)\n cMean = torch.mean(cFF,dim=2,keepdim=True)\n cMean = cMean.unsqueeze(3)\n cF = cF - cMean\n\n #sb, sc, sh, sw = sF.size()\n sFF = sF.view(1, 64, -1)\n sMean = torch.mean(sFF,dim=2,keepdim=True)\n sMean = sMean.unsqueeze(3)\n #self.sMeanC = sMean.expand_as(cF)\n #sMeanS = sMean.expand_as(sF)\n sF = sF - sMean\n\n sF = sF * alpha + (1-alpha) * cF\n\n compress_content = self.compress(cF)\n #b,c,h,w = compress_content.size()\n compress_content = compress_content.view(1,32,-1)\n\n cMatrix = self.cnet(cF)\n sMatrix = self.snet(sF)\n\n sMatrix = sMatrix.view(1,self.matrixSize,self.matrixSize)\n cMatrix = cMatrix.view(1,self.matrixSize,self.matrixSize)\n self.transmatrix = torch.bmm(sMatrix,cMatrix)\n transfeature = torch.bmm(self.transmatrix,compress_content).view(1,32,256,144)\n out = self.unzip(transfeature.view(1,32,256,144))\n out = out + sMean\n return out\n\n" ]
[ [ "torch.mean", "torch.nn.UpsamplingNearest2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.bmm", "torch.nn.ZeroPad2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Neptos/openpilot
[ "01914a1a91ade18bd7aead99e7d1bf38cd22ad89", "01914a1a91ade18bd7aead99e7d1bf38cd22ad89" ]
[ "selfdrive/debug/mpc/tune_longitudinal.py", "selfdrive/controls/lib/cluster/fastcluster_py.py" ]
[ "#! /usr/bin/env python\n# type: ignore\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom selfdrive.controls.lib.longitudinal_mpc import libmpc_py\nfrom selfdrive.controls.lib.drive_helpers import MPC_COST_LONG\n\n# plot liongitudinal MPC trajectory by defining boundary conditions:\n# ego and lead vehicles state. Use this script to tune MPC costs\n\ndef RW(v_ego, v_l):\n TR = 1.8\n G = 9.81\n return (v_ego * TR - (v_l - v_ego) * TR + v_ego*v_ego/(2*G) - v_l*v_l / (2*G))\n\n\ndef NORM_RW_ERROR(v_ego, v_l, p):\n return (RW(v_ego, v_l) + 4.0 - p)\n #return (RW(v_ego, v_l) + 4.0 - p) / (np.sqrt(v_ego + 0.5) + 0.1)\n\n\nv_ego = 20.0\na_ego = 0\n\nx_lead = 10.0\nv_lead = 20.0\na_lead = -3.0\na_lead_tau = 0.\n\n# v_ego = 7.02661012716\n# a_ego = -1.26143024772\n\n# x_lead = 29.625 + 20\n# v_lead = 0.725235462189 + 1\n# a_lead = -1.00025629997\n\n# a_lead_tau = 2.90729817665\n\n#min_a_lead_tau = (a_lead**2 * math.pi) / (2 * (v_lead + 0.01)**2)\nmin_a_lead_tau = 0.0\n\nprint(a_lead_tau, min_a_lead_tau)\na_lead_tau = max(a_lead_tau, min_a_lead_tau)\n\nffi, libmpc = libmpc_py.get_libmpc(1)\nlibmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)\nlibmpc.init_with_simulation(v_ego, x_lead, v_lead, a_lead, a_lead_tau)\n\ncur_state = ffi.new(\"state_t *\")\ncur_state[0].x_ego = 0.0\ncur_state[0].v_ego = v_ego\ncur_state[0].a_ego = a_ego\ncur_state[0].x_l = x_lead\ncur_state[0].v_l = v_lead\n\nmpc_solution = ffi.new(\"log_t *\")\n\nfor _ in range(10):\n print(libmpc.run_mpc(cur_state, mpc_solution, a_lead_tau, a_lead))\n\n\nfor i in range(21):\n print(\"t: %.2f\\t x_e: %.2f\\t v_e: %.2f\\t a_e: %.2f\\t\" % (mpc_solution[0].t[i], mpc_solution[0].x_ego[i], mpc_solution[0].v_ego[i], mpc_solution[0].a_ego[i]))\n print(\"x_l: %.2f\\t v_l: %.2f\\t \\t\" % (mpc_solution[0].x_l[i], mpc_solution[0].v_l[i]))\n\nt = np.hstack([np.arange(0., 1.0, 0.2), np.arange(1.0, 10.1, 0.6)])\n\nprint(map(float, mpc_solution[0].x_ego)[-1])\nprint(map(float, mpc_solution[0].x_l)[-1] - map(float, mpc_solution[0].x_ego)[-1])\n\nplt.figure(figsize=(8, 8))\n\nplt.subplot(4, 1, 1)\nx_l = np.array(map(float, mpc_solution[0].x_l))\nplt.plot(t, map(float, mpc_solution[0].x_ego))\nplt.plot(t, x_l)\nplt.legend(['ego', 'lead'])\nplt.title('x')\nplt.grid()\n\nplt.subplot(4, 1, 2)\nv_ego = np.array(map(float, mpc_solution[0].v_ego))\nv_l = np.array(map(float, mpc_solution[0].v_l))\nplt.plot(t, v_ego)\nplt.plot(t, v_l)\nplt.legend(['ego', 'lead'])\nplt.ylim([-1, max(max(v_ego), max(v_l))])\nplt.title('v')\nplt.grid()\n\nplt.subplot(4, 1, 3)\nplt.plot(t, map(float, mpc_solution[0].a_ego))\nplt.plot(t, map(float, mpc_solution[0].a_l))\nplt.legend(['ego', 'lead'])\nplt.title('a')\nplt.grid()\n\n\nplt.subplot(4, 1, 4)\nd_l = np.array(map(float, mpc_solution[0].x_l)) - np.array(map(float, mpc_solution[0].x_ego))\ndesired = 4.0 + RW(v_ego, v_l)\n\nplt.plot(t, d_l)\nplt.plot(t, desired, '--')\nplt.ylim(-1, max(max(desired), max(d_l)))\nplt.legend(['relative distance', 'desired distance'])\nplt.grid()\n\nplt.show()\n\n# c1 = np.exp(0.3 * NORM_RW_ERROR(v_ego, v_l, d_l))\n# c2 = np.exp(4.5 - d_l)\n# print(c1)\n# print(c2)\n\n# plt.figure()\n# plt.plot(t, c1, label=\"NORM_RW_ERROR\")\n# plt.plot(t, c2, label=\"penalty function\")\n# plt.legend()\n\n# ## OLD MPC\n# a_lead_tau = 1.5\n# a_lead_tau = max(a_lead_tau, -a_lead / (v_lead + 0.01))\n\n# ffi, libmpc = libmpc_py.get_libmpc(1)\n# libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)\n# 
libmpc.init_with_simulation(v_ego, x_lead, v_lead, a_lead, a_lead_tau)\n\n# cur_state = ffi.new(\"state_t *\")\n# cur_state[0].x_ego = 0.0\n# cur_state[0].v_ego = v_ego\n# cur_state[0].a_ego = a_ego\n# cur_state[0].x_lead = x_lead\n# cur_state[0].v_lead = v_lead\n# cur_state[0].a_lead = a_lead\n\n# mpc_solution = ffi.new(\"log_t *\")\n\n# for _ in range(10):\n# print libmpc.run_mpc(cur_state, mpc_solution, a_lead_tau)\n\n# t = np.hstack([np.arange(0., 1.0, 0.2), np.arange(1.0, 10.1, 0.6)])\n\n# print(map(float, mpc_solution[0].x_ego)[-1])\n# print(map(float, mpc_solution[0].x_lead)[-1] - map(float, mpc_solution[0].x_ego)[-1])\n# plt.subplot(4, 2, 2)\n# plt.plot(t, map(float, mpc_solution[0].x_ego))\n# plt.plot(t, map(float, mpc_solution[0].x_lead))\n# plt.legend(['ego', 'lead'])\n# plt.title('x')\n\n# plt.subplot(4, 2, 4)\n# plt.plot(t, map(float, mpc_solution[0].v_ego))\n# plt.plot(t, map(float, mpc_solution[0].v_lead))\n# plt.legend(['ego', 'lead'])\n# plt.title('v')\n\n# plt.subplot(4, 2, 6)\n# plt.plot(t, map(float, mpc_solution[0].a_ego))\n# plt.plot(t, map(float, mpc_solution[0].a_lead))\n# plt.legend(['ego', 'lead'])\n# plt.title('a')\n\n\n# plt.subplot(4, 2, 8)\n# plt.plot(t, np.array(map(float, mpc_solution[0].x_lead)) - np.array(map(float, mpc_solution[0].x_ego)))\n\n# plt.show()\n", "import os\nimport numpy as np\n\nfrom cffi import FFI\nfrom common.ffi_wrapper import suffix\n\ncluster_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))\ncluster_fn = os.path.join(cluster_dir, \"libfastcluster\"+suffix())\n\nffi = FFI()\nffi.cdef(\"\"\"\nint hclust_fast(int n, double* distmat, int method, int* merge, double* height);\nvoid cutree_cdist(int n, const int* merge, double* height, double cdist, int* labels);\nvoid hclust_pdist(int n, int m, double* pts, double* out);\nvoid cluster_points_centroid(int n, int m, double* pts, double dist, int* idx);\n\"\"\")\n\nhclust = ffi.dlopen(cluster_fn)\n\n\ndef cluster_points_centroid(pts, dist):\n pts = np.ascontiguousarray(pts, dtype=np.float64)\n pts_ptr = ffi.cast(\"double *\", pts.ctypes.data)\n n, m = pts.shape\n\n labels_ptr = ffi.new(\"int[]\", n)\n hclust.cluster_points_centroid(n, m, pts_ptr, dist**2, labels_ptr)\n return list(labels_ptr)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.grid", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.ascontiguousarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Sup3Legacy/TIPE
[ "7e01cef869183c4d609c45d5fcf0bb371a9579f5" ]
[ "creastephGAN2.py" ]
[ "from __future__ import print_function, division\nimport os\nimport random\nimport argparse\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.utils as vutils\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.datasets as dSet\nfrom IPython.display import HTML\nimport matplotlib.animation as animation\nfrom torchvision import transforms, utils\nimport datetime\nseed = 123321\n#random.seed(seed)\n#torch.manual_seed(int(datetime.datetime.now().strftime(\"%H%M%S\")))\n\n\n##Hyperparamètres\nABSOLUTE = 'D:/Documents/Prepa/TIPE'\npathImage = ABSOLUTE + '/Images/Creasteph/'\npathModels = ABSOLUTE + \"/Models/\"\n\nbatchSize = 4 #10 pour Mosa et Mosa2 et 4 pour Mosa3\nimSize = 64 #Ok 128 pour Mosa et Mosa2 et Mosa3\nchannelsNumber = 3 #Couleurs !\ninputSize = 100 #Entrée du générateur 100 pour Mosa, 5000 pour Mosa2 et Mosa3 et Mosa4\nfeaturesGenerator = 64 #64 pour Mosa, Mosa2 et Mosa3, 128 pour Mosa4\nfeaturesDiscriminator = 64 #De même\nlearningRate = 0.0002 #0.0002 pour Mosa, Mosa2 Mosa3\nbeta1 = 0.5\n\n\nsetImages = dSet.ImageFolder(root = pathImage, transform = transforms.Compose([transforms.RandomCrop((imSize, imSize)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ColorJitter(hue = 0.5), transforms.ToTensor()]))\nimagesLoader = torch.utils.data.DataLoader(setImages, batch_size = batchSize, shuffle = True, num_workers=0)\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef weightsInit(m):\n classname = m.__class__.__name__\n if classname.find('conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n## générateur\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( inputSize, featuresGenerator * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(featuresGenerator * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(featuresGenerator * 8, featuresGenerator * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(featuresGenerator * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d( featuresGenerator * 4, featuresGenerator * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(featuresGenerator * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16\n nn.ConvTranspose2d( featuresGenerator * 2, featuresGenerator, 4, 2, 1, bias=False),\n nn.BatchNorm2d(featuresGenerator),\n nn.ReLU(True),\n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d( featuresGenerator, channelsNumber, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n\n def forward(self, input):\n return self.main(input)\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(channelsNumber, featuresDiscriminator, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(featuresDiscriminator, featuresDiscriminator * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(featuresDiscriminator * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(ndf*2) x 16 x 16\n nn.Conv2d(featuresDiscriminator * 2, featuresDiscriminator * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(featuresDiscriminator * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(featuresDiscriminator * 4, featuresDiscriminator * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(featuresDiscriminator * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(featuresDiscriminator * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n return self.main(input)\n\nnetG = Generator().to(device)\nnetG.apply(weightsInit)\n\nnetD = Discriminator().to(device)\nnetD.apply(weightsInit)\n\n\ncriterion = nn.BCELoss()\nfixedNoise = torch.randn(1, inputSize, 1, 1, device = device)\n\nrealLabel = 1\nfakeLabel = 0\n\noptimD = optim.Adam(netD.parameters(), lr = learningRate, betas = (beta1, 0.999))\noptimG = optim.Adam(netG.parameters(), lr = learningRate, betas = (beta1, 0.999))\n\n\nimgList = []\nGLoss = []\nDLoss = []\n\n\ndef train(number):\n iters = 0\n for epoch in range(number):\n for i, data in enumerate(imagesLoader, 0):\n netD.zero_grad()\n real_cpu = data[0].to(device)\n b_size = real_cpu.size(0)\n label = torch.full((b_size,), realLabel, device = device)\n output = netD(real_cpu).view(-1)\n errD_real = criterion(output, label)\n errD_real.backward()\n D_x = output.mean().item()\n\n noise = torch.randn(b_size, inputSize, 1, 1, device = device)\n fake = netG(noise)\n label.fill_(fakeLabel)\n output = netD(fake.detach()).view(-1)\n errD_fake = criterion(output, label)\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n errD = errD_real + errD_fake\n optimD.step()\n\n netG.zero_grad()\n label.fill_(realLabel)\n output = netD(fake).view(-1)\n errG = criterion(output, label)\n errG.backward()\n D_G_z2 = output.mean().item()\n optimG.step()\n\n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f'\n % (epoch, number, i, len(imagesLoader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n GLoss.append(errG.item())\n DLoss.append(errD.item())\n if (iters % 500 == 0) or ((epoch == number) and (i == len(imagesLoader)-1)):\n with torch.no_grad():\n fake = netG(fixedNoise).detach().cpu()\n imgList.append(vutils.make_grid(fake, padding=2, normalize=True))\n iters += 1\n\ndef show():\n fig = plt.figure(figsize=(10,10))\n plt.axis(\"off\")\n ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in imgList]\n ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)\n\n HTML(ani.to_jshtml())\n plt.show()\n\ndef clear():\n imgList = []\n\ndef test():\n w = 5\n h = 5\n fig = plt.figure(figsize = (10,10))\n lol = torch.randn(25, inputSize, 1, 1, device = device)\n image = netG(lol).detach().cpu()\n for i in range(image.size()[0]):\n fig.add_subplot(w, h, i + 1)\n lel = (image[i].numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)\n lel = np.roll(lel, np.random.randint(0, 3), 2)\n plt.imshow(lel)\n plt.show()\n\ndef saveModel(nom):\n torch.save(netD.state_dict(), pathModels + 'D-' + nom + '.pt')\n torch.save(netG.state_dict(), pathModels + 'G-' + nom + '.pt')\n\ndef loadModel(nom):\n netD.load_state_dict(torch.load(pathModels + 'D-' + nom + '.pt'))\n netG.load_state_dict(torch.load(pathModels + 'G-' + nom + '.pt'))\n" ]
[ [ "matplotlib.pyplot.imshow", "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.is_available", "numpy.random.randint", "torch.randn", "torch.nn.Sigmoid", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure", "matplotlib.animation.ArtistAnimation", "torch.nn.ConvTranspose2d", "torch.nn.init.constant_", "torch.full", "torch.nn.Conv2d", "torch.nn.BCELoss", "torch.nn.init.normal_", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "numpy.transpose", "matplotlib.pyplot.show", "torch.nn.Tanh", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
unilight/cdvae-vc
[ "6470b0e587d40f6d1d91712a0dacef5ff8d661ce" ]
[ "preprocessing/vcc2018/feature_reader.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\n\nimport argparse\nimport os\nimport sys\nimport numpy as np\nimport h5py\nimport logging\nfrom scipy.io import wavfile\nfrom sprocket.speech.synthesizer import Synthesizer\n\nimport tensorflow as tf\n\ndef Segment_feature_reader(\n file_pattern,\n feat_param,\n batch_size,\n crop_length,\n capacity=256,\n min_after_dequeue=128,\n num_threads=8,\n ):\n \n with tf.name_scope('InputSpectralFrame'):\n \n # get dimensions\n SP_DIM = feat_param['fftl'] // 2 + 1 \n MCC_DIM = feat_param['mcep_dim']\n FEAT_DIM = feat_param['feat_dim']\n record_bytes = FEAT_DIM * 4\n \n files = []\n for p in file_pattern:\n files.extend(tf.gfile.Glob(p))\n\n print('Found {} files'.format(len(files)))\n \n filename_queue = tf.train.string_input_producer(files)\n\n reader = tf.WholeFileReader()\n _, value = reader.read(filename_queue)\n value = tf.decode_raw(value, tf.float32)\n value = tf.reshape(value, [-1, FEAT_DIM,])\n values = tf.random_crop(value, [crop_length, FEAT_DIM])\n\n # WORLD features\n sp = values[:, : SP_DIM]\n mcc = values[:, SP_DIM : SP_DIM + MCC_DIM]\n\n # speaker label\n speaker = tf.cast(values[:, -1], tf.int64)\n\n dictionary = {\n 'sp': sp, \n 'mcc': mcc,\n 'speaker': speaker,\n }\n \n return tf.train.shuffle_batch(\n dictionary,\n batch_size,\n capacity=capacity,\n min_after_dequeue=min_after_dequeue,\n num_threads=num_threads,\n )\n\ndef Whole_feature_reader(filename, feat_param, dtype=np.float32):\n \"\"\"FUNCTION TO READ whole utterance of features\n \"\"\"\n SP_DIM = feat_param['fftl'] // 2 + 1 \n MCC_DIM = feat_param['mcep_dim']\n FEAT_DIM = feat_param['feat_dim']\n\n values = np.fromfile(filename, dtype).astype(np.float64).reshape([-1, FEAT_DIM])\n\n sp = values[:, : SP_DIM].copy(order='C')\n mcc = values[:, SP_DIM : SP_DIM + MCC_DIM].copy(order='C')\n ap = values[:, SP_DIM + MCC_DIM : SP_DIM * 2 + MCC_DIM].copy(order='C')\n f0 = values[:, SP_DIM * 2 + MCC_DIM].copy(order='C')\n en_sp = values[:, SP_DIM * 2 + MCC_DIM + 1].copy(order='C')\n en_mcc = values[:, SP_DIM * 2 + MCC_DIM + 2].copy(order='C')\n speaker = values[:, -1].astype(np.int64)\n\n dictionary = {\n 'sp': sp, \n 'mcc': mcc,\n 'ap': ap,\n 'f0': f0, \n 'en_sp': en_sp,\n 'en_mcc': en_mcc, \n 'speaker': speaker,\n }\n\n return dictionary\n\n\ndef main():\n \"\"\" Feature reader & synthesis check\n Usage: \n 1. read original features\n feature_ready.py --filename filename \n 2. 
read f0 transformed features\n feature_ready.py --filename filename --tarspk target_speaker\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"test feature readers\")\n parser.add_argument(\n \"--file_pattern\", default=None, type=str,\n help=\"the pattern of the testing feature file(s)\")\n parser.add_argument(\n \"--tarspk\", default=None, type=str,\n help=\"the name of target speaker\")\n parser.add_argument(\n \"--wavname\", default='test.wav', type=str,\n help=\"the name of output wav\")\n args = parser.parse_args()\n\n # parameter setting\n feat_param = {\n 'fs':22050,\n 'shiftms':5,\n 'fftl':1024,\n 'mcep_alpha': 0.455,\n 'sp_dim':513,\n 'mcep_dim':34,\n 'feat_dim': 513 + 34 + 513 + 3 + 39 + 1\n }\n # load acoustic features and synthesize (the reader returns a dict)\n if os.path.exists(args.file_pattern):\n feats = Whole_feature_reader(\n args.file_pattern, feat_param)\n f0, mcc, ap, en_mcc = feats['f0'], feats['mcc'], feats['ap'], feats['en_mcc']\n en_mcc = np.expand_dims(en_mcc, 1)\n mcc = np.concatenate([en_mcc, mcc], axis=1)\n world_synthesis(args.wavname, feat_param, f0, mcc, ap)\n\n\ndef world_synthesis(wavname, feat_param, f0, mcep, ap):\n # assumed helper built on sprocket's WORLD Synthesizer; writes a 16-bit wav\n synthesizer = Synthesizer(fs=feat_param['fs'], fftl=feat_param['fftl'], shiftms=feat_param['shiftms'])\n wav = synthesizer.synthesis(f0, mcep, ap, alpha=feat_param['mcep_alpha'])\n wavfile.write(wavname, feat_param['fs'], wav.astype(np.int16))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.expand_dims", "numpy.fromfile", "tensorflow.WholeFileReader", "tensorflow.decode_raw", "tensorflow.reshape", "tensorflow.cast", "tensorflow.random_crop", "numpy.concatenate", "tensorflow.train.string_input_producer", "tensorflow.name_scope", "tensorflow.gfile.Glob", "tensorflow.train.shuffle_batch" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
theainerd/transformers
[ "f7328de46dbeda4992a093a0501932bf0fc7b76f" ]
[ "src/transformers/models/big_bird/modeling_big_bird.py" ]
[ "# coding=utf-8\n# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch BigBird model. \"\"\"\n\n\nimport math\nimport os\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel, SequenceSummary, apply_chunking_to_forward\nfrom ...utils import logging\nfrom .configuration_big_bird import BigBirdConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"google/bigbird-roberta-base\"\n_CONFIG_FOR_DOC = \"BigBirdConfig\"\n_TOKENIZER_FOR_DOC = \"BigBirdTokenizer\"\n\nBIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"google/bigbird-roberta-base\",\n \"google/bigbird-roberta-large\",\n \"google/bigbird-base-trivia-itc\",\n # See all BigBird models at https://huggingface.co/models?filter=big_bird\n]\n\n_TRIVIA_QA_MAPPING = {\n \"big_bird_attention\": \"attention/self\",\n \"output_layer_norm\": \"output/LayerNorm\",\n \"attention_output\": \"attention/output/dense\",\n \"output\": \"output/dense\",\n \"self_attention_layer_norm\": \"attention/output/LayerNorm\",\n \"intermediate\": \"intermediate/dense\",\n \"word_embeddings\": \"bert/embeddings/word_embeddings\",\n \"position_embedding\": \"bert/embeddings/position_embeddings\",\n \"type_embeddings\": \"bert/embeddings/token_type_embeddings\",\n \"embeddings\": \"bert/embeddings\",\n \"layer_normalization\": \"output/LayerNorm\",\n \"layer_norm\": \"LayerNorm\",\n \"trivia_qa_head\": \"qa_classifier\",\n \"dense\": \"intermediate/dense\",\n \"dense_1\": \"qa_outputs\",\n}\n\n\ndef load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n\n def load_tf_weights_bert(init_vars, tf_path):\n names = []\n tf_weights = {}\n\n for name, shape in init_vars:\n array = tf.train.load_variable(tf_path, name)\n name = name.replace(\"bert/encoder/LayerNorm\", \"bert/embeddings/LayerNorm\")\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n names.append(name)\n tf_weights[name] = array\n\n return names, tf_weights\n\n def load_tf_weights_trivia_qa(init_vars):\n names = []\n tf_weights = {}\n\n for i, var in enumerate(init_vars):\n name_items = var.name.split(\"/\")\n\n if \"transformer_scaffold\" in name_items[0]:\n layer_name_items = 
name_items[0].split(\"_\")\n if len(layer_name_items) < 3:\n layer_name_items += [0]\n\n name_items[0] = f\"bert/encoder/layer_{layer_name_items[2]}\"\n\n name = \"/\".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[\n :-2\n ] # remove last :0 in variable\n\n if \"self/attention/output\" in name:\n name = name.replace(\"self/attention/output\", \"output\")\n\n if i >= len(init_vars) - 2:\n name = name.replace(\"intermediate\", \"output\")\n\n logger.info(f\"Loading TF weight {name} with shape {var.shape}\")\n array = var.value().numpy()\n names.append(name)\n tf_weights[name] = array\n\n return names, tf_weights\n\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n\n # Load weights from TF model\n init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path)\n\n assert len(init_vars) > 0, \"Loaded trained variables cannot be empty.\"\n\n pt_names = list(model.state_dict().keys())\n\n if is_trivia_qa:\n names, tf_weights = load_tf_weights_trivia_qa(init_vars)\n else:\n names, tf_weights = load_tf_weights_bert(init_vars, tf_path)\n\n for txt_name in names:\n array = tf_weights[txt_name]\n name = txt_name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n pointer = model\n pt_name = []\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n pt_name.append(\"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n pt_name.append(\"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n pt_name.append(\"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n pt_name.append(\"classifier\")\n elif scope_names[0] == \"transform\":\n pointer = getattr(pointer, \"transform\")\n pt_name.append(\"transform\")\n if (\"bias\" in name) or (\"kernel\" in name):\n pointer = getattr(pointer, \"dense\")\n pt_name.append(\"dense\")\n elif (\"beta\" in name) or (\"gamma\" in name):\n pointer = getattr(pointer, \"LayerNorm\")\n pt_name.append(\"LayerNorm\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n pt_name.append(f\"{scope_names[0]}\")\n except AttributeError:\n logger.info(f\"Skipping {m_name}\")\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n pt_name.append(f\"{num}\")\n if m_name[-11:] == \"_embeddings\" or m_name == \"embeddings\":\n pointer = getattr(pointer, \"weight\")\n pt_name.append(\"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape):\n # 
print(txt_name, array.shape)\n if (\n txt_name.endswith(\"attention/self/key/kernel\")\n or txt_name.endswith(\"attention/self/query/kernel\")\n or txt_name.endswith(\"attention/self/value/kernel\")\n ):\n array = array.transpose(1, 0, 2).reshape(pointer.shape)\n elif txt_name.endswith(\"attention/output/dense/kernel\"):\n array = array.transpose(0, 2, 1).reshape(pointer.shape)\n else:\n array = array.reshape(pointer.shape)\n\n if pointer.shape != array.shape:\n raise ValueError(\n f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}.\"\n )\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n pt_weight_name = \".\".join(pt_name)\n logger.info(f\"Initialize PyTorch weight {pt_weight_name} from {txt_name}.\")\n pointer.data = torch.from_numpy(array)\n tf_weights.pop(txt_name, None)\n pt_names.remove(pt_weight_name)\n\n logger.info(f\"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.\")\n logger.info(f\"Weights not initialized in PyTorch model: {', '.join(pt_names)}.\")\n return model\n\n\nclass BigBirdEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n # End copy\n\n self.rescale_embeddings = config.rescale_embeddings\n self.hidden_size = config.hidden_size\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.rescale_embeddings:\n inputs_embeds = inputs_embeds * (self.hidden_size ** 0.5)\n\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n\n embeddings = self.dropout(embeddings)\n embeddings = self.LayerNorm(embeddings)\n return embeddings\n\n\nclass BigBirdSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n 
f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. 
Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = F.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\nclass BigBirdBlockSparseAttention(nn.Module):\n def __init__(self, config, seed=None):\n super().__init__()\n\n self.max_seqlen = config.max_position_embeddings\n self.seed = seed\n\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size {config.hidden_size} is not a multiple of the number of attention \"\n f\"heads {config.num_attention_heads}.\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.num_random_blocks = config.num_random_blocks\n self.block_size = config.block_size\n\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n band_mask=None,\n from_mask=None,\n to_mask=None,\n from_blocked_mask=None,\n to_blocked_mask=None,\n output_attentions=None,\n ):\n # Currently this `class` can't be used in decoder.\n\n batch_size, seqlen, _ = hidden_states.size()\n to_seq_length = from_seq_length = seqlen\n from_block_size = to_block_size = self.block_size\n\n assert from_seq_length % from_block_size == 0, \"Query sided sequence length must be multiple of block size\"\n assert to_seq_length % to_block_size == 0, \"Key/Value sided sequence length must be multiple of block size\"\n\n query_layer = self.transpose_for_scores(self.query(hidden_states))\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n context_layer, 
attention_probs = self.bigbird_block_sparse_attention(\n query_layer,\n key_layer,\n value_layer,\n band_mask,\n from_mask,\n to_mask,\n from_blocked_mask,\n to_blocked_mask,\n self.num_attention_heads,\n self.num_random_blocks,\n self.attention_head_size,\n from_block_size,\n to_block_size,\n batch_size,\n from_seq_length,\n to_seq_length,\n seed=self.seed,\n plan_from_length=None,\n plan_num_rand_blocks=None,\n output_attentions=output_attentions,\n )\n\n context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n @staticmethod\n def torch_bmm_nd(inp_1, inp_2, ndim=None):\n \"\"\" Fast nd matrix multiplication \"\"\"\n # faster replacement of torch.einsum (\"bhqk,bhkd->bhqd\")\n return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(\n inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])\n )\n\n @staticmethod\n def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):\n \"\"\" Fast nd matrix multiplication with transpose \"\"\"\n # faster replacement of torch.einsum (bhqd,bhkd->bhqk)\n return torch.bmm(\n inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)\n ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))\n\n def bigbird_block_sparse_attention(\n self,\n query_layer,\n key_layer,\n value_layer,\n band_mask,\n from_mask,\n to_mask,\n from_blocked_mask,\n to_blocked_mask,\n n_heads,\n n_rand_blocks,\n attention_head_size,\n from_block_size,\n to_block_size,\n batch_size,\n from_seq_len,\n to_seq_len,\n seed,\n plan_from_length,\n plan_num_rand_blocks,\n output_attentions,\n ):\n\n # BigBird block-sparse attention as suggested in paper\n\n # ITC:\n # global tokens: 2 x block_size\n # window tokens: 3 x block_size\n # random tokens: num_rand_tokens x block_size\n\n # ETC:\n # global tokens: extra_globals_tokens + 2 x block_size\n # window tokens: 3 x block_size\n # random tokens: num_rand_tokens x block_size\n\n # Note:\n # 1) Currently, ETC is not supported.\n # 2) Window size is fixed to 3 blocks & it can be changed only by\n # changing `block_size`.\n # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be\n # controlled only by `block_size`.\n\n # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention)\n # hence following code can be divided into 5 parts.\n\n if from_seq_len // from_block_size != to_seq_len // to_block_size:\n raise ValueError(\"Error the number of blocks needs to be same!\")\n\n rsqrt_d = 1 / math.sqrt(attention_head_size)\n bsz = batch_size\n\n # generate random attention and corresponding masks\n np.random.seed(seed)\n if from_seq_len in [1024, 3072, 4096]: # old plans used in paper\n rand_attn = [\n self._bigbird_block_rand_mask(\n self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024\n )[: (from_seq_len // from_block_size - 2)]\n for _ in range(n_heads)\n ]\n else:\n if plan_from_length is None:\n plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(\n from_seq_len, from_block_size, n_rand_blocks\n )\n\n rand_attn = self._bigbird_block_rand_mask_with_head(\n from_seq_length=from_seq_len,\n to_seq_length=to_seq_len,\n from_block_size=from_block_size,\n to_block_size=to_block_size,\n num_heads=n_heads,\n 
plan_from_length=plan_from_length,\n                plan_num_rand_blocks=plan_num_rand_blocks,\n            )\n\n        rand_attn = np.stack(rand_attn, axis=0)\n        rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)\n        rand_attn.unsqueeze_(0)\n        rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)\n\n        rand_mask = self._create_rand_mask_from_inputs(\n            from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size\n        )\n\n        blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)\n        blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)\n        blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)\n\n        # preparing blocks for random attention\n        gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)\n        gathered_key = gathered_key.view(\n            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1\n        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks*to_block_size, -1]\n        gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)\n        gathered_value = gathered_value.view(\n            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1\n        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks*to_block_size, -1]\n\n        # 1st PART\n        # 1st block (global block) attention scores\n        # q[0] x (k[0], k[1], k[2], k[3], k[4] .... )\n\n        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]\n        first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)\n\n        first_product = first_product * rsqrt_d\n        first_product += (1.0 - to_mask) * -10000.0\n        first_attn_weights = F.softmax(first_product, dim=-1)  # [bsz, n_heads, from_block_size, to_seq_len]\n\n        # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]\n        first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)\n        first_context_layer.unsqueeze_(2)\n\n        # 2nd PART\n        # 2nd block attention scores\n        # q[1] x (sliding_keys, random_keys, global_keys)\n        # sliding key blocks -> 2nd, 3rd blocks\n        # global key blocks -> 1st block\n\n        second_key_mat = torch.cat(\n            [\n                blocked_key_matrix[:, :, 0],\n                blocked_key_matrix[:, :, 1],\n                blocked_key_matrix[:, :, 2],\n                blocked_key_matrix[:, :, -1],\n                gathered_key[:, :, 0],\n            ],\n            dim=2,\n        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n        second_value_mat = torch.cat(\n            [\n                blocked_value_matrix[:, :, 0],\n                blocked_value_matrix[:, :, 1],\n                blocked_value_matrix[:, :, 2],\n                blocked_value_matrix[:, :, -1],\n                gathered_value[:, :, 0],\n            ],\n            dim=2,\n        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n\n        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n        second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)\n        second_seq_pad = torch.cat(\n            [\n                to_mask[:, :, :, : 3 * to_block_size],\n                to_mask[:, :, :, -to_block_size:],\n                first_context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),\n            ],\n            dim=3,\n        )\n        second_rand_pad = torch.cat(\n            [\n                first_context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),\n                rand_mask[:, :, 0],\n            ],\n            dim=3,\n        )\n        second_product = second_product * rsqrt_d
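\n        # Combining the two pad masks with torch.minimum keeps a key position only if both the\n        # sequence mask (real tokens) and the random-block mask allow it; adding -10000.0 to the\n        # disallowed logits drives their post-softmax probability to ~0.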
\n        second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * -10000.0\n        second_attn_weights = F.softmax(\n            second_product, dim=-1\n        )  # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n\n        # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]\n        second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)\n\n        second_context_layer.unsqueeze_(2)\n\n        # 3rd PART\n        # Middle blocks attention scores\n        # q[2:-2] x (sliding_keys, random_keys, global_keys)\n        # sliding attn is calculated using special trick of shifting tokens as discussed in paper\n        # random keys are generated by taking random indices as per `rand_attn`\n        # global keys -> 1st & last block\n\n        exp_blocked_key_matrix = torch.cat(\n            [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n        exp_blocked_value_matrix = torch.cat(\n            [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],\n            dim=3,\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n        middle_query_matrix = blocked_query_matrix[:, :, 2:-2]\n\n        # sliding attention scores for q[2:-2]\n        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n        inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)\n        # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]\n        inner_band_product = inner_band_product * rsqrt_d\n\n        # random attention scores for q[2:-2]\n        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]\n        rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)\n        # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]\n        rand_band_product = rand_band_product * rsqrt_d\n\n        # Including 1st block (since it's global)\n        first_band_product = torch.einsum(\n            \"bhlqd,bhkd->bhlqk\", middle_query_matrix, blocked_key_matrix[:, :, 0]\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]\n        first_band_product = first_band_product * rsqrt_d\n\n        # Including last block (since it's global)\n        last_band_product = torch.einsum(\n            \"bhlqd,bhkd->bhlqk\", middle_query_matrix, blocked_key_matrix[:, :, -1]\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]\n        last_band_product = last_band_product * rsqrt_d\n\n        # masking padded tokens\n        inner_band_product += (1.0 - band_mask) * -10000.0\n        first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * -10000.0\n        last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * -10000.0\n        rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * -10000.0\n\n        # completing attention scores matrix for all q[2:-2]\n        band_product = torch.cat(\n            [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
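\n\n        # Layout of the concatenated scores along the last axis (worked sketch with n_rand_blocks=3,\n        # an assumed value): [1 global first block | 3 sliding blocks | 3 random blocks | 1 global last\n        # block] = (5+3) blocks, matching the column slices used to split the probabilities below.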
\n\n        # safely doing softmax since attention matrix is completed\n        attn_weights = F.softmax(\n            band_product, dim=-1\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]\n\n        # contribution of sliding keys\n        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n        context_layer = self.torch_bmm_nd(\n            attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5\n        )\n        # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n\n        # adding contribution of random keys\n        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]\n        context_layer += self.torch_bmm_nd(\n            attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5\n        )\n        # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n\n        # adding contribution of global keys\n        context_layer += torch.einsum(\n            \"bhlqk,bhkd->bhlqd\", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n        context_layer += torch.einsum(\n            \"bhlqk,bhkd->bhlqd\", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n\n        # 4th PART\n        # second-to-last query block attention scores\n        # q[-2] x (sliding_keys, random_keys, global_keys)\n        # sliding key blocks -> last 3 blocks\n        # global key block -> 1st block\n        # random key block -> based on indices stored in `rand_attn`\n\n        second_last_key_mat = torch.cat(\n            [\n                blocked_key_matrix[:, :, 0],\n                blocked_key_matrix[:, :, -3],\n                blocked_key_matrix[:, :, -2],\n                blocked_key_matrix[:, :, -1],\n                gathered_key[:, :, -1],\n            ],\n            dim=2,\n        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n        second_last_value_mat = torch.cat(\n            [\n                blocked_value_matrix[:, :, 0],\n                blocked_value_matrix[:, :, -3],\n                blocked_value_matrix[:, :, -2],\n                blocked_value_matrix[:, :, -1],\n                gathered_value[:, :, -1],\n            ],\n            dim=2,\n        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n\n        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n        second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)\n        second_last_seq_pad = torch.cat(\n            [\n                to_mask[:, :, :, :to_block_size],\n                to_mask[:, :, :, -3 * to_block_size :],\n                context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),\n            ],\n            dim=3,\n        )\n        second_last_rand_pad = torch.cat(\n            [\n                context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),\n                rand_mask[:, :, -1],\n            ],\n            dim=3,\n        )\n        second_last_product = second_last_product * rsqrt_d\n        second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0\n        second_last_attn_weights = F.softmax(\n            second_last_product, dim=-1\n        )  # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n\n        # [bsz, n_heads, from_block_size, 
(4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]\n second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)\n second_last_context_layer.unsqueeze_(2)\n\n # 5th PART\n # last block (global) attention scores\n # q[-1] x (k[0], k[1], k[2], k[3], .... )\n\n # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]\n last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)\n last_product = last_product * rsqrt_d\n last_product += (1.0 - to_mask) * -10000.0\n last_attn_weights = F.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n]\n\n # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]\n last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)\n last_context_layer.unsqueeze_(2)\n\n # combining representations of all tokens\n context_layer = torch.cat(\n [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],\n dim=2,\n )\n context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask\n context_layer = torch.transpose(context_layer, 1, 2)\n\n # this is just for visualizing; forward pass doesn't depend on following code\n if output_attentions:\n # TODO(PVP): need to verify if below code is correct\n attention_probs = torch.zeros(\n bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device\n )\n\n # 1st query block\n # corresponding to `first_context_layer`\n attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global\n\n # 2nd query block\n # corresponding to `second_context_layer`\n attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[\n :, :, :, : 3 * to_block_size\n ] # 1st three key blocks (global + sliding)\n attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[\n :, :, :, 3 * to_block_size : 4 * to_block_size\n ] # last key block (global)\n # random keys\n for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):\n # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch\n for p2, i2, w2 in zip(range(n_heads), i1, w1):\n # p2, i2, w2 corresponds to head_dim i.e. 
the following operation is done for each head\n                    attn_probs_view = attention_probs.view(\n                        bsz,\n                        n_heads,\n                        from_seq_len // from_block_size,\n                        from_block_size,\n                        to_seq_len // to_block_size,\n                        to_block_size,\n                    )\n                    right_slice = w2[:, 4 * to_block_size :]\n                    attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(\n                        from_block_size, n_rand_blocks, to_block_size\n                    )\n\n            # Middle query blocks\n            # corresponding to `context_layer`\n            # sliding keys\n            for q_idx in range(from_seq_len // from_block_size - 4):\n                attn_probs_view = attention_probs.view(\n                    bsz,\n                    n_heads,\n                    from_seq_len // from_block_size,\n                    from_block_size,\n                    to_seq_len // to_block_size,\n                    to_block_size,\n                )[:, :, 2:-2, :, 1:-1, :]\n                right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]\n                attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(\n                    bsz, n_heads, from_block_size, 3, to_block_size\n                )  # inner_band_product\n            # global keys (corresponding to 1st key block)\n            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[\n                :, :, :, :, :to_block_size\n            ].view(\n                bsz, n_heads, -1, to_block_size\n            )  # first_band_product\n            # global keys (corresponding to last key block)\n            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[\n                :, :, :, :, -to_block_size:\n            ].view(\n                bsz, n_heads, -1, to_block_size\n            )  # last_band_product\n            # random keys\n            for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):\n                # p1, i1, w1 corresponds to batch_dim, i.e. the following operation is done for each sequence in the batch\n                for p2, i2, w2 in zip(range(n_heads), i1, w1):\n                    # p2, i2, w2 corresponds to head_dim, i.e. the following operation is done for each head\n                    for q_idx in range(1, len(i2) - 1):\n                        attn_probs_view = attention_probs.view(\n                            bsz,\n                            n_heads,\n                            from_seq_len // from_block_size,\n                            from_block_size,\n                            to_seq_len // to_block_size,\n                            to_block_size,\n                        )\n                        right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]\n                        attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(\n                            from_block_size, n_rand_blocks, to_block_size\n                        )\n\n            # Second-last query block\n            # corresponding to `second_last_context_layer`\n            attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[\n                :, :, :, :to_block_size\n            ]  # 1st key block (global)\n            attention_probs[\n                :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :\n            ] = second_last_attn_weights[\n                :, :, :, to_block_size : 4 * to_block_size\n            ]  # last three blocks (global + sliding)\n            # random keys\n            for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):\n                # p1, i1, w1 corresponds to batch_dim, i.e. the following operation is done for each sequence in the batch\n                for p2, i2, w2 in zip(range(n_heads), i1, w1):\n                    # p2, i2, w2 corresponds to head_dim, i.e. the following operation is done for each head
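\n                    # The 6D view below exposes (query_block, to_block) axes so the random-key\n                    # probabilities for this head can be scattered back into the dense attention\n                    # matrix at the block indices stored in `i2` (one row of `rand_attn`).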
\n                    attn_probs_view = attention_probs.view(\n                        bsz,\n                        n_heads,\n                        from_seq_len // from_block_size,\n                        from_block_size,\n                        to_seq_len // to_block_size,\n                        to_block_size,\n                    )\n                    right_slice = w2[:, 4 * to_block_size :]\n                    attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(\n                        from_block_size, n_rand_blocks, to_block_size\n                    )\n\n            # last query block\n            # corresponding to `last_context_layer`\n            attention_probs[:, :, -from_block_size:, :] = last_attn_weights  # all keys global\n\n        else:\n            attention_probs = None\n\n        return context_layer, attention_probs\n\n    @staticmethod\n    def torch_gather_b2(params, indices):\n        # this operation is equivalent to tf.gather when batch_dims=2\n\n        if params.shape[:2] != indices.shape[:2]:\n            raise ValueError(\n                f\"Make sure that the first two dimensions of params and indices are identical, \"\n                f\"but they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}\"\n            )\n        num_indices_to_gather = indices.shape[-2] * indices.shape[-1]\n        num_indices_to_pick_from = params.shape[2]\n\n        indices_shift = (\n            torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)\n            // num_indices_to_gather\n            * num_indices_to_pick_from\n        )\n\n        flattened_indices = indices.view(-1) + indices_shift\n        flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])\n\n        out_flattened = flattened_params.index_select(0, flattened_indices)\n\n        out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])\n        return out\n\n    @staticmethod\n    def _create_rand_mask_from_inputs(\n        from_blocked_mask,\n        to_blocked_mask,\n        rand_attn,\n        num_attention_heads,\n        num_rand_blocks,\n        batch_size,\n        from_seq_length,\n        from_block_size,\n    ):\n        \"\"\"\n        Create the random-attention mask from the blocked input masks.\n\n        Args:\n            from_blocked_mask: 3D Tensor of shape [batch_size,\n                from_seq_length//from_block_size, from_block_size].\n            to_blocked_mask: int32 Tensor of shape [batch_size,\n                to_seq_length//to_block_size, to_block_size].\n            rand_attn: [batch_size, num_attention_heads,\n                from_seq_length//from_block_size-2, num_rand_blocks]\n            num_attention_heads: int. Number of attention heads.\n            num_rand_blocks: int. Number of random chunks per row.\n            batch_size: int. Batch size for computation.\n            from_seq_length: int. length of from sequence.\n            from_block_size: int. size of block in from sequence.\n\n        Returns:\n            float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,\n            from_block_size, num_rand_blocks*to_block_size].\n        \"\"\"\n        num_windows = from_seq_length // from_block_size - 2\n        rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])\n        rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)\n        rand_mask = torch.einsum(\"blq,bhlk->bhlqk\", from_blocked_mask[:, 1:-1], rand_mask)\n        return rand_mask\n\n    @staticmethod\n    def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):\n        \"\"\"\n        Gives the plan of where to put random attention.\n\n        Args:\n            from_seq_length: int. length of from sequence.\n            from_block_size: int. size of block in from sequence.\n            num_rand_blocks: int. Number of random chunks per row.
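\n\n        Example (illustrative values, not from the original docstring): with from_seq_length=2048,\n        from_block_size=64 (32 blocks) and num_rand_blocks=3, (2 * 3 + 5) < 32 holds, so the plan is\n        plan_from_length = [704, 2048] and plan_num_rand_blocks = [3, 0].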
\n\n        Returns:\n            plan_from_length: ending location of each from-block plan segment.\n            plan_num_rand_blocks: number of random blocks to use within each plan segment.\n        \"\"\"\n\n        plan_from_length = []\n        plan_num_rand_blocks = []\n        if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):\n            plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))\n            plan_num_rand_blocks.append(num_rand_blocks)\n            plan_from_length.append(from_seq_length)\n            plan_num_rand_blocks.append(0)\n        elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):\n            plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))\n            plan_num_rand_blocks.append(num_rand_blocks // 2)\n            plan_from_length.append(from_seq_length)\n            plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))\n        else:\n            plan_from_length.append(from_seq_length)\n            plan_num_rand_blocks.append(num_rand_blocks)\n\n        return plan_from_length, plan_num_rand_blocks\n\n    @staticmethod\n    def _bigbird_block_rand_mask(\n        from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1\n    ):\n        \"\"\"\n        Create adjacency list of random attention.\n\n        Args:\n            from_seq_length: int. length of from sequence.\n            to_seq_length: int. length of to sequence.\n            from_block_size: int. size of block in from sequence.\n            to_block_size: int. size of block in to sequence.\n            num_rand_blocks: int. Number of random chunks per row.\n            last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,\n                if positive then num_rand_blocks blocks chosen only up to last_idx.\n\n        Returns:\n            adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks\n        \"\"\"\n        # using this method when from_seq_length in [1024, 3072, 4096]\n\n        assert (\n            from_seq_length // from_block_size == to_seq_length // to_block_size\n        ), \"Error: the number of blocks needs to be the same!\"\n\n        rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)\n        middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)\n        last = to_seq_length // to_block_size - 1\n        if last_idx > (2 * to_block_size):\n            last = (last_idx // to_block_size) - 1\n\n        r = num_rand_blocks  # shorthand\n        for i in range(1, from_seq_length // from_block_size - 1):\n            start = i - 2\n            end = i\n            if i == 1:\n                rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]\n            elif i == 2:\n                rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]\n            elif i == from_seq_length // from_block_size - 3:\n                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]\n                # Missing -3: should have been sliced till last-3\n            elif i == from_seq_length // from_block_size - 2:\n                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]\n                # Missing -4: should have been sliced till last-4\n            else:\n                if start > last:\n                    start = last\n                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]\n                elif (end + 1) == last:\n                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]\n                else:\n                    rand_attn[i - 1, :] = np.random.permutation(\n                        np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))\n                    )[:r]\n        return rand_attn\n\n    def _bigbird_block_rand_mask_with_head(\n        self,\n        from_seq_length,\n        to_seq_length,\n        from_block_size,\n        to_block_size,\n        num_heads,\n        plan_from_length,\n        plan_num_rand_blocks,\n        window_block_left=1,\n        window_block_right=1,\n        global_block_top=1,\n        global_block_bottom=1,\n        global_block_left=1,\n        global_block_right=1,\n    ):\n        \"\"\"\n        Create adjacency list of random attention.
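\n\n        For example (illustrative values): with num_heads=12, 8 blocks per sequence and\n        num_rand_blocks=2, the result is a list of 12 arrays, each of shape (6, 2); row r of an\n        array holds the random to-block indices assigned to from-block r+1 (the first and last\n        blocks are global and receive no random blocks).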
\n\n        Args:\n            from_seq_length: int. length of from sequence.\n            to_seq_length: int. length of to sequence.\n            from_block_size: int. size of block in from sequence.\n            to_block_size: int. size of block in to sequence.\n            num_heads: int. total number of heads.\n            plan_from_length: list. plan from length where num_random_blocks are chosen from.\n            plan_num_rand_blocks: list. number of rand blocks within the plan.\n            window_block_left: int. number of blocks of window to left of a block.\n            window_block_right: int. number of blocks of window to right of a block.\n            global_block_top: int. number of blocks at the top.\n            global_block_bottom: int. number of blocks at the bottom.\n            global_block_left: int. Number of blocks globally used to the left.\n            global_block_right: int. Number of blocks globally used to the right.\n\n        Returns:\n            adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by\n            num_rand_blocks\n        \"\"\"\n        # using this method when from_seq_length not in [1024, 3072, 4096]\n\n        assert (\n            from_seq_length // from_block_size == to_seq_length // to_block_size\n        ), \"Error: the number of blocks needs to be the same!\"\n\n        assert from_seq_length in plan_from_length, \"Error: from sequence length not in plan!\"\n\n        # Total number of blocks in the mask\n        num_blocks = from_seq_length // from_block_size\n        # Number of blocks per plan\n        plan_block_length = np.array(plan_from_length) // from_block_size\n        # till when to follow plan\n        max_plan_idx = plan_from_length.index(from_seq_length)\n        # Random attention adjacency list\n        rand_attn = [\n            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)\n            for _ in range(num_heads)\n        ]\n\n        # We will go iteratively over the plan blocks and pick random number of\n        # Attention blocks from the legally allowed blocks\n        for plan_idx in range(max_plan_idx + 1):\n            rnd_r_cnt = 0\n            if plan_idx > 0:\n                # set the row for all from_blocks starting from 0 to\n                # plan_block_length[plan_idx-1]\n                # column indices start from plan_block_length[plan_idx-1] and end at\n                # plan_block_length[plan_idx]\n                if plan_num_rand_blocks[plan_idx] > 0:\n                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))\n                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))\n                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):\n                        for h in range(num_heads):\n                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(\n                                block_id=blk_rw_idx,\n                                to_start_block_id=plan_block_length[plan_idx - 1],\n                                to_end_block_id=plan_block_length[plan_idx],\n                                num_rand_blocks=plan_num_rand_blocks[plan_idx],\n                                window_block_left=window_block_left,\n                                window_block_right=window_block_right,\n                                global_block_left=global_block_left,\n                                global_block_right=global_block_right,\n                            )\n\n                for pl_id in range(plan_idx):\n                    if plan_num_rand_blocks[pl_id] == 0:\n                        continue\n                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):\n                        rnd_r_cnt = 0\n                        to_start_block_id = 0\n                        if pl_id > 0:\n                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))\n                            to_start_block_id = plan_block_length[pl_id - 1]\n                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))\n                        for h in range(num_heads):\n                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(\n                                block_id=blk_rw_idx,\n                                to_start_block_id=to_start_block_id,\n                                to_end_block_id=plan_block_length[pl_id],\n                                num_rand_blocks=plan_num_rand_blocks[pl_id],\n                                window_block_left=window_block_left,\n                                window_block_right=window_block_right,\n                                global_block_left=global_block_left,\n                                global_block_right=global_block_right,\n                            )
\n\n            if plan_num_rand_blocks[plan_idx] == 0:\n                continue\n            curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))\n            from_start_block_id = global_block_top\n            to_start_block_id = 0\n            if plan_idx > 0:\n                rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))\n                from_start_block_id = plan_block_length[plan_idx - 1]\n                to_start_block_id = plan_block_length[plan_idx - 1]\n\n            for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):\n                for h in range(num_heads):\n                    rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(\n                        block_id=blk_rw_idx,\n                        to_start_block_id=to_start_block_id,\n                        to_end_block_id=plan_block_length[plan_idx],\n                        num_rand_blocks=plan_num_rand_blocks[plan_idx],\n                        window_block_left=window_block_left,\n                        window_block_right=window_block_right,\n                        global_block_left=global_block_left,\n                        global_block_right=global_block_right,\n                    )\n\n        for nh in range(num_heads):\n            rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]\n\n        return rand_attn\n\n    @staticmethod\n    def _get_single_block_row_attention(\n        block_id,\n        to_start_block_id,\n        to_end_block_id,\n        num_rand_blocks,\n        window_block_left=1,\n        window_block_right=1,\n        global_block_left=1,\n        global_block_right=1,\n    ):\n        \"\"\"\n        For a single row block get random row attention.\n\n        Args:\n            block_id: int. block id of row.\n            to_start_block_id: int. random attention column start id.\n            to_end_block_id: int. random attention column end id.\n            num_rand_blocks: int. number of random blocks to be selected.\n            window_block_left: int. number of blocks of window to left of a block.\n            window_block_right: int. number of blocks of window to right of a block.\n            global_block_left: int. Number of blocks globally used to the left.\n            global_block_right: int. Number of blocks globally used to the right.\n\n        Returns:\n            row containing the random attention vector of size num_rand_blocks.\n        \"\"\"
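\n        # Strategy: permute all candidate to-blocks once, then scan the permutation and keep the\n        # first num_rand_blocks entries that are not illegal, i.e. not inside this row's sliding\n        # window, not a global block, and not one of the two special-cased blocks handled below.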
\n        # list of to_blocks from which to choose random attention\n        to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)\n        # permute the blocks\n        perm_block = np.random.permutation(to_block_list)\n\n        # illegal blocks for the current block id, using window\n        illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))\n\n        # Add blocks at the start and at the end\n        illegal_blocks.extend(list(range(global_block_left)))\n        illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))\n\n        # The second from_block cannot choose random attention on second last to_block\n        if block_id == 1:\n            illegal_blocks.append(to_end_block_id - 2)\n\n        # The second last from_block cannot choose random attention on second to_block\n        if block_id == to_end_block_id - 2:\n            illegal_blocks.append(1)\n\n        selected_random_blocks = []\n\n        for i in range(to_end_block_id - to_start_block_id):\n            if perm_block[i] not in illegal_blocks:\n                selected_random_blocks.append(perm_block[i])\n            if len(selected_random_blocks) == num_rand_blocks:\n                break\n        return np.array(selected_random_blocks, dtype=np.int32)\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird\nclass BigBirdSelfOutput(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n    def forward(self, hidden_states, input_tensor):\n        hidden_states = self.dense(hidden_states)\n        hidden_states = self.dropout(hidden_states)\n        hidden_states = self.LayerNorm(hidden_states + input_tensor)\n        return hidden_states\n\n\nclass BigBirdAttention(nn.Module):\n    def __init__(self, config, seed=None):\n        super().__init__()\n        self.attention_type = config.attention_type\n        self.config = config\n        self.seed = seed\n\n        if self.config.attention_type == \"original_full\":\n            self.self = BigBirdSelfAttention(config)\n        elif self.config.attention_type == \"block_sparse\":\n            self.self = BigBirdBlockSparseAttention(config, seed)\n        else:\n            raise ValueError(\n                f\"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}\"\n            )\n\n        self.output = BigBirdSelfOutput(config)\n\n    def set_attention_type(self, value: str):\n        if value not in [\"original_full\", \"block_sparse\"]:\n            raise ValueError(\n                f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n            )\n        # attention type is already correctly set\n        if value == self.attention_type:\n            return\n\n        self.attention_type = value\n        if value == \"original_full\":\n            # copy all weights to new full attention class\n            attn_weights = BigBirdSelfAttention(self.config)\n        else:\n            # copy all weights to new sparse attention class\n            attn_weights = BigBirdBlockSparseAttention(self.config, self.seed)\n\n        attn_weights.query = self.self.query\n        attn_weights.value = self.self.value\n        attn_weights.key = self.self.key\n        self.self = attn_weights\n        self.attention_type = value\n\n        if not self.training:\n            self.self.eval()\n\n    def forward(\n        self,\n        hidden_states,\n        attention_mask=None,\n        head_mask=None,\n        encoder_hidden_states=None,\n        encoder_attention_mask=None,\n        past_key_value=None,\n        output_attentions=False,\n        # block_sparse config\n        band_mask=None,\n        
from_mask=None,\n to_mask=None,\n from_blocked_mask=None,\n to_blocked_mask=None,\n ):\n\n if self.attention_type == \"original_full\":\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n else:\n assert (\n encoder_hidden_states is None\n ), \"BigBird cannot be used as a decoder when config.attention_type != 'original_full'\"\n self_outputs = self.self(\n hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions\n )\n\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird\nclass BigBirdIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird\nclass BigBirdOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BigBirdLayer(nn.Module):\n def __init__(self, config, seed=None):\n super().__init__()\n self.config = config\n self.attention_type = config.attention_type\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = BigBirdAttention(config, seed=seed)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = BigBirdAttention(config)\n self.intermediate = BigBirdIntermediate(config)\n self.output = BigBirdOutput(config)\n\n def set_attention_type(self, value: str):\n if value not in [\"original_full\", \"block_sparse\"]:\n raise ValueError(\n f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n )\n # attention type is already correctly set\n if value == self.attention_type:\n return\n self.attention_type = value\n self.attention.set_attention_type(value)\n\n if self.add_cross_attention:\n self.crossattention.set_attention_type(value)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n band_mask=None,\n from_mask=None,\n to_mask=None,\n blocked_encoder_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = 
self.attention(\n            hidden_states,\n            attention_mask,\n            head_mask,\n            encoder_hidden_states=encoder_hidden_states,\n            encoder_attention_mask=encoder_attention_mask,\n            past_key_value=self_attn_past_key_value,\n            output_attentions=output_attentions,\n            band_mask=band_mask,\n            from_mask=from_mask,\n            to_mask=to_mask,\n            from_blocked_mask=blocked_encoder_mask,\n            to_blocked_mask=blocked_encoder_mask,\n        )\n        attention_output = self_attention_outputs[0]\n\n        # if decoder, the last output is tuple of self-attn cache\n        if self.is_decoder:\n            outputs = self_attention_outputs[1:-1]\n            present_key_value = self_attention_outputs[-1]\n        else:\n            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights\n\n        cross_attn_present_key_value = None\n        if self.is_decoder and encoder_hidden_states is not None:\n            if not hasattr(self, \"crossattention\"):\n                raise ValueError(\n                    f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with \"\n                    \"cross-attention layers by setting `config.add_cross_attention=True`\"\n                )\n\n            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n            cross_attention_outputs = self.crossattention(\n                attention_output,\n                attention_mask,\n                head_mask,\n                encoder_hidden_states,\n                encoder_attention_mask,\n                cross_attn_past_key_value,\n                output_attentions,\n            )\n            attention_output = cross_attention_outputs[0]\n            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights\n\n            # add cross-attn cache to positions 3,4 of present_key_value tuple\n            cross_attn_present_key_value = cross_attention_outputs[-1]\n            present_key_value = present_key_value + cross_attn_present_key_value\n\n        layer_output = apply_chunking_to_forward(\n            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n        )\n\n        outputs = (layer_output,) + outputs\n\n        # if decoder, return the attn key/values as the last output\n        if self.is_decoder:\n            outputs = outputs + (present_key_value,)\n\n        return outputs\n\n    def feed_forward_chunk(self, attention_output):\n        intermediate_output = self.intermediate(attention_output)\n        layer_output = self.output(intermediate_output, attention_output)\n        return layer_output\n\n\nclass BigBirdEncoder(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.attention_type = config.attention_type\n\n        self.layer = nn.ModuleList(\n            [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]\n        )\n\n    def set_attention_type(self, value: str):\n        if value not in [\"original_full\", \"block_sparse\"]:\n            raise ValueError(\n                f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n            )\n        # attention type is already correctly set\n        if value == self.attention_type:\n            return\n        self.attention_type = value\n        for layer in self.layer:\n            layer.set_attention_type(value)\n\n    def forward(\n        self,\n        hidden_states,\n        attention_mask=None,\n        head_mask=None,\n        encoder_hidden_states=None,\n        encoder_attention_mask=None,\n        past_key_values=None,\n        use_cache=None,\n        output_attentions=False,\n        output_hidden_states=False,\n        band_mask=None,\n        from_mask=None,\n        to_mask=None,\n        blocked_encoder_mask=None,\n        return_dict=True,\n    ):\n        all_hidden_states = () if output_hidden_states else None\n        all_self_attentions = () if output_attentions else None\n        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else 
None\n\n next_decoder_cache = () if use_cache else None\n\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warn(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n band_mask,\n from_mask,\n to_mask,\n blocked_encoder_mask,\n )\n else:\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n band_mask,\n from_mask,\n to_mask,\n blocked_encoder_mask,\n past_key_value,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird\nclass BigBirdPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird\nclass BigBirdLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = BigBirdPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = 
self.decoder(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird\nclass BigBirdOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BigBirdLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird\nclass BigBirdOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird\nclass BigBirdPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BigBirdLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BigBirdPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = BigBirdConfig\n load_tf_weights = load_tf_weights_in_big_bird\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nBIG_BIRD_START_DOCSTRING = r\"\"\"\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config (:class:`~transformers.BigBirdConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nBIG_BIRD_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.BigBirdTokenizer`. See\n :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? 
<../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n            more detail.\n        return_dict (:obj:`bool`, `optional`):\n            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@dataclass\nclass BigBirdForPreTrainingOutput(ModelOutput):\n    \"\"\"\n    Output type of :class:`~transformers.BigBirdForPreTraining`.\n\n    Args:\n        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n            Total loss as the sum of the masked language modeling loss and the next sequence prediction\n            (classification) loss.\n        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation\n            before SoftMax).\n        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n            of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n            Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n            sequence_length, sequence_length)`.\n\n            Attention weights after the attention softmax, used to compute the weighted average in the self-attention\n            heads.\n    \"\"\"\n\n    loss: Optional[torch.FloatTensor] = None\n    prediction_logits: torch.FloatTensor = None\n    seq_relationship_logits: torch.FloatTensor = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n    attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@add_start_docstrings(\n    \"The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.\",\n    BIG_BIRD_START_DOCSTRING,\n)\nclass BigBirdModel(BigBirdPreTrainedModel):\n    \"\"\"\n\n    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n    cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`\n    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n    input to the forward pass.
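\n\n    Example (an illustrative sketch using a freshly initialized configuration rather than a\n    pretrained checkpoint)::\n\n        >>> from transformers import BigBirdConfig, BigBirdModel\n        >>> # decoder use requires is_decoder=True, add_cross_attention=True and full attention\n        >>> config = BigBirdConfig(is_decoder=True, add_cross_attention=True, attention_type=\"original_full\")\n        >>> model = BigBirdModel(config)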
\n    \"\"\"\n\n    def __init__(self, config, add_pooling_layer=True):\n        super().__init__(config)\n        self.attention_type = self.config.attention_type\n        self.config = config\n\n        self.block_size = self.config.block_size\n\n        self.embeddings = BigBirdEmbeddings(config)\n        self.encoder = BigBirdEncoder(config)\n\n        if add_pooling_layer:\n            self.pooler = nn.Linear(config.hidden_size, config.hidden_size)\n            self.activation = nn.Tanh()\n        else:\n            self.pooler = None\n            self.activation = None\n\n        if self.attention_type != \"original_full\" and config.add_cross_attention:\n            logger.warning(\n                \"When using `BigBirdForCausalLM` as a decoder, `attention_type` must be `original_full`. Setting `attention_type=original_full`\"\n            )\n            self.set_attention_type(\"original_full\")\n\n        self.init_weights()\n\n    def get_input_embeddings(self):\n        return self.embeddings.word_embeddings\n\n    def set_input_embeddings(self, value):\n        self.embeddings.word_embeddings = value\n\n    def set_attention_type(self, value: str):\n        if value not in [\"original_full\", \"block_sparse\"]:\n            raise ValueError(\n                f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n            )\n        # attention type is already correctly set\n        if value == self.attention_type:\n            return\n        self.attention_type = value\n        self.encoder.set_attention_type(value)\n\n    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n    @add_code_sample_docstrings(\n        tokenizer_class=_TOKENIZER_FOR_DOC,\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids=None,\n        attention_mask=None,\n        token_type_ids=None,\n        position_ids=None,\n        head_mask=None,\n        inputs_embeds=None,\n        encoder_hidden_states=None,\n        encoder_attention_mask=None,\n        past_key_values=None,\n        use_cache=None,\n        output_attentions=None,\n        output_hidden_states=None,\n        return_dict=None,\n    ):\n        r\"\"\"\n        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n            the model is configured as a decoder.\n        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n            Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # in order to use block_sparse attention, sequence_length has to be at least\n # bigger than all global attentions: 2 * block_size\n # + sliding tokens: 3 * block_size\n # + random tokens: 2 * num_random_blocks * block_size\n max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size\n if self.attention_type == \"block_sparse\" and seq_length <= max_tokens_to_attend:\n # change attention_type from block_sparse to original_full\n sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)\n logger.warning(\n \"Attention type 'block_sparse' is not possible if sequence_length: \"\n f\"{sequence_length} <= num global tokens: 2 * config.block_size \"\n \"+ min. 
num sliding tokens: 3 * config.block_size \"\n \"+ config.num_random_blocks * config.block_size \"\n \"+ additional buffer: config.num_random_blocks * config.block_size \"\n f\"= {max_tokens_to_attend} with config.block_size \"\n f\"= {self.config.block_size}, config.num_random_blocks \"\n f\"= {self.config.num_random_blocks}.\"\n \"Changing attention type to 'original_full'...\"\n )\n self.set_attention_type(\"original_full\")\n\n if self.attention_type == \"block_sparse\":\n (\n padding_len,\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n inputs_embeds,\n ) = self._pad_to_block_size(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n pad_token_id=self.config.pad_token_id,\n )\n else:\n padding_len = 0\n\n if self.attention_type == \"block_sparse\":\n blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(\n attention_mask, self.block_size\n )\n extended_attention_mask = None\n\n elif self.attention_type == \"original_full\":\n blocked_encoder_mask = None\n band_mask = None\n from_mask = None\n to_mask = None\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, device\n )\n else:\n raise ValueError(\n f\"attention_type can either be original_full or block_sparse, but is {self.attention_type}\"\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n band_mask=band_mask,\n from_mask=from_mask,\n to_mask=to_mask,\n blocked_encoder_mask=blocked_encoder_mask,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n\n pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None\n\n # undo padding\n if padding_len > 0:\n # unpad 
`sequence_output` because the calling function is expecting a length == input_ids.size(1)\n sequence_output = sequence_output[:, :-padding_len]\n\n if not return_dict:\n return (sequence_output, pooler_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooler_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n @staticmethod\n def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int):\n\n batch_size, seq_length = attention_mask.size()\n assert (\n seq_length % block_size == 0\n ), f\"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}.\"\n\n def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):\n \"\"\"\n Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_blocked_mask: 2D Tensor of shape [batch_size,\n from_seq_length//from_block_size, from_block_size].\n to_blocked_mask: int32 Tensor of shape [batch_size,\n to_seq_length//to_block_size, to_block_size].\n\n Returns:\n float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,\n 3*to_block_size].\n \"\"\"\n exp_blocked_to_pad = torch.cat(\n [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2\n )\n band_mask = torch.einsum(\"blq,blk->blqk\", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)\n band_mask.unsqueeze_(1)\n return band_mask\n\n blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size)\n band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask)\n\n from_mask = attention_mask.view(batch_size, 1, seq_length, 1)\n to_mask = attention_mask.view(batch_size, 1, 1, seq_length)\n\n return blocked_encoder_mask, band_mask, from_mask, to_mask\n\n def _pad_to_block_size(\n self,\n input_ids: torch.Tensor,\n attention_mask: torch.Tensor,\n token_type_ids: torch.Tensor,\n position_ids: torch.Tensor,\n inputs_embeds: torch.Tensor,\n pad_token_id: int,\n ):\n \"\"\"A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.\"\"\"\n # padding\n block_size = self.config.block_size\n\n input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape\n batch_size, seq_len = input_shape[:2]\n\n padding_len = (block_size - seq_len % block_size) % block_size\n if padding_len > 0:\n logger.info(\n f\"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of \"\n f\"`config.block_size`: {block_size}\"\n )\n if input_ids is not None:\n input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id)\n if position_ids is not None:\n # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings\n position_ids = F.pad(position_ids, (0, padding_len), value=pad_token_id)\n if inputs_embeds is not None:\n input_ids_padding = inputs_embeds.new_full(\n (batch_size, padding_len),\n self.config.pad_token_id,\n dtype=torch.long,\n )\n inputs_embeds_padding = self.embeddings(input_ids_padding)\n inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)\n\n attention_mask = F.pad(attention_mask, (0, padding_len), value=False) # no attention on the padding tokens\n token_type_ids = F.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 
0\n\n return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds\n\n\nclass BigBirdForPreTraining(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BigBirdModel(config, add_pooling_layer=True)\n self.cls = BigBirdPreTrainingHeads(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be\n added to masked_lm loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be\n in ``[0, 1]``:\n\n - 0 indicates sequence B is a continuation of sequence A,\n - 1 indicates sequence B is a random sequence.\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n\n Example::\n\n >>> from transformers import BigBirdTokenizer, BigBirdForPreTraining\n >>> import torch\n\n >>> tokenizer = BigBirdTokenizer.from_pretrained('bigbird-roberta-base')\n >>> model = BigBirdForPreTraining.from_pretrained('bigbird-roberta-base')\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> seq_relationship_logits = outputs.seq_relationship_logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output, pooled_output = outputs[:2]\n\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n total_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if next_sentence_label is not None and total_loss is not None:\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = total_loss + next_sentence_loss\n\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return 
BigBirdForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n seq_relationship_logits=seq_relationship_score,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"BigBird Model with a `language modeling` head on top. \"\"\", BIG_BIRD_START_DOCSTRING)\nclass BigBirdForMaskedLM(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.bert = BigBirdModel(config)\n self.cls = BigBirdOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n effective_batch_size = input_shape[0]\n\n # add a dummy token\n assert self.config.pad_token_id is not None, \"The PAD token should be defined for generation\"\n attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)\n dummy_token = torch.full(\n (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device\n )\n input_ids = torch.cat([input_ids, dummy_token], dim=1)\n\n 
return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n\n@add_start_docstrings(\n \"\"\"BigBird Model with a `language modeling` head on top for CLM fine-tuning. \"\"\", BIG_BIRD_START_DOCSTRING\n)\nclass BigBirdForCausalLM(BigBirdPreTrainedModel):\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`\")\n\n self.bert = BigBirdModel(config)\n self.cls = BigBirdOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n Returns:\n\n Example::\n\n >>> from transformers import BigBirdTokenizer, BigBirdForCausalLM, BigBirdConfig\n >>> import torch\n\n >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')\n >>> config = BigBirdConfig.from_pretrained(\"google/bigbird-roberta-base\")\n >>> config.is_decoder = True\n >>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past\n\n\nclass BigBirdClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n self.config = config\n\n def forward(self, 
features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = ACT2FN[self.config.hidden_act](x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n BIG_BIRD_START_DOCSTRING,\n)\nclass BigBirdForSequenceClassification(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.bert = BigBirdModel(config)\n self.classifier = BigBirdClassificationHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n \"\"\",\n BIG_BIRD_START_DOCSTRING,\n)\nclass BigBirdForMultipleChoice(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BigBirdModel(config)\n self.sequence_summary = SequenceSummary(config)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(\n BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\")\n )\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n pooled_output = self.sequence_summary(sequence_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n BIG_BIRD_START_DOCSTRING,\n)\nclass BigBirdForTokenClassification(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BigBirdModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass BigBirdForQuestionAnsweringHead(nn.Module):\n \"\"\"Head for question answering tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.intermediate = BigBirdIntermediate(config)\n self.output = BigBirdOutput(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, encoder_output):\n hidden_states = self.dropout(encoder_output)\n hidden_states = self.intermediate(hidden_states)\n hidden_states = self.output(hidden_states, encoder_output)\n hidden_states = self.qa_outputs(hidden_states)\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"\n BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n BIG_BIRD_START_DOCSTRING,\n)\nclass BigBirdForQuestionAnswering(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n config.num_labels = 2\n self.num_labels = 
config.num_labels\n self.sep_token_id = config.sep_token_id\n\n self.bert = BigBirdModel(config, add_pooling_layer=False)\n self.qa_classifier = BigBirdForQuestionAnsweringHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/bigbird-base-trivia-itc\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n question_lengths=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)\n\n if question_lengths is None and input_ids is not None:\n # assuming input_ids format: <cls> <question> <sep> context <sep>\n question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1\n question_lengths.unsqueeze_(1)\n\n logits_mask = None\n if question_lengths is not None:\n # setting question-token logits to `-inf` via a mask\n logits_mask = self.prepare_question_mask(question_lengths, seqlen)\n if token_type_ids is None:\n token_type_ids = (~logits_mask).long()\n logits_mask.unsqueeze_(2)\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n logits = self.qa_classifier(sequence_output)\n\n if logits_mask is not None:\n # removing question tokens from the competition\n logits = logits - logits_mask * 1e6\n\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, splitting adds an extra dimension; squeeze it\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = 
loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n @staticmethod\n def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):\n # q_lengths -> (bz, 1)\n mask = torch.arange(0, maxlen).to(q_lengths.device)\n mask.unsqueeze_(0) # -> (1, maxlen)\n mask = mask < q_lengths\n return mask\n" ]
[ [ "torch.nn.functional.softmax", "torch.transpose", "torch.cat", "torch.zeros", "torch.nn.Embedding", "numpy.concatenate", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.einsum", "numpy.arange", "torch.from_numpy", "numpy.stack", "torch.tensor", "tensorflow.train.list_variables", "torch.arange", "numpy.zeros", "torch.nn.functional.pad", "torch.full", "tensorflow.train.load_variable", "torch.nn.Linear", "torch.minimum", "numpy.transpose", "numpy.array", "numpy.sum", "tensorflow.saved_model.load", "numpy.random.seed", "torch.nn.LayerNorm", "torch.nn.Tanh", "torch.matmul", "numpy.random.permutation", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
itsjatin135s/Ekchhat
[ "66d1d14314c75a2937350a467afa571ed4a32fe4" ]
[ "routes.py" ]
[ "from flask import render_template, redirect, url_for, flash,request\nfrom forms import ContactUsForm,DonateForm,PartnerForm\nfrom models import ContactUs,Donate,Partner\nfrom __init__ import db, app\n\n\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nfrom webdriver_manager.chrome import ChromeDriverManager\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\n\n\n#success page\n\n# routes for index,register,login,logout,error...\n\[email protected]('/' ,methods=['GET','POST'])\n \ndef contact(): \n forms = ContactUsForm()\n if forms.validate_on_submit(): \n contactus = ContactUs(name=forms.name.data,\n email=forms.email.data, address=forms.address.data,phone=forms.phone.data,comments=forms.comments.data)\n db.session.add(contactus)\n db.session.commit()\n #flash('hurreey account created','success')\n #return redirect(url_for('home'))\n #return redirect('contact')\n\n return render_template('index.html', forms=forms)\n\[email protected]('/Donate_Food' ,methods=['GET','POST'])\n\ndef donate():\n\t\t#driver = webdriver.Chrome()\n\t\tproducts=[] #List to store name of the product\n\t\tprices=[] #List to store price of the product\n\t\tratings=[] #List to store rating of the product\n\t\tdriver.get(\"https://www.flipkart.com/search?q=nokia+mobiles&sid=tyy%2C4io&as=on&as-show=on&otracker=AS_QueryStore_OrganicAutoSuggest_1_1_na_na_na&otracker1=AS_QueryStore_OrganicAutoSuggest_1_1_na_na_na&as-pos=1&as-type=RECENT&suggestionId=nokia+mobiles%7CMobiles&requestId=34c5d1f7-8967-44ef-82e4-d7d691ad0f72&as-backfill=on\")\n\n\t\tcontent = driver.page_source\n\t\tsoup = BeautifulSoup(content)\n\t\tfor a in soup.findAll('a',href=True, attrs={'class':'_31qSD5'}):\n\t\t\tname=a.find('div', attrs={'class':'_3wU53n'})\n\t\t\tprice=a.find('div', attrs={'class':'_1vC4OE _2rQ-NK'})\n\t\t\trating=a.find('div', attrs={'class':'hGSR34 _2beYZw'})\n\t\t\tproducts.append(name.text)\n\t\t\tprices.append(price.text)\n \t\t#ratings.append(rating.text)\n\n\t\tdf = pd.DataFrame({'Product Name':products,'Price':prices})\n\t\tdf.to_csv('products.csv', index=False, encoding='utf-8')\n\t\treturn \"Success\"\n\n \n\"\"\"def donate(): \n forms = DonateForm()\n if forms.validate_on_submit(): \n donatefood = Donate(name=forms.name.data,\n email=forms.email.data, address=forms.address.data,phone=forms.phone.data,food=forms.food.data)\n db.session.add(donatefood)\n db.session.commit()\n #flash('hurreey account created','success')\n \n\n return render_template('donate_food.html', forms=forms)\"\"\"\n\[email protected]('/Partner' ,methods=['GET','POST'])\n \ndef partner(): \n forms = PartnerForm()\n if forms.validate_on_submit(): \n partner = Partner(orgname=forms.orgname.data,ownername=forms.ownername.data,\n email=forms.email.data, phone=forms.phone.data,state=forms.state.data,city=forms.city.data,address=forms.address.data)\n db.session.add(partner)\n db.session.commit()\n \n\n import smtplib\n\n from string import Template\n\n from email.mime.multipart import MIMEMultipart\n from email.mime.text import MIMEText\n\n MY_ADDRESS = 'your_mail_id'\n PASSWORD = 'your_password'\n\n \n def get_contacts(filename):\n \"\"\"\n Return two lists names, emails containing names and email addresses\n read from a file specified by filename.\n \"\"\"\n \n names = []\n emails = []\n with open(filename, mode='r', encoding='utf-8') as contacts_file:\n for a_contact in contacts_file:\n names.append(a_contact.split()[0])\n emails.append(a_contact.split()[1])\n return names, emails\n\n def 
read_template(filename):\n \"\"\"\n Returns a Template object comprising the contents of the \n file specified by filename.\n \"\"\"\n \n with open(filename, 'r', encoding='utf-8') as template_file:\n template_file_content = template_file.read()\n return Template(template_file_content)\n\n def main():\n names, emails = get_contacts('mycontact.txt') # read contacts\n message_template = read_template('message.txt')\n\n # set up the SMTP server\n s = smtplib.SMTP(host='smtp.gmail.com', port=587)\n s.starttls()\n s.login(MY_ADDRESS, PASSWORD)\n\n # For each contact, send the email:\n for name, email in zip(names, emails):\n msg = MIMEMultipart() # create a message\n\n # add in the actual person name to the message template\n message = message_template.substitute(PERSON_NAME=name.title())\n\n # Prints out the message body for our sake\n print(message)\n\n # setup the parameters of the message\n msg['From']=MY_ADDRESS\n msg['To']=email\n msg['Subject']=\"Thanks For Joining\"\n \n # add in the message body\n msg.attach(MIMEText(message, 'plain'))\n \n # send the message via the server set up earlier.\n s.send_message(msg)\n del msg\n \n # Terminate the SMTP session and close the connection\n s.quit()\n main()\n \n\n \n\n\n \n\n return render_template('partner.html', forms=forms)\n\[email protected]('/error')\ndef error():\n return render_template('error.html')\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
oahul14/MetTrack
[ "dce04ad9bb61a0a1c4becafd25c932bb242d73c0" ]
[ "armageddon/ensemble.py" ]
[ "import numpy as np\nimport pandas as pd\nimport scipy.special as ssp\nimport scipy.optimize as sop \n\nclass Dist():\n\n prob_val = 0.1\n\n def __init__(self, prob_vals):\n self.prob_vals = prob_vals\n \n def velocity_dist(self,v):\n return ssp.erf(v/(11*np.sqrt(2))) - (v/11)*(np.sqrt(2/np.pi)) * np.exp(-1*(v**2)/(2*(11**2))) - self.prob_val\n\n def density_dist(self, rho):\n return 0.5*( 1 + ssp.erf((rho-3000)/(1000*np.sqrt(2))) ) - self.prob_val\n\n def inverse_radius_distribution(self, rmin, rmax):\n return self.prob_vals*(rmax-rmin) + rmin \n\n def inverse_strength_distribution(self,ymin=1e3,ymax=10e6):\n return ymin * (10**(self.prob_vals * np.log10(ymax/ymin)))\n\n def inverse_angle_distribution(self,amin=0,amax=np.pi/2):\n return np.arccos(np.sqrt(self.prob_vals))\n\n def inverse_velocity_distribution(self,v_guess=(50-11)/2):\n v_array = []\n for prob in self.prob_vals:\n self.prob_val = prob\n v_val = sop.newton_krylov(self.velocity_dist,v_guess)\n v_array.append(v_val)\n v_np = np.array(v_array)\n return v_np\n\n def inverse_density_distribution(self, rho_guess=(3000)):\n rho_array = []\n for prob in self.prob_vals:\n self.prob_val = prob\n rho_val = sop.diagbroyden(self.density_dist,rho_guess)\n rho_array.append(rho_val)\n rho_np = np.array(rho_array)\n return rho_np\n \n\ndef solve_ensemble(\n planet,\n fiducial_impact,\n variables,\n radians=False,\n rmin=8, rmax=12,\n ):\n \"\"\"\n Run asteroid simulation for a distribution of initial conditions and\n find the burst distribution\n\n Parameters\n ----------\n\n planet : object\n The Planet class instance on which to perform the ensemble calculation\n\n fiducial_impact : dict\n Dictionary of the fiducial values of radius, angle, strength, velocity\n and density\n\n variables : list\n List of strings of all impact parameters to be varied in the ensemble\n calculation\n\n rmin : float, optional\n Minimum radius, in m, to use in the ensemble calculation,\n if radius is one of the parameters to be varied.\n\n rmax : float, optional\n Maximum radius, in m, to use in the ensemble calculation,\n if radius is one of the parameters to be varied.\n\n Returns\n -------\n\n ensemble : DataFrame\n DataFrame with columns of any parameters that are varied and the\n airburst altitude\n \"\"\"\n\n #convert to degrees\n if radians:\n fiducial_impact['angle'] = fiducial_impact['angle'] * 180/np.pi\n\n #Number of samples\n N = 500\n prob_distribution = np.random.uniform(0.0,1.0,N)\n\n distribution = Dist(prob_distribution)\n\n ensemble_df = pd.DataFrame()\n\n for var in variables:\n # Remove these as you implement each distribution\n if var == 'radius':\n radius_dist = distribution.inverse_radius_distribution(rmin,rmax)\n fiducial_impact['radius'] = radius_dist\n ensemble_df['radius'] = radius_dist\n if var == 'angle':\n angle_dist = distribution.inverse_angle_distribution()\n angle_dist = angle_dist*180/np.pi #convert to degrees\n fiducial_impact['angle'] = angle_dist\n ensemble_df['angle'] = angle_dist\n if var == 'strength':\n strength_dist = distribution.inverse_strength_distribution()\n fiducial_impact['strength'] = strength_dist\n ensemble_df['strength'] = strength_dist\n if var == 'velocity':\n velocity_dist = distribution.inverse_velocity_distribution()\n impact_dist = np.sqrt( (11e3)**2 + (velocity_dist*1000)**2 )\n fiducial_impact['velocity'] = impact_dist\n ensemble_df['velocity'] = impact_dist\n if var == 'density':\n density_dist = distribution.inverse_density_distribution()\n fiducial_impact['density'] = density_dist\n 
ensemble_df['density'] = density_dist\n\n #check for parameters in fiducial_impact that are not in variables\n const_vals = np.setdiff1d([*fiducial_impact], variables)\n \n for val in const_vals:\n fiducial_impact[val] = [fiducial_impact[val]] * N\n fiducial_impact[val] = np.array(fiducial_impact[val])\n \n burst_altitude = []\n \n for rad,ang,vel,dens,stren in np.stack([fiducial_impact['radius'], fiducial_impact['angle'], \n fiducial_impact['velocity'],fiducial_impact['density'], \n fiducial_impact['strength']], axis = -1):\n output = planet.get_only_outcome(rad,vel,dens,stren,ang, dt=0.1)\n if 'burst_altitude' in output:\n burst_altitude.append(output['burst_altitude'])\n else:\n burst_altitude.append(0.0)\n \n ensemble_df['burst_altitude'] = np.array(burst_altitude)\n \n return ensemble_df\n" ]
[ [ "numpy.sqrt", "scipy.optimize.newton_krylov", "numpy.setdiff1d", "numpy.stack", "pandas.DataFrame", "numpy.log10", "scipy.optimize.diagbroyden", "numpy.exp", "numpy.random.uniform", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
usc-isi-i2/missing-fact
[ "834a0b4531170b4a108f765e19d02bd7446e0563", "834a0b4531170b4a108f765e19d02bd7446e0563" ]
[ "missingfact/nn/util.py", "missingfact/models/missing_fact/span_relation_pred_factatt_model.py" ]
[ "import torch\nfrom allennlp.nn.util import replace_masked_values, masked_max\n\n\ndef seq2vec_seq_aggregate(seq_tensor, mask, aggregate, bidirectional, dim=1):\n \"\"\"\n Takes the aggregation of sequence tensor\n\n :param seq_tensor: Batched sequence requires [batch, seq, hs]\n :param mask: binary mask with shape batch, seq_len, 1\n :param aggregate: max, avg, sum\n :param dim: The dimension to take the max. for batch, seq, hs it is 1\n :return:\n \"\"\"\n\n seq_tensor_masked = seq_tensor * mask.unsqueeze(-1)\n aggr_func = None\n if aggregate == \"last\":\n if seq_tensor.dim() > 3:\n seq = get_final_encoder_states_after_squashing(seq_tensor, mask, bidirectional)\n else:\n seq = get_final_encoder_states(seq_tensor, mask, bidirectional)\n elif aggregate == \"max\":\n seq = masked_max(seq_tensor, mask.unsqueeze(-1).expand_as(seq_tensor), dim=dim)\n elif aggregate == \"min\":\n seq = -masked_max(-seq_tensor, mask.unsqueeze(-1).expand_as(seq_tensor), dim=dim)\n elif aggregate == \"sum\":\n aggr_func = torch.sum\n seq = aggr_func(seq_tensor_masked, dim=dim)\n elif aggregate == \"avg\":\n aggr_func = torch.sum\n seq = aggr_func(seq_tensor_masked, dim=dim)\n seq_lens = torch.sum(mask, dim=dim) # this returns batch_size, .. 1 ..\n masked_seq_lens = replace_masked_values(seq_lens, (seq_lens != 0).float(), 1.0)\n masked_seq_lens = masked_seq_lens.unsqueeze(dim=dim).expand_as(seq)\n # print(seq.shape)\n # print(masked_seq_lens.shape)\n seq = seq / masked_seq_lens\n\n return seq\n\n\ndef get_final_encoder_states_after_squashing(embedded_text, text_mask, bidirectional):\n # print(embedded_text.size())\n squashed_shape = [-1, embedded_text.size()[-2], embedded_text.size()[-1]]\n # print(squashed_shape)\n squashed_text = embedded_text.contiguous().view(*squashed_shape)\n squash_mask_shape = [squashed_text.size()[0], squashed_text.size()[1]]\n squashed_mask = text_mask.contiguous().view(*squash_mask_shape)\n squashed_final_seq = get_final_encoder_states(squashed_text, squashed_mask, bidirectional)\n # print(squashed_final_seq.size())\n output_size = [x for x in embedded_text.size()[:-2]] + [-1]\n return squashed_final_seq.contiguous().view(*output_size)\n\n\ndef get_final_encoder_states(encoder_outputs: torch.Tensor,\n mask: torch.Tensor,\n bidirectional: bool = False) -> torch.Tensor:\n \"\"\"\n Modified over the original Allennlp function\n\n Given the output from a ``Seq2SeqEncoder``, with shape ``(batch_size, sequence_length,\n encoding_dim)``, this method returns the final hidden state for each element of the batch,\n giving a tensor of shape ``(batch_size, encoding_dim)``. This is not as simple as\n ``encoder_outputs[:, -1]``, because the sequences could have different lengths. We use the\n mask (which has shape ``(batch_size, sequence_length)``) to find the final state for each batch\n instance.\n\n Additionally, if ``bidirectional`` is ``True``, we will split the final dimension of the\n ``encoder_outputs`` into two and assume that the first half is for the forward direction of the\n encoder and the second half is for the backward direction. We will concatenate the last state\n for each encoder dimension, giving ``encoder_outputs[:, -1, :encoding_dim/2]`` concated with\n ``encoder_outputs[:, 0, encoding_dim/2:]``.\n \"\"\"\n # These are the indices of the last words in the sequences (i.e. length sans padding - 1). 
We\n # are assuming sequences are right padded.\n # Shape: (batch_size,)\n last_word_indices = mask.sum(1).long() - 1\n\n # handle -1 cases\n ll_ = (last_word_indices != -1).long()\n last_word_indices = last_word_indices * ll_\n\n batch_size, _, encoder_output_dim = encoder_outputs.size()\n expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)\n # Shape: (batch_size, 1, encoder_output_dim)\n final_encoder_output = encoder_outputs.gather(1, expanded_indices)\n final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)\n if bidirectional:\n final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]\n final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]\n final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)\n return final_encoder_output", "import logging\nfrom typing import Dict, Optional, List, Any\n\nimport torch\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.data import Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules import FeedForward, Seq2SeqEncoder, MatrixAttention, InputVariationalDropout\nfrom allennlp.modules import TextFieldEmbedder\nfrom allennlp.modules.matrix_attention import DotProductMatrixAttention\nfrom allennlp.modules.span_extractors import SpanExtractor\nfrom allennlp.nn import InitializerApplicator\nfrom allennlp.nn.util import replace_masked_values, masked_softmax, combine_tensors, \\\n weighted_sum, masked_mean\nfrom allennlp.training.metrics import CategoricalAccuracy, BooleanAccuracy\nfrom torch.nn.functional import softmax\n\nfrom missingfact.models.missing_fact.utils import add_relation_predictions, add_tuple_predictions\nfrom missingfact.models.missing_fact.utils import get_agg_rep\nfrom missingfact.models.missing_fact.utils import get_embedding\nfrom missingfact.models.missing_fact.utils import get_text_representation\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\nlogger.setLevel(logging.WARN)\n\n\[email protected](\"span_relation_pred_factatt\")\nclass SpanRelationPredFactAttModel(Model):\n \"\"\"\n This ``Model`` implements the main answer span + relation based model (KGG) described in\n What's Missing: A Knowledge Gap Guided Approach for Multi-hop Question Answering (EMNLP '19)\n \"\"\"\n\n def __init__(self, vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n coverage_ff: FeedForward,\n relation_predictor: FeedForward,\n scale_relation_loss: float = 1.0,\n aggregate: str = \"max\",\n combination: str = \"x,y\",\n answer_choice_combination: Optional[str] = None,\n coverage_combination: Optional[str] = None,\n var_dropout: float = 0.0,\n use_projection: bool = False,\n ignore_spans: bool = True,\n ignore_relns: bool = False,\n ignore_ann: bool = False,\n span_extractor: Optional[SpanExtractor] = None,\n reln_ff: Optional[FeedForward] = None,\n attention: Optional[MatrixAttention] = None,\n encoder: Optional[Seq2SeqEncoder] = None,\n initializer: InitializerApplicator = InitializerApplicator()) -> None:\n \"\"\"\n :param vocab: AllenNLP Vocabulary\n :param text_field_embedder: AllenNLP Textfield embedder\n :param coverage_ff: Feedforward network that computes the \"Fact-Relevance\" score_f i.e. 
how\n well does the fact \"cover\" the question + answer\n :param relation_predictor: Feedforward network that predicts the relation label R_j\n :param scale_relation_loss: Scalar used to scale the relation loss term, \\lambda\n :param aggregate: Pooling function used to aggregate question/fact vector representations in\n \"Relation Prediction Score\". Choices: max, avg, last\n :param combination: Combination string used to combine vector representation \\bigotimes\n :param answer_choice_combination: If set, use this combination string instead of combination\n for combining the answer-based and choice-based fact representation\n :param coverage_combination: If set, use this combination string instead of combination\n for combining the question-choice-based fact rep and fact rep\n :param var_dropout: Variational dropout probability on the input embeddings\n :param use_projection: If set to true, learn a projector to map relation representations to\n a #rel-dimensional vector. Otherwise, the relation predictor should produce embeddings that\n match the #rels.\n :param ignore_spans: If set to true, don't use span representation of the answers in the\n fact_choice_question_rep (default: true)\n :param ignore_relns: If set to true, don't use the relation labels/scores (no relation\n representations computed or scored)\n :param ignore_ann: If set to true, ignore all auxiliary annotation i.e. spans and relations\n Use the entire fact to compute answer span-based representations. No loss computed against\n the relation label. Note that latent relation representations will still be computed\n :param span_extractor: SpanExtractor used to compute answer span representation\n :param reln_ff: Feedforward used to calculate the relation prediction score\n :param attention: Attention function used\n :param encoder: Encoder used to convert seq of word embeddings into contextual (e.g. 
LSTM)\n representations\n :param initializer: Initializer used for parameters\n \"\"\"\n super(SpanRelationPredFactAttModel, self).__init__(vocab)\n self._text_field_embedder = text_field_embedder\n self._coverage_ff = coverage_ff\n if attention:\n self._attention = attention\n else:\n self._attention = DotProductMatrixAttention()\n if var_dropout > 0.0:\n self._var_dropout = InputVariationalDropout(var_dropout)\n else:\n self._var_dropout = None\n\n self._num_relations = vocab.get_vocab_size(namespace=\"relation_labels\")\n\n self._ignore_spans = ignore_spans\n self._aggregate = aggregate\n self._scale_relation_loss = scale_relation_loss\n if span_extractor is None and not ignore_spans:\n raise ConfigurationError(\"ignore_spans set to False but no span_extractor provided!\")\n self._span_extractor = span_extractor\n self._relation_predictor = relation_predictor\n # simple projector\n if use_projection:\n self._relation_projector = torch.nn.Linear(self._relation_predictor.get_output_dim(),\n self._num_relations)\n else:\n self._relation_projector = None\n self._combination = combination\n if answer_choice_combination:\n self._answer_choice_combination = answer_choice_combination\n else:\n self._answer_choice_combination = combination\n\n if coverage_combination:\n self._coverage_combination = coverage_combination\n else:\n self._coverage_combination = combination\n self._ignore_ann = ignore_ann\n self._ignore_relns = ignore_relns\n if reln_ff is None and not ignore_relns:\n raise ConfigurationError(\"ignore_relns set to False but no reln_ff provided!\")\n self._reln_ff = reln_ff\n self._encoder = encoder\n self._aggr_label_accuracy = BooleanAccuracy()\n self._aggr_choice_accuracy = CategoricalAccuracy()\n self._relation_loss = torch.nn.BCEWithLogitsLoss()\n self._choice_loss = torch.nn.CrossEntropyLoss()\n initializer(self)\n\n def get_text_representation(self, textfield, wrapping_dims):\n return get_text_representation(textfield, wrapping_dims, self._text_field_embedder,\n self._encoder, self._aggregate)\n\n def merge_dimensions(self, input_tensor):\n input_size = input_tensor.size()\n if len(input_size) <= 2:\n raise RuntimeError(\"No dimension to distribute: \" + str(input_size))\n\n # Squash batch_size and time_steps into a single axis; result has shape\n # (batch_size * time_steps, input_size).\n squashed_shape = [-1] + [x for x in input_size[2:]]\n return input_tensor.contiguous().view(*squashed_shape)\n\n def add_dimension(self, input_tensor, dim, num):\n \"\"\"\n Expands the input tensor by introducing an additional dimension at dim with size num\n \"\"\"\n input_size = input_tensor.size()\n if dim < 0:\n dim = len(input_size) + dim + 1\n output_size = [x for x in input_size[0:dim]] + [num] + [x for x in input_size[dim:]]\n return input_tensor.unsqueeze(dim).expand(output_size)\n\n def forward(self, # type: ignore\n question: Dict[str, torch.LongTensor],\n choices_list: Dict[str, torch.LongTensor],\n choice_kb: Dict[str, torch.LongTensor],\n answer_text: Dict[str, torch.LongTensor],\n fact: Dict[str, torch.LongTensor],\n answer_spans: torch.IntTensor,\n relations: torch.IntTensor = None,\n relation_label: torch.IntTensor = None,\n answer_id: torch.IntTensor = None,\n metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n # B X C X Ct X D\n embedded_choice, choice_mask = get_embedding(choices_list, 1, self._text_field_embedder,\n self._encoder, self._var_dropout)\n # B X C X D\n # agg_choice, agg_choice_mask = 
get_agg_rep(embedded_choice, choice_mask, 1, self._encoder, self._aggregate)\n num_choices = embedded_choice.size()[1]\n batch_size = embedded_choice.size()[0]\n # B X Qt X D\n embedded_question, question_mask = get_embedding(question, 0, self._text_field_embedder,\n self._encoder, self._var_dropout)\n # B X D\n agg_question, agg_question_mask = get_agg_rep(embedded_question, question_mask, 0,\n self._encoder, self._aggregate)\n\n # B X Ft X D\n embedded_fact, fact_mask = get_embedding(fact, 0, self._text_field_embedder, self._encoder,\n self._var_dropout)\n # B X D\n agg_fact, agg_fact_mask = get_agg_rep(embedded_fact, fact_mask, 0, self._encoder,\n self._aggregate)\n\n # ==============================================\n # Interaction between fact and question\n # ==============================================\n # B x Ft x Qt\n fact_question_att = self._attention(embedded_fact, embedded_question)\n fact_question_mask = self.add_dimension(question_mask, 1, fact_question_att.shape[1])\n masked_fact_question_att = replace_masked_values(fact_question_att,\n fact_question_mask, -1e7)\n # B X Ft\n fact_question_att_max = masked_fact_question_att.max(dim=-1)[0].squeeze(-1)\n fact_question_att_softmax = masked_softmax(fact_question_att_max, fact_mask)\n # B X D\n fact_question_att_rep = weighted_sum(embedded_fact, fact_question_att_softmax)\n # B*C X D\n cmerged_fact_question_att_rep = self.merge_dimensions(\n self.add_dimension(fact_question_att_rep, 1, num_choices))\n\n # ==============================================\n # Interaction between fact and answer choices\n # ==============================================\n\n # B*C X Ft X D\n cmerged_embedded_fact = self.merge_dimensions(\n self.add_dimension(embedded_fact, 1, num_choices))\n cmerged_fact_mask = self.merge_dimensions(self.add_dimension(fact_mask, 1, num_choices))\n\n # B*C X Ct X D\n cmerged_embedded_choice = self.merge_dimensions(embedded_choice)\n cmerged_choice_mask = self.merge_dimensions(choice_mask)\n\n # B*C X Ft X Ct\n cmerged_fact_choice_att = self._attention(cmerged_embedded_fact, cmerged_embedded_choice)\n cmerged_fact_choice_mask = self.add_dimension(cmerged_choice_mask, 1,\n cmerged_fact_choice_att.shape[1])\n masked_cmerged_fact_choice_att = replace_masked_values(cmerged_fact_choice_att,\n cmerged_fact_choice_mask, -1e7)\n\n # B*C X Ft\n cmerged_fact_choice_att_max = masked_cmerged_fact_choice_att.max(dim=-1)[0].squeeze(-1)\n cmerged_fact_choice_att_softmax = masked_softmax(cmerged_fact_choice_att_max,\n cmerged_fact_mask)\n\n # B*C X D\n cmerged_fact_choice_att_rep = weighted_sum(cmerged_embedded_fact,\n cmerged_fact_choice_att_softmax)\n\n # ==============================================\n # Combined fact + choice + question + span rep\n # ==============================================\n if not self._ignore_spans and not self._ignore_ann:\n # B X A\n per_span_mask = (answer_spans >= 0).long()[:, :, 0]\n # B X A X D\n per_span_rep = self._span_extractor(embedded_fact, answer_spans, fact_mask,\n per_span_mask)\n # expanded_span_mask = per_span_mask.unsqueeze(-1).expand_as(per_span_rep)\n\n # B X D\n answer_span_rep = per_span_rep[:, 0, :]\n\n # B*C X D\n cmerged_span_rep = self.merge_dimensions(\n self.add_dimension(answer_span_rep, 1, num_choices))\n fact_choice_question_rep = (cmerged_fact_choice_att_rep +\n cmerged_fact_question_att_rep +\n cmerged_span_rep) / 3\n\n else:\n fact_choice_question_rep = (cmerged_fact_choice_att_rep +\n cmerged_fact_question_att_rep) / 2\n # B*C X D\n cmerged_fact_rep = 
masked_mean(cmerged_embedded_fact,\n cmerged_fact_mask.unsqueeze(-1).expand_as(\n cmerged_embedded_fact),\n 1)\n # B*C X D\n fact_question_combined_rep = combine_tensors(self._coverage_combination,\n [fact_choice_question_rep, cmerged_fact_rep])\n\n # B X C X D\n new_size = [batch_size, num_choices, -1]\n fact_question_combined_rep = fact_question_combined_rep.contiguous().view(*new_size)\n # B X C\n coverage_score = self._coverage_ff(fact_question_combined_rep).squeeze(-1)\n logger.info(\"coverage_score\" + str(coverage_score.shape))\n\n # ==============================================\n # Interaction between spans+choices and KB\n # ==============================================\n\n # B X C X K X Kt x D\n embedded_choice_kb, choice_kb_mask = get_embedding(choice_kb, 2, self._text_field_embedder,\n self._encoder, self._var_dropout)\n num_kb = embedded_choice_kb.size()[2]\n\n # B X A X At X D\n embedded_answer, answer_mask = get_embedding(answer_text, 1, self._text_field_embedder,\n self._encoder, self._var_dropout)\n # B X At X D\n embedded_answer = embedded_answer[:, 0, :, :]\n answer_mask = answer_mask[:, 0, :]\n\n # B*C*K X Kt X D\n ckmerged_embedded_choice_kb = self.merge_dimensions(self.merge_dimensions(\n embedded_choice_kb)\n )\n ckmerged_choice_kb_mask = self.merge_dimensions(self.merge_dimensions(choice_kb_mask))\n\n # B*C X At X D\n cmerged_embedded_answer = self.merge_dimensions(self.add_dimension(embedded_answer,\n 1, num_choices))\n cmerged_answer_mask = self.merge_dimensions(self.add_dimension(answer_mask,\n 1, num_choices))\n # B*C*K X At X D\n ckmerged_embedded_answer = self.merge_dimensions(self.add_dimension(cmerged_embedded_answer,\n 1, num_kb))\n ckmerged_answer_mask = self.merge_dimensions(self.add_dimension(cmerged_answer_mask,\n 1, num_kb))\n # B*C*K X Ct X D\n ckmerged_embedded_choice = self.merge_dimensions(self.add_dimension(cmerged_embedded_choice,\n 1, num_kb))\n ckmerged_choice_mask = self.merge_dimensions(self.add_dimension(cmerged_choice_mask,\n 1, num_kb))\n logger.info(\"ckmerged_choice_mask\" + str(ckmerged_choice_mask.shape))\n\n # == KB rep based on answer span ==\n if self._ignore_ann:\n # B*C*K X Ft X D\n ckmerged_embedded_fact = self.merge_dimensions(self.add_dimension(\n cmerged_embedded_fact, 1, num_kb))\n ckmerged_fact_mask = self.merge_dimensions(self.add_dimension(\n cmerged_fact_mask, 1, num_kb))\n # B*C*K X Kt x Ft\n ckmerged_kb_fact_att = self._attention(ckmerged_embedded_choice_kb,\n ckmerged_embedded_fact)\n ckmerged_kb_fact_mask = self.add_dimension(ckmerged_fact_mask, 1,\n ckmerged_kb_fact_att.shape[1])\n masked_ckmerged_kb_fact_att = replace_masked_values(ckmerged_kb_fact_att,\n ckmerged_kb_fact_mask, -1e7)\n\n # B*C*K X Kt\n ckmerged_kb_answer_att_max = masked_ckmerged_kb_fact_att.max(dim=-1)[0].squeeze(-1)\n else:\n # B*C*K X Kt x At\n ckmerged_kb_answer_att = self._attention(ckmerged_embedded_choice_kb,\n ckmerged_embedded_answer)\n ckmerged_kb_answer_mask = self.add_dimension(ckmerged_answer_mask, 1,\n ckmerged_kb_answer_att.shape[1])\n masked_ckmerged_kb_answer_att = replace_masked_values(ckmerged_kb_answer_att,\n ckmerged_kb_answer_mask, -1e7)\n\n # B*C*K X Kt\n ckmerged_kb_answer_att_max = masked_ckmerged_kb_answer_att.max(dim=-1)[0].squeeze(-1)\n\n ckmerged_kb_answer_att_softmax = masked_softmax(ckmerged_kb_answer_att_max,\n ckmerged_choice_kb_mask)\n\n # B*C*K X D\n kb_answer_att_rep = weighted_sum(ckmerged_embedded_choice_kb,\n ckmerged_kb_answer_att_softmax)\n\n # == KB rep based on answer choice ==\n # B*C*K X Kt x Ct\n 
ckmerged_kb_choice_att = self._attention(ckmerged_embedded_choice_kb,\n ckmerged_embedded_choice)\n ckmerged_kb_choice_mask = self.add_dimension(ckmerged_choice_mask, 1,\n ckmerged_kb_choice_att.shape[1])\n masked_ckmerged_kb_choice_att = replace_masked_values(ckmerged_kb_choice_att,\n ckmerged_kb_choice_mask, -1e7)\n\n # B*C*K X Kt\n ckmerged_kb_choice_att_max = masked_ckmerged_kb_choice_att.max(dim=-1)[0].squeeze(-1)\n ckmerged_kb_choice_att_softmax = masked_softmax(ckmerged_kb_choice_att_max,\n ckmerged_choice_kb_mask)\n\n # B*C*K X D\n kb_choice_att_rep = weighted_sum(ckmerged_embedded_choice_kb,\n ckmerged_kb_choice_att_softmax)\n\n # B*C*K X D\n answer_choice_kb_combined_rep = combine_tensors(self._answer_choice_combination,\n [kb_answer_att_rep, kb_choice_att_rep])\n logger.info(\"answer_choice_kb_combined_rep\" + str(answer_choice_kb_combined_rep.shape))\n\n # ==============================================\n # Relation Predictions\n # ==============================================\n\n # B*C*K x R\n choice_kb_relation_rep = self._relation_predictor(answer_choice_kb_combined_rep)\n new_choice_kb_size = [batch_size * num_choices, num_kb, -1]\n # B*C*K\n merged_choice_kb_mask = (torch.sum(ckmerged_choice_kb_mask, dim=-1) > 0).float()\n if self._num_relations and not self._ignore_ann:\n if self._relation_projector:\n choice_kb_relation_pred = self._relation_projector(choice_kb_relation_rep)\n else:\n choice_kb_relation_pred = choice_kb_relation_rep\n\n # Aggregate the predictions\n # B*C*K\n choice_kb_relation_mask = self.add_dimension(\n merged_choice_kb_mask,\n -1,\n choice_kb_relation_pred.shape[-1])\n choice_kb_relation_pred_masked = replace_masked_values(choice_kb_relation_pred,\n choice_kb_relation_mask,\n -1e7)\n # B*C X K X R\n relation_pred_perkb = choice_kb_relation_pred_masked.contiguous().view(\n *new_choice_kb_size)\n # B*C X R\n relation_pred_max = relation_pred_perkb.max(dim=1)[0].squeeze(1)\n\n # B X C X R\n choice_relation_size = [batch_size, num_choices, -1]\n relation_label_logits = relation_pred_max.contiguous().view(*choice_relation_size)\n relation_label_probs = softmax(relation_label_logits, dim=-1)\n # B X C\n add_relation_predictions(self.vocab, relation_label_probs, metadata)\n # B X C X K X R\n choice_kb_relation_size = [batch_size, num_choices, num_kb, -1]\n relation_predictions = choice_kb_relation_rep.contiguous().view(\n *choice_kb_relation_size)\n add_tuple_predictions(relation_predictions, metadata)\n logger.info(\"relation_predictions\" + str(relation_predictions.shape))\n else:\n relation_label_logits = None\n relation_label_probs = None\n\n if not self._ignore_relns:\n # B X C X D\n expanded_size = [batch_size, num_choices, -1]\n # Aggregate the relation representation\n if self._relation_projector or self._num_relations == 0 or self._ignore_ann:\n # B*C X K X D\n relation_rep_perkb = choice_kb_relation_rep.contiguous().view(*new_choice_kb_size)\n # B*C*K X D\n merged_relation_rep_mask = self.add_dimension(\n merged_choice_kb_mask,\n -1,\n relation_rep_perkb.shape[-1])\n # B*C X K X D\n relation_rep_perkb_mask = merged_relation_rep_mask.contiguous().view(\n *relation_rep_perkb.size())\n # B*C X D\n agg_relation_rep = masked_mean(relation_rep_perkb, relation_rep_perkb_mask, dim=1)\n # B X C X D\n expanded_relation_rep = agg_relation_rep.contiguous().view(*expanded_size)\n else:\n expanded_relation_rep = relation_label_logits\n\n expanded_question_rep = agg_question.unsqueeze(1).expand(expanded_size)\n expanded_fact_rep = 
agg_fact.unsqueeze(1).expand(expanded_size)\n question_fact_rep = combine_tensors(self._combination,\n [expanded_question_rep, expanded_fact_rep])\n\n relation_score_rep = torch.cat([question_fact_rep, expanded_relation_rep], dim=-1)\n relation_score = self._reln_ff(relation_score_rep).squeeze(-1)\n choice_label_logits = (coverage_score + relation_score) / 2\n else:\n choice_label_logits = coverage_score\n logger.info(\"choice_label_logits\" + str(choice_label_logits.shape))\n\n choice_label_probs = softmax(choice_label_logits, dim=-1)\n output_dict = {\"label_logits\": choice_label_logits,\n \"label_probs\": choice_label_probs,\n \"metadata\": metadata}\n if relation_label_logits is not None:\n output_dict[\"relation_label_logits\"] = relation_label_logits\n output_dict[\"relation_label_probs\"] = relation_label_probs\n\n if answer_id is not None or relation_label is not None:\n self.compute_loss_and_accuracy(answer_id, relation_label, relation_label_logits,\n choice_label_logits, output_dict)\n return output_dict\n\n def compute_loss_and_accuracy(self, answer_id, relation_label, relation_label_logits,\n choice_label_logits, output_dict):\n loss = None\n if relation_label is not None and answer_id is not None and relation_label_logits is not None:\n batch_size = answer_id.size()[0]\n # B X 1 x R\n expanded_answer_indices = answer_id.unsqueeze(-1).unsqueeze(-1).expand(\n [batch_size, 1, self._num_relations])\n # B\n relation_mask = (torch.sum(relation_label, dim=-1) > 0).float()\n # B X C X R\n expanded_relation_mask = relation_mask.unsqueeze(1).unsqueeze(2).expand_as(\n relation_label_logits)\n\n # B X C X R\n labelled_relation_mask = relation_label.unsqueeze(1).expand_as(relation_label_logits)\n\n relation_label_perchoice = torch.zeros(labelled_relation_mask.size())\n if torch.cuda.is_available():\n relation_label_perchoice = relation_label_perchoice.cuda()\n # All zeros for incorrect choices and true relation labels for correct choices\n relation_label_perchoice.scatter_(1, expanded_answer_indices, labelled_relation_mask)\n\n\n # mask out the label logits for the unmarked relations\n combined_mask = labelled_relation_mask.clone()\n mask_correct_choices = torch.ones(labelled_relation_mask.size())\n if torch.cuda.is_available():\n mask_correct_choices = mask_correct_choices.cuda()\n # True relation labels for incorrect choices and all ones for correct choices\n combined_mask.scatter_(1, expanded_answer_indices, mask_correct_choices)\n # Also zero out questions with no marked relations\n combined_mask = replace_masked_values(combined_mask, expanded_relation_mask, 0)\n # first replace all zero-ed relations with -1e7 which will result in prob=0 in the bce loss\n masked_relation_logits = replace_masked_values(relation_label_logits, combined_mask,\n -1e7)\n\n loss = self._scale_relation_loss * self._relation_loss(masked_relation_logits,\n relation_label_perchoice)\n # compress B x C X R to get per relation accuracy\n collapsed_label_predictions = (relation_label_logits > 0).float().view([-1, 1])\n collapse_relation_labels = relation_label_perchoice.view([-1, 1])\n collapsed_mask = combined_mask.view([-1, 1]).byte()\n if torch.sum(collapsed_mask).item() > 0:\n self._aggr_label_accuracy(collapsed_label_predictions[collapsed_mask],\n collapse_relation_labels[collapsed_mask])\n\n if answer_id is not None:\n # B X C\n if loss is None:\n loss = self._choice_loss(choice_label_logits, answer_id)\n else:\n loss += self._choice_loss(choice_label_logits, answer_id)\n 
self._aggr_choice_accuracy(choice_label_logits, answer_id, (answer_id >= 0).float())\n\n output_dict[\"loss\"] = loss\n\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n if self._aggr_label_accuracy._total_count == 0:\n self._aggr_label_accuracy._total_count = 1\n return {\n 'label_accuracy': self._aggr_label_accuracy.get_metric(reset),\n 'choice_accuracy': self._aggr_choice_accuracy.get_metric(reset),\n }\n" ]
[ [ "torch.sum", "torch.cat" ], [ "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "torch.cat", "torch.sum", "torch.nn.BCEWithLogitsLoss", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JonathanSum/transformers
[ "27b68f95e4585713b575603545cf520ab9621621" ]
[ "src/transformers/modeling_tf_ctrl.py" ]
[ "# coding=utf-8\n# Copyright 2018 Salesforce and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 CTRL model.\"\"\"\n\n\nimport logging\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .configuration_ctrl import CTRLConfig\nfrom .file_utils import add_start_docstrings, add_start_docstrings_to_callable\nfrom .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, keras_serializable, shape_list\nfrom .tokenization_utils import BatchEncoding\n\n\nlogger = logging.getLogger(__name__)\n\nTF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP = {\"ctrl\": \"https://s3.amazonaws.com/models.huggingface.co/bert/ctrl-tf_model.h5\"}\n\n\ndef angle_defn(pos, i, d_model_size):\n angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model_size))\n return pos * angle_rates\n\n\ndef positional_encoding(position, d_model_size):\n # create the sinusoidal pattern for the positional encoding\n angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size)\n\n sines = np.sin(angle_rads[:, 0::2])\n cosines = np.cos(angle_rads[:, 1::2])\n\n # pos_encoding = tf.cast(np.concatenate([sines, cosines], axis=-1)[np.newaxis, ...], dtype=tf.float32)\n pos_encoding = tf.cast(np.concatenate([sines, cosines], axis=-1), dtype=tf.float32)\n return pos_encoding\n\n\ndef scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):\n # calculate attention\n matmul_qk = tf.matmul(q, k, transpose_b=True)\n\n dk = tf.cast(shape_list(k)[-1], tf.float32)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n\n if mask is not None:\n scaled_attention_logits += mask * -1e4\n\n if attention_mask is not None:\n # Apply the attention mask\n scaled_attention_logits = scaled_attention_logits + attention_mask\n\n attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_weights = attention_weights * head_mask\n\n output = tf.matmul(attention_weights, v)\n\n return output, attention_weights\n\n\nclass TFMultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):\n super().__init__(**kwargs)\n self.output_attentions = output_attentions\n self.num_heads = num_heads\n self.d_model_size = d_model_size\n\n self.depth = int(d_model_size / self.num_heads)\n\n self.Wq = tf.keras.layers.Dense(d_model_size, name=\"Wq\")\n self.Wk = tf.keras.layers.Dense(d_model_size, name=\"Wk\")\n self.Wv = tf.keras.layers.Dense(d_model_size, name=\"Wv\")\n\n self.dense = tf.keras.layers.Dense(d_model_size, name=\"dense\")\n\n def split_into_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, inputs, training=False):\n v, k, q, mask, layer_past, attention_mask, head_mask, use_cache = inputs\n batch_size = shape_list(q)[0]\n\n q = 
self.Wq(q)\n k = self.Wk(k)\n v = self.Wv(v)\n\n q = self.split_into_heads(q, batch_size)\n k = self.split_into_heads(k, batch_size)\n v = self.split_into_heads(v, batch_size)\n\n if layer_past is not None:\n past_key, past_value = tf.unstack(layer_past, axis=0)\n k = tf.concat((past_key, k), axis=-2)\n v = tf.concat((past_value, v), axis=-2)\n\n # to cope with keras serialization\n # we need to cast `use_cache` to correct bool\n # if it is a tensor\n if tf.is_tensor(use_cache):\n if hasattr(use_cache, \"numpy\"):\n use_cache = bool(use_cache.numpy())\n else:\n use_cache = True\n\n if use_cache is True:\n present = tf.stack((k, v), axis=0)\n else:\n present = (None,)\n\n output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)\n scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])\n attn = output[1]\n original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))\n output = self.dense(original_size_attention)\n\n outputs = (output, present)\n if self.output_attentions:\n outputs = outputs + (attn,)\n return outputs\n\n\ndef point_wise_feed_forward_network(d_model_size, dff, name=\"\"):\n return tf.keras.Sequential(\n [tf.keras.layers.Dense(dff, activation=\"relu\", name=\"0\"), tf.keras.layers.Dense(d_model_size, name=\"2\")],\n name=\"ffn\",\n )\n\n\nclass TFEncoderLayer(tf.keras.layers.Layer):\n def __init__(\n self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs\n ):\n super().__init__(**kwargs)\n\n self.multi_head_attention = TFMultiHeadAttention(\n d_model_size, num_heads, output_attentions, name=\"multi_head_attention\"\n )\n self.ffn = point_wise_feed_forward_network(d_model_size, dff, name=\"ffn\")\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name=\"layernorm1\")\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name=\"layernorm2\")\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n\n def call(self, inputs, training=False):\n x, mask, layer_past, attention_mask, head_mask, use_cache = inputs\n normed = self.layernorm1(x)\n attn_outputs = self.multi_head_attention(\n [normed, normed, normed, mask, layer_past, attention_mask, head_mask, use_cache], training=training\n )\n attn_output = attn_outputs[0]\n attn_output = self.dropout1(attn_output, training=training)\n out1 = x + attn_output\n\n out2 = self.layernorm2(out1)\n ffn_output = self.ffn(out2)\n ffn_output = self.dropout2(ffn_output, training=training)\n out2 = out1 + ffn_output\n\n outputs = (out2,) + attn_outputs[1:]\n return outputs\n\n\n@keras_serializable\nclass TFCTRLMainLayer(tf.keras.layers.Layer):\n config_class = CTRLConfig\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.output_hidden_states = config.output_hidden_states\n self.output_attentions = config.output_attentions\n\n self.d_model_size = config.n_embd\n self.num_layers = config.n_layer\n\n self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)\n\n self.w = TFSharedEmbeddings(\n config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name=\"w\"\n )\n\n self.dropout = tf.keras.layers.Dropout(config.embd_pdrop)\n self.h = [\n TFEncoderLayer(\n config.n_embd,\n config.n_head,\n config.dff,\n config.resid_pdrop,\n config.layer_norm_epsilon,\n config.output_attentions,\n name=\"h_._{}\".format(i),\n )\n for i in range(config.n_layer)\n ]\n self.layernorm = 
tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name=\"layernorm\")\n\n def get_input_embeddings(self):\n return self.w\n\n def _resize_token_embeddings(self, new_num_tokens):\n raise NotImplementedError\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n raise NotImplementedError\n\n def call(\n self,\n inputs,\n past=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n use_cache=True,\n training=False,\n ):\n\n if isinstance(inputs, (tuple, list)):\n input_ids = inputs[0]\n past = inputs[1] if len(inputs) > 1 else past\n attention_mask = inputs[2] if len(inputs) > 2 else attention_mask\n token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids\n position_ids = inputs[4] if len(inputs) > 4 else position_ids\n head_mask = inputs[5] if len(inputs) > 5 else head_mask\n inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds\n use_cache = inputs[7] if len(inputs) > 7 else use_cache\n assert len(inputs) <= 8, \"Too many inputs.\"\n elif isinstance(inputs, (dict, BatchEncoding)):\n input_ids = inputs.get(\"input_ids\")\n past = inputs.get(\"past\", past)\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\n position_ids = inputs.get(\"position_ids\", position_ids)\n head_mask = inputs.get(\"head_mask\", head_mask)\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\n use_cache = inputs.get(\"use_cache\", use_cache)\n assert len(inputs) <= 8, \"Too many inputs.\"\n else:\n input_ids = inputs\n\n # If using past key value states, only the last tokens\n # should be given as an input\n if past is not None:\n if input_ids is not None:\n input_ids = input_ids[:, -1:]\n if inputs_embeds is not None:\n inputs_embeds = inputs_embeds[:, -1:]\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1:]\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = shape_list(input_ids)\n input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])\n elif inputs_embeds is not None:\n input_shape = shape_list(inputs_embeds)[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if past is None:\n past_length = 0\n past = [None] * len(self.h)\n else:\n past_length = shape_list(past[0][0])[-2]\n if position_ids is None:\n position_ids = tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]\n position_ids = tf.tile(position_ids, [input_shape[0], 1])\n\n # Attention mask.\n if attention_mask is not None:\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this 
is\n # effectively the same as removing these entirely.\n\n attention_mask = tf.cast(attention_mask, tf.float32)\n attention_mask = (1.0 - attention_mask) * -10000.0\n else:\n attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # head_mask has shape n_layer x batch x n_heads x N x N\n if head_mask is not None:\n raise NotImplementedError\n else:\n head_mask = [None] * self.num_layers\n\n if token_type_ids is not None:\n token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])\n token_type_embeds = self.w(token_type_ids, mode=\"embedding\")\n token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32))\n else:\n token_type_embeds = 0\n position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])\n\n if inputs_embeds is None:\n inputs_embeds = self.w(input_ids, mode=\"embedding\")\n seq_len = input_shape[-1]\n mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)\n\n inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32))\n\n pos_embeds = tf.gather(self.pos_encoding, position_ids)\n\n hidden_states = inputs_embeds + pos_embeds + token_type_embeds\n\n hidden_states = self.dropout(hidden_states, training=training)\n\n output_shape = input_shape + [shape_list(hidden_states)[-1]]\n presents = ()\n all_hidden_states = ()\n all_attentions = []\n for i, (h, layer_past) in enumerate(zip(self.h, past)):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)\n outputs = h([hidden_states, mask, layer_past, attention_mask, head_mask[i], use_cache], training=training)\n hidden_states, present = outputs[:2]\n\n if use_cache is True:\n presents = presents + (present,)\n\n if self.output_attentions:\n all_attentions.append(outputs[2])\n\n hidden_states = self.layernorm(hidden_states)\n hidden_states = tf.reshape(hidden_states, output_shape)\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if use_cache is True:\n outputs = outputs + (presents,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n # let the number of heads free (-1) so we can extract attention even after head pruning\n attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]\n all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)\n outputs = outputs + (all_attentions,)\n return outputs\n\n\nclass TFCTRLPreTrainedModel(TFPreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = CTRLConfig\n pretrained_model_archive_map = TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP\n base_model_prefix = \"transformer\"\n\n\nCTRL_START_DOCSTRING = r\"\"\"\n\n .. 
note::\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having\n all the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors\n in the first positional argument :\n\n - a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n :obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`\n\n Parameters:\n config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nCTRL_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n If `past` is used, optionally only the last `input_ids` have to be input (see `past`).\n\n Indices can be obtained using :class:`transformers.CTRLTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.encode_plus` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model\n (see `past` output below). Can be used to speed up sequential decoding.\n If `past` is used, the user can optionally input only the last `input_ids`\n (those that don't have their past given to this model) of shape :obj:`(batch_size, 1)`\n instead of all `input_ids` of shape :obj:`(batch_size, sequence_length)`.\n attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n If `past` is used, optionally only the last `token_type_ids` have to be input (see `past`).\n\n `What are token type IDs? 
<../glossary.html#token-type-ids>`_\n position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n input_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n If `past` is used, optionally only the last `input_embeds` have to be input (see `past`).\n use_cache (:obj:`bool`):\n If `use_cache` is True, `past` key value states are returned and\n can be used to speed up decoding (see `past`). Defaults to `True`.\n training (:obj:`boolean`, `optional`, defaults to :obj:`False`):\n Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them\n (if set to :obj:`False`) for evaluation.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.\",\n CTRL_START_DOCSTRING,\n)\nclass TFCTRLModel(TFCTRLPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFCTRLMainLayer(config, name=\"transformer\")\n\n @add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:\n last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the last layer of the model.\n past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):\n Contains pre-computed hidden-states (key and values in the attention blocks).\n Can be used (see `past` input) to speed up sequential decoding. 
The token ids which have their past given to this model\n should not be passed as input ids as they have already been computed.\n hidden_states (:obj:`tuple(tf.Tensor)` `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import CTRLTokenizer, TFCTRLModel\n\n tokenizer = CTRLTokenizer.from_pretrained('ctrl')\n model = TFCTRLModel.from_pretrained('ctrl')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n outputs = self.transformer(inputs, **kwargs)\n return outputs\n\n\nclass TFCTRLLMHead(tf.keras.layers.Layer):\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n self.vocab_size = config.vocab_size\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.input_embeddings = input_embeddings\n\n def build(self, input_shape):\n self.bias = self.add_weight(shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"bias\")\n super().build(input_shape)\n\n def call(self, hidden_states):\n hidden_states = self.input_embeddings(hidden_states, mode=\"linear\")\n hidden_states = hidden_states + self.bias\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"The CTRL Model transformer with a language modeling head on top\n (linear layer with weights tied to the input embeddings). 
\"\"\",\n CTRL_START_DOCSTRING,\n)\nclass TFCTRLLMHeadModel(TFCTRLPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFCTRLMainLayer(config, name=\"transformer\")\n\n self.lm_head = TFCTRLLMHead(config, self.transformer.w, name=\"lm_head\")\n\n def get_output_embeddings(self):\n return self.lm_head.input_embeddings\n\n def prepare_inputs_for_generation(self, inputs, past, **kwargs):\n # only last token for inputs_ids if past is defined in kwargs\n if past:\n inputs = tf.expand_dims(inputs[:, -1], -1)\n\n return {\"inputs\": inputs, \"past\": past, \"use_cache\": kwargs[\"use_cache\"]}\n\n @add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:\n prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):\n Contains pre-computed hidden-states (key and values in the attention blocks).\n Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model\n should not be passed as input ids as they have already been computed.\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import CTRLTokenizer, TFCTRLLMHeadModel\n\n tokenizer = CTRLTokenizer.from_pretrained('ctrl')\n model = TFCTRLLMHeadModel.from_pretrained('ctrl')\n\n input_ids = tf.constant([tokenizer.encode(\"Links Hello, my dog is cute\", add_special_tokens=True)])\n outputs = model(input_ids)\n loss, logits = outputs[:2]\n\n \"\"\"\n transformer_outputs = self.transformer(inputs, **kwargs)\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n\n outputs = (lm_logits,) + transformer_outputs[1:]\n\n return outputs # lm_logits, presents, (all hidden_states), (attentions)\n" ]
[ [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.concat", "tensorflow.stack", "tensorflow.cast", "numpy.concatenate", "numpy.arange", "numpy.sin", "tensorflow.gather", "numpy.float32", "tensorflow.tile", "tensorflow.matmul", "tensorflow.is_tensor", "tensorflow.unstack", "tensorflow.keras.layers.Dense", "tensorflow.nn.softmax", "tensorflow.math.sqrt", "tensorflow.transpose", "tensorflow.range", "tensorflow.reshape", "numpy.cos", "tensorflow.expand_dims", "tensorflow.ones", "tensorflow.keras.layers.Dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
sunset768541/ctw-baseline
[ "f303f9ae0477ef2aa1fe56426a28e0ed9a0a89f8" ]
[ "detection/prepare_train_data.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport darknet_tools\nimport json\nimport numpy as np\nimport os\nimport settings\n\nfrom jinja2 import Template\nfrom pythonapi import anno_tools, common_tools\nfrom six.moves import queue\n\n\ndef write_darknet_data():\n if not os.path.exists(settings.DARKNET_BACKUP_DIR):\n os.makedirs(settings.DARKNET_BACKUP_DIR)\n if not os.path.exists(settings.DARKNET_RESULTS_DIR):\n os.makedirs(settings.DARKNET_RESULTS_DIR)\n data = {\n 'classes': settings.NUM_CHAR_CATES + 1,\n 'train': settings.DARKNET_TRAIN_LIST,\n 'valid': settings.DARKNET_VALID_LIST,\n 'names': settings.DARKNET_NAMES,\n 'backup': settings.DARKNET_BACKUP_DIR,\n 'results': settings.DARKNET_RESULTS_DIR,\n }\n with open(settings.DARKNET_DATA, 'w') as f:\n for k, v in sorted(data.items()):\n f.write('{} = {}\\n'.format(k, v))\n\n\ndef write_darknet_cfg():\n with open('yolo-chinese.template.cfg') as f:\n template = Template(f.read())\n with open(settings.DARKNET_CFG, 'w') as f:\n f.write(template.render({\n 'testing': False,\n 'image_size': settings.TRAIN_IMAGE_SIZE,\n 'classes': settings.NUM_CHAR_CATES + 1,\n 'filters': 25 + 5 * (settings.NUM_CHAR_CATES + 1),\n }))\n f.write('\\n')\n\n\ndef write_darknet_names():\n with open(settings.DARKNET_NAMES, 'w') as f:\n for i in range(settings.NUM_CHAR_CATES + 1):\n f.write('{}\\n'.format(i))\n\n\ndef crop_train_images():\n imshape = (2048, 2048, 3)\n cropshape = (settings.TRAIN_IMAGE_SIZE // 4, settings.TRAIN_IMAGE_SIZE // 4)\n cropoverlap = (16, 16)\n\n with open(settings.CATES) as f:\n cates = json.load(f)\n text2cate = {c['text']: c['cate_id'] for c in cates}\n\n if not os.path.isdir(settings.TRAINVAL_CROPPED_DIR):\n os.makedirs(settings.TRAINVAL_CROPPED_DIR)\n\n with open(settings.TRAIN) as f:\n lines = f.read().splitlines()\n with open(settings.VAL) as f:\n lines += f.read().splitlines()\n\n def in_image_ratio(bbox): # bbox is in darknet bbox representation\n xmid, ymid, w, h = bbox\n\n def cutto01(x):\n return max(0, min(1, x))\n Acut = (cutto01(xmid + w / 2) - cutto01(xmid - w / 2)) * (cutto01(ymid + h / 2) - cutto01(ymid - h / 2))\n return Acut / (w * h)\n\n def crop_once(line, write_images):\n anno = json.loads(line.strip())\n image_id = anno['image_id']\n all = []\n for char in anno_tools.each_char(anno):\n if not char['is_chinese']:\n continue\n cate_id = text2cate[char['text']]\n if cate_id >= settings.NUM_CHAR_CATES:\n cate_id = settings.NUM_CHAR_CATES\n all.append((char['adjusted_bbox'], cate_id))\n if write_images:\n image = cv2.imread(os.path.join(settings.TRAINVAL_IMAGE_DIR, anno['file_name']))\n assert image.shape == imshape\n for o in anno['ignore']:\n poly = (np.array(o['polygon'])).astype(np.int32)\n cv2.fillConvexPoly(image, poly, (128, 128, 128))\n cropped_list = list()\n for o in darknet_tools.get_crop_bboxes(imshape, cropshape, cropoverlap):\n xlo = o['xlo']\n xhi = xlo + cropshape[1]\n ylo = o['ylo']\n yhi = ylo + cropshape[0]\n labels = []\n for bbox, cate_id in all:\n x, y, w, h = bbox\n if x > xhi or x + w < xlo or y > yhi or y + h < ylo:\n continue\n bbox = ((x + w / 2 - xlo) / cropshape[1], (y + h / 2 - ylo) / cropshape[0], w / cropshape[1], h / cropshape[0])\n if 0.5 < in_image_ratio(bbox):\n labels.append((bbox, cate_id))\n if 0 < len(labels):\n basename = '{}_{}'.format(image_id, o['name'])\n cropped_file_name = os.path.join(settings.TRAINVAL_CROPPED_DIR, 
'{}.jpg'.format(basename))\n cropped_list.append(cropped_file_name)\n if write_images:\n cropped = image[ylo:yhi, xlo:xhi]\n cv2.imwrite(cropped_file_name, cropped)\n with open(os.path.join(settings.TRAINVAL_CROPPED_DIR, '{}.txt'.format(basename)), 'w') as f:\n for bbox, cate_id in labels:\n f.write('%d %f %f %f %f\\n' % ((cate_id, ) + bbox))\n return cropped_list\n\n q_i = queue.Queue()\n q_i.put(0)\n\n def foo(*args):\n i = q_i.get()\n if i % 100 == 0:\n print('crop trainval', i, '/', len(lines))\n q_i.put(i + 1)\n crop_once(*args)\n common_tools.multithreaded(foo, [(line, True) for line in lines], num_thread=4)\n trainset = []\n for i, line in enumerate(lines):\n if i % 1000 == 0:\n print('list trainval', i, '/', len(lines))\n trainset += crop_once(line, False)\n with open(settings.DARKNET_TRAIN_LIST, 'w') as f:\n for file_name in trainset:\n f.write(file_name)\n f.write('\\n')\n\n\ndef main():\n write_darknet_data()\n write_darknet_cfg()\n write_darknet_names()\n assert os.path.isfile(settings.DARKNET_PRETRAIN) and 79327120 == os.path.getsize(settings.DARKNET_PRETRAIN), \\\n 'please download {} to {}'.format('https://pjreddie.com/media/files/darknet19_448.conv.23', settings.DARKNET_PRETRAIN)\n crop_train_images()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fmi-basel/inter-view
[ "e7ebf616ac15eddf1e0d222930750fb4b113d9fa" ]
[ "inter_view/utils.py" ]
[ "import numpy as np\nimport holoviews as hv\nhv.extension('bokeh', logo=False)\nimport param\nimport panel as pn\nimport matplotlib.pyplot as plt\n\nfrom holoviews.operation.datashader import rasterize\nfrom bokeh.models import WheelZoomTool\nfrom holoviews.core import Store\n\nvalid_rgb_options = [\n k for group in ['style', 'plot', 'norm', 'output']\n for k in Store.options(backend='bokeh')['RGB'][group].allowed_keywords\n]\nvalid_rgb_options.remove(\n 'alpha') # remove option set by sliders on individual channels\n\n# TODO move to color module\nimport colorcet as cc\n\n# repeat colormap to handle unint16 values\n# needed to handle non continuous labels because colormap is stretched (and not cycled)\nlabel_cmap = cc.b_glasbey_hv * 256\n\n\n# bokeh hook workaround --> remove if holoviews finally handle this\ndef zoom_bounds_hook(bounds):\n '''restrict zooming out to given bounds'''\n def _hook(plot, element):\n plot.state.x_range.bounds = (bounds[0], bounds[2])\n plot.state.y_range.bounds = (bounds[1], bounds[3])\n plot.state.select(WheelZoomTool).maintain_focus = False\n\n return _hook\n\n\ndef get_img_dims_coords(img, spacing=1):\n\n img_dims = ['x', 'y', 'z'][:img.ndim]\n spacing = np.broadcast_to(np.array(spacing), img.ndim)\n img_coords = [\n np.arange(d) * s for d, s in zip(img.shape[::-1], spacing[::-1])\n ]\n\n return img_dims, img_coords\n\n\ndef image_to_hvds(img, label, spacing=1):\n '''Converts a 2D/3D image to a holoview dataset to facilitate\n plotting with the correct axis bounds/scaling'''\n\n img_dims, img_coords = get_img_dims_coords(img, spacing)\n\n return hv.Dataset((*(img_coords), img),\n kdims=img_dims,\n vdims=['intensity'],\n label=label)\n\n\nclass HvDataset(param.Parameterized):\n '''Converts a numpy image to holoviews Dataset dynamic map'''\n\n img = param.Array(np.zeros((2, 2), dtype=np.uint8),\n doc='numpy iamge array',\n precedence=-1)\n label = param.String('channel',\n doc='label for the generated hv.Dataset',\n precedence=-1)\n spacing = param.Parameter((1, ), doc='pixel/voxel size', precedence=-1)\n\n _update_counter = param.Integer(0, precedence=-1)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._broadcast_spacing()\n\n @param.depends()\n def _broadcast_spacing(self):\n self.spacing = tuple(\n np.broadcast_to(np.array(self.spacing), self.img.ndim).tolist())\n\n @param.depends('img', watch=True)\n def _update_img(self):\n self._broadcast_spacing()\n self._update_counter += 1\n\n # NOTE dynamic map with dependency directly on array is less responsive (hash computation overhead?)\n @param.depends('_update_counter', 'label')\n def _build_dataset(self):\n return image_to_hvds(self.img, self.label, self.spacing)\n\n @param.depends('spacing')\n def dmap(self):\n return hv.DynamicMap(self._build_dataset, cache_size=1)\n\n\ndef make_composite(imgs, cmaps, mode='max'):\n '''embeds colormap and blend grescale input images into a rgb image'''\n\n _modes = {'max': np.max, 'mean': np.mean}\n\n blending_fun = _modes.get(mode, None)\n\n if blending_fun is None:\n raise NotImplementedError(\n 'blending mode note implemented: {}'.format(mode))\n\n imgs = [(plt.get_cmap(name)(img)[..., :-1] * 255).astype(np.uint8)\n for img, name in zip(imgs, cmaps)]\n\n blended_img = blending_fun(np.asarray(imgs), axis=0)\n return np.rint(blended_img).astype(np.uint8)\n\n\ndef blend_overlay(elems):\n '''Transforms a hv.Overlay of hv.Image into a hv.RGB'''\n\n if not isinstance(elems, hv.Overlay):\n # probably a single channel, do nothing\n return 
elems\n\n imgs = [e.dimension_values(2, flat=False) for e in elems]\n\n if imgs[0].dtype != np.uint8:\n raise ValueError(\n '8 bit images are expected to stack overlays, got {}'.format(\n imgs[0].dtype))\n\n # embed colormap,opacity and blend\n # Note somehow hv.RGB inverts the y axis but not hv.Image???\n cmaps = [e.opts.get().options['cmap'] for e in elems]\n alphas = [e.opts.get().options['alpha'] for e in elems]\n imgs = [(a * img).astype(int) if a < 1.0 else img\n for a, img in zip(alphas, imgs)]\n rgb = make_composite(imgs, cmaps, mode='max')[::-1]\n\n xr = elems.range(0)\n yr = elems.range(1)\n bounds = (xr[1], yr[0], xr[0], yr[1])\n height, width = rgb.shape[:-1]\n\n options = list(elems)[0].opts.get().options\n options = {\n key: val\n for key, val in options.items() if key in valid_rgb_options\n }\n\n return hv.RGB(rgb, bounds=bounds, group='composite').opts(**options)\n\n\ndef split_element(element, axis, values=None):\n '''Applies element.select to all values along axis and returns the result as a list.\n \n Dimension values can also be specified explicitly to select a subset or control the order.'''\n\n new_dims_name = [d.name for d in element.kdims if d.name != axis]\n if values is None:\n values = element.dimension_values(axis, expanded=False)\n\n return tuple(\n element.select(**{\n axis: val\n }).reindex(new_dims_name).relabel(val) for val in values)\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.rint", "matplotlib.pyplot.get_cmap", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Lukasz1928/mobile-robots-control
[ "81820b35dab10b14f58d66079b0a8f82ef819bee" ]
[ "tests/localization/color/utils/test_color_converter.py" ]
[ "import cv2\nfrom unittest import TestCase\nimport numpy as np\nfrom parameterized import parameterized\nfrom mrc.localization.color.utils.color_converter import ColorConverter\nfrom tests.test_utils.read_image import read_image\n\n\nclass TestColorConverterGrayscale(TestCase):\n def setUp(self):\n self.converter = ColorConverter()\n\n self.imageBGR = read_image('localization/color/utils/color_conversion/gray/source.png')\n self.imageRGB = cv2.cvtColor(self.imageBGR, cv2.COLOR_BGR2RGB)\n self.expected_grayscale = read_image('localization/color/utils/color_conversion/gray/gray.png')[:, :, 0]\n\n def test_BGR_to_Grayscale(self):\n grayscale = self.converter.convert_to_grayscale(self.imageBGR, 'BGR')\n np.testing.assert_array_equal(grayscale, self.expected_grayscale)\n\n def test_RGB_to_Grayscale(self):\n grayscale = self.converter.convert_to_grayscale(self.imageRGB, 'RGB')\n np.testing.assert_array_equal(grayscale, self.expected_grayscale)\n\n def test_BGR_to_Grayscale_special(self):\n grayscale = self.converter.convert_to_grayscale(self.imageBGR, 'BGR')\n np.testing.assert_array_equal(grayscale, self.expected_grayscale)\n\n def test_RGB_to_Grayscale_special(self):\n grayscale = self.converter.convert_to_grayscale(self.imageBGR, 'BGR')\n np.testing.assert_array_equal(grayscale, self.expected_grayscale)\n\n\nclass TestColorConverterBinary(TestCase):\n def setUp(self):\n self.converter = ColorConverter()\n\n self.imageBGR = read_image('localization/color/utils/color_conversion/binary/source.png')\n self.imageRGB = cv2.cvtColor(self.imageBGR, cv2.COLOR_BGR2RGB)\n self.expected_images = [read_image('localization/color/utils/color_conversion/binary/{}.png'.format(i))[:, :, 0] for i in\n range(9)]\n\n @parameterized.expand([[i] for i in range(9)])\n def test_BGR_to_binary(self, i):\n binary = self.converter.convert_to_binary(self.imageBGR, i / 8 * 255, 'BGR')\n np.testing.assert_array_equal(binary, self.expected_images[i])\n\n @parameterized.expand([[i] for i in range(9)])\n def test_RGB_to_binary(self, i):\n binary = self.converter.convert_to_binary(self.imageRGB, i / 8 * 255, 'RGB')\n np.testing.assert_array_equal(binary, self.expected_images[i])\n" ]
[ [ "numpy.testing.assert_array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
michaelbenayoun/optimum
[ "21c5809577e2ef5687f293d31d1d3e28288e1bb7" ]
[ "examples/inc/pytorch/multiple-choice/run_swag.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for multiple choice.\n\"\"\"\n# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Optional, Union\n\nimport datasets\nimport numpy as np\nimport torch\nimport transformers\nfrom datasets import load_dataset\nfrom transformers import (\n AutoConfig,\n AutoModelForMultipleChoice,\n AutoTokenizer,\n HfArgumentParser,\n TrainingArguments,\n default_data_collator,\n set_seed,\n)\nfrom transformers.file_utils import PaddingStrategy\nfrom transformers.tokenization_utils_base import PreTrainedTokenizerBase\nfrom transformers.trainer_utils import get_last_checkpoint\nfrom transformers.utils import check_min_version\nfrom transformers.utils.fx import symbolic_trace\n\nimport yaml\nfrom optimum.intel.neural_compressor import (\n IncOptimizer,\n IncPruner,\n IncPruningConfig,\n IncQuantizationConfig,\n IncQuantizationMode,\n IncQuantizer,\n IncTrainer,\n)\nfrom optimum.intel.neural_compressor.quantization import IncQuantizedModelForMultipleChoice\nfrom optimum.intel.neural_compressor.utils import CONFIG_NAME\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n# Will error if the minimal version of Transformers is not installed. 
Remove at your own risks.\ncheck_min_version(\"4.12.0\")\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\n@dataclass\nclass OptimizationArguments:\n \"\"\"\n Arguments pertaining to what type of optimization we are going to apply on the model.\n \"\"\"\n\n quantize: bool = field(\n default=False,\n metadata={\"help\": \"Whether or not to apply quantization.\"},\n )\n quantization_approach: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Quantization approach. Supported approach are static, dynamic and aware_training.\"},\n )\n prune: bool = field(\n default=False,\n metadata={\"help\": \"Whether or not to apply pruning.\"},\n )\n target_sparsity: Optional[float] = field(\n default=None,\n metadata={\"help\": \"Targeted sparsity when pruning the model.\"},\n )\n quantization_config: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Path to the directory containing the YAML configuration file used to control the quantization and \"\n \"tuning behavior.\"\n },\n )\n pruning_config: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Path to the directory containing the YAML configuration file used to control the pruning behavior.\"\n },\n )\n tune_metric: str = field(\n default=\"eval_accuracy\",\n metadata={\"help\": \"Metric used for the tuning strategy.\"},\n )\n perf_tol: Optional[float] = field(\n default=None,\n metadata={\"help\": \"Performance tolerance when optimizing the model.\"},\n )\n verify_loading: bool = field(\n default=False,\n metadata={\"help\": \"Whether or not to verify the loading of the quantized model.\"},\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n train_file: Optional[str] = field(default=None, metadata={\"help\": \"The input training data file (a text file).\"})\n validation_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate the perplexity on (a text file).\"},\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n 
metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n max_seq_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. If passed, sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to the maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n\n def __post_init__(self):\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n\n\n@dataclass\nclass DataCollatorForMultipleChoice:\n \"\"\"\n Data collator that will dynamically pad the inputs for multiple choice received.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n max_length (:obj:`int`, `optional`):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n\n def __call__(self, features):\n label_name = \"label\" if \"label\" in features[0].keys() else \"labels\"\n labels = [feature.pop(label_name) for feature in features]\n batch_size = len(features)\n num_choices = len(features[0][\"input_ids\"])\n flattened_features = [\n [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features\n ]\n flattened_features = sum(flattened_features, [])\n\n batch = self.tokenizer.pad(\n flattened_features,\n padding=self.padding,\n max_length=self.max_length,\n 
pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=\"pt\",\n )\n\n # Un-flatten\n batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}\n # Add back labels\n batch[\"labels\"] = torch.tensor(labels, dtype=torch.int64)\n return batch\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args, optim_args = parser.parse_json_file(\n json_file=os.path.abspath(sys.argv[1])\n )\n else:\n model_args, data_args, training_args, optim_args = parser.parse_args_into_dataclasses()\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n logger.info(f\"Training/evaluation parameters {training_args}\")\n\n # Detecting last checkpoint.\n last_checkpoint = None\n if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:\n last_checkpoint = get_last_checkpoint(training_args.output_dir)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to overcome.\"\n )\n elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:\n logger.info(\n f\"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change \"\n \"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. 
You can easily tweak this behavior (see below).\n\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.train_file is not None or data_args.validation_file is not None:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.train_file.split(\".\")[-1]\n raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)\n else:\n # Downloading and loading the swag dataset from the hub.\n raw_datasets = load_dataset(\"swag\", \"regular\", cache_dir=model_args.cache_dir)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model and tokenizer\n\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model = AutoModelForMultipleChoice.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n # When using your own dataset or a different dataset from swag, you will probably need to change this.\n ending_names = [f\"ending{i}\" for i in range(4)]\n context_name = \"sent1\"\n question_header_name = \"sent2\"\n\n if data_args.max_seq_length is None:\n max_seq_length = tokenizer.model_max_length\n if max_seq_length > 1024:\n logger.warning(\n f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n \"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx.\"\n )\n max_seq_length = 1024\n else:\n if data_args.max_seq_length > tokenizer.model_max_length:\n logger.warning(\n f\"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the\"\n f\"model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.\"\n )\n max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)\n\n # Preprocessing the datasets.\n def preprocess_function(examples):\n first_sentences = [[context] * 4 for context in examples[context_name]]\n question_headers = examples[question_header_name]\n second_sentences = [\n [f\"{header} {examples[end][i]}\" for end in ending_names] for i, header in enumerate(question_headers)\n ]\n\n # Flatten out\n first_sentences = sum(first_sentences, [])\n second_sentences = sum(second_sentences, [])\n\n # Tokenize\n tokenized_examples = tokenizer(\n first_sentences,\n second_sentences,\n truncation=True,\n max_length=max_seq_length,\n padding=\"max_length\" if data_args.pad_to_max_length else False,\n )\n # Un-flatten\n return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}\n\n if training_args.do_train:\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = raw_datasets[\"train\"]\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n with training_args.main_process_first(desc=\"train dataset map pre-processing\"):\n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if training_args.do_eval:\n if \"validation\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = raw_datasets[\"validation\"]\n if data_args.max_eval_samples is not None:\n eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))\n with training_args.main_process_first(desc=\"validation dataset map pre-processing\"):\n eval_dataset = eval_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n # Data collator\n data_collator = (\n default_data_collator\n if data_args.pad_to_max_length\n else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)\n )\n\n # Metric\n def compute_metrics(eval_predictions):\n predictions, label_ids = eval_predictions\n preds = np.argmax(predictions, axis=1)\n return {\"accuracy\": (preds == label_ids).astype(np.float32).mean().item()}\n\n # Initialize our Trainer\n trainer = IncTrainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset if training_args.do_train else None,\n eval_dataset=eval_dataset if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics,\n )\n\n eval_dataloader = trainer.get_eval_dataloader()\n it = iter(eval_dataloader)\n try:\n input_names = next(it).keys()\n except StopIteration:\n input_names = None\n logger.warning(\n \"Unable to determine the names of the inputs of the model to trace, input_names is set to None and \"\n \"model.dummy_inputs().keys() will be used instead.\"\n )\n\n resume_from_checkpoint = training_args.resume_from_checkpoint\n metric_name = optim_args.tune_metric\n\n def take_eval_steps(model, trainer, metric_name, save_metrics=False):\n trainer.model = model\n metrics = trainer.evaluate()\n if save_metrics:\n trainer.save_metrics(\"eval\", metrics)\n logger.info(\"{}: {}\".format(metric_name, metrics.get(metric_name)))\n logger.info(\"Throughput: {} 
samples/sec\".format(metrics.get(\"eval_samples_per_second\")))\n return metrics.get(metric_name)\n\n def eval_func(model):\n return take_eval_steps(model, trainer, metric_name)\n\n def take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint):\n trainer.model_wrapped = model\n trainer.model = model\n checkpoint = None\n if resume_from_checkpoint is not None:\n checkpoint = resume_from_checkpoint\n elif last_checkpoint is not None:\n checkpoint = last_checkpoint\n train_result = trainer.train(pruner, resume_from_checkpoint=checkpoint)\n metrics = train_result.metrics\n trainer.save_model() # Saves the tokenizer too for easy upload\n trainer.log_metrics(\"train\", metrics)\n trainer.save_metrics(\"train\", metrics)\n trainer.save_state()\n\n def train_func(model):\n return take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint)\n\n quantizer = None\n pruner = None\n num_choices = len(eval_dataset[0][\"input_ids\"])\n\n if not optim_args.quantize and not optim_args.prune:\n raise ValueError(\"quantize and prune are both set to False.\")\n\n result_baseline_model = take_eval_steps(model, trainer, metric_name)\n\n default_config = os.path.join(os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir)), \"config\")\n\n if optim_args.quantize:\n\n if not training_args.do_eval:\n raise ValueError(\"do_eval must be set to True for quantization.\")\n\n q8_config = IncQuantizationConfig.from_pretrained(\n optim_args.quantization_config if optim_args.quantization_config is not None else default_config,\n config_file_name=\"quantization.yml\",\n cache_dir=model_args.cache_dir,\n )\n\n # Set metric tolerance if specified\n if optim_args.perf_tol is not None:\n q8_config.set_tolerance(optim_args.perf_tol)\n\n # Set quantization approach if specified\n if optim_args.quantization_approach is not None:\n supported_approach = {\"static\", \"dynamic\", \"aware_training\"}\n if optim_args.quantization_approach not in supported_approach:\n raise ValueError(\n \"Unknown quantization approach. 
Supported approach are \" + \", \".join(supported_approach)\n )\n quant_approach = getattr(IncQuantizationMode, optim_args.quantization_approach.upper()).value\n q8_config.set_config(\"quantization.approach\", quant_approach)\n\n # torch FX used for post-training quantization and quantization aware training\n # dynamic quantization will be added when torch FX is more mature\n if q8_config.get_config(\"quantization.approach\") != IncQuantizationMode.DYNAMIC.value:\n\n if not training_args.do_train:\n raise ValueError(\"do_train must be set to True for static and aware training quantization.\")\n\n # TODO : Remove when dynamic axes support\n if (\n not training_args.dataloader_drop_last\n and eval_dataset.shape[0] % training_args.per_device_eval_batch_size != 0\n ):\n raise ValueError(\n \"The number of samples of the dataset is not a multiple of the batch size.\"\n \"Use --dataloader_drop_last to overcome.\"\n )\n if not data_args.pad_to_max_length:\n raise ValueError(\n \"All the samples must have the same sequence length, use --pad_to_max_length to overcome.\"\n )\n\n q8_config.set_config(\"model.framework\", \"pytorch_fx\")\n model.config.save_pretrained(training_args.output_dir)\n model = symbolic_trace(\n model,\n input_names=input_names,\n batch_size=training_args.per_device_eval_batch_size,\n sequence_length=max_seq_length,\n num_choices=num_choices,\n )\n\n calib_dataloader = trainer.get_train_dataloader()\n inc_quantizer = IncQuantizer(\n model, q8_config, eval_func=eval_func, train_func=train_func, calib_dataloader=calib_dataloader\n )\n quantizer = inc_quantizer.fit()\n\n if optim_args.prune:\n\n if not training_args.do_train:\n raise ValueError(\"do_train must be set to True for pruning.\")\n\n pruning_config = IncPruningConfig.from_pretrained(\n optim_args.pruning_config if optim_args.pruning_config is not None else default_config,\n config_file_name=\"prune.yml\",\n cache_dir=model_args.cache_dir,\n )\n\n # Set targeted sparsity if specified\n if optim_args.target_sparsity is not None:\n pruning_config.set_config(\n \"pruning.approach.weight_compression.target_sparsity\", optim_args.target_sparsity\n )\n\n pruning_start_epoch = pruning_config.get_config(\"pruning.approach.weight_compression.start_epoch\")\n pruning_end_epoch = pruning_config.get_config(\"pruning.approach.weight_compression.end_epoch\")\n\n if pruning_start_epoch > training_args.num_train_epochs - 1:\n logger.warning(\n f\"Pruning end epoch {pruning_start_epoch} is higher than the total number of training epoch \"\n f\"{training_args.num_train_epochs}. No pruning will be applied.\"\n )\n\n if pruning_end_epoch > training_args.num_train_epochs - 1:\n logger.warning(\n f\"Pruning end epoch {pruning_end_epoch} is higher than the total number of training epoch \"\n f\"{training_args.num_train_epochs}. 
The target sparsity will not be reached.\"\n )\n\n inc_pruner = IncPruner(model, pruning_config, eval_func=eval_func, train_func=train_func)\n\n # Creation Pruning object used for IncTrainer training loop\n pruner = inc_pruner.fit()\n\n inc_optimizer = IncOptimizer(model, quantizer=quantizer, pruner=pruner)\n opt_model = inc_optimizer.fit()\n\n _, sparsity = opt_model.report_sparsity()\n result_opt_model = take_eval_steps(opt_model.model, trainer, metric_name, save_metrics=True)\n\n trainer.save_model(training_args.output_dir)\n with open(os.path.join(training_args.output_dir, CONFIG_NAME), \"w\") as f:\n yaml.dump(opt_model.tune_cfg, f, default_flow_style=False)\n\n logger.info(\n f\"Optimized model with final sparsity of {sparsity} and {metric_name} of {result_opt_model} saved to: \"\n f\"{training_args.output_dir}. Original model had an {metric_name} of {result_baseline_model}\"\n )\n\n if optim_args.quantize and optim_args.verify_loading:\n\n # Load the model obtained after Intel Neural Compressor (INC) quantization\n loaded_model = IncQuantizedModelForMultipleChoice.from_pretrained(\n training_args.output_dir,\n input_names=input_names,\n batch_size=training_args.per_device_eval_batch_size,\n sequence_length=max_seq_length,\n num_choices=num_choices,\n )\n loaded_model.eval()\n result_loaded_model = take_eval_steps(loaded_model, trainer, metric_name)\n\n if result_loaded_model != result_opt_model:\n raise ValueError(\"The quantized model was not successfully loaded.\")\n else:\n logger.info(f\"The quantized model was successfully loaded.\")\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.argmax", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iankuoli/OSNet-TopDrop
[ "3ab57ba507e9f8939762e27834137172375cd91c", "3ab57ba507e9f8939762e27834137172375cd91c" ]
[ "torchreid/engine/image/viewpoint_aware.py", "torchreid/engine/image/arc_margin.py" ]
[ "from __future__ import division, print_function, absolute_import\n\nfrom ... import metrics\nfrom ...engine.engine import Engine\nfrom ...losses import FocalLoss, CrossEntropyLoss, ALSRLoss\nfrom .vat import VATLoss\n\nimport math\nimport torch\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n\nclass ImageVAReIDEngine(Engine):\n r\"\"\"Viewpoint-Aware Loss with Angular Regularization engine for image-reid.\n\n Ref: Viewpoint-Aware Loss with Angular Regularization for Person Re-Identification. AAAI, 2020.\n https://arxiv.org/pdf/1912.01300v1.pdf\n\n Args:\n datamanager (DataManager): an instance of ``deepreid.data.ImageDataManager``\n or ``deepreid.data.VideoDataManager``.\n model (nn.Module): model instance.\n optimizer (Optimizer): an Optimizer.\n weight_f (float, optional): weight for focal loss. Default is 1.\n weight_x (float, optional): weight for softmax loss. Default is 1.\n scheduler (LRScheduler, optional): if None, no learning rate decay will be performed.\n use_gpu (bool, optional): use gpu. Default is True.\n label_smooth (bool, optional): use label smoothing regularizer. Default is True.\n\n Examples::\n\n import deepreid\n datamanager = deepreid.data.ImageDataManager(def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:\n ans =[]\n window = nums[0:k]\n window.sort()\n median = nums[k-1-k//2] if k%2 == 1 else (nums[k-1-k//2] + nums[k-1-k//2+1]) / 2\n ans.append(median)\n for ind in range(k, len(nums)):\n window.remove(nums[ind-k])\n bisect_left(window, nums[ind])\n median = nums[ind-k//2] if k%2 == 1 else (nums[ind-k//2] + nums[ind-k//2+1]) / 2\n ans.append(median)\n return ans\n root='path/to/reid-data',\n sources='market1501',\n height=256,\n width=128,\n combineall=False,\n batch_size=32,\n num_instances=4,\n train_sampler='RandomIdentitySampler' # this is important\n )\n model = deepreid.models.build_model(\n name='resnet50',\n num_classes=datamanager.num_train_pids,\n loss='triplet'\n )\n model = model.cuda()\n optimizer = deepreid.optim.build_optimizer(\n model, optim='adam', lr=0.0003\n )\n scheduler = deepreid.optim.build_lr_scheduler(\n optimizer,\n lr_scheduler='single_step',\n stepsize=20\n )\n engine = deepreid.engine.ImageTripletEngine(\n datamanager, model, optimizer, margin=0.3,\n weight_t=0.7, weight_x=1, scheduler=scheduler\n )\n engine.run(\n max_epoch=60,\n save_dir='log/resnet50-triplet-market1501',\n print_freq=10\n )\n \"\"\"\n\n def __init__(\n self,\n datamanager,\n model,\n arc_margin_y,\n arc_margin_v,\n optimizer,\n gamma=2,\n weight_f=1,\n weight_x=1,\n weight_v=1,\n scheduler=None,\n use_gpu=True,\n ):\n super(ImageVAReIDEngine, self).__init__(datamanager, use_gpu)\n\n self.model = model\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.register_model('model', model, optimizer, scheduler)\n\n self.weight_f = weight_f\n self.weight_x = weight_x\n self.weight_v = weight_v\n\n self.arc_embed_y = arc_margin_y\n self.arc_embed_v = arc_margin_v\n self.criterion_x = CrossEntropyLoss(num_classes=self.datamanager.num_train_pids,\n use_gpu=self.use_gpu,\n label_smooth=True)\n self.criterion_f = FocalLoss(gamma=gamma)\n self.criterion_v = ALSRLoss(num_classes=self.datamanager.num_train_pids,\n use_gpu=self.use_gpu,\n label_smooth=True)\n self.centers_yv = torch.zeros(self.datamanager.num_train_pids, 3, 512)\n self.counts_yv = torch.zeros(self.datamanager.num_train_pids, 3)\n\n def forward_backward(self, data):\n imgs, pids, vids = self.parse_data_for_train(data)\n\n if 
self.use_gpu:\n imgs = imgs.cuda()\n pids = pids.cuda()\n vids = vids.cuda()\n\n outputs, features = self.model(imgs)\n embeddings_y = self.arc_embed_y(features, pids)\n embeddings_v = self.arc_embed_v(features, pids*3+vids, weight=self.centers_yv.view(-1, 512))\n\n loss_x = self.compute_loss(self.criterion_x, outputs, pids)\n loss_f = self.compute_loss(self.criterion_f, embeddings_y, pids)\n loss_v = self.compute_loss(self.criterion_v, embeddings_v, (pids, vids))\n loss = self.weight_f * loss_f + self.weight_x * loss_x + self.weight_v * loss_v\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update self.centers_yv & self.counts_yv with a running mean of the features\n for i in range(pids.size(0)):\n self.counts_yv[pids[i], vids[i]] += 1\n tmp = self.counts_yv[pids[i], vids[i]]\n self.centers_yv[pids[i], vids[i]] = ((tmp-1)/tmp) * self.centers_yv[pids[i], vids[i]] + 1/tmp * features[i].detach()\n\n loss_summary = {'loss_x': loss_x.item(),\n 'loss_f': loss_f.item(),\n 'loss_v': loss_v.item(),\n 'acc_x': metrics.accuracy(outputs, pids)[0].item(),\n 'acc_f': metrics.accuracy(embeddings_y, pids)[0].item(),\n }\n return loss_summary\n\n def forward(self, imgs, pids):\n indexs = torch.where(pids < self.arc_embed_y.out_features)\n imgs, pids = imgs[indexs], pids[indexs]\n if self.use_gpu:\n imgs = imgs.cuda()\n pids = pids.cuda()\n\n if imgs.shape[0] == 0:\n return None, None, None, None\n\n outputs, features = self.model(imgs)\n embeddings_y = self.arc_embed_y(features, pids)\n embeddings_v = self.arc_embed_v(features, pids)\n loss_x = self.compute_loss(self.criterion_x, outputs, pids).item()\n loss_f = self.compute_loss(self.criterion_f, embeddings_y, pids).item()\n loss_v = self.compute_loss(self.criterion_f, embeddings_v, pids).item()\n acc_x = metrics.accuracy(outputs, pids)[0].item()\n acc_f = metrics.accuracy(embeddings_y, pids)[0].item()\n return loss_x, loss_f, loss_v, acc_x, acc_f\n", "import torch\nimport torch.nn as nn\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\nimport math\n\n\nclass ArcMarginProduct(nn.Module):\n r\"\"\"Implementation of large margin arc distance:\n Args:\n in_features: size of each input sample\n out_features: size of each output sample\n s: norm of input feature\n m: margin\n cos(theta + m)\n \"\"\"\n def __init__(self, in_features, out_features, s=30.0, m=0.10, easy_margin=True, use_gpu=True):\n super(ArcMarginProduct, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.s = s\n self.m = m\n self.use_gpu = use_gpu\n if use_gpu:\n self.weight = Parameter(torch.FloatTensor(out_features, in_features).cuda())\n else:\n self.weight = Parameter(torch.FloatTensor(out_features, in_features))\n nn.init.xavier_uniform_(self.weight)\n\n self.easy_margin = easy_margin\n self.cos_m = math.cos(m)\n self.sin_m = math.sin(m)\n\n # Make the function cos(theta+m) monotonically decreasing while theta is in [0°,180°]\n self.th = math.cos(math.pi - m)\n self.mm = math.sin(math.pi - m) * m\n\n def forward(self, input: torch.Tensor, label, weight=None):\n # --------------------------- cos(theta) & phi(theta) ---------------------------\n cos_theta = F.linear(F.normalize(input), F.normalize(weight if weight is not None else self.weight))\n sin_theta = torch.sqrt((1.0 - torch.pow(cos_theta, 2)).clamp(0, 1))\n cos_theta_m = cos_theta * self.cos_m - sin_theta * self.sin_m # \\cos(\\theta + m)\n if self.easy_margin:\n cos_theta_m = torch.where(cos_theta > 0, cos_theta_m, cos_theta)\n else:\n cos_theta_m = torch.where(cos_theta > self.th, cos_theta_m, cos_theta - 
self.mm)\n # --------------------------- convert label to one-hot ---------------------------\n one_hot = torch.zeros(cos_theta.size(), device=cos_theta.device)\n one_hot.scatter_(1, label.view(-1, 1).long(), 1)\n # ------------- torch.where(out_i = x_i if condition_i else y_i) -------------\n output = (one_hot * cos_theta_m) + ((1.0 - one_hot) * cos_theta) # you can use torch.where if your torch.__version__ is 0.4\n output *= self.s\n\n return output\n" ]
[ [ "torch.where", "torch.zeros" ], [ "torch.nn.functional.normalize", "torch.FloatTensor", "torch.where", "torch.nn.init.xavier_uniform_", "torch.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
de9uch1/stanza
[ "cafb7d5004842cd3c8a3ac334ce7649bac928830", "cafb7d5004842cd3c8a3ac334ce7649bac928830" ]
[ "stanza/models/parser.py", "stanza/models/lemma/trainer.py" ]
[ "\"\"\"\nEntry point for training and evaluating a dependency parser.\n\nThis implementation combines a deep biaffine graph-based parser with linearization and distance features.\nFor details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.\n\"\"\"\n\n\"\"\"\nTraining and evaluation for the parser.\n\"\"\"\n\nimport sys\nimport os\nimport shutil\nimport time\nfrom datetime import datetime\nimport argparse\nimport numpy as np\nimport random\nimport torch\nfrom torch import nn, optim\n\nfrom stanza.models.depparse.data import DataLoader\nfrom stanza.models.depparse.trainer import Trainer\nfrom stanza.models.depparse import scorer\nfrom stanza.models.common import utils\nfrom stanza.models.common.pretrain import Pretrain\nfrom stanza.models.common.doc import *\nfrom stanza.utils.conll import CoNLL\nfrom stanza.models import _training_logging\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, default='data/depparse', help='Root dir for saving models.')\n parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors.')\n parser.add_argument('--wordvec_file', type=str, default=None, help='Word vectors filename.')\n parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')\n parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')\n parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')\n parser.add_argument('--gold_file', type=str, default=None, help='Output CoNLL-U file.')\n\n parser.add_argument('--mode', default='train', choices=['train', 'predict'])\n parser.add_argument('--lang', type=str, help='Language')\n parser.add_argument('--shorthand', type=str, help=\"Treebank shorthand\")\n\n parser.add_argument('--hidden_dim', type=int, default=400)\n parser.add_argument('--char_hidden_dim', type=int, default=400)\n parser.add_argument('--deep_biaff_hidden_dim', type=int, default=400)\n parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=100)\n parser.add_argument('--word_emb_dim', type=int, default=75)\n parser.add_argument('--char_emb_dim', type=int, default=100)\n parser.add_argument('--tag_emb_dim', type=int, default=50)\n parser.add_argument('--transformed_dim', type=int, default=125)\n parser.add_argument('--num_layers', type=int, default=3)\n parser.add_argument('--char_num_layers', type=int, default=1)\n parser.add_argument('--pretrain_max_vocab', type=int, default=250000)\n parser.add_argument('--word_dropout', type=float, default=0.33)\n parser.add_argument('--dropout', type=float, default=0.5)\n parser.add_argument('--rec_dropout', type=float, default=0, help=\"Recurrent dropout\")\n parser.add_argument('--char_rec_dropout', type=float, default=0, help=\"Recurrent dropout\")\n parser.add_argument('--no_char', dest='char', action='store_false', help=\"Turn off character model.\")\n parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help=\"Turn off pretrained embeddings.\")\n parser.add_argument('--no_linearization', dest='linearization', action='store_false', help=\"Turn off linearization term.\")\n parser.add_argument('--no_distance', dest='distance', action='store_false', help=\"Turn off distance term.\")\n\n parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')\n parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')\n 
parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')\n parser.add_argument('--beta2', type=float, default=0.95)\n\n parser.add_argument('--max_steps', type=int, default=50000)\n parser.add_argument('--eval_interval', type=int, default=100)\n parser.add_argument('--max_steps_before_stop', type=int, default=3000)\n parser.add_argument('--batch_size', type=int, default=5000)\n parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')\n parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')\n parser.add_argument('--save_dir', type=str, default='saved_models/depparse', help='Root dir for saving models.')\n parser.add_argument('--save_name', type=str, default=None, help=\"File name to save the model\")\n\n parser.add_argument('--seed', type=int, default=1234)\n parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())\n parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')\n args = parser.parse_args()\n return args\n\ndef main():\n args = parse_args()\n\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n if args.cpu:\n args.cuda = False\n elif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n args = vars(args)\n print(\"Running parser in {} mode\".format(args['mode']))\n\n if args['mode'] == 'train':\n train(args)\n else:\n evaluate(args)\n\ndef train(args):\n utils.ensure_dir(args['save_dir'])\n model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \\\n else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])\n\n # load pretrained vectors if needed\n pretrain = None\n if args['pretrain']:\n vec_file = args['wordvec_file'] if args['wordvec_file'] else utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])\n pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])\n pretrain = Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])\n\n # load data\n print(\"Loading data with batch size {}...\".format(args['batch_size']))\n train_doc = Document(CoNLL.conll2dict(input_file=args['train_file']))\n train_batch = DataLoader(train_doc, args['batch_size'], args, pretrain, evaluation=False)\n vocab = train_batch.vocab\n dev_doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))\n dev_batch = DataLoader(dev_doc, args['batch_size'], args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)\n\n # pred and gold path\n system_pred_file = args['output_file']\n gold_file = args['gold_file']\n\n # skip training if the language does not have training or dev data\n if len(train_batch) == 0 or len(dev_batch) == 0:\n print(\"Skip training because no data available...\")\n sys.exit(0)\n\n print(\"Training parser...\")\n trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])\n\n global_step = 0\n max_steps = args['max_steps']\n dev_score_history = []\n best_dev_preds = []\n current_lr = args['lr']\n global_start_time = time.time()\n format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'\n\n using_amsgrad = False\n last_best_step = 0\n # start training\n train_loss = 0\n while True:\n do_break = False\n for i, batch in enumerate(train_batch):\n start_time = time.time()\n global_step += 1\n loss = trainer.update(batch, eval=False) # update step\n train_loss += loss\n if global_step % args['log_step'] == 0:\n duration = time.time() - start_time\n print(format_str.format(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), 
global_step,\\\n max_steps, loss, duration, current_lr))\n\n if global_step % args['eval_interval'] == 0:\n # eval on dev\n print(\"Evaluating on dev set...\")\n dev_preds = []\n for batch in dev_batch:\n preds = trainer.predict(batch)\n dev_preds += preds\n dev_preds = utils.unsort(dev_preds, dev_batch.data_orig_idx)\n\n dev_batch.doc.set([HEAD, DEPREL], [y for x in dev_preds for y in x])\n CoNLL.dict2conll(dev_batch.doc.to_dict(), system_pred_file)\n _, _, dev_score = scorer.score(system_pred_file, gold_file)\n\n train_loss = train_loss / args['eval_interval'] # avg loss per batch\n print(\"step {}: train_loss = {:.6f}, dev_score = {:.4f}\".format(global_step, train_loss, dev_score))\n train_loss = 0\n\n # save best model\n if len(dev_score_history) == 0 or dev_score > max(dev_score_history):\n last_best_step = global_step\n trainer.save(model_file)\n print(\"new best model saved.\")\n best_dev_preds = dev_preds\n\n dev_score_history += [dev_score]\n print(\"\")\n\n if global_step - last_best_step >= args['max_steps_before_stop']:\n if not using_amsgrad:\n print(\"Switching to AMSGrad\")\n last_best_step = global_step\n using_amsgrad = True\n trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)\n else:\n do_break = True\n break\n\n if global_step >= args['max_steps']:\n do_break = True\n break\n\n if do_break: break\n\n train_batch.reshuffle()\n\n print(\"Training ended with {} steps.\".format(global_step))\n\n best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1\n print(\"Best dev F1 = {:.2f}, at iteration = {}\".format(best_f, best_eval * args['eval_interval']))\n\ndef evaluate(args):\n # file paths\n system_pred_file = args['output_file']\n gold_file = args['gold_file']\n model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \\\n else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])\n\n # load pretrain; note that we allow the pretrain_file to be non-existent\n pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])\n pretrain = Pretrain(pretrain_file)\n\n # load model\n print(\"Loading model from: {}\".format(model_file))\n use_cuda = args['cuda'] and not args['cpu']\n trainer = Trainer(pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)\n loaded_args, vocab = trainer.args, trainer.vocab\n\n # load config\n for k in args:\n if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':\n loaded_args[k] = args[k]\n\n # load data\n print(\"Loading data with batch size {}...\".format(args['batch_size']))\n doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))\n batch = DataLoader(doc, args['batch_size'], loaded_args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)\n\n if len(batch) > 0:\n print(\"Start evaluation...\")\n preds = []\n for i, b in enumerate(batch):\n preds += trainer.predict(b)\n else:\n # skip eval if dev data does not exist\n preds = []\n preds = utils.unsort(preds, batch.data_orig_idx)\n\n # write to file and score\n batch.doc.set([HEAD, DEPREL], [y for x in preds for y in x])\n CoNLL.dict2conll(batch.doc.to_dict(), system_pred_file)\n\n if gold_file is not None:\n _, _, score = scorer.score(system_pred_file, gold_file)\n\n print(\"Parser score:\")\n print(\"{} {:.2f}\".format(args['shorthand'], score*100))\n\nif __name__ == '__main__':\n main()\n", "\"\"\"\nA trainer class to handle training and testing of models.\n\"\"\"\n\nimport sys\nimport numpy 
as np\nfrom collections import Counter\nimport logging\nimport torch\nfrom torch import nn\nimport torch.nn.init as init\n\nimport stanza.models.common.seq2seq_constant as constant\nfrom stanza.models.common.seq2seq_model import Seq2SeqModel\nfrom stanza.models.common import utils, loss\nfrom stanza.models.lemma import edit\nfrom stanza.models.lemma.vocab import MultiVocab\n\nlogger = logging.getLogger('stanza')\n\ndef unpack_batch(batch, use_cuda):\n \"\"\" Unpack a batch from the data loader. \"\"\"\n if use_cuda:\n inputs = [b.cuda() if b is not None else None for b in batch[:6]]\n else:\n inputs = [b if b is not None else None for b in batch[:6]]\n orig_idx = batch[6]\n return inputs, orig_idx\n\nclass Trainer(object):\n \"\"\" A trainer for training models. \"\"\"\n def __init__(self, args=None, vocab=None, emb_matrix=None, model_file=None, use_cuda=False):\n self.use_cuda = use_cuda\n if model_file is not None:\n # load everything from file\n self.load(model_file, use_cuda)\n else:\n # build model from scratch\n self.args = args\n self.model = None if args['dict_only'] else Seq2SeqModel(args, emb_matrix=emb_matrix, use_cuda=use_cuda)\n self.vocab = vocab\n # dict-based components\n self.word_dict = dict()\n self.composite_dict = dict()\n if not self.args['dict_only']:\n if self.args.get('edit', False):\n self.crit = loss.MixLoss(self.vocab['char'].size, self.args['alpha'])\n logger.debug(\"Running seq2seq lemmatizer with edit classifier...\")\n else:\n self.crit = loss.SequenceLoss(self.vocab['char'].size)\n self.parameters = [p for p in self.model.parameters() if p.requires_grad]\n if use_cuda:\n self.model.cuda()\n self.crit.cuda()\n else:\n self.model.cpu()\n self.crit.cpu()\n self.optimizer = utils.get_optimizer(self.args['optim'], self.parameters, self.args['lr'])\n\n def update(self, batch, eval=False):\n inputs, orig_idx = unpack_batch(batch, self.use_cuda)\n src, src_mask, tgt_in, tgt_out, pos, edits = inputs\n\n if eval:\n self.model.eval()\n else:\n self.model.train()\n self.optimizer.zero_grad()\n log_probs, edit_logits = self.model(src, src_mask, tgt_in, pos)\n if self.args.get('edit', False):\n assert edit_logits is not None\n loss = self.crit(log_probs.view(-1, self.vocab['char'].size), tgt_out.view(-1), \\\n edit_logits, edits)\n else:\n loss = self.crit(log_probs.view(-1, self.vocab['char'].size), tgt_out.view(-1))\n loss_val = loss.data.item()\n if eval:\n return loss_val\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])\n self.optimizer.step()\n return loss_val\n\n def predict(self, batch, beam_size=1):\n inputs, orig_idx = unpack_batch(batch, self.use_cuda)\n src, src_mask, tgt, tgt_mask, pos, edits = inputs\n\n self.model.eval()\n batch_size = src.size(0)\n preds, edit_logits = self.model.predict(src, src_mask, pos=pos, beam_size=beam_size)\n pred_seqs = [self.vocab['char'].unmap(ids) for ids in preds] # unmap to tokens\n pred_seqs = utils.prune_decoded_seqs(pred_seqs)\n pred_tokens = [\"\".join(seq) for seq in pred_seqs] # join chars to be tokens\n pred_tokens = utils.unsort(pred_tokens, orig_idx)\n if self.args.get('edit', False):\n assert edit_logits is not None\n edits = np.argmax(edit_logits.data.cpu().numpy(), axis=1).reshape([batch_size]).tolist()\n edits = utils.unsort(edits, orig_idx)\n else:\n edits = None\n return pred_tokens, edits\n\n def postprocess(self, words, preds, edits=None):\n \"\"\" Postprocess, mainly for handing edits. 
\"\"\"\n assert len(words) == len(preds), \"Lemma predictions must have same length as words.\"\n edited = []\n if self.args.get('edit', False):\n assert edits is not None and len(words) == len(edits)\n for w, p, e in zip(words, preds, edits):\n lem = edit.edit_word(w, p, e)\n edited += [lem]\n else:\n edited = preds # do not edit\n # final sanity check\n assert len(edited) == len(words)\n final = []\n for lem, w in zip(edited, words):\n if len(lem) == 0 or constant.UNK in lem:\n final += [w] # invalid prediction, fall back to word\n else:\n final += [lem]\n return final\n\n def update_lr(self, new_lr):\n utils.change_lr(self.optimizer, new_lr)\n\n def train_dict(self, triples):\n \"\"\" Train a dict lemmatizer given training (word, pos, lemma) triples. \"\"\"\n # accumulate counter\n ctr = Counter()\n ctr.update([(p[0], p[1], p[2]) for p in triples])\n # find the most frequent mappings\n for p, _ in ctr.most_common():\n w, pos, l = p\n if (w,pos) not in self.composite_dict:\n self.composite_dict[(w,pos)] = l\n if w not in self.word_dict:\n self.word_dict[w] = l\n return\n\n def predict_dict(self, pairs):\n \"\"\" Predict a list of lemmas using the dict model given (word, pos) pairs. \"\"\"\n lemmas = []\n for p in pairs:\n w, pos = p\n if (w,pos) in self.composite_dict:\n lemmas += [self.composite_dict[(w,pos)]]\n elif w in self.word_dict:\n lemmas += [self.word_dict[w]]\n else:\n lemmas += [w]\n return lemmas\n\n def skip_seq2seq(self, pairs):\n \"\"\" Determine if we can skip the seq2seq module when ensembling with the frequency lexicon. \"\"\"\n\n skip = []\n for p in pairs:\n w, pos = p\n if (w,pos) in self.composite_dict:\n skip.append(True)\n elif w in self.word_dict:\n skip.append(True)\n else:\n skip.append(False)\n return skip\n\n def ensemble(self, pairs, other_preds):\n \"\"\" Ensemble the dict with statistical model predictions. \"\"\"\n lemmas = []\n assert len(pairs) == len(other_preds)\n for p, pred in zip(pairs, other_preds):\n w, pos = p\n if (w,pos) in self.composite_dict:\n lemma = self.composite_dict[(w,pos)]\n elif w in self.word_dict:\n lemma = self.word_dict[w]\n else:\n lemma = pred\n if lemma is None:\n lemma = w\n lemmas.append(lemma)\n return lemmas\n\n def save(self, filename):\n params = {\n 'model': self.model.state_dict() if self.model is not None else None,\n 'dicts': (self.word_dict, self.composite_dict),\n 'vocab': self.vocab.state_dict(),\n 'config': self.args\n }\n try:\n torch.save(params, filename)\n logger.info(\"Model saved to {}\".format(filename))\n except BaseException:\n logger.warning(\"Saving failed... continuing anyway.\")\n\n def load(self, filename, use_cuda=False):\n try:\n checkpoint = torch.load(filename, lambda storage, loc: storage)\n except BaseException:\n logger.error(\"Cannot load model from {}\".format(filename))\n raise\n self.args = checkpoint['config']\n self.word_dict, self.composite_dict = checkpoint['dicts']\n if not self.args['dict_only']:\n self.model = Seq2SeqModel(self.args, use_cuda=use_cuda)\n self.model.load_state_dict(checkpoint['model'])\n else:\n self.model = None\n self.vocab = MultiVocab.load_state_dict(checkpoint['vocab'])\n" ]
[ [ "torch.cuda.manual_seed", "numpy.random.seed", "torch.manual_seed", "numpy.argmax", "torch.cuda.is_available" ], [ "torch.load", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pmeier/kornia
[ "57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf", "57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf", "57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf", "57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf", "57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf" ]
[ "kornia/losses/psnr.py", "kornia/filters/kernels.py", "tutorials/gaussian_blur.py", "test/integration/test_warp.py", "test/geometry/epipolar/test_epipolar_metrics.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn.functional import mse_loss\n\n\nclass PSNRLoss(nn.Module):\n r\"\"\"Creates a criterion that calculates the PSNR between 2 images. Given an m x n image, the PSNR is:\n\n .. math::\n\n \\text{PSNR} = 10 \\log_{10} \\bigg(\\frac{\\text{MAX}_I^2}{MSE(I,T)}\\bigg)\n\n where\n\n .. math::\n\n \\text{MSE}(I,T) = \\frac{1}{mn}\\sum_{i=0}^{m-1}\\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2\n\n and :math:`\\text{MAX}_I` is the maximum possible input value\n (e.g for floating point images :math:`\\text{MAX}_I=1`).\n\n\n Arguments:\n max_val (float): Maximum value of input\n\n Shape:\n - input: :math:`(*)`\n - approximation: :math:`(*)` same shape as input\n - output: :math:`()` a scalar\n\n Examples:\n >>> kornia.losses.psnr_loss(torch.ones(1), 1.2*torch.ones(1), 2)\n tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10)\n\n Reference:\n https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition\n \"\"\"\n\n def __init__(self, max_val: float) -> None:\n super(PSNRLoss, self).__init__()\n self.max_val: float = max_val\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: # type: ignore\n return psnr_loss(input, target, self.max_val)\n\n\ndef psnr_loss(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:\n r\"\"\"Function that computes PSNR\n\n See :class:`~kornia.losses.PSNRLoss` for details.\n \"\"\"\n if not torch.is_tensor(input) or not torch.is_tensor(target):\n raise TypeError(f\"Expected 2 torch tensors but got {type(input)} and {type(target)}\")\n\n if input.shape != target.shape:\n raise TypeError(f\"Expected tensors of equal shapes, but got {input.shape} and {target.shape}\")\n mse_val = mse_loss(input, target, reduction='mean')\n max_val_tensor: torch.Tensor = torch.tensor(max_val).to(input.device).to(input.dtype)\n return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val)\n", "from typing import Tuple, List, Union, cast\n\nimport torch\nimport torch.nn as nn\n\nfrom kornia.geometry.transform.affwarp import rotate\n\n\ndef normalize_kernel2d(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Normalizes both derivative and smoothing kernel.\n \"\"\"\n if len(input.size()) < 2:\n raise TypeError(\"input should be at least 2D tensor. Got {}\"\n .format(input.size()))\n norm: torch.Tensor = input.abs().sum(dim=-1).sum(dim=-1)\n return input / (norm.unsqueeze(-1).unsqueeze(-1))\n\n\ndef gaussian(window_size, sigma):\n x = torch.arange(window_size).float() - window_size // 2\n if window_size % 2 == 0:\n x = x + 0.5\n gauss = torch.exp((-x.pow(2.0) / float(2 * sigma ** 2)))\n return gauss / gauss.sum()\n\n\ndef laplacian_1d(window_size) -> torch.Tensor:\n r\"\"\"One could also use the Laplacian of Gaussian formula\n to design the filter.\n \"\"\"\n\n filter_1d = torch.ones(window_size)\n filter_1d[window_size // 2] = 1 - window_size\n laplacian_1d: torch.Tensor = filter_1d\n return laplacian_1d\n\n\ndef get_box_kernel2d(kernel_size: Tuple[int, int]) -> torch.Tensor:\n r\"\"\"Utility function that returns a box filter.\"\"\"\n kx: float = float(kernel_size[0])\n ky: float = float(kernel_size[1])\n scale: torch.Tensor = torch.tensor(1.) / torch.tensor([kx * ky])\n tmp_kernel: torch.Tensor = torch.ones(1, kernel_size[0], kernel_size[1])\n return scale.to(tmp_kernel.dtype) * tmp_kernel\n\n\ndef get_binary_kernel2d(window_size: Tuple[int, int]) -> torch.Tensor:\n r\"\"\"Creates a binary kernel to extract the patches. 
If the window size\n is HxW will create a (H*W)xHxW kernel.\n \"\"\"\n window_range: int = window_size[0] * window_size[1]\n kernel: torch.Tensor = torch.zeros(window_range, window_range)\n for i in range(window_range):\n kernel[i, i] += 1.0\n return kernel.view(window_range, 1, window_size[0], window_size[1])\n\n\ndef get_sobel_kernel_3x3() -> torch.Tensor:\n \"\"\"Utility function that returns a sobel kernel of 3x3\"\"\"\n return torch.tensor([\n [-1., 0., 1.],\n [-2., 0., 2.],\n [-1., 0., 1.],\n ])\n\n\ndef get_sobel_kernel_5x5_2nd_order() -> torch.Tensor:\n \"\"\"Utility function that returns a 2nd order sobel kernel of 5x5\"\"\"\n return torch.tensor([\n [-1., 0., 2., 0., -1.],\n [-4., 0., 8., 0., -4.],\n [-6., 0., 12., 0., -6.],\n [-4., 0., 8., 0., -4.],\n [-1., 0., 2., 0., -1.]\n ])\n\n\ndef _get_sobel_kernel_5x5_2nd_order_xy() -> torch.Tensor:\n \"\"\"Utility function that returns a 2nd order sobel kernel of 5x5\"\"\"\n return torch.tensor([\n [-1., -2., 0., 2., 1.],\n [-2., -4., 0., 4., 2.],\n [0., 0., 0., 0., 0.],\n [2., 4., 0., -4., -2.],\n [1., 2., 0., -2., -1.]\n ])\n\n\ndef get_diff_kernel_3x3() -> torch.Tensor:\n \"\"\"Utility function that returns a sobel kernel of 3x3\"\"\"\n return torch.tensor([\n [-0., 0., 0.],\n [-1., 0., 1.],\n [-0., 0., 0.],\n ])\n\n\ndef get_diff_kernel3d(device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:\n \"\"\"Utility function that returns a first order derivative kernel of 3x3x3\"\"\"\n kernel: torch.Tensor = torch.tensor([[[[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [-0.5, 0.0, 0.5],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n ],\n [[[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, -0.5, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.5, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n ],\n [[[0.0, 0.0, 0.0],\n [0.0, -0.5, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 0.5, 0.0],\n [0.0, 0.0, 0.0]],\n ],\n ], device=device, dtype=dtype)\n return kernel.unsqueeze(1)\n\n\ndef get_diff_kernel3d_2nd_order(device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:\n \"\"\"Utility function that returns a first order derivative kernel of 3x3x3\"\"\"\n kernel: torch.Tensor = torch.tensor([[[[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [1.0, -2.0, 1.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n ],\n [[[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 1.0, 0.0],\n [0.0, -2.0, 0.0],\n [0.0, 1.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n ],\n [[[0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, -2.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0]],\n ],\n [[[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[1.0, 0.0, -1.0],\n [0.0, 0.0, 0.0],\n [-1.0, 0.0, 1.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n ],\n [[[0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, -1.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, -1.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0]],\n ],\n [[[0.0, 0.0, 0.0],\n [1.0, 0.0, -1.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n\n [[0.0, 0.0, 0.0],\n [-1.0, 0.0, 1.0],\n [0.0, 0.0, 0.0]],\n ],\n ], device=device, dtype=dtype)\n return kernel.unsqueeze(1)\n\n\ndef 
get_sobel_kernel2d() -> torch.Tensor:\n kernel_x: torch.Tensor = get_sobel_kernel_3x3()\n kernel_y: torch.Tensor = kernel_x.transpose(0, 1)\n return torch.stack([kernel_x, kernel_y])\n\n\ndef get_diff_kernel2d() -> torch.Tensor:\n kernel_x: torch.Tensor = get_diff_kernel_3x3()\n kernel_y: torch.Tensor = kernel_x.transpose(0, 1)\n return torch.stack([kernel_x, kernel_y])\n\n\ndef get_sobel_kernel2d_2nd_order() -> torch.Tensor:\n gxx: torch.Tensor = get_sobel_kernel_5x5_2nd_order()\n gyy: torch.Tensor = gxx.transpose(0, 1)\n gxy: torch.Tensor = _get_sobel_kernel_5x5_2nd_order_xy()\n return torch.stack([gxx, gxy, gyy])\n\n\ndef get_diff_kernel2d_2nd_order() -> torch.Tensor:\n gxx: torch.Tensor = torch.tensor([\n [0., 0., 0.],\n [1., -2., 1.],\n [0., 0., 0.],\n ])\n gyy: torch.Tensor = gxx.transpose(0, 1)\n gxy: torch.Tensor = torch.tensor([\n [-1., 0., 1.],\n [0., 0., 0.],\n [1., 0., -1.],\n ])\n return torch.stack([gxx, gxy, gyy])\n\n\ndef get_spatial_gradient_kernel2d(mode: str, order: int) -> torch.Tensor:\n r\"\"\"Function that returns kernel for 1st or 2nd order image gradients,\n using one of the following operators: sobel, diff\"\"\"\n if mode not in ['sobel', 'diff']:\n raise TypeError(\"mode should be either sobel\\\n or diff. Got {}\".format(mode))\n if order not in [1, 2]:\n raise TypeError(\"order should be either 1 or 2\\\n Got {}\".format(order))\n if mode == 'sobel' and order == 1:\n kernel: torch.Tensor = get_sobel_kernel2d()\n elif mode == 'sobel' and order == 2:\n kernel = get_sobel_kernel2d_2nd_order()\n elif mode == 'diff' and order == 1:\n kernel = get_diff_kernel2d()\n elif mode == 'diff' and order == 2:\n kernel = get_diff_kernel2d_2nd_order()\n else:\n raise NotImplementedError(\"\")\n return kernel\n\n\ndef get_spatial_gradient_kernel3d(mode: str, order: int, device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:\n r\"\"\"Function that returns kernel for 1st or 2nd order scale pyramid gradients,\n using one of the following operators: sobel, diff\"\"\"\n if mode not in ['sobel', 'diff']:\n raise TypeError(\"mode should be either sobel\\\n or diff. Got {}\".format(mode))\n if order not in [1, 2]:\n raise TypeError(\"order should be either 1 or 2\\\n Got {}\".format(order))\n if mode == 'sobel':\n raise NotImplementedError(\"Sobel kernel for 3d gradient is not implemented yet\")\n elif mode == 'diff' and order == 1:\n kernel = get_diff_kernel3d(device, dtype)\n elif mode == 'diff' and order == 2:\n kernel = get_diff_kernel3d_2nd_order(device, dtype)\n else:\n raise NotImplementedError(\"\")\n return kernel\n\n\ndef get_gaussian_kernel1d(kernel_size: int,\n sigma: float,\n force_even: bool = False) -> torch.Tensor:\n r\"\"\"Function that returns Gaussian filter coefficients.\n\n Args:\n kernel_size (int): filter size. It should be odd and positive.\n sigma (float): gaussian standard deviation.\n force_even (bool): overrides requirement for odd kernel size.\n\n Returns:\n Tensor: 1D tensor with gaussian filter coefficients.\n\n Shape:\n - Output: :math:`(\\text{kernel_size})`\n\n Examples::\n\n >>> kornia.image.get_gaussian_kernel(3, 2.5)\n tensor([0.3243, 0.3513, 0.3243])\n\n >>> kornia.image.get_gaussian_kernel(5, 1.5)\n tensor([0.1201, 0.2339, 0.2921, 0.2339, 0.1201])\n \"\"\"\n if (not isinstance(kernel_size, int) or (\n (kernel_size % 2 == 0) and not force_even) or (\n kernel_size <= 0)):\n raise TypeError(\n \"kernel_size must be an odd positive integer. 
\"\n \"Got {}\".format(kernel_size)\n )\n window_1d: torch.Tensor = gaussian(kernel_size, sigma)\n return window_1d\n\n\ndef get_gaussian_kernel2d(\n kernel_size: Tuple[int, int],\n sigma: Tuple[float, float],\n force_even: bool = False) -> torch.Tensor:\n r\"\"\"Function that returns Gaussian filter matrix coefficients.\n\n Args:\n kernel_size (Tuple[int, int]): filter sizes in the x and y direction.\n Sizes should be odd and positive.\n sigma (Tuple[int, int]): gaussian standard deviation in the x and y\n direction.\n force_even (bool): overrides requirement for odd kernel size.\n\n Returns:\n Tensor: 2D tensor with gaussian filter matrix coefficients.\n\n Shape:\n - Output: :math:`(\\text{kernel_size}_x, \\text{kernel_size}_y)`\n\n Examples::\n\n >>> kornia.image.get_gaussian_kernel2d((3, 3), (1.5, 1.5))\n tensor([[0.0947, 0.1183, 0.0947],\n [0.1183, 0.1478, 0.1183],\n [0.0947, 0.1183, 0.0947]])\n\n >>> kornia.image.get_gaussian_kernel2d((3, 5), (1.5, 1.5))\n tensor([[0.0370, 0.0720, 0.0899, 0.0720, 0.0370],\n [0.0462, 0.0899, 0.1123, 0.0899, 0.0462],\n [0.0370, 0.0720, 0.0899, 0.0720, 0.0370]])\n \"\"\"\n if not isinstance(kernel_size, tuple) or len(kernel_size) != 2:\n raise TypeError(\n \"kernel_size must be a tuple of length two. Got {}\".format(\n kernel_size\n )\n )\n if not isinstance(sigma, tuple) or len(sigma) != 2:\n raise TypeError(\n \"sigma must be a tuple of length two. Got {}\".format(sigma)\n )\n ksize_x, ksize_y = kernel_size\n sigma_x, sigma_y = sigma\n kernel_x: torch.Tensor = get_gaussian_kernel1d(ksize_x, sigma_x, force_even)\n kernel_y: torch.Tensor = get_gaussian_kernel1d(ksize_y, sigma_y, force_even)\n kernel_2d: torch.Tensor = torch.matmul(\n kernel_x.unsqueeze(-1), kernel_y.unsqueeze(-1).t()\n )\n return kernel_2d\n\n\ndef get_laplacian_kernel1d(kernel_size: int) -> torch.Tensor:\n r\"\"\"Function that returns the coefficients of a 1D Laplacian filter.\n\n Args:\n kernel_size (int): filter size. It should be odd and positive.\n\n Returns:\n Tensor (float): 1D tensor with laplacian filter coefficients.\n\n Shape:\n - Output: math:`(\\text{kernel_size})`\n\n Examples::\n >>> kornia.image.get_laplacian_kernel(3)\n tensor([ 1., -2., 1.])\n\n >>> kornia.image.get_laplacian_kernel(5)\n tensor([ 1., 1., -4., 1., 1.])\n\n \"\"\"\n if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \\\n kernel_size <= 0:\n raise TypeError(\"ksize must be an odd positive integer. Got {}\"\n .format(kernel_size))\n window_1d: torch.Tensor = laplacian_1d(kernel_size)\n return window_1d\n\n\ndef get_laplacian_kernel2d(kernel_size: int) -> torch.Tensor:\n r\"\"\"Function that returns Gaussian filter matrix coefficients.\n\n Args:\n kernel_size (int): filter size should be odd.\n\n Returns:\n Tensor: 2D tensor with laplacian filter matrix coefficients.\n\n Shape:\n - Output: :math:`(\\text{kernel_size}_x, \\text{kernel_size}_y)`\n\n Examples::\n\n >>> kornia.image.get_laplacian_kernel2d(3)\n tensor([[ 1., 1., 1.],\n [ 1., -8., 1.],\n [ 1., 1., 1.]])\n\n >>> kornia.image.get_laplacian_kernel2d(5)\n tensor([[ 1., 1., 1., 1., 1.],\n [ 1., 1., 1., 1., 1.],\n [ 1., 1., -24., 1., 1.],\n [ 1., 1., 1., 1., 1.],\n [ 1., 1., 1., 1., 1.]])\n\n \"\"\"\n if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \\\n kernel_size <= 0:\n raise TypeError(\"ksize must be an odd positive integer. 
Got {}\"\n .format(kernel_size))\n\n kernel = torch.ones((kernel_size, kernel_size))\n mid = kernel_size // 2\n kernel[mid, mid] = 1 - kernel_size ** 2\n kernel_2d: torch.Tensor = kernel\n return kernel_2d\n\n\ndef get_motion_kernel2d(kernel_size: int, angle: Union[torch.Tensor, float],\n direction: Union[torch.Tensor, float] = 0.) -> torch.Tensor:\n r\"\"\"Function that returns motion blur filter.\n\n Args:\n kernel_size (int): motion kernel width and height. It should be odd and positive.\n angle (torch.Tensor, float): angle of the motion blur in degrees (anti-clockwise rotation).\n direction (float): forward/backward direction of the motion blur.\n Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),\n while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a\n uniformly (but still angled) motion blur.\n\n Returns:\n torch.Tensor: the motion blur kernel.\n\n Shape:\n - Output: :math:`(ksize, ksize)`\n\n Examples::\n >>> kornia.filters.get_motion_kernel2d(5, 0., 0.)\n tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],\n [0.2000, 0.2000, 0.2000, 0.2000, 0.2000],\n [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.0000, 0.0000, 0.0000]])\n >>> kornia.filters.get_motion_kernel2d(3, 215., -0.5)\n tensor([[0.0000, 0.0412, 0.0732],\n [0.1920, 0.3194, 0.0804],\n [0.2195, 0.0743, 0.0000]])\n \"\"\"\n if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or kernel_size < 3:\n raise TypeError(\"ksize must be an odd integer >= than 3\")\n\n if not isinstance(angle, torch.Tensor):\n angle = torch.tensor([angle])\n\n angle = cast(torch.Tensor, angle)\n if angle.dim() == 0:\n angle = angle.unsqueeze(dim=0)\n assert angle.dim() == 1, f\"angle must be a 1-dim tensor. Got {angle}.\"\n\n if not isinstance(direction, torch.Tensor):\n direction = torch.tensor([direction])\n\n direction = cast(torch.Tensor, direction)\n if direction.dim() == 0:\n direction = direction.unsqueeze(dim=0)\n assert direction.dim() == 1, f\"direction must be a 1-dim tensor. Got {direction}.\"\n\n kernel_tuple: Tuple[int, int] = (kernel_size, kernel_size)\n # direction from [-1, 1] to [0, 1] range\n direction = (torch.clamp(direction, -1., 1.).item() + 1.) / 2.\n kernel = torch.zeros(kernel_tuple, dtype=torch.float)\n kernel[kernel_tuple[0] // 2, :] = torch.linspace(direction, 1. 
- direction, steps=kernel_tuple[0])\n kernel = kernel.unsqueeze(0).unsqueeze(0)\n # rotate (counterclockwise) kernel by given angle\n kernel = rotate(kernel, angle)\n kernel = kernel[0][0]\n kernel = kernel / kernel.sum()\n return kernel\n", "\"\"\"\n\nBlur image using GaussianBlur operator\n======================================\n\n\"\"\"\n\nimport torch\nimport kornia\nimport cv2\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n# read the image with OpenCV\nimg: np.ndarray = cv2.imread('./data/lena.jpg')\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n# convert to torch tensor\ndata: torch.tensor = kornia.image_to_tensor(img, keepdim=False) # BxCxHxW\n\n# create the operator\ngauss = kornia.filters.GaussianBlur2d((11, 11), (10.5, 10.5))\n\n# blur the image\nx_blur: torch.tensor = gauss(data.float())\n\n# convert back to numpy\nimg_blur: np.ndarray = kornia.tensor_to_image(x_blur.byte())\n\n# Create the plot\nfig, axs = plt.subplots(1, 2, figsize=(16, 10))\naxs = axs.ravel()\n\naxs[0].axis('off')\naxs[0].set_title('image source')\naxs[0].imshow(img)\n\naxs[1].axis('off')\naxs[1].set_title('image blurred')\naxs[1].imshow(img_blur)\n", "import pytest\n\nimport kornia\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\nclass MyHomography(nn.Module):\n\n def __init__(self, init_homo: torch.Tensor) -> None:\n super().__init__()\n self.homo = nn.Parameter(init_homo.clone().detach())\n\n def forward(self) -> torch.Tensor:\n return torch.unsqueeze(self.homo, dim=0)\n\n\nclass TestWarping:\n # optimization\n lr = 1e-3\n num_iterations = 100\n\n def test_smoke(self, device):\n\n img_src_t: torch.Tensor = torch.rand(1, 3, 120, 120).to(device)\n img_dst_t: torch.Tensor = torch.rand(1, 3, 120, 120).to(device)\n\n init_homo: torch.Tensor = torch.from_numpy(\n np.array([\n [0.0415, 1.2731, -1.1731],\n [-0.9094, 0.5072, 0.4272],\n [0.0762, 1.3981, 1.0646]\n ])\n ).float()\n\n height, width = img_dst_t.shape[-2:]\n warper = kornia.HomographyWarper(height, width)\n dst_homo_src = MyHomography(init_homo=init_homo).to(device)\n\n learning_rate = self.lr\n optimizer = optim.Adam(dst_homo_src.parameters(), lr=learning_rate)\n\n for iter_idx in range(self.num_iterations):\n # warp the reference image to the destiny with current homography\n img_src_to_dst = warper(img_src_t, dst_homo_src())\n\n # compute the photometric loss\n loss = F.l1_loss(img_src_to_dst, img_dst_t)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n assert not bool(torch.isnan(dst_homo_src.homo.grad).any())\n", "import pytest\n\nimport torch\nfrom torch.autograd import gradcheck\nfrom torch.testing import assert_allclose\n\nimport kornia.geometry.epipolar as epi\nimport kornia.testing as utils\n\n\nclass TestSymmetricalEpipolarDistance:\n\n def test_smoke(self, device, dtype):\n pts1 = torch.rand(1, 4, 3, device=device, dtype=dtype)\n pts2 = torch.rand(1, 4, 3, device=device, dtype=dtype)\n Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)\n assert epi.symmetrical_epipolar_distance(pts1, pts2, Fm).shape == (1, 4)\n\n def test_batch(self, device, dtype):\n batch_size = 5\n pts1 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)\n pts2 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)\n Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)\n assert epi.symmetrical_epipolar_distance(pts1, pts2, Fm).shape == (5, 4)\n\n def test_gradcheck(self, device):\n # generate input data\n batch_size, num_points, num_dims = 
2, 3, 2\n points1 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64, requires_grad=True)\n points2 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64)\n Fm = utils.create_random_fundamental_matrix(batch_size).type_as(points2)\n assert gradcheck(epi.symmetrical_epipolar_distance, (points1, points2, Fm),\n raise_exception=True)\n\n\nclass TestSampsonEpipolarDistance:\n\n def test_smoke(self, device, dtype):\n pts1 = torch.rand(1, 4, 3, device=device, dtype=dtype)\n pts2 = torch.rand(1, 4, 3, device=device, dtype=dtype)\n Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)\n assert epi.sampson_epipolar_distance(pts1, pts2, Fm).shape == (1, 4)\n\n def test_batch(self, device, dtype):\n batch_size = 5\n pts1 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)\n pts2 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)\n Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)\n assert epi.sampson_epipolar_distance(pts1, pts2, Fm).shape == (5, 4)\n\n def test_gradcheck(self, device):\n # generate input data\n batch_size, num_points, num_dims = 2, 3, 2\n points1 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64, requires_grad=True)\n points2 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64)\n Fm = utils.create_random_fundamental_matrix(batch_size).type_as(points2)\n assert gradcheck(epi.sampson_epipolar_distance, (points1, points2, Fm),\n raise_exception=True)\n" ]
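The `get_gaussian_kernel2d` function above is separable: the 2-D kernel is built as the outer product of two 1-D Gaussian windows. Below is a minimal self-contained sketch of that construction, assuming only `torch`; the `gaussian_1d` helper is a stand-in for the `gaussian()` function that `get_gaussian_kernel1d` calls but which is not shown here.

import torch

def gaussian_1d(kernel_size: int, sigma: float) -> torch.Tensor:
    # discrete Gaussian window, normalized so the weights sum to one
    x = torch.arange(kernel_size, dtype=torch.float32) - (kernel_size - 1) / 2.0
    gauss = torch.exp(-x.pow(2) / (2.0 * sigma ** 2))
    return gauss / gauss.sum()

kernel_x = gaussian_1d(3, 1.5)
kernel_y = gaussian_1d(5, 1.5)
# outer product of the two 1-D windows, mirroring the torch.matmul(...) call above
kernel_2d = torch.matmul(kernel_x.unsqueeze(-1), kernel_y.unsqueeze(-1).t())
assert kernel_2d.shape == (3, 5)
assert torch.allclose(kernel_2d.sum(), torch.tensor(1.0))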
[ [ "torch.nn.functional.mse_loss", "torch.log10", "torch.is_tensor", "torch.tensor" ], [ "torch.linspace", "torch.ones", "torch.zeros", "torch.tensor", "torch.arange", "torch.device", "torch.clamp", "torch.stack" ], [ "matplotlib.pyplot.subplots" ], [ "torch.isnan", "torch.nn.functional.l1_loss", "torch.unsqueeze", "torch.rand", "numpy.array" ], [ "torch.autograd.gradcheck", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
learn2free/GeoCAT-examples
[ "3ac152a767e78a362a8ebb6f677005f3de320ca6", "3ac152a767e78a362a8ebb6f677005f3de320ca6", "3ac152a767e78a362a8ebb6f677005f3de320ca6", "3ac152a767e78a362a8ebb6f677005f3de320ca6", "3ac152a767e78a362a8ebb6f677005f3de320ca6" ]
[ "Plots/Skew-T/NCL_skewt_3_2.py", "Plots/Overlays/NCL_overlay_1.py", "Plots/Colors/CB_Height.py", "Plots/Polygons/NCL_polyg_18.py", "Plots/Panels/NCL_panel_15.py" ]
[ "\"\"\"\nNCL_skewt_3_2.py\n=================\nThis script illustrates the following concepts:\n - Drawing Skew-T plots\n - Thinning the wind barbs in a Skew-T plot\n - Customizing the background of a Skew_T plot\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/skewt_3.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/skewt_3_2_lg.png\n\"\"\"\n\n###############################################################################\n# Import packages:\n\nimport geocat.datafiles as gdf\nimport matplotlib.lines as mlines\nimport matplotlib.pyplot as plt\nimport metpy.calc as mpcalc\nimport numpy as np\nimport pandas as pd\nfrom geocat.viz import util as gvutil\nfrom metpy.plots import SkewT\nfrom metpy.units import units\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into xarrays\nds = pd.read_csv(gdf.get('ascii_files/sounding_ATS.csv'), header=None)\n\n# Extract the data\np = ds[0].values * units.hPa # Pressure [mb/hPa]\ntc = ds[1].values * units.degC # Temperature [C]\ntdc = ds[2].values * units.degC # Dew pt temp [C]\nwspd = ds[5].values * units.knots # Wind speed [knots or m/s]\nwdir = ds[6].values * units.degrees # Meteorological wind dir\nu, v = mpcalc.wind_components(wspd, wdir) # Calculate wind components\n\n###############################################################################\n# Plot\n\nfig = plt.figure(figsize=(12, 12))\n\n# Adding the \"rotation\" kwarg will over-ride the default MetPy rotation of\n# 30 degrees for the 45 degree default found in NCL Skew-T plots\nskew = SkewT(fig, rotation=45)\nax = skew.ax\n\n# Shade every other section between isotherms\nx1 = np.linspace(-100, 40, 8) # The starting x values for the shaded regions\nx2 = np.linspace(-90, 50, 8) # The ending x values for the shaded regions\ny = [1050, 100] # The range of y values that the shaded regions should cover\n\nfor i in range(0, 8):\n skew.shade_area(y=y,\n x1=x1[i],\n x2=x2[i],\n color='limegreen',\n alpha=0.25,\n zorder=1)\n\nskew.plot(p, tc, 'black')\nskew.plot(p, tdc, 'blue')\n# Plot only every third windbarb\nskew.plot_barbs(pressure=p[::3],\n u=u[::3],\n v=v[::3],\n xloc=1.05,\n fill_empty=True,\n sizes=dict(emptybarb=0.075, width=0.1, height=0.2))\n\n# Draw line underneath wind barbs\nline = mlines.Line2D([1.05, 1.05], [0, 1],\n color='gray',\n linewidth=0.5,\n transform=ax.transAxes,\n dash_joinstyle='round',\n clip_on=False,\n zorder=0)\nax.add_line(line)\n\n# Add relevant special lines\n# Choose starting temperatures in Kelvin for the dry adiabats\nt0 = units.K * np.arange(243.15, 473.15, 10)\nskew.plot_dry_adiabats(t0=t0, linestyles='solid', colors='gray', linewidth=1.5)\n\n# Choose temperatures for moist adiabats\nt0 = units.K * np.arange(281.15, 306.15, 4)\nmsa = skew.plot_moist_adiabats(t0=t0,\n linestyles='solid',\n colors='lime',\n linewidths=1.5)\n\n# Choose mixing ratios\nw = np.array([0.001, 0.002, 0.003, 0.005, 0.008, 0.012, 0.020]).reshape(-1, 1)\n\n# Choose the range of pressures that the mixing ratio lines are drawn over\np_levs = units.hPa * np.linspace(1000, 400, 7)\nskew.plot_mixing_lines(mixing_ratio=w, pressure=p_levs, colors='lime')\n\nskew.ax.set_ylim(1000, 100)\n\ngvutil.set_titles_and_labels(ax, maintitle=\"ATS Rawinsonde: degC + Thin wind\")\n\n# Set axes limits and ticks\ngvutil.set_axes_limits_and_ticks(\n ax=ax,\n xlim=[-30, 50],\n 
yticks=[1000, 850, 700, 500, 400, 300, 250, 200, 150, 100])\n\n# Change the style of the gridlines\nplt.grid(True,\n which='major',\n axis='both',\n color='tan',\n linewidth=1.5,\n alpha=0.5)\nplt.xlabel(\"Temperature (C)\")\nplt.ylabel(\"P (hPa)\")\nplt.show()\n", "\"\"\"\nNCL_overlay_1.py\n================\nThis script illustrates the following concepts:\n - Overlaying line contours on filled contours\n - Explicitly setting contour levels\n - Adding custom formatted contour labels\n - Manually selecting where contour labels will be drawn\n - Adding label textbox\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/overlay_1.ncl\n - Original NCL plots: https://www.ncl.ucar.edu/Applications/Images/overlay_1_lg.png\n\n\"\"\"\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport geocat.datafiles as gdf\nimport matplotlib.pyplot as plt\n###############################################################################\n# Import packages:\nimport numpy as np\nimport xarray as xr\nfrom cartopy.mpl.gridliner import LatitudeFormatter, LongitudeFormatter\nfrom geocat.viz import cmaps as gvcmaps\nfrom geocat.viz import util as gvutil\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into xarrays\nds = xr.open_dataset(gdf.get(\"netcdf_files/80.nc\"))\n\n# Extract slice of data\nu = ds.U.isel(time=0).drop('time').isel(lev=10).drop('lev')\nt = ds.T.isel(time=0).drop('time').isel(lev=10).drop('lev')\n\n###############################################################################\n# Specify levels and color map for contour\nt_lev = np.arange(210, 275, 5)\ncmap = gvcmaps.BlueDarkRed18\nu_lev = np.arange(-5, 40, 5)\n\n###############################################################################\n# Create plot:\nplt.figure(figsize=(10, 8))\nax = plt.axes(projection=ccrs.PlateCarree())\n\n# Set extent around US\nax.set_extent([230, 300, 20, 60], crs=ccrs.PlateCarree())\n\n# Draw map features\nax.add_feature(cfeature.LAKES,\n linewidth=0.5,\n edgecolor='black',\n facecolor='None')\nax.add_feature(cfeature.COASTLINE, linewidth=0.5)\n\n# Plot filled contour\ntemp = t.plot.contourf(ax=ax,\n transform=ccrs.PlateCarree(),\n cmap=cmap,\n levels=t_lev,\n extend='neither',\n add_colorbar=False,\n add_labels=False)\nplt.colorbar(temp,\n ax=ax,\n ticks=np.arange(215, 270, 5),\n orientation='horizontal',\n pad=0.075)\n\n# Plot line contour\nwind = u.plot.contour(ax=ax,\n transform=ccrs.PlateCarree(),\n vmin=-5,\n vmax=35,\n levels=u_lev,\n colors='black',\n linewidths=0.5,\n add_labels=False)\n\n# Manually specify where contour labels will go using lat and lon coordinates\nmanual = [(-107, 52), (-79, 57), (-78, 47), (-103, 32), (-86, 23)]\nax.clabel(wind, u_lev, fmt='%d', inline=True, fontsize=10, manual=manual)\n\n# Set label backgrounds white\n[\n txt.set_bbox(dict(facecolor='white', edgecolor='none', pad=2))\n for txt in wind.labelTexts\n]\n\n# Add lower text box\nax.text(1,\n -0.3,\n \"CONTOUR FROM -5 TO 35 BY 5\",\n horizontalalignment='right',\n transform=ax.transAxes,\n bbox=dict(boxstyle='square, pad=0.25',\n facecolor='white',\n edgecolor='black'))\n\n# Use geocat.viz.util convenience function to set titles and labels\ngvutil.set_titles_and_labels(ax,\n maintitle=r\"$\\bf{T/U @500hPa}$\",\n lefttitle=t.long_name,\n righttitle=t.units)\n# Add secondary title below the one placed by 
gvutil\nax.text(0, 1.01, u.long_name, transform=ax.transAxes)\nax.text(0.97, 1.01, u.units, transform=ax.transAxes)\n\n# Use geocat.viz.util convenience function to make plots look like NCL plots by\n# using latitude, longitude tick labels\ngvutil.add_lat_lon_ticklabels(ax)\n\n# Remove the degree symbol from tick labels\nax.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\nax.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n\n# Use geocat.viz.util convenience function to add minor and major tick lines\ngvutil.add_major_minor_ticks(ax,\n x_minor_per_major=3,\n y_minor_per_major=5,\n labelsize=12)\n\n# Use geocat.viz.util convenience function to set axes tick values\ngvutil.set_axes_limits_and_ticks(ax,\n xticks=np.arange(-120, -30, 30),\n yticks=np.arange(20, 70, 10))\n\nplt.show()\n", "\"\"\"\nCB_Height.py\n=============\n\nThis script illustrates multiple color schemes for color maps which will allow for those\nimpacted by color blindness to see visualizations. Using rainbow color schemes is also\na poor choice in color scheme for images that may be transferred to a black and white\nscale for printing. This code addresses a handful of options to use in place of rainbow\ncolor schemes for use in the matplotlib.pyplot library.\n\nMore information on this subject can be found here:\n - https://agilescientific.com/blog/2017/12/14/no-more-rainbows\n - `https://www.researchgate.net/publication/328361220 <https://www.researchgate.net/publication/328361220_The_Effect_of_Color_Scales_on_Climate_Scientists'_Objective_and_Subjective_Performance_in_Spatial_Data_Analysis_Tasks>`_\n\nMore color schemes can be found here:\n - https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html\n\nFigure 1.\n - The rainbow color scheme is problematic due to the lack of a natural perceived ordering of colors,\n perceptual changes in the colors (ex: yellow and green blend together easily), and is sensitive to\n deficiencies in vision\n\nFigure 2.\n - This is an example of a less distinct contrasting color gradient. 
This choice in color scheme would\n not be a good choice for printing in black and white but may be ok for individuals who\n experience blue-green colorblindness.\n\nFigure 3.\n - The coolwarm diverging scheme should be used when both high and low values are interesting.\n However, be careful using this scheme if the projection will be printed to black and white.\n\nFigure 4.\n - This plot shows how a singular color like blue can be incredibly useful for plotting this type of data.\n This color scheme will work well for individuals impacted by color blindness and is black and white print friendly.\n\"\"\"\n\n###############################################################################\n# Import packages:\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport geocat.datafiles as gdf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\nfrom geocat.viz import cmaps as gvcmaps\nfrom geocat.viz import util as gvutil\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into xarrays\nds = xr.open_dataset(gdf.get(\"netcdf_files/atmos.nc\"), decode_times=False)\n\n# Extract variable\nv = ds.PBLH.isel(time=0)\n\n# Fix the artifact of not-shown-data around 0 and 360-degree longitudes\nt = gvutil.xr_add_cyclic_longitudes(v, \"lon\")\n\n###############################################################################\n#Plot:\n\nfig = plt.figure(figsize=(12, 12))\n\n\ndef Plot(color, row, col, pos, title):\n\n # Generate axes, using Cartopy, drawing coastlines, and adding features\n projection = ccrs.PlateCarree()\n ax1 = plt.subplot(row, col, pos, projection=projection)\n ax1.coastlines(linewidths=0.5)\n ax1.add_feature(cfeature.LAND, facecolor=\"lightgray\")\n\n # Import an NCL colormap\n newcmp = color\n\n # Contourf-plot data\n hgt = t.plot.contourf(ax=ax1,\n transform=projection,\n levels=40,\n vmin=100,\n vmax=1600,\n cmap=newcmp,\n add_colorbar=False)\n\n # Add color bar\n cbar_ticks = np.arange(100, 1600, 100)\n cbar = plt.colorbar(hgt,\n orientation='vertical',\n shrink=0.8,\n pad=0.05,\n extendrect=True,\n ticks=cbar_ticks)\n\n cbar.ax.tick_params(labelsize=10)\n\n # Use geocat.viz.util convenience function to set axes parameters without calling several matplotlib functions\n # Set axes limits, and tick values\n gvutil.set_axes_limits_and_ticks(ax1,\n xlim=(0, 90),\n ylim=(0, 90),\n xticks=np.linspace(-180, 180, 13),\n yticks=np.linspace(-90, 90, 7))\n\n # Use geocat.viz.util convenience function to make plots look like NCL plots by using latitude, longitude tick labels\n gvutil.add_lat_lon_ticklabels(ax1)\n\n # Use geocat.viz.util convenience function to add minor and major tick lines\n gvutil.add_major_minor_ticks(ax1, labelsize=12)\n\n # Use geocat.viz.util convenience function to set titles and labels without calling several matplotlib functions\n gvutil.set_titles_and_labels(ax1,\n maintitle=title,\n maintitlefontsize=16,\n righttitlefontsize=14,\n xlabel=\"\",\n ylabel=\"\")\n\n\n#Plot first color map\nPlot(gvcmaps.BlAqGrYeOrRe, 2, 2, 1, \"Rainbow Color Projection\")\n\n#plot second color map\nPlot('magma', 2, 2, 2, \"Magma Color Projection\")\n\n#plot third color map\nPlot('coolwarm', 2, 2, 3, \"Coolwarm Color Projection\")\n\n#Plot fourth color map\nPlot('Reds', 2, 2, 4, \"Reds Color Projection\")\n\nfig.suptitle(\"Projections of Planetary Boundary Layer Height\",\n x=.5,\n y=.93,\n fontsize=18)\n", 
"\"\"\"\nNCL_polyg_18.py\n===============\nThis script illustrates the following concepts:\n - Adding lines, markers, and polygons to a map\n - Drawing lines, markers, polygons, and text in inset axes\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/polyg_18.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/polyg_18_2_lg.png\n\"\"\"\n\n###############################################################################\n# Import packages:\n\nimport cartopy\nimport cartopy.crs as ccrs\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter\nfrom geocat.viz import util as gvutil\n\n###############################################################################\n# Define helper function to remove ticks/frames from axes\n\n\ndef removeTicks(axis):\n axis.get_xaxis().set_visible(False)\n axis.get_yaxis().set_visible(False)\n\n\n###############################################################################\n# Plot map, markers, and polygons\n\n# Set size of figure\nfig = plt.figure(figsize=(10, 10))\n\n# Make grid on figure with 2 rows, 1 column\ngrid = plt.GridSpec(2, 20, figure=fig)\n\n# Make subplot for map\nax = plt.subplot(grid[:-1, 1:], projection=ccrs.PlateCarree())\n\n# Add continents\ncontinents = cartopy.feature.NaturalEarthFeature(name='land',\n category='physical',\n scale='50m',\n edgecolor='None',\n facecolor='lightgray')\n\nax.add_feature(continents)\n\n# Set map extent\nax.set_global()\n\n# Create arrays with location of each marker\nlon = np.arange(-160, 160, 20)\nlat = np.arange(-80, 80, 10)\n\n# Create array with marker symbols\n# Matplotlib provides a different set of markers than NCL, so plot appearance differs\nmarker = [\n '.', '+', '*', 'o', 'x', 's', '^', 'v', 'D', '>', '<', 'p', 'h', '8', 'X',\n 'd'\n]\n\n# Draw markers on diagonal line across graph\nfor x in range(len(lon)):\n ax.plot(lon[x],\n lat[x],\n marker=marker[x],\n color='blue',\n fillstyle='none',\n markersize=18,\n zorder=3)\n\n# Draw small red box in upper center\nax.add_patch(\n mpatches.Rectangle(xy=[7, 47],\n width=9,\n height=7,\n facecolor='None',\n edgecolor='red',\n alpha=1.0,\n transform=ccrs.PlateCarree(),\n zorder=5))\n\n# Draw green window in bottom right\nax.add_patch(\n mpatches.Rectangle(xy=[110, -45],\n width=50,\n height=35,\n facecolor='lime',\n alpha=0.3,\n transform=ccrs.PlateCarree(),\n zorder=5))\n\n# Use gvutil function to set the ticks on axes\ngvutil.set_axes_limits_and_ticks(ax,\n xlim=None,\n ylim=None,\n xticks=np.arange(-180, 210, 30),\n yticks=np.arange(-90, 120, 30),\n xticklabels=None,\n yticklabels=None)\n\n# Use gvutil function to give ticks W/N/E/S labels\ngvutil.add_lat_lon_ticklabels(ax,\n zero_direction_label=True,\n dateline_direction_label=True)\n\n# Took out degree symbols in latitude/longitude\nax.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\nax.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n\n# Use gvutil function to set title of plot\n# Set title font to bold using the r\"$\\bf{_____}$\" formatting characters\n# Spaces in title will not show up if included in curly brackets\ngvutil.set_titles_and_labels(ax,\n maintitle=r\"$\\bf{Big}$\" + \" \" +\n r\"$\\bf{centered}$\" + \" \" + r\"$\\bf{title}$\",\n maintitlefontsize=25)\n\n# Use gvutil function to plot three minor ticks for every major tick on 
axes\ngvutil.add_major_minor_ticks(ax,\n x_minor_per_major=3,\n y_minor_per_major=3,\n labelsize=\"small\")\n\n# Make second subplot for legend\nax2 = plt.subplot(grid[-1, 1:], frameon=False)\nremoveTicks(ax2)\n\n# Create 6 inset axes within subplot for each field in legend\n# Inset_axes positional array argument takes four values:\n# [starting (bottom left) x coordinate of window, starting y coordinate of window, width of field, height of field]\n\n# Add circle\naxin1 = ax2.inset_axes([0.1, 0.8, .1, .1], frameon=False)\nremoveTicks(axin1)\naxin1.add_patch(mpatches.Circle((0.1, 0.1), radius=.1, color='blue'))\naxin1.axis('equal')\n\n# Add label for circle\naxin2 = ax2.inset_axes([0.0, 0.65, .20, .5], frameon=False)\nremoveTicks(axin2)\naxin2.text(0,\n .7,\n 'Marker (left justified text)',\n color='blue',\n fontsize=12,\n verticalalignment='center')\n\n# Add red line\naxin3 = ax2.inset_axes([0.30, 0.6, .33, .5], frameon=False)\nremoveTicks(axin3)\naxin3.plot([0, 4], [3, 3], color='red')\naxin3.axis('scaled')\n\n# Add label for red line\naxin4 = ax2.inset_axes([0.33, 0.65, .33, .5], frameon=False)\nremoveTicks(axin4)\naxin4.text(0,\n .7,\n 'Polyline (centered text)',\n color='red',\n fontsize=12,\n verticalalignment='center')\n\n# Add green polygon\naxin5 = ax2.inset_axes([0.62, 0.6, .33, .5], frameon=False)\nremoveTicks(axin5)\naxin5.add_patch(\n mpatches.Rectangle(xy=[.3, .3],\n width=.6,\n height=.3,\n facecolor='lime',\n alpha=0.3))\naxin5.axis('scaled')\n\n# Add label for green polygon\naxin6 = ax2.inset_axes([0.66, 0.65, .33, .5], frameon=False)\nremoveTicks(axin6)\naxin6.text(0,\n .7,\n 'Polygon (right justified text)',\n color='lime',\n fontsize=12,\n verticalalignment='center')\n\nplt.show()\n", "\"\"\"\nNCL_panel_15.py\n===============\nThis script illustrates the following concepts:\n - Paneling three plots vertically\n - Making a color bar span over two axes\n - Selecting a different colormap to abide by best practices. 
See the `color examples <https://geocat-examples.readthedocs.io/en/latest/gallery/index.html#colors>`_ for more information.\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: http://www.ncl.ucar.edu/Applications/Scripts/panel_15.ncl\n - Original NCL plot: http://www.ncl.ucar.edu/Applications/Images/panel_15_lg.png\n\"\"\"\n\n##############################################################################\n# Import packages:\nimport cartopy.crs as ccrs\nimport geocat.datafiles as gdf\nimport geocat.viz.util as gvutil\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\nfrom cartopy.mpl.gridliner import LatitudeFormatter, LongitudeFormatter\n\n##############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into\n# xarrays\nds = xr.open_dataset(gdf.get(\"netcdf_files/h_avg_Y0191_D000.00.nc\"),\n decode_times=False)\n\n# Ensure longitudes range from 0 to 360 degrees\nt = gvutil.xr_add_cyclic_longitudes(ds.T, \"lon_t\")\n\n# Selecting the first time step and then the three levels of interest\nt = t.isel(time=0)\nt_1 = t.isel(z_t=0)\nt_2 = t.isel(z_t=1)\nt_6 = t.isel(z_t=5)\n\n##############################################################################\n# Plot:\nfig = plt.figure(figsize=(8, 12))\n\ngrid = gridspec.GridSpec(nrows=3, ncols=1, figure=fig)\n\n# Choose the map projection\nproj = ccrs.PlateCarree()\n\n# Add the subplots\nax1 = fig.add_subplot(grid[0], projection=proj) # upper cell of grid\nax2 = fig.add_subplot(grid[1], projection=proj) # middle cell of grid\nax3 = fig.add_subplot(grid[2], projection=proj) # lower cell of grid\n\nfor (ax, title) in [(ax1, 'level 0'), (ax2, 'level 1'), (ax3, 'level 6')]:\n # Use geocat.viz.util convenience function to set axes tick values\n gvutil.set_axes_limits_and_ticks(ax=ax,\n xlim=(-180, 180),\n ylim=(-90, 90),\n xticks=np.linspace(-180, 180, 13),\n yticks=np.linspace(-90, 90, 7))\n\n # Use geocat.viz.util convenience function to make plots look like NCL\n # plots by using latitude, longitude tick labels\n gvutil.add_lat_lon_ticklabels(ax)\n\n # Remove the degree symbol from tick labels\n ax.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n ax.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n\n # Use geocat.viz.util convenience function to add minor and major ticks\n gvutil.add_major_minor_ticks(ax)\n\n # Draw coastlines\n ax.coastlines(linewidth=0.5)\n\n # Use geocat.viz.util convenience function to set titles\n gvutil.set_titles_and_labels(ax,\n lefttitle=t_1.long_name,\n righttitle=t_1.units,\n lefttitlefontsize=10,\n righttitlefontsize=10)\n # Add center title\n ax.set_title(title, loc='center', y=1.04, fontsize=10)\n\n# Select an appropriate colormap\ncmap = 'magma'\n\n# Plot data\nC = ax1.contourf(t_1['lon_t'],\n t_1['lat_t'],\n t_1.data,\n levels=np.arange(0, 30, 2),\n cmap=cmap,\n extend='both')\nax2.contourf(t_2['lon_t'],\n t_2['lat_t'],\n t_2.data,\n levels=np.arange(0, 30, 2),\n cmap=cmap,\n extend='both')\nC_2 = ax3.contourf(t_6['lon_t'],\n t_6['lat_t'],\n t_6.data,\n levels=np.arange(0, 22, 2),\n cmap=cmap,\n extend='both')\n\n# Add colorbars\n# By specifying two axes for `ax` the colorbar will span both of them\nplt.colorbar(C,\n ax=[ax1, ax2],\n ticks=range(0, 30, 2),\n extendrect=True,\n extendfrac='auto',\n shrink=0.85,\n aspect=13,\n drawedges=True)\nplt.colorbar(C_2,\n ax=ax3,\n ticks=range(0, 
22, 2),\n extendrect=True,\n extendfrac='auto',\n shrink=0.85,\n aspect=5.5,\n drawedges=True)\n\nplt.show()\n" ]
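Several of the scripts above call `gvutil.xr_add_cyclic_longitudes` to close the seam between the last longitude and 0/360 before plotting on a global map. The same fix can be sketched with cartopy's `add_cyclic_point`, which appends a copy of the first longitude column at 360 degrees (a minimal sketch on synthetic data):

import numpy as np
from cartopy.util import add_cyclic_point

lon = np.arange(0.0, 360.0, 2.5)     # grid stops at 357.5, leaving a seam at 0/360
data = np.random.rand(73, lon.size)  # synthetic (lat, lon) field
data_c, lon_c = add_cyclic_point(data, coord=lon)
assert lon_c[-1] == 360.0                  # first column duplicated at 360, closing the seam
assert data_c.shape == (73, lon.size + 1)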
[ [ "numpy.linspace", "numpy.arange", "matplotlib.lines.Line2D", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.linspace", "numpy.arange", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplot", "matplotlib.pyplot.figure" ], [ "numpy.arange", "matplotlib.patches.Rectangle", "matplotlib.patches.Circle", "matplotlib.pyplot.subplot", "matplotlib.pyplot.GridSpec", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.linspace", "numpy.arange", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
awwong1/ml-research
[ "6f0bb585fef0c4567a5f02937fea62726b9c88dd", "6f0bb585fef0c4567a5f02937fea62726b9c88dd" ]
[ "models/cifar/resnet.py", "util/accuracy.py" ]
[ "from __future__ import absolute_import\nimport torch.nn as nn\nimport math\n\n\n__all__ = [\"resnet\"]\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(\n inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n \"\"\"ResNet for CIFAR10/100 dataset.\"\"\"\n\n def __init__(self, depth, num_classes=1000, block_name=\"BasicBlock\"):\n super(ResNet, self).__init__()\n # Model type specifies number of layers for CIFAR-10 model\n if block_name.lower() == \"basicblock\":\n assert (\n depth - 2\n ) % 6 == 0, \"When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202\"\n n = (depth - 2) // 6\n block = BasicBlock\n elif block_name.lower() == \"bottleneck\":\n assert (\n depth - 2\n ) % 9 == 0, \"When use bottleneck, depth should be 9n+2, e.g. 
20, 29, 47, 56, 110, 1199\"\n n = (depth - 2) // 9\n block = Bottleneck\n else:\n raise ValueError(\"block_name shoule be Basicblock or Bottleneck\")\n\n self.inplanes = 16\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 16, n)\n self.layer2 = self._make_layer(block, 32, n, stride=2)\n self.layer3 = self._make_layer(block, 64, n, stride=2)\n self.avgpool = nn.AvgPool2d(8)\n self.fc = nn.Linear(64 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x) # 32x32\n\n x = self.layer1(x) # 32x32\n x = self.layer2(x) # 16x16\n x = self.layer3(x) # 8x8\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet(**kwargs):\n \"\"\"\n Constructs a ResNet model.\n \"\"\"\n return ResNet(**kwargs)\n", "import torch\n\n\[email protected]_grad()\ndef calculate_accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\[email protected]_grad()\ndef calculate_multires_accuracy(output, targets, topk=(1,)):\n \"\"\"Computes the accuracy for the k top predictions for the specified values of k.\n Support multi resolution model output\"\"\"\n assert topk == (1,), \"only top 1 correct currently supported\"\n maxk = max(topk)\n # batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred.squeeze_()\n\n correct = []\n for batch_idx, target in enumerate(targets):\n res_pred, counts = torch.unique(pred[batch_idx], return_counts=True)\n correct.append((res_pred[counts.argmax()] == target).float())\n return (sum(correct).float().div(len(correct)).mul(100), )\n # res = []\n # for k in topk:\n # correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n # res.append(correct_k.mul_(100.0 / batch_size))\n # res.append(correct.sum().float().div(correct.numel()).mul(100))\n # return res\n\[email protected]_grad()\ndef calculate_binary_accuracy(output, target, threshold=0.5, apply_sigmoid=True):\n \"\"\"Compute the accuracy for binary classification where\n target is either 0, 1 and output is a tensor of same size with arbitrary values\"\"\"\n if apply_sigmoid:\n check = torch.nn.Sigmoid()(output)\n else:\n check = output\n pred_y = (check >= threshold).view(-1) == 
target.view(-1).bool()\n return (pred_y.sum().float() / pred_y.numel()) * 100\n" ]
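A quick sanity check for the top-k routine above; `calculate_accuracy` returns one percentage per requested `k` (the import path follows the `util/accuracy.py` entry listed above):

import torch
from util.accuracy import calculate_accuracy  # path as listed in the record above

logits = torch.tensor([[2.0, 1.0, 0.1],
                       [0.2, 3.0, 0.5],
                       [1.5, 0.3, 0.4],
                       [0.1, 0.2, 4.0]])
target = torch.tensor([0, 1, 2, 2])  # the third sample's true class only appears at rank 2
top1, top2 = calculate_accuracy(logits, target, topk=(1, 2))
# top1 == 75.0 (3 of 4 correct at rank 1) and top2 == 100.0, since class 2
# (logit 0.4) is the third sample's second-highest score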
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "torch.unique", "torch.no_grad", "torch.nn.Sigmoid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
goncaloperes/greykite
[ "160bb3ada71e3c778e1fb3d242676c42ff619e3a" ]
[ "greykite/tests/framework/output/test_univariate_forecast.py" ]
[ "import datetime\nimport math\nimport sys\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.util.testing import assert_frame_equal\nfrom pandas.util.testing import assert_series_equal\nfrom sklearn.pipeline import Pipeline\n\nfrom greykite.common import constants as cst\nfrom greykite.common.evaluation import ElementwiseEvaluationMetricEnum\nfrom greykite.common.evaluation import EvaluationMetricEnum\nfrom greykite.common.python_utils import assert_equal\nfrom greykite.common.testing_utils import gen_sliced_df\nfrom greykite.framework.input.univariate_time_series import UnivariateTimeSeries\nfrom greykite.framework.output.univariate_forecast import UnivariateForecast\nfrom greykite.framework.pipeline.utils import get_forecast\nfrom greykite.sklearn.estimator.prophet_estimator import ProphetEstimator\nfrom greykite.sklearn.estimator.silverkite_estimator import SilverkiteEstimator\n\n\ntry:\n import fbprophet # noqa\nexcept ModuleNotFoundError:\n pass\n\n\[email protected]\ndef df():\n return pd.DataFrame({\n cst.TIME_COL: [\n datetime.datetime(2018, 1, 1),\n datetime.datetime(2018, 1, 2),\n datetime.datetime(2018, 1, 3),\n datetime.datetime(2018, 1, 4)],\n cst.ACTUAL_COL: [1, 2, 3, 4],\n cst.PREDICTED_COL: [1, 4, 1, 2],\n cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],\n cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],\n cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5]\n })\n\n\[email protected]\ndef df2():\n return pd.DataFrame({\n cst.TIME_COL: pd.date_range(start=\"2018-01-01\", periods=7),\n cst.ACTUAL_COL:\n [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],\n cst.PREDICTED_COL:\n [1.0, 4.0, 3.0, 2.0, 3.0, 4.0, 8.0],\n cst.PREDICTED_LOWER_COL:\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n cst.PREDICTED_UPPER_COL:\n [4.0, 5.0, 4.0, 4.0, 5.0, 6.0, 9.0],\n cst.NULL_PREDICTED_COL:\n [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5]\n })\n\n\ndef test_univariate_forecast(df):\n \"\"\"Checks univariate forecast class\"\"\"\n # Without test_start_date\n forecast = UnivariateForecast(\n df,\n train_end_date=datetime.datetime(2018, 1, 2),\n test_start_date=None,\n forecast_horizon=2)\n\n assert forecast.forecast_horizon == 2\n assert forecast.df_train.shape == (2, 6)\n assert forecast.df_test.shape == (2, 6)\n assert forecast.relative_error_tolerance is None\n\n # evaluation metrics\n enum = EvaluationMetricEnum.Correlation\n assert forecast.train_evaluation[enum.get_metric_name()] == 1.0\n assert forecast.test_evaluation[enum.get_metric_name()] == 1.0\n enum = EvaluationMetricEnum.MeanAbsoluteError\n assert forecast.train_evaluation[enum.get_metric_name()] == 1.0\n assert forecast.test_evaluation[enum.get_metric_name()] == 2.0\n enum = EvaluationMetricEnum.RootMeanSquaredError\n assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)\n assert forecast.test_evaluation[enum.get_metric_name()] == 2.0\n enum = EvaluationMetricEnum.MedianAbsoluteError\n assert forecast.train_evaluation[enum.get_metric_name()] == 1.0\n assert forecast.test_evaluation[enum.get_metric_name()] == 2.0\n enum = EvaluationMetricEnum.MeanAbsolutePercentError\n assert forecast.train_evaluation[enum.get_metric_name()] == 50.0\n assert forecast.test_evaluation[enum.get_metric_name()] == pytest.approx(58.33333, 1e-4)\n assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0\n assert forecast.test_evaluation[cst.R2_null_model_score] == pytest.approx(0.058824, 1e-4)\n assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] is None\n assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] 
is None\n # validation metrics\n assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0\n assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 87.5\n assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5\n assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5\n assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5\n assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0\n assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0\n assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.5\n assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)\n assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)\n\n # With test_start_date, relative_error_tolerance\n with pytest.warns(UserWarning):\n forecast = UnivariateForecast(\n df,\n train_end_date=datetime.datetime(2018, 1, 2),\n test_start_date=datetime.datetime(2018, 1, 4),\n relative_error_tolerance=0.05)\n\n assert forecast.forecast_horizon is None\n assert forecast.df_train.shape == (2, 6)\n assert forecast.df_test.shape == (1, 6)\n assert forecast.relative_error_tolerance == 0.05\n\n # evaluation metrics (train_metrics remain the same, test_metrics change)\n enum = EvaluationMetricEnum.Correlation\n assert forecast.train_evaluation[enum.get_metric_name()] == 1.0\n assert forecast.test_evaluation[enum.get_metric_name()] is None\n enum = EvaluationMetricEnum.MeanAbsoluteError\n assert forecast.train_evaluation[enum.get_metric_name()] == 1.0\n assert forecast.test_evaluation[enum.get_metric_name()] == 2.0\n enum = EvaluationMetricEnum.RootMeanSquaredError\n assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)\n assert forecast.test_evaluation[enum.get_metric_name()] == 2.0\n enum = EvaluationMetricEnum.MedianAbsoluteError\n assert forecast.train_evaluation[enum.get_metric_name()] == 1.0\n assert forecast.test_evaluation[enum.get_metric_name()] == 2.0\n enum = EvaluationMetricEnum.MeanAbsolutePercentError\n assert forecast.train_evaluation[enum.get_metric_name()] == 50.0\n assert forecast.test_evaluation[enum.get_metric_name()] == 50.0\n assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0\n assert forecast.test_evaluation[cst.R2_null_model_score] == 0.36\n assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5\n assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 1.0\n # validation metrics\n assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0\n assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 75.0\n assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5\n assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.0\n assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5\n assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0\n assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0\n assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0\n assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)\n assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.95)\n\n\ndef test_subset_columns(df):\n \"\"\"Tests if intervals and null prediction are truly optional,\n and relative_error_tolerance parameter\"\"\"\n forecast = UnivariateForecast(df[[cst.TIME_COL, cst.ACTUAL_COL, cst.PREDICTED_COL]],\n predicted_lower_col=None,\n predicted_upper_col=None,\n null_model_predicted_col=None,\n 
train_end_date=datetime.datetime(2018, 1, 2),\n relative_error_tolerance=0.7)\n\n forecast_full = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))\n\n for enum in EvaluationMetricEnum:\n assert forecast.train_evaluation[enum.get_metric_name()] == forecast_full.train_evaluation[enum.get_metric_name()]\n assert forecast.test_evaluation[enum.get_metric_name()] == forecast_full.test_evaluation[enum.get_metric_name()]\n for metric in [cst.R2_null_model_score, cst.PREDICTION_BAND_WIDTH, cst.PREDICTION_BAND_COVERAGE, cst.LOWER_BAND_COVERAGE,\n cst.UPPER_BAND_COVERAGE, cst.COVERAGE_VS_INTENDED_DIFF]:\n assert forecast.train_evaluation[metric] is None\n assert forecast.test_evaluation[metric] is None\n\n assert forecast.relative_error_tolerance == 0.7\n assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5\n assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.0\n\n\ndef test_input_validation(df):\n \"\"\"Tests input validation\"\"\"\n with pytest.raises(ValueError, match=\"`coverage` must be provided\"):\n UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2), coverage=None)\n\n with pytest.raises(ValueError, match=\"`coverage` must be between 0.0 and 1.0\"):\n UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2), coverage=80.0)\n\n with pytest.raises(ValueError, match=\"2018-01-05 is not found in time column\"):\n UnivariateForecast(df, train_end_date=\"2018-01-05\")\n\n with pytest.raises(ValueError, match=\"Column not found in data frame\"):\n UnivariateForecast(df, actual_col=\"not_a_column\")\n\n\ndef test_no_train_end_date(df):\n \"\"\"Tests if train end date can be None\"\"\"\n forecast = UnivariateForecast(\n df,\n train_end_date=None)\n forecast2 = UnivariateForecast(\n df,\n train_end_date=datetime.datetime(2018, 1, 4))\n assert_equal(forecast.train_evaluation, forecast2.train_evaluation)\n assert forecast.test_evaluation is None\n\n\ndef test_partial_test_data():\n \"\"\"Tests if forecast evaluation can handle partially missing data\"\"\"\n df = pd.DataFrame({\n cst.TIME_COL: [\"2018-01-01\", datetime.datetime(2018, 1, 2), \"2018-01-03\", \"2018-01-04\", \"2018-01-05\"],\n cst.ACTUAL_COL: [1, 2, 3, 2, np.nan],\n cst.PREDICTED_COL: [1, 4, 1, 2, 4],\n cst.PREDICTED_LOWER_COL: [1, 1, 1, 1, 2],\n cst.PREDICTED_UPPER_COL: [4, 5, 4, 4, 6],\n cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5, 1.5]\n })\n\n with pytest.warns(UserWarning) as record:\n forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))\n forecast2 = UnivariateForecast(df.iloc[:4, ], train_end_date=datetime.datetime(2018, 1, 2))\n assert forecast.test_na_count == 1\n assert \"1 value(s) in y_true were NA or infinite and are omitted in error calc.\" in record[0].message.args[0:2]\n assert_equal(forecast.train_evaluation, forecast2.train_evaluation)\n assert_equal(forecast.test_evaluation, forecast2.test_evaluation)\n\n\ndef test_no_test_data():\n \"\"\"Tests if test evaluation is skipped when there are no test data\"\"\"\n df = pd.DataFrame({\n cst.TIME_COL: [\"2018-01-01\", datetime.datetime(2018, 1, 2), \"2018-01-03\", \"2018-01-04\"],\n cst.ACTUAL_COL: [1, 2, np.nan, np.nan],\n cst.PREDICTED_COL: [1, 4, 1, 2],\n cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],\n cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],\n cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5]\n })\n forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))\n assert forecast.test_na_count == 2\n assert forecast.train_evaluation is not None\n assert 
forecast.test_evaluation is None\n\n\ndef test_custom_loss_function(df):\n \"\"\"Tests the custom loss function argument\"\"\"\n def custom_loss(y_pred, y_true):\n \"\"\"Root mean absolute error\"\"\"\n return np.sqrt(np.sum(np.abs(np.array(y_pred) - np.array(y_true))))\n forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2), r2_loss_function=custom_loss)\n assert forecast.train_evaluation[cst.R2_null_model_score] == 1 - math.sqrt(2)\n assert forecast.test_evaluation[cst.R2_null_model_score] == 0\n\n\ndef test_plot(df):\n \"\"\"Tests plot function\"\"\"\n forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))\n fig = forecast.plot()\n assert fig is not None\n\n forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 4))\n fig = forecast.plot(vertical_line_color=\"green\")\n assert fig is not None\n\n\ndef test_get_grouping_evaluation(df2):\n \"\"\"Tests get_grouping_evaluation function\"\"\"\n forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))\n\n # MAPE, groupby_time_feature, train set\n metric = EvaluationMetricEnum.MeanAbsolutePercentError\n metric_name = metric.get_metric_name()\n grouped_df = forecast.get_grouping_evaluation(\n score_func=metric.get_metric_func(),\n score_func_name=metric_name,\n which=\"train\",\n groupby_time_feature=\"dow\")\n expected = pd.DataFrame({\n \"dow\": [1, 2, 3, 4, 5], # Monday, Tuesday, etc. Time feature is used as column name\n f\"train {metric_name}\": [0.0, 100.0, 0.0, 50.0, 40.0]\n })\n assert_equal(grouped_df, expected)\n\n # MSE, groupby_sliding_window_size\n metric = EvaluationMetricEnum.MeanSquaredError\n metric_name = metric.get_metric_name()\n grouped_df = forecast.get_grouping_evaluation(\n score_func=metric.get_metric_func(),\n score_func_name=metric_name,\n which=\"train\",\n groupby_sliding_window_size=2)\n expected = pd.DataFrame({\n f\"{cst.TIME_COL}_downsample\": [\n datetime.datetime(2018, 1, 1),\n datetime.datetime(2018, 1, 3),\n datetime.datetime(2018, 1, 5)],\n f\"train {metric_name}\": [0.0, 2.0, 4.0]\n })\n assert_equal(grouped_df, expected)\n\n # MAE, groupby_custom_column, test set\n forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))\n metric = EvaluationMetricEnum.MeanAbsoluteError\n custom_groups = pd.Series([\"g1\", \"g2\", \"g1\", \"g3\", \"g2\"], name=\"custom_groups\")\n grouped_df = forecast.get_grouping_evaluation(\n score_func=metric.get_metric_func(),\n score_func_name=None,\n which=\"test\",\n groupby_custom_column=custom_groups)\n expected = pd.DataFrame({\n \"custom_groups\": [\"g1\", \"g2\", \"g3\"],\n \"test metric\": [1.0, 1.5, 2.0]\n })\n assert_equal(grouped_df, expected)\n\n\ndef test_plot_grouping_evaluation(df2):\n \"\"\"Tests plot_grouping_evaluation function\"\"\"\n forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))\n # MAPE, groupby_time_feature, train set\n metric = EvaluationMetricEnum.MeanAbsolutePercentError\n metric_name = metric.get_metric_name()\n fig = forecast.plot_grouping_evaluation(\n score_func=metric.get_metric_func(),\n score_func_name=metric_name,\n which=\"train\",\n groupby_time_feature=\"dow\")\n assert fig.data[0].name == f\"train {metric_name}\"\n assert fig.layout.xaxis.title.text == \"dow\"\n assert fig.layout.yaxis.title.text == f\"train {metric_name}\"\n assert fig.layout.title.text == f\"train {metric_name} vs dow\"\n assert fig.data[0].x.shape[0] == 5\n\n # MSE, groupby_sliding_window_size, train set\n metric = 
EvaluationMetricEnum.MeanSquaredError\n metric_name = metric.get_metric_name()\n fig = forecast.plot_grouping_evaluation(\n score_func=metric.get_metric_func(),\n score_func_name=metric_name,\n which=\"train\",\n groupby_sliding_window_size=2) # there are 5 training points, so this creates groups of size (1, 2, 2)\n assert fig.data[0].name == f\"train {metric_name}\"\n assert fig.layout.xaxis.title.text == f\"{cst.TIME_COL}_downsample\"\n assert fig.layout.yaxis.title.text == f\"train {metric_name}\"\n assert fig.layout.title.text == f\"train {metric_name} vs {cst.TIME_COL}_downsample\"\n assert fig.data[0].x.shape[0] == 3\n\n # MAE, groupby_custom_column, test set\n forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))\n metric = EvaluationMetricEnum.MeanAbsoluteError\n metric_name = metric.get_metric_name()\n custom_groups = pd.Series([\"g1\", \"g2\", \"g1\", \"g3\", \"g2\"], name=\"custom_groups\")\n fig = forecast.plot_grouping_evaluation(\n groupby_custom_column=custom_groups,\n score_func=metric.get_metric_func(),\n score_func_name=metric_name,\n which=\"test\",\n title=None)\n assert fig.data[0].name == f\"test {metric_name}\"\n assert fig.layout.xaxis.title.text == \"custom_groups\"\n assert fig.layout.yaxis.title.text == f\"test {metric_name}\"\n assert fig.layout.title.text == f\"test {metric_name} vs custom_groups\"\n assert fig.data[0].x.shape[0] == 3\n\n # custom xlabel, ylabel, title\n fig = forecast.plot_grouping_evaluation(\n groupby_custom_column=custom_groups,\n score_func=metric.get_metric_func(),\n score_func_name=metric_name,\n which=\"test\",\n xlabel=\"Custom labels\",\n ylabel=\"Mean Absolute Error of y\",\n title=\"Mean Absolute Error of y by Custom labels\")\n assert fig.layout.xaxis.title.text == \"Custom labels\"\n assert fig.layout.yaxis.title.text == \"Mean Absolute Error of y\"\n assert fig.layout.title.text == \"Mean Absolute Error of y by Custom labels\"\n\n\ndef test_autocomplete_map_func_dict(df2):\n \"\"\"Tests autocomplete_map_func_dict function\"\"\"\n map_func_dict = {\n \"residual\": ElementwiseEvaluationMetricEnum.Residual.name,\n \"squared_error\": ElementwiseEvaluationMetricEnum.SquaredError.name,\n \"coverage\": ElementwiseEvaluationMetricEnum.Coverage.name,\n \"custom_metric\": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**4\n }\n\n df_renamed = df2.rename({\n cst.TIME_COL: \"custom_time_col\",\n cst.ACTUAL_COL: \"custom_actual_col\",\n cst.PREDICTED_COL: \"custom_predicted_col\",\n cst.PREDICTED_LOWER_COL: \"custom_predicted_lower_col\",\n cst.PREDICTED_UPPER_COL: \"custom_predicted_upper_col\",\n cst.NULL_PREDICTED_COL: \"custom_null_predicted_col\",\n })\n\n forecast = UnivariateForecast(df_renamed, train_end_date=datetime.datetime(2018, 1, 5))\n map_func_dict = forecast.autocomplete_map_func_dict(map_func_dict)\n\n actual = df2.apply(map_func_dict[\"residual\"], axis=1)\n expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL])\n assert_series_equal(actual, expected)\n\n actual = df2.apply(map_func_dict[\"squared_error\"], axis=1)\n expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL]).pow(2)\n assert_series_equal(actual, expected)\n\n actual = df2.apply(map_func_dict[\"coverage\"], axis=1)\n expected = ((df2[cst.ACTUAL_COL] > df2[cst.PREDICTED_LOWER_COL]) & (df2[cst.ACTUAL_COL] < df2[cst.PREDICTED_UPPER_COL])).astype('float')\n assert_series_equal(actual, expected)\n\n actual = df2.apply(map_func_dict[\"custom_metric\"], axis=1)\n expected = (df2[cst.ACTUAL_COL] - 
df2[cst.PREDICTED_COL]).pow(4)\n assert_series_equal(actual, expected)\n\n assert forecast.autocomplete_map_func_dict(None) is None\n\n valid_names = \", \".join(ElementwiseEvaluationMetricEnum.__dict__[\"_member_names_\"])\n with pytest.raises(ValueError, match=f\"unknown_func is not a recognized elementwise \"\n f\"evaluation metric. Must be one of: {valid_names}\"):\n map_func_dict = {\"unknown_func\": \"unknown_func\"}\n forecast.autocomplete_map_func_dict(map_func_dict)\n\n\ndef test_get_flexible_grouping_evaluation(df2):\n \"\"\"Tests get_flexible_grouping_evaluation function\"\"\"\n forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))\n # Checks residual quantiles, MSE/median squared error, and coverage\n map_func_dict = {\n \"residual\": ElementwiseEvaluationMetricEnum.Residual.name,\n \"squared_error\": ElementwiseEvaluationMetricEnum.SquaredError.name,\n \"coverage\": ElementwiseEvaluationMetricEnum.Coverage.name\n }\n agg_kwargs = {\n \"residual_mean\": pd.NamedAgg(column=\"residual\", aggfunc=np.nanmean),\n \"residual_q05\": pd.NamedAgg(column=\"residual\", aggfunc=partial(np.nanquantile, q=0.05)),\n \"residual_q95\": pd.NamedAgg(column=\"residual\", aggfunc=partial(np.nanquantile, q=0.95)),\n \"MSE\": pd.NamedAgg(column=\"squared_error\", aggfunc=np.nanmean),\n \"median_squared_error\": pd.NamedAgg(column=\"squared_error\", aggfunc=np.nanmedian),\n \"coverage\": pd.NamedAgg(column=\"coverage\", aggfunc=np.nanmean),\n }\n\n result = forecast.get_flexible_grouping_evaluation(\n which=\"train\",\n groupby_time_feature=\"dow\",\n groupby_sliding_window_size=None,\n groupby_custom_column=None,\n map_func_dict=map_func_dict,\n agg_kwargs=agg_kwargs,\n extend_col_names=False)\n expected = pd.DataFrame({\n # Only one value per group, so the mean/median/quantiles are the same\n \"residual_mean\": [0.0, -2.0, 0.0, 2.0, 2.0],\n \"residual_q05\": [0.0, -2.0, 0.0, 2.0, 2.0],\n \"residual_q95\": [0.0, -2.0, 0.0, 2.0, 2.0],\n \"MSE\": [0.0, 4.0, 0.0, 4.0, 4.0],\n \"median_squared_error\": [0.0, 4.0, 0.0, 4.0, 4.0],\n \"coverage\": [0.0, 1.0, 1.0, 0.0, 0.0],\n }, index=pd.Series([1, 2, 3, 4, 5], name=\"dow\"))\n assert_frame_equal(result, expected)\n\n # Equivalent way to specify `map_func_dict` (without autocomplete)\n map_func_dict = {\n \"residual\": lambda row: ElementwiseEvaluationMetricEnum.Residual.get_metric_func()(\n row[forecast.actual_col],\n row[forecast.predicted_col]),\n \"squared_error\": lambda row: ElementwiseEvaluationMetricEnum.SquaredError.get_metric_func()(\n row[forecast.actual_col],\n row[forecast.predicted_col]),\n \"coverage\": lambda row: ElementwiseEvaluationMetricEnum.Coverage.get_metric_func()(\n row[forecast.actual_col],\n row[forecast.predicted_lower_col],\n row[forecast.predicted_upper_col]),\n }\n result = forecast.get_flexible_grouping_evaluation(\n which=\"train\",\n groupby_time_feature=\"dow\",\n groupby_sliding_window_size=None,\n groupby_custom_column=None,\n map_func_dict=map_func_dict,\n agg_kwargs=agg_kwargs,\n extend_col_names=False)\n assert_frame_equal(result, expected)\n\n # Equivalent way to specify `map_func_dict` (without autocomplete)\n map_func_dict = {\n \"residual\": lambda row: row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL],\n \"squared_error\": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**2,\n \"coverage\": lambda row: 1.0 if row[cst.PREDICTED_LOWER_COL] < row[cst.ACTUAL_COL] < row[cst.PREDICTED_UPPER_COL] else 0.0\n }\n result = forecast.get_flexible_grouping_evaluation(\n which=\"train\",\n 
groupby_time_feature=\"dow\",\n groupby_sliding_window_size=None,\n groupby_custom_column=None,\n map_func_dict=map_func_dict,\n agg_kwargs=agg_kwargs,\n extend_col_names=False)\n assert_frame_equal(result, expected)\n\n # Groupby sliding window\n result = forecast.get_flexible_grouping_evaluation(\n which=\"train\",\n groupby_time_feature=None,\n groupby_sliding_window_size=3,\n groupby_custom_column=None,\n map_func_dict=map_func_dict,\n agg_kwargs=agg_kwargs,\n extend_col_names=False)\n expected = pd.DataFrame({\n \"residual_mean\": [-1.0, 4/3],\n \"residual_q05\": [-1.9, 0.2],\n \"residual_q95\": [-0.1, 2.0],\n \"MSE\": [2.0, 2.0 + 2/3],\n \"median_squared_error\": [2.0, 4.0],\n \"coverage\": [0.5, 1/3],\n }, index=pd.DatetimeIndex([\"2018-01-01\", \"2018-01-04\"], name=\"ts_downsample\"))\n assert_frame_equal(result, expected)\n\n # On test set with custom groupby column\n custom_groups = pd.Series([\"val1\"], name=\"value_group\").repeat(forecast.df_test.shape[0])\n result = forecast.get_flexible_grouping_evaluation(\n which=\"test\",\n groupby_time_feature=None,\n groupby_sliding_window_size=None,\n groupby_custom_column=custom_groups,\n map_func_dict=map_func_dict,\n agg_kwargs=agg_kwargs)\n\n colindex = pd.Index(\n [\"residual_mean\", \"residual_q05\", \"residual_q95\",\n \"MSE\", \"median_squared_error\", \"coverage\"])\n expected = pd.DataFrame(\n [[0.5, -0.85, 1.85, 2.5, 2.5, 0.5]],\n columns=colindex,\n index=pd.Series([\"val1\"], name=custom_groups.name))\n assert_frame_equal(result, expected)\n\n\ndef test_plot_flexible_grouping_evaluation():\n \"\"\"Tests plot_flexible_grouping_evaluation function\"\"\"\n df = gen_sliced_df(sample_size_dict={\"a\": 300, \"b\": 200, \"c\": 300, \"d\": 80, \"e\": 300})\n actual_col = \"y\"\n predicted_col = \"y_hat\"\n groupby_col = \"x\"\n groupby_col2 = \"z\"\n df = df[[actual_col, predicted_col, groupby_col, groupby_col2]]\n df[cst.TIME_COL] = pd.date_range(start=\"2020-01-01\", periods=df.shape[0], freq=\"D\")\n end_index = math.floor(df.shape[0] * 0.8)\n forecast = UnivariateForecast(\n df,\n train_end_date=df[cst.TIME_COL][end_index],\n time_col=cst.TIME_COL,\n actual_col=actual_col,\n predicted_col=predicted_col,\n predicted_lower_col=None,\n predicted_upper_col=None,\n null_model_predicted_col=None)\n\n # MSE and quantiles of squared error\n metric_col = \"squared_err\"\n map_func_dict = {metric_col: ElementwiseEvaluationMetricEnum.SquaredError.name}\n agg_kwargs = {f\"Q{quantile}\": pd.NamedAgg(column=metric_col, aggfunc=partial(np.nanquantile, q=quantile)) for quantile in [0.1, 0.9]}\n agg_kwargs.update({\"mean\": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean)})\n\n # group by \"dom\", \"auto-fill\" styling\n fig = forecast.plot_flexible_grouping_evaluation(\n which=\"train\",\n groupby_time_feature=\"dom\",\n groupby_sliding_window_size=None,\n groupby_custom_column=None,\n map_func_dict=map_func_dict,\n agg_kwargs=agg_kwargs,\n extend_col_names=False,\n y_col_style_dict=\"auto-fill\",\n default_color=\"rgba(0, 145, 202, 1.0)\",\n xlabel=None,\n ylabel=metric_col,\n title=None,\n showlegend=True)\n\n assert [fig.data[i].name for i in range(len(fig.data))] == [\"Q0.1\", \"mean\", \"Q0.9\"]\n assert fig.layout.xaxis.title.text == \"dom\"\n assert fig.layout.yaxis.title.text == metric_col\n assert fig.layout.title.text == f\"{metric_col} vs dom\"\n assert fig.data[0].x.shape[0] == 31 # 31 unique days in month\n assert fig.data[1].line[\"color\"] == \"rgba(0, 145, 202, 1.0)\"\n assert fig.data[1].fill == \"tonexty\" # from 
auto-fill\n assert fig.layout.showlegend\n\n # group by sliding window, \"auto\" styling\n # provide default color, xlabel, hide legend\n fig = forecast.plot_flexible_grouping_evaluation(\n which=\"train\",\n groupby_time_feature=None,\n groupby_sliding_window_size=7,\n groupby_custom_column=None,\n map_func_dict=map_func_dict,\n agg_kwargs=agg_kwargs,\n extend_col_names=False,\n y_col_style_dict=\"auto\",\n default_color=\"rgba(145, 0, 202, 1.0)\",\n xlabel=\"ts\",\n ylabel=None,\n title=None,\n showlegend=False)\n\n assert [fig.data[i].name for i in range(len(fig.data))] == [\"Q0.1\", \"mean\", \"Q0.9\"]\n assert fig.layout.xaxis.title.text == \"ts\"\n assert fig.layout.yaxis.title.text is None\n assert fig.layout.title.text is None\n assert fig.data[0].x[0] == datetime.datetime(2020, 1, 1, 0, 0)\n assert fig.data[1].line[\"color\"] == \"rgba(145, 0, 202, 1.0)\"\n assert fig.data[1].fill is None\n assert not fig.layout.showlegend\n\n # custom groups, \"plotly\" styling, provide ylabel, title\n fig = forecast.plot_flexible_grouping_evaluation(\n which=\"train\",\n groupby_time_feature=None,\n groupby_sliding_window_size=None,\n groupby_custom_column=forecast.df_train[\"x\"],\n map_func_dict=map_func_dict,\n agg_kwargs=agg_kwargs,\n extend_col_names=False,\n y_col_style_dict=\"plotly\",\n default_color=None,\n xlabel=None,\n ylabel=metric_col,\n title=\"custom title\",\n showlegend=True)\n\n assert [fig.data[i].name for i in range(len(fig.data))] == [\"Q0.1\", \"Q0.9\", \"mean\"] # not sorted\n assert fig.layout.xaxis.title.text == \"x\"\n assert fig.layout.yaxis.title.text == metric_col\n assert fig.layout.title.text == \"custom title\"\n assert list(fig.data[0].x) == list(\"abcde\")\n assert fig.data[0].line[\"color\"] is None # color is up to plotly\n assert fig.data[1].fill is None\n assert fig.layout.showlegend\n\n # test set, absolute percent error, custom `y_col_style_dict` styling\n metric_col = \"squared_error\"\n map_func_dict = {\n metric_col: ElementwiseEvaluationMetricEnum.AbsolutePercentError.name\n }\n agg_kwargs = {\n \"median\": pd.NamedAgg(column=metric_col, aggfunc=np.nanmedian),\n \"mean\": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean),\n }\n y_col_style_dict = {\n \"median\": {\n \"mode\": \"lines+markers\",\n \"line\": {\n \"color\": \"rgba(202, 145, 0, 0.5)\"\n }\n },\n \"mean\": {\n \"mode\": \"lines+markers\",\n \"line\": {\n \"color\": \"rgba(0, 145, 202, 1.0)\"\n }\n },\n }\n with pytest.warns(UserWarning, match=\"true_val is less than 1e-8\"):\n fig = forecast.plot_flexible_grouping_evaluation(\n which=\"test\",\n groupby_time_feature=\"dow\",\n groupby_sliding_window_size=None,\n groupby_custom_column=None,\n map_func_dict=map_func_dict,\n agg_kwargs=agg_kwargs,\n extend_col_names=False,\n y_col_style_dict=y_col_style_dict,\n xlabel=\"x value\",\n ylabel=\"y value\",\n title=\"error plot\",\n showlegend=True)\n assert [fig.data[i].name for i in range(len(fig.data))] == [\"median\", \"mean\"] # not sorted\n assert fig.layout.xaxis.title.text == \"x value\"\n assert fig.layout.yaxis.title.text == \"y value\"\n assert fig.layout.title.text == \"error plot\"\n assert len(fig.data[0].x) == 7\n assert fig.data[0].mode == \"lines+markers\"\n assert fig.data[1].mode == \"lines+markers\"\n assert fig.data[0].line[\"color\"] == y_col_style_dict[\"median\"][\"line\"][\"color\"]\n assert fig.data[1].line[\"color\"] == y_col_style_dict[\"mean\"][\"line\"][\"color\"]\n assert fig.data[1].fill is None\n assert fig.layout.showlegend\n\n # median actual vs forecast value 
by group\n agg_kwargs = {\n \"y_median\": pd.NamedAgg(column=\"y\", aggfunc=np.nanmedian),\n \"y_hat_median\": pd.NamedAgg(column=\"y_hat\", aggfunc=np.nanmedian),\n }\n fig = forecast.plot_flexible_grouping_evaluation(\n which=\"train\",\n groupby_time_feature=\"dow\",\n groupby_sliding_window_size=None,\n groupby_custom_column=None,\n map_func_dict=None,\n agg_kwargs=agg_kwargs,\n extend_col_names=True,\n y_col_style_dict=\"plotly\",\n xlabel=None,\n ylabel=forecast.ylabel,\n title=\"true vs actual by dow\",\n showlegend=True)\n assert [fig.data[i].name for i in range(len(fig.data))] == [\"y_median\", \"y_hat_median\"]\n assert fig.layout.xaxis.title.text == \"dow\"\n assert fig.layout.yaxis.title.text == \"y\"\n assert fig.layout.title.text == \"true vs actual by dow\"\n assert len(fig.data[0].x) == 7\n assert fig.layout.showlegend\n\n\ndef test_make_univariate_time_series(df):\n \"\"\"Tests make_univariate_time_series function\"\"\"\n forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))\n ts = UnivariateTimeSeries()\n ts.load_data(pd.DataFrame({\n cst.TIME_COL: df[cst.TIME_COL],\n cst.VALUE_COL: df[cst.PREDICTED_COL]\n }), cst.TIME_COL, cst.VALUE_COL)\n assert forecast.make_univariate_time_series().df.equals(ts.df)\n\n\ndef test_plot_components():\n \"\"\"Test plot_components of UnivariateForecast class\"\"\"\n X = pd.DataFrame({\n cst.TIME_COL: pd.date_range(\"2018-01-01\", periods=10, freq=\"D\"),\n cst.VALUE_COL: np.arange(1, 11)\n })\n coverage = 0.95\n\n # Test Silverkite\n trained_model = Pipeline([(\"estimator\", SilverkiteEstimator(coverage=coverage))])\n with pytest.warns(Warning) as record:\n trained_model.fit(X, X[cst.VALUE_COL])\n assert \"No slice had sufficient sample size\" in record[0].message.args[0]\n forecast = get_forecast(X, trained_model)\n\n with pytest.warns(Warning) as record:\n title = \"Custom component plot\"\n fig = forecast.plot_components(names=[\"trend\", \"YEARLY_SEASONALITY\", \"DUMMY\"], title=title)\n\n expected_rows = 3\n assert len(fig.data) == expected_rows\n assert [fig.data[i].name for i in range(expected_rows)] == \\\n [cst.VALUE_COL, \"trend\", \"YEARLY_SEASONALITY\"]\n\n assert fig.layout.xaxis.title[\"text\"] == cst.TIME_COL\n assert fig.layout.xaxis2.title[\"text\"] == cst.TIME_COL\n assert fig.layout.xaxis3.title[\"text\"] == \"Time of year\"\n\n assert fig.layout.yaxis.title[\"text\"] == cst.VALUE_COL\n assert fig.layout.yaxis2.title[\"text\"] == \"trend\"\n assert fig.layout.yaxis3.title[\"text\"] == \"yearly\"\n\n assert fig.layout.title[\"text\"] == title\n\n assert f\"The following components have not been specified in the model: \" \\\n f\"{{'DUMMY'}}, plotting the rest.\" in record[0].message.args[0]\n\n\[email protected](\"fbprophet\" not in sys.modules,\n reason=\"Module 'fbprophet' not installed, pytest for 'ProphetTemplate' skipped.\")\ndef test_plot_components_prophet():\n X = pd.DataFrame({\n cst.TIME_COL: pd.date_range(\"2018-01-01\", periods=10, freq=\"D\"),\n cst.VALUE_COL: np.arange(1, 11)\n })\n coverage = 0.95\n\n # Test Prophet\n trained_model = Pipeline([(\"estimator\", ProphetEstimator(coverage=coverage))])\n trained_model.fit(X, X[cst.VALUE_COL])\n forecast = get_forecast(X, trained_model)\n fig = forecast.plot_components()\n assert fig is not None\n" ]
[ [ "pandas.Series", "numpy.arange", "pandas.util.testing.assert_series_equal", "pandas.DataFrame", "pandas.Index", "pandas.util.testing.assert_frame_equal", "pandas.DatetimeIndex", "pandas.date_range", "pandas.NamedAgg", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "1.5", "1.4" ], "scipy": [], "tensorflow": [] } ]
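The test record above leans heavily on pandas named aggregation (`pd.NamedAgg`) for its grouped-evaluation checks. A minimal, self-contained sketch of that pattern — toy data and illustrative column names, not taken from the record:

import numpy as np
import pandas as pd

# Toy frame standing in for per-row evaluation metrics.
df = pd.DataFrame({
    "group": ["a", "a", "b", "b"],
    "residual": [0.0, -2.0, 2.0, 2.0],
})

# pd.NamedAgg pairs a source column with an aggregation function;
# the keyword name becomes the output column name.
result = df.groupby("group").agg(
    residual_mean=pd.NamedAgg(column="residual", aggfunc=np.nanmean),
    residual_q95=pd.NamedAgg(column="residual", aggfunc=lambda s: np.nanquantile(s, q=0.95)),
)
print(result)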
icyray/proGENTRL
[ "c48305c3411ecb604c4f26f5e6b62f285e42e696" ]
[ "progentrl/gen_rl.py" ]
[ "import os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom math import pi, log\nfrom .lp import LP\nfrom .utils import save, load\nimport joblib\nfrom collections import OrderedDict\n\nfrom moses.metrics.utils import get_mol\nimport pytorch_lightning as pl\n\nclass oneDataSet(Dataset):\n def __init__(self):\n self.one_elem = [1]\n \n def __len__(self):\n return len(self.one_elem)\n \n def __getitem__(self, idx):\n return self.one_elem[0]\n\n\nclass GENTRL_RL(pl.LightningModule):\n '''\n GENTRL model\n '''\n def __init__(self,\n reward_fn,\n enc,\n dec,\n latent_descr,\n feature_descr,\n rl_batch_size = 200,\n tt_int=40,\n tt_type='usual', \n beta=0.01, \n gamma=0.1,\n load_model=None\n ):\n super().__init__()\n \n self.reward_fn = reward_fn\n self.rl_batch_size = rl_batch_size\n \n self.num_latent = len(latent_descr)\n self.num_features = len(feature_descr)\n\n self.latent_descr = latent_descr\n self.feature_descr = feature_descr\n\n self.tt_int = tt_int\n self.tt_type = tt_type\n\n self.enc = enc\n self.dec = dec\n \n self.beta = beta\n self.gamma = gamma\n \n self.lp = LP(distr_descr=self.latent_descr + self.feature_descr,\n tt_int=self.tt_int, tt_type=self.tt_type)\n \n if load_model is not None:\n self = load(self, load_model)\n \n \n def forward(self, num_samples):\n z = self.lp.sample(num_samples, 50 * ['s'] + ['m'])\n smiles = self.dec.sample(50, z, argmax=False)\n return smiles\n \n \n def training_step(self, batch, batch_idx):\n exploit_size = int(self.rl_batch_size * (1 - 0.3))\n exploit_z = self.lp.sample(exploit_size, 50 * ['s'] + ['m'])\n\n z_means = exploit_z.mean(dim=0)\n z_stds = exploit_z.std(dim=0)\n\n expl_size = int(self.rl_batch_size * 0.3)\n expl_z = torch.randn(expl_size, exploit_z.shape[1]).to(self.device)\n expl_z = 2 * expl_z * z_stds[None, :]\n expl_z += z_means[None, :]\n\n z = torch.cat([exploit_z, expl_z])\n smiles = self.dec.sample(50, z, argmax=False)\n zc = torch.zeros(z.shape[0], 1).to(z.device)\n conc_zy = torch.cat([z, zc], dim=1)\n log_probs = self.lp.log_prob(conc_zy, marg=50 * [False] + [True])\n log_probs += self.dec(smiles, z)\n r_list = [self.reward_fn(s) for s in smiles]\n\n rewards = torch.tensor(r_list).float().to(exploit_z.device)\n rewards_bl = rewards - rewards.mean()\n loss = -(log_probs * rewards_bl).mean()\n\n valid_sm = [s for s in smiles if get_mol(s) is not None]\n cur_stats = {\n 'mean_reward': torch.tensor(sum(r_list) / len(smiles)),\n 'valid_perc': torch.tensor(len(valid_sm) / len(smiles))\n }\n \n output_dict = OrderedDict({\n 'loss': loss,\n 'log': cur_stats,\n 'progress_bar': cur_stats\n })\n \n return output_dict\n \n def configure_optimizers(self):\n lr_lp=1e-5\n lr_dec=1e-6\n \n optimizer = optim.Adam([\n {'params': self.lp.parameters()},\n {'params': self.dec.latent_fc.parameters(), 'lr': lr_dec}\n ], lr=lr_lp)\n# scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.1)\n return [optimizer]#, [scheduler]\n \n def train_dataloader(self):\n oneElementDataSet = oneDataSet()\n oneElementDataLoader = DataLoader(oneElementDataSet, batch_size=1)\n return oneElementDataLoader" ]
[ [ "torch.zeros", "torch.cat", "torch.randn", "torch.utils.data.DataLoader", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
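`oneDataSet` in the record above is a single-element Dataset: it makes the Lightning trainer call `training_step` exactly once per epoch, while the batch content is ignored and data is sampled inside the step. The bare pattern in plain PyTorch — all names below are illustrative, and none of the GENTRL internals are assumed:

import torch
from torch.utils.data import Dataset, DataLoader

class OneItemDataset(Dataset):
    # A single dummy item: the loop structure comes from the trainer, not the data.
    def __len__(self):
        return 1

    def __getitem__(self, idx):
        return torch.tensor(1)

loader = DataLoader(OneItemDataset(), batch_size=1)
for epoch in range(3):      # stands in for trainer epochs
    for _batch in loader:   # exactly one (ignored) batch per epoch
        # a real training_step would sample latents, decode molecules,
        # and compute a REINFORCE-style loss here
        pass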
migvanderlei/dataset-parser
[ "b6febccbdc829737e50640d980b2034d2c54c95a" ]
[ "src/collector.py" ]
[ "import os\nimport json\nimport logging\nfrom pandas import json_normalize\nfrom src.configurable import Configurable\nfrom datetime import datetime\nfrom glob import glob\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nclass Collector(Configurable):\n\n    def __init__(self, input_path=None, output_file=\"collected-{}.csv\", config_file=None, output_base_dir=\"./generated\"):\n        Configurable.__init__(self, config_file)\n        logging.basicConfig(format=\"%(asctime)s: [%(levelname)s] %(message)s\", level=logging.INFO,\n                            datefmt=\"%H:%M:%S\", filename=self.get_log_file('collector'))\n\n        if input_path is None:\n            self.input_path = self.config.get(\"outputPath\")\n        else:\n            self.input_path = input_path\n        self.output_file = output_file.format(self.get_timestamp())\n        max_threads = self.config.get(\"maxThreadCount\")\n        self.max_threads = max_threads if max_threads is not None else 50\n        self.output_file_path = self.get_output_file_path(output_base_dir, self.output_file)\n        self.csv_headers = []\n        self.create_csv_headers_from_keys()\n\n    def create_csv_headers_from_keys(self, template=None, parent_keys=\"\"):\n        if template is None:\n            template = self.config[\"template\"]\n\n        for key in template.keys():\n            if type(template[key]) is dict:\n                self.create_csv_headers_from_keys(\n                    template[key], parent_keys+key+\"_\"\n                )\n            else:\n                self.csv_headers.append(parent_keys+key)\n\n    def get_output_file_path(self, base_dir, output_file):\n        if base_dir.startswith(os.sep):\n            file_name = os.path.join(base_dir, output_file)\n        else:\n            dir_name = os.path.dirname(__file__)\n            base_dir = os.path.join(dir_name, \"..\", base_dir)\n            file_name = os.path.join(base_dir, output_file)\n            file_name = os.path.abspath(file_name)\n        if not os.path.exists(base_dir):\n            try:\n                os.makedirs(base_dir)\n            except FileExistsError:\n                pass\n        return file_name\n\n    def get_timestamp(self):\n        return datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n\n    def get_files_to_collect(self):\n        if \"*\" in self.input_path:\n            files = glob(self.input_path)\n        else:\n            files = glob(self.input_path + \"/*/*/*.json\")\n\n        if len(files) > 0:\n            return files\n        else:\n            message = \"No files found in {}\".format(self.input_path)\n            logging.error(message)\n            raise Exception(message)\n\n    def collect_file(self, file_path):\n        with open(file_path, \"r\") as f:\n            data = json.load(f)\n        return data\n\n    def save_collected_json(self, data):\n        json_file_name = self.output_file_path.replace(\".csv\", \".json\")\n        with open(json_file_name, \"w+\") as f:\n            f.write(json.dumps(data))\n        logging.info(\"JSON file created at \\\"{}\\\".\".format(json_file_name))\n\n    def save_json_to_csv(self, data):\n        dataframe = json_normalize(data)\n        dataframe.to_csv(self.output_file_path, index=False)\n        logging.info(\"CSV file created at \\\"{}\\\".\".format(self.output_file_path))\n\n\n    def collect(self):\n        files_to_collect = self.get_files_to_collect()\n\n        logging.info(\"Found %d files to process.\" % len(files_to_collect))\n\n        # Thread pool size comes from configuration (self.max_threads), matching the log line below\n        with ThreadPoolExecutor(max_workers=self.max_threads) as executor:\n            logging.info(\"Starting collection process with {} parallel threads.\".format(self.max_threads))\n\n            start_time = datetime.now()\n            futures = []\n            collected_lines = []\n            for file_path in files_to_collect:\n                futures.append(executor.submit(self.collect_file, file_path))\n            for future in as_completed(futures):\n                collected_lines.append(future.result())\n\n            self.save_collected_json(collected_lines)\n            self.save_json_to_csv(collected_lines)\n\n            elapsed_time = datetime.now() - start_time\n            logging.info(\"Collection process finished in {}.\".format(elapsed_time))\n" ]
[ [ "pandas.json_normalize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0" ], "scipy": [], "tensorflow": [] } ]
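The fan-out/gather idiom at the core of `Collector.collect()` above, isolated with a stand-in worker (the file names and thread count below are illustrative):

from concurrent.futures import ThreadPoolExecutor, as_completed

def collect_file(path):
    # stand-in for opening and JSON-parsing one file
    return {"path": path}

paths = ["a.json", "b.json", "c.json"]
max_threads = 8  # would come from configuration, as in the record

with ThreadPoolExecutor(max_workers=max_threads) as executor:
    futures = [executor.submit(collect_file, p) for p in paths]
    # as_completed yields futures in completion order, not submission order
    results = [f.result() for f in as_completed(futures)]
print(results)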
pelegrichman/IML.HUJI
[ "b8c158b9a6e332313a8a69cbbfd42ed8aede2858" ]
[ "IMLearn/learners/classifiers/perceptron.py" ]
[ "from __future__ import annotations\nfrom typing import Callable, List\nfrom typing import NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\n\nfrom ...metrics import misclassification_error\n\n\ndef default_callback(fit: Perceptron, x: np.ndarray, y: int):\n    pass\n\n\nclass Perceptron(BaseEstimator):\n    \"\"\"\n    Perceptron half-space classifier\n\n    Finds a separating hyperplane for given linearly separable data.\n\n    Attributes\n    ----------\n    include_intercept: bool, default = True\n        Should fitted model include an intercept or not\n\n    max_iter_: int, default = 1000\n        Maximum number of passes over training data\n\n    coefs_: ndarray of shape (n_features,) or (n_features+1,)\n        Coefficients vector fitted by Perceptron algorithm. To be set in\n        `Perceptron.fit` function.\n\n    training_loss_: array of floats\n        holds the loss value of the algorithm during training.\n        training_loss_[i] is the loss value of the i'th training iteration.\n        to be filled in `Perceptron.fit` function.\n\n    \"\"\"\n\n    def __init__(self,\n                 include_intercept: bool = True,\n                 max_iter: int = 1000,\n                 callback: Callable[[Perceptron, np.ndarray, int], None] = default_callback):\n        \"\"\"\n        Instantiate a Perceptron classifier\n\n        Parameters\n        ----------\n        include_intercept: bool, default=True\n            Should fitted model include an intercept or not\n\n        max_iter: int, default = 1000\n            Maximum number of passes over training data\n\n        callback: Callable[[Perceptron, np.ndarray, int], None]\n            A callable to be called after each update of the model while fitting to given data\n            Callable function should receive as input a Perceptron instance, current sample and current response\n\n        Attributes\n        ----------\n        include_intercept_: bool\n            Should fitted model include an intercept or not\n\n        max_iter_: int, default = 1000\n            Maximum number of passes over training data\n\n        callback_: Callable[[Perceptron, np.ndarray, int], None]\n            A callable to be called after each update of the model while fitting to given data\n            Callable function should receive as input a Perceptron instance, current sample and current response\n\n        coefs_: ndarray of shape (n_features,) or (n_features+1,)\n            Coefficients vector fitted by Perceptron. To be set in `Perceptron.fit` function.\n        \"\"\"\n        super().__init__()\n        self.include_intercept_ = include_intercept\n        self.max_iter_ = max_iter\n        self.callback_ = callback\n        self.coefs_ = None\n\n    def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n        \"\"\"\n        Fit a halfspace to given samples. Iterate over given data as long as there exists a misclassified sample\n        and `self.max_iter_` has not been reached\n\n        Parameters\n        ----------\n        X : ndarray of shape (n_samples, n_features)\n            Input data to fit an estimator for\n\n        y : ndarray of shape (n_samples, )\n            Responses of input data to fit to\n\n        Notes\n        -----\n        Fits model with or without an intercept depending on value of `self.include_intercept_`\n        \"\"\"\n\n        if self.include_intercept_:\n            X = np.c_[np.ones(len(X)), X]\n\n        # Init weights, training loss\n        self.coefs_: np.ndarray = np.zeros(X.shape[1])\n        self.training_loss_: List[float] = []\n\n        # Iterate until max_iter is reached\n        for i in range(self.max_iter_):\n\n            # Check for misclassified sample.\n            misclassified_exist: bool = False\n            for sample, label in zip(X, y):\n\n                label_pred = np.dot(self.coefs_, sample)\n                if label * label_pred <= 0:\n                    misclassified_exist = True\n                    self.coefs_ += label * sample\n                    self.fitted_ = True\n\n                    # Update loss of current iter\n                    self.callback_(self, sample, label)\n                    break\n\n            # If no misclassification remains, end iteration.\n            if not misclassified_exist:\n                break\n\n    def _predict(self, X: np.ndarray) -> np.ndarray:\n        \"\"\"\n        Predict responses for given samples using fitted estimator\n\n        Parameters\n        ----------\n        X : ndarray of shape (n_samples, n_features)\n            Input data to predict responses for\n\n        Returns\n        -------\n        responses : ndarray of shape (n_samples, )\n            Predicted responses of given samples\n        \"\"\"\n        if self.include_intercept_:\n            X = np.c_[np.ones(len(X)), X]\n\n        # Return class labels (+1/-1) rather than raw scores\n        return np.sign(X @ self.coefs_)\n\n    def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n        \"\"\"\n        Evaluate performance under misclassification loss function\n\n        Parameters\n        ----------\n        X : ndarray of shape (n_samples, n_features)\n            Test samples\n\n        y : ndarray of shape (n_samples, )\n            True labels of test samples\n\n        Returns\n        -------\n        loss : float\n            Performance under misclassification loss function\n        \"\"\"\n        # Compare true labels against predicted labels (_predict handles the intercept)\n        return misclassification_error(y, self._predict(X))\n" ]
[ [ "numpy.dot", "numpy.sign", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
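The inner loop of `Perceptron._fit` above implements the classic update rule: whenever y·⟨w, x⟩ ≤ 0 for some sample, set w ← w + y·x and start the next pass. A tiny NumPy illustration on separable toy data with a prepended intercept column (values invented for the example):

import numpy as np

X = np.array([[1.0, -2.0], [1.0, -1.0], [1.0, 1.0], [1.0, 2.0]])  # intercept column prepended
y = np.array([-1.0, -1.0, 1.0, 1.0])

w = np.zeros(X.shape[1])
for _ in range(1000):  # max_iter guard
    miss = [(x_i, y_i) for x_i, y_i in zip(X, y) if y_i * (w @ x_i) <= 0]
    if not miss:
        break
    x_i, y_i = miss[0]  # first misclassified sample
    w += y_i * x_i      # perceptron update

print(w, np.sign(X @ w))  # predictions match y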
vivekparasharr/Social-Text-Web-Data-Analysis
[ "9c19edc2a2917454b558ee7e4464e0c41418b6cd" ]
[ "Twitter-Data-Analysis/twitter_streaming.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 28 12:25:54 2017\n\n@author: vivekparashar\n\"\"\"\n\n#Import the necessary methods from tweepy library\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\n\n#Variables that contains the user credentials to access Twitter API\naccess_token = \"----\"\naccess_token_secret = \"----\"\nconsumer_key = \"----\"\nconsumer_secret = \"----\"\n\n#import csv\n\n#This is a basic listener that just prints received tweets to stdout.\nclass StdOutListener(StreamListener):\n\n    def on_data(self, data):\n        if 'text' in data:\n            file = open('/Users/vivekparashar/Documents/python dsp jupyter notebook/trump_data2.txt', 'a')\n            file.write(data)\n            file.close()\n        #with open('/Users/vivekparashar/Documents/python dsp jupyter notebook/trump_data2.csv', 'w') as csvfile:\n        #    tweetwriter = csv.writer(csvfile)\n        #    tweetwriter.writerow([data])\n        #print (data)\n        return True\n\n    def on_error(self, status):\n        #print (status)\n        return True\n\n\nif __name__ == '__main__':\n\n    #This handles Twitter authetification and the connection to Twitter Streaming API\n    l = StdOutListener()\n    auth = OAuthHandler(consumer_key, consumer_secret)\n    auth.set_access_token(access_token, access_token_secret)\n    stream = Stream(auth, l)\n\n    #This line filter Twitter Streams to capture data by the keywords: 'python', 'javascript', 'ruby'\n    #stream.filter(track=['python', 'javascript', 'ruby'])\n\n    #This line filter Twitter Streams to capture data by the keywords: 'MonacoGP'\n    stream.filter(track=['trump'])\n\n\n\n\nimport json\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ntweets_data_path = '/Users/vivekparashar/Documents/python dsp jupyter notebook/trump_data2.txt'\n\ntweets_data = []\ntweets_file = open(tweets_data_path, \"r\")\nfor line in tweets_file:\n    try:\n        tweet = json.loads(line)\n        tweets_data.append(tweet)\n    except:\n        continue\n\n\n#print the number of tweets\nprint (len(tweets_data))\n\nimport numpy as np\nfor i in np.arange(len(tweets_data)):\n    print(tweets_data[i]['text'])\n\n#structure the tweets data into a pandas DataFrame\n#start by creating an empty DataFrame called tweets\ntweets = pd.DataFrame()\ntweets.loc[0:1,:] # print out first 2 tweets\ntweets.loc[0:1,'text'] # print out first 2 tweets, but only text\n\n\n#add 3 columns to the tweets DataFrame called text, lang, and country. text column contains the tweet\ntweets['text'] = list(map(lambda tweet: tweet.get('text', ''), tweets_data))\ntweets['text'] = list(map(lambda tweet: tweet['text'], tweets_data))\ntweets['lang'] = list(map(lambda tweet: tweet['lang'], tweets_data))\n#tweets['country'] = list(map(lambda tweet: tweet['place']['country'] if tweet['place'] != None else None, tweets_data))\ntweets['retweet_count'] = list(map(lambda tweet: tweet['retweet_count'], tweets_data))\ntweets['type'] = list(map(lambda tweet: tweet['type'], tweets_data))\n\n\n\n\n\"\"\"\nHow about:\n\ntweets['text'] = map(lambda tweet: tweet.get('text', ''), tweets_data)\n\nDictionary method .get() by default return None in case of missing key, but it might be any object. 
\nIn this case tweet['text'] is possibly a string, so it might be a good idea to put empty for missing \none.\n\"\"\"\n\n\n#create 2 charts: \n#The first one describing the Top 5 languages in which the tweets were written\ntweets_by_lang = tweets['lang'].value_counts()\n\nfig, ax = plt.subplots()\nax.tick_params(axis='x', labelsize=15)\nax.tick_params(axis='y', labelsize=10)\nax.set_xlabel('Languages', fontsize=15)\nax.set_ylabel('Number of tweets' , fontsize=15)\nax.set_title('Top 5 languages', fontsize=15, fontweight='bold')\ntweets_by_lang[:5].plot(ax=ax, kind='bar', color='red')\n\n\n#The second the Top 5 countries from which the tweets were sent\ntweets_by_country = tweets['country'].value_counts()\n\nfig, ax = plt.subplots()\nax.tick_params(axis='x', labelsize=15)\nax.tick_params(axis='y', labelsize=10)\nax.set_xlabel('Countries', fontsize=15)\nax.set_ylabel('Number of tweets' , fontsize=15)\nax.set_title('Top 5 countries', fontsize=15, fontweight='bold')\ntweets_by_country[:5].plot(ax=ax, kind='bar', color='blue')\n\n\n#new vs retweet\n#The second the Top 5 countries from which the tweets were sent\nretweet_count = tweets['type'].value_counts()\n\nfig, ax = plt.subplots()\nax.tick_params(axis='x', labelsize=15)\nax.tick_params(axis='y', labelsize=10)\nax.set_xlabel('Tweet type', fontsize=15)\nax.set_ylabel('Number of tweets' , fontsize=15)\nax.set_title('tweet type - photo, video, etc.', fontsize=15, fontweight='bold')\nretweet_count[:3].plot(ax=ax, kind='bar', color='blue')\n\n\n\n'''\nYou usually run across the KeyError when Python cannot find a specified key. This is often the case with JSON generated by the Twitter API that certain fields/keys will not be present for some tweets.\n\nInstead of :\n\ntweets['text'] = map(lambda tweet: tweet['text'], tweets_data)\n\nReplace this with:\n\ntweets['text'] = map(lambda tweet: tweet.get('text', None),tweets_data)\n\nSimilarly, say you are looking for a key that is nested two or more levels deep, you can chain multiple .get() functions like below. 
\ntweets['child'] = map(lambda tweet: tweet.get('grandparent', {}).get('parent', {}).get('child') , tweets_data)\n\nA more specific example: \ntweets['user'] = map(lambda tweet: tweet.get('user', {}).get('name'),tweets_data)\n'''\n\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 3 18:43:16 2017\n\n@author: vivekparashar\n\"\"\"\n\n#Import the necessary methods from tweepy library\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\n\n#Variables that contains the user credentials to access Twitter API \naccess_token = \"----\"\naccess_token_secret = \"----\"\nconsumer_key = \"----\"\nconsumer_secret = \"----\"\n\n\n\n#This is a basic listener that just prints received tweets to stdout.\nclass StdOutListener(StreamListener):\n\n def on_data(self, data):\n print (data)\n return True\n\n def on_error(self, status):\n print (status)\n\n\nif __name__ == '__main__':\n\n #This handles Twitter authetification and the connection to Twitter Streaming API\n l = StdOutListener()\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n stream = Stream(auth, l)\n\n #This line filter Twitter Streams to capture data by the keywords: 'python', 'javascript', 'ruby'\n stream.filter(track=['trump'])\n \nimport json\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ntweets_data_path = '../twitter_data.txt'\n\ntweets_data = []\ntweets_file = open(tweets_data_path, \"r\")\nfor line in tweets_file:\n try:\n tweet = json.loads(line)\n tweets_data.append(tweet)\n except:\n continue\n \n " ]
[ [ "matplotlib.pyplot.subplots", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
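The block comments closing the script above recommend chained `dict.get()` lookups for optional tweet fields — note that the earlier `tweet['type']` access assumes a field the Twitter payload may not carry, and `tweets['country']` is plotted although the line creating that column is commented out. The recommended pattern, demonstrated on a hypothetical payload:

tweet = {"text": "hello", "user": {"name": "someone"}}  # 'place' is absent

# Direct indexing raises: tweet["place"]["country"] -> KeyError
# Chained .get() with dict defaults degrades to None instead:
country = tweet.get("place", {}).get("country")
name = tweet.get("user", {}).get("name")
print(country, name)  # None someone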
stevezhangz/ournn
[ "a5d8383971e9a921c38380507f1abbd93a88ca88" ]
[ "losses.py" ]
[ "import numpy as np\nfrom ournn.tools.matrix_tools import dot_mul2d\nimport math\n\n\n\nclass MSE:\n def __init__(self):\n pass\n def loss(self,x,y,delta=1e-3):\n if isinstance(x,int) or isinstance(x,float):\n if isinstance(y, int) or isinstance(y, float):\n return (x-y)*(x-y)\n assert x.shape==y.shape\n add_=0\n self.err = np.square((x - y))\n return self.err\n\n def __call__(self,x,y):\n self.x=x\n self.y=y\n return self.loss(x,y)\n\n def backward(self):\n return 2*(self.x-self.y)\n\n\"\"\"\nCross entropy\n\"\"\"\nclass sparse_logit_cross_entropy:\n def __init__(self):\n pass\n def loss(self,x,y):\n if isinstance(x,int) or isinstance(x,float):\n if isinstance(y, int) or isinstance(y, float):\n return -y*np.log(x)\n x=x.reshape(y.shape)\n assert x.shape==y.shape\n out=-np.log(x)*y\n return out\n\n def __call__(self, x,y):\n self.x=x\n self.y=y\n return self.loss(x,y)\n def backward(self):\n if isinstance(self.x,int) or isinstance(self.x,float):\n if isinstance(self.y, int) or isinstance(self.y, float):\n return self.y/(self.x)\n self.x=self.x.reshape(self.y.shape)\n cross_entropy=[]\n assert self.x.shape==self.y.shape\n out=-(1/(self.x))*self.y\n return out\n\n\n\"\"\"\nThe predicted values were processed by softmax and then calculated by cross entropy\nIn another word the last layer of the Model dont have to use the softmax act function\n\"\"\"\nclass sparse_softmax_cross_entropy:\n def __init__(self):\n pass\n def loss(self,x,y,logit=sparse_logit_cross_entropy(),down_delta=1e-3,upsume=1e5):\n self.x=x\n self.y=y\n if isinstance(x,int) or isinstance(x,float):\n raise FileExistsError\n assert x.shape==y.shape\n out=[]\n x+=1e-5\n for i in range(x.shape[0]):\n line_sotmax=[]\n line_sotmax.append((x[i,:]/(np.sum(x[i,:]))))\n out.append(line_sotmax)\n out=np.squeeze(np.array(out))\n cross_entropy_out=logit(out,y)\n self.logit=logit\n self.softout=out\n return cross_entropy_out\n def __call__(self,x,y):\n return self.loss(x,y)\n\n def backward(self):\n logit_back=self.logit.backward()\n exp_x_n=1/(np.exp(-(self.x))+1e-5)\n bac=self.softout*(-1+self.softout/exp_x_n)*logit_back\n return bac" ]
[ [ "numpy.square", "numpy.log", "numpy.exp", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
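One caveat on the record above: `sparse_softmax_cross_entropy.loss` normalizes each row by its plain sum (`x / sum(x)`) without exponentiating first, so it computes a sum-normalization rather than a softmax and misbehaves on negative inputs. For comparison, a standard numerically stable softmax cross-entropy in NumPy (a sketch, not the library's definition):

import numpy as np

def softmax_cross_entropy(logits, one_hot):
    # subtract the row-wise max before exponentiating for numerical stability
    shifted = logits - logits.max(axis=1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    return -(one_hot * np.log(probs + 1e-12)).sum(axis=1)

logits = np.array([[2.0, 1.0, -1.0]])
one_hot = np.array([[1.0, 0.0, 0.0]])
print(softmax_cross_entropy(logits, one_hot))  # ~[0.349]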
proy3/Abnormal_Trajectory_Classifier
[ "a6b27c6847262e9703a0f3404c85c135415c1d4c", "a6b27c6847262e9703a0f3404c85c135415c1d4c", "a6b27c6847262e9703a0f3404c85c135415c1d4c" ]
[ "ALREC_Method/stmarc/train_new_method_v4_for_atd.py", "ALREC_Method/sherbrooke/train_deep_aae_model_for_atd.py", "DAE_Method/sherbrooke/train_deep_abnormal_traj_detect_model.py" ]
[ "\"\"\"\nTrain Abnormal trajectory detection with deep autoencoder.\n\"\"\"\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport ae_utilities as aeu\nimport input_data as data\nimport abnormal_data_generation as adg\nimport dataset_defines as dd\nimport numpy as np\nimport os\n\n\nabspath = os.path.abspath(__file__)\ndir_name = os.path.dirname(abspath)\ndataset_name = dir_name[dir_name.rfind('/')+1:] + '_gt_data.csv'\ndataset_file_path = os.path.join(dir_name + '/data', dataset_name)\nabnormal_name = dir_name[dir_name.rfind('/')+1:] + '_gt_real_abnormal_2.csv'\nabnormal_file_path = os.path.join(dir_name + '/data', abnormal_name)\n\n# Extract trajectories and export data to array\ndataset = np.genfromtxt(dataset_file_path, delimiter=',')\n\n# Ignore first column representing object_id\ndataset = dataset[:,1:]\n\n# Generate abnormal data\nabnormal_data = np.genfromtxt(abnormal_file_path, delimiter=',')\nabnormal_data = abnormal_data[:,1:]\n\n# Best layer type tested in main_test.py\n# ref.: https://deeplearning4j.org/deepautoencoder\nbest_layer_type = (128,64,32,16,8)\n\n# Files setup\ntest_score_filename = 'results/new_method_v4_5/test_scores.csv'\nsummary_results_filename = test_score_filename[:test_score_filename.rfind('/')] + '/summary_results.csv'\nglobal_summary_filename = test_score_filename[:test_score_filename.rfind('/')] + '/global_summary.log'\nmodel_files_dir_name = 'model/new_method_v4_5/'\n\ndata.make_dir_if_new(test_score_filename)\ndata.make_dir_if_new(model_files_dir_name)\n\nn_acc_list = []\nv_acc_list = []\nt_acc_list = []\nae_n_acc_list = []\nae_v_acc_list = []\nae_t_acc_list = []\n\nfor i in range(aeu.repeat_number):\n print('======================== Iteration {} ========================'.format(i))\n # Shuffle the data by row only\n # and get the seed in order to reproduce the random sequence\n train_data, validation_data, random_shuffle_seed = data.split_dataset_uniformly(dataset)\n\n # The trained model will be saved\n saved_ae_network_path = os.path.join(data.dir_name, model_files_dir_name)\n\n mv4 = aeu.BuildOurMethodV4(original_dim=train_data.shape[1],\n hidden_units=best_layer_type,\n model_dir_path=saved_ae_network_path,\n iteration_number=i)\n\n mv4.train(train_data=train_data,\n save_model=True,\n print_and_plot_history=True,\n show_plots=False)\n\n n_loss, n_acc, ae_n_mse, ae_n_mses = mv4.test_model(test_data=train_data, test_ae=True)\n v_loss, v_acc, ae_v_mse, ae_v_mses = mv4.test_model(test_data=validation_data, test_ae=True)\n t_loss, t_acc, ae_t_mse, ae_t_mses = mv4.test_model(test_data=abnormal_data, is_abnormal=True, test_ae=True)\n\n output_string = 'Iteration {} with layer type {}: n_loss = {}; v_loss = {}; t_loss = {}'\\\n .format(i, best_layer_type, n_loss, v_loss, t_loss)\n\n print('\\n')\n\n # Save the result to a global summary file\n output_string += '\\n'\n\n # Compute the threshold value for the autoencoder method. 
Used for the comparison purpose\n ae_threshold = ae_n_mse + ae_v_mse + 3 * (np.std(ae_n_mses) + np.std(ae_v_mses))\n\n # Compute the accuracy using the old method: only using the autoencoder with the computed threshold\n ae_n_acc = sum([score < ae_threshold for score in ae_n_mses])/float(len(ae_n_mses))\n ae_v_acc = sum([score < ae_threshold for score in ae_v_mses])/float(len(ae_v_mses))\n ae_t_acc = sum([score > ae_threshold for score in ae_t_mses])/float(len(ae_t_mses))\n\n # Summary file format: [Iteration, ae_train_score, ae_validate_score, threshold_value,\n # normal_train_ratio, normal_valid_ratio, abnormal_ratio]\n if i == 0:\n with open(os.path.join(data.dir_name, summary_results_filename), 'wb') as summary_file:\n summary_file.write(b'iteration,random_shuffle_seed,ae_threshold,ae_n_acc,ae_v_acc,ae_t_acc,'\n b'n_loss,n_acc,v_loss,v_acc,t_loss,t_acc\\n')\n\n n_acc_list.append(n_acc*100.0)\n v_acc_list.append(v_acc*100.0)\n t_acc_list.append(t_acc*100.0)\n ae_n_acc_list.append(ae_n_acc*100.0)\n ae_v_acc_list.append(ae_v_acc*100.0)\n ae_t_acc_list.append(ae_t_acc*100.0)\n\n with open(os.path.join(data.dir_name, summary_results_filename), 'ab') as summary_file:\n np.savetxt(summary_file, np.array([i,random_shuffle_seed,ae_threshold,ae_n_acc,ae_v_acc,ae_t_acc,\n n_loss,n_acc,v_loss,v_acc,t_loss,t_acc]).reshape(1, -1),delimiter=',')\n\n output_string += '{:.2f}% (old: {:.2f}%) of normal train samples are detected as normal.\\n'.format(n_acc*100.0,\n ae_n_acc*100.0)\n output_string += '{:.2f}% (old: {:.2f}%) of normal valid samples are detected as normal.\\n'.format(v_acc*100.0,\n ae_v_acc*100.0)\n output_string += '{:.2f}% (old: {:.2f}%) of abnormal samples are detected as abnormal.\\n'.format(t_acc*100.0,\n ae_t_acc*100.0)\n\n print(output_string)\n print('==============================================================')\n\n# Global summary\nglobal_summary_file = open(global_summary_filename, 'w')\n\noutput_string = 'Global summary of abnormal trajectory detection with our new method v.4\\n'\noutput_string += '-----------------------------------------------------------------------\\n'\noutput_string += 'On average, using layer type {},\\n'.format(best_layer_type)\noutput_string += '\\t{:.2f}% (old:{:.2f}%) of normal training samples are detected as normal;\\n'.format(\n np.mean(n_acc_list), np.mean(ae_n_acc_list))\noutput_string += '\\t{:.2f}% (old:{:.2f}%) of normal validation samples are detected as normal;\\n'.format(\n np.mean(v_acc_list), np.mean(ae_v_acc_list))\noutput_string += '\\t{:.2f}% (old:{:.2f}%) of abnormal samples are detected as abnormal.\\n'.format(\n np.mean(t_acc_list), np.mean(ae_t_acc_list))\noutput_string += '-----------------------------------------------------------------------\\n'\noutput_string += 'On maximum, using layer type {},\\n'.format(best_layer_type)\noutput_string += '\\t{:.2f}% (old:{:.2f}%) of normal training samples are detected as normal;\\n'.format(\n np.max(n_acc_list), np.max(ae_n_acc_list))\noutput_string += '\\t{:.2f}% (old:{:.2f}%) of normal validation samples are detected as normal;\\n'.format(\n np.max(v_acc_list), np.max(ae_v_acc_list))\noutput_string += '\\t{:.2f}% (old:{:.2f}%) of abnormal samples are detected as abnormal.\\n'.format(\n np.max(t_acc_list), np.max(ae_t_acc_list))\noutput_string += '-----------------------------------------------------------------------\\n'\noutput_string += 'On minimum, using layer type {},\\n'.format(best_layer_type)\noutput_string += '\\t{:.2f}% (old:{:.2f}%) of normal training samples are detected as 
normal;\\n'.format(\n np.min(n_acc_list), np.min(ae_n_acc_list))\noutput_string += '\\t{:.2f}% (old:{:.2f}%) of normal validation samples are detected as normal;\\n'.format(\n np.min(v_acc_list), np.min(ae_v_acc_list))\noutput_string += '\\t{:.2f}% (old:{:.2f}%) of abnormal samples are detected as abnormal.\\n'.format(\n np.min(t_acc_list), np.min(ae_t_acc_list))\noutput_string += '-----------------------------------------------------------------------\\n'\n\nglobal_summary_file.write(output_string)\nprint(output_string)\n\nglobal_summary_file.close()\n", "\"\"\"\nTrain Abnormal trajectory detection with deep autoencoder.\n\"\"\"\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport ae_utilities as aeu\nimport input_data as data\nimport abnormal_data_generation as adg\nimport dataset_defines as dd\nimport numpy as np\nimport os\n\n\nabspath = os.path.abspath(__file__)\ndir_name = os.path.dirname(abspath)\ndataset_name = dir_name[dir_name.rfind('/')+1:] + '_gt_data.csv'\ndataset_file_path = os.path.join(dir_name + '/data', dataset_name)\nabnormal_name = dir_name[dir_name.rfind('/')+1:] + '_gt_real_abnormal_2.csv'\nabnormal_file_path = os.path.join(dir_name + '/data', abnormal_name)\n\n# Extract trajectories and export data to array\ndataset = np.genfromtxt(dataset_file_path, delimiter=',')\n\n# Ignore first column representing object_id\ndataset = dataset[:,1:]\n\n# Generate abnormal data\nabnormal_data = np.genfromtxt(abnormal_file_path, delimiter=',')\nabnormal_data = abnormal_data[:,1:]\n\n# Best layer type tested in main_test.py\n# ref.: https://deeplearning4j.org/deepautoencoder\nbest_layer_type = (128,64,32,16,8)\n\n# Files setup\ntest_score_filename = 'results/deep_aae/test_scores.csv'\nsummary_results_filename = test_score_filename[:test_score_filename.rfind('/')] + '/summary_results.csv'\nglobal_summary_filename = test_score_filename[:test_score_filename.rfind('/')] + '/global_summary.log'\nmodel_files_dir_name = 'model/deep_aae/'\n\ndata.make_dir_if_new(test_score_filename)\ndata.make_dir_if_new(model_files_dir_name)\n\nnormal_train_ratio_list = []\nnormal_valid_ratio_list = []\nabnormal_ratio_list = []\n\nfor i in range(aeu.repeat_number):\n print('======================== Iteration {} ========================'.format(i))\n # Shuffle the data by row only\n # and get the seed in order to reproduce the random sequence\n train_data, validation_data, random_shuffle_seed = data.split_dataset_uniformly(dataset)\n\n # The trained model will be saved\n saved_ae_network_path = os.path.join(data.dir_name, model_files_dir_name)\n\n aae = aeu.BuildSimpleAAE(original_dim=train_data.shape[1],\n hidden_units=best_layer_type,\n model_dir_path=saved_ae_network_path)\n\n aae.train(train_data=train_data,\n save_model=True,\n iteration_number=i,\n print_and_plot_history=True,\n show_plots=False)\n\n # Get the scores\n ae_train_score = aae.global_mse\n ae_train_scores = aae.mse_per_sample\n\n ae_validate_score, ae_validate_scores = aae.test_model(test_data=validation_data,\n iteration_number=i)\n\n ae_tests_score, ae_tests_scores = aae.test_model(test_data=abnormal_data,\n iteration_number=i)\n\n # Format the test scores: [Iteration, Test_sample_No, Score]\n if i == 0:\n with open(os.path.join(data.dir_name, test_score_filename), 'wb') as score_file:\n score_file.write(b'iteration,test_sample_no,score\\n')\n\n rows_number = len(ae_tests_scores)\n test_scores_array = np.hstack((np.full((rows_number, 1), i), np.array(range(rows_number)).reshape(-1, 1),\n 
np.array(ae_tests_scores).reshape(-1, 1)))\n\n # Export the test scores to text file\n with open(os.path.join(data.dir_name, test_score_filename), 'ab') as score_file:\n np.savetxt(score_file, test_scores_array, delimiter=',')\n\n output_string = 'Iteration {} with layer type {}: ae_train_score = {}; ae_validation_score = {}'\\\n .format(i, best_layer_type, ae_train_score, ae_validate_score)\n\n print('\\n')\n\n # Save the result to a global summary file\n output_string += '\\n'\n\n threshold_value = ae_train_score + 3 * np.std(ae_train_scores)\n\n # Summary file format: [Iteration, ae_train_score, ae_validate_score, threshold_value,\n # normal_train_ratio, normal_valid_ratio, abnormal_ratio]\n if i == 0:\n with open(os.path.join(data.dir_name, summary_results_filename), 'wb') as summary_file:\n summary_file.write(b'iteration,random_shuffle_seed,ae_train_score,ae_validate_score,threshold_value,'\n b'normal_train_ratio,normal_valid_ratio,abnormal_ratio\\n')\n\n normal_train_ratio = sum([score < threshold_value for score in ae_train_scores])/float(len(ae_train_scores))\n normal_valid_ratio = sum([score < threshold_value for score in ae_validate_scores])/float(len(ae_validate_scores))\n abnormal_ratio = sum([score > threshold_value for score in ae_tests_scores])/float(len(ae_tests_scores))\n\n normal_train_ratio_list.append(normal_train_ratio*100.0)\n normal_valid_ratio_list.append(normal_valid_ratio*100.0)\n abnormal_ratio_list.append(abnormal_ratio*100.0)\n\n with open(os.path.join(data.dir_name, summary_results_filename), 'ab') as summary_file:\n np.savetxt(summary_file, np.array([i,random_shuffle_seed,ae_train_score,ae_validate_score,threshold_value,\n normal_train_ratio,normal_valid_ratio,abnormal_ratio]).reshape(1, -1),\n delimiter=',')\n\n output_string += '{0:.2f}% of normal training samples are detected as normal.\\n'.format(normal_train_ratio*100.0)\n output_string += '{0:.2f}% of normal validation samples are detected as normal.\\n'.format(normal_valid_ratio*100.0)\n output_string += '{0:.2f}% of abnormal samples are detected as abnormal.\\n'.format(abnormal_ratio*100.0)\n\n print(output_string)\n print('==============================================================')\n\n# Global summary\nglobal_summary_file = open(global_summary_filename, 'w')\n\noutput_string = 'Global summary of abnormal trajectory detection with deep adversarial autoencoder\\n'\noutput_string += '---------------------------------------------------------------------------------\\n'\noutput_string += 'On average, using layer type {},\\n'.format(best_layer_type)\noutput_string += '\\t{0:.2f}% of normal training samples are detected as normal;\\n'.format(np.mean(\n normal_train_ratio_list))\noutput_string += '\\t{0:.2f}% of normal validation samples are detected as normal;\\n'.format(np.mean(\n normal_valid_ratio_list))\noutput_string += '\\t{0:.2f}% of abnormal samples are detected as abnormal.\\n'.format(np.mean(abnormal_ratio_list))\n\nglobal_summary_file.write(output_string)\nprint(output_string)\n\nglobal_summary_file.close()\n", "\"\"\"\nTrain Abnormal trajectory detection with deep autoencoder.\n\"\"\"\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport ae_utilities as aeu\nimport input_data as data\nimport abnormal_data_generation as adg\nimport dataset_defines as dd\nimport numpy as np\nimport os\nfrom sklearn.preprocessing import MinMaxScaler\n\n\n# Extract trajectories and export data to array\ndataset_file = 
data.extract_augment_and_export_data(raw_input_file_all=dd.raw_input_file_all,\n input_raw_image_frame_path=dd.input_raw_image_frame_path,\n raw_input_file_names=dd.raw_input_file_names,\n video_data_fps=dd.video_data_fps,\n generate_graph=False,\n show_graph=False)\n\ndataset = np.genfromtxt(os.path.join(data.dir_name, dataset_file), delimiter=',')\n\n# Ignore first column representing object_id\ndataset = dataset[:,1:]\n\n# Generate abnormal data\nabnormal_data = adg.generate_abnormal_data(n_objects=20, generate_graph=False, show_graph=False)\nabnormal_data = abnormal_data[:,1:]\n\n# Best layer type tested in main_test.py\n# ref.: https://deeplearning4j.org/deepautoencoder\nbest_layer_type = (128,64,32,16,8)\n\n# Files setup\ntest_score_filename = 'results/deep_ae/test_scores.csv'\nsummary_results_filename = test_score_filename[:test_score_filename.rfind('/')] + '/summary_results.csv'\nglobal_summary_filename = test_score_filename[:test_score_filename.rfind('/')] + '/global_summary.log'\nmodel_files_dir_name = 'model/deep_ae/'\n\ndata.make_dir_if_new(test_score_filename)\ndata.make_dir_if_new(model_files_dir_name)\n\nnormal_train_ratio_list = []\nnormal_valid_ratio_list = []\nabnormal_ratio_list = []\n\nfor i in range(aeu.repeat_number):\n print('======================== Iteration {} ========================'.format(i))\n # Shuffle the data by row only\n # and get the seed in order to reproduce the random sequence\n train_data, validation_data, random_shuffle_seed = data.split_dataset_uniformly(dataset)\n\n # The trained model will be saved\n saved_ae_network_path = os.path.join(data.dir_name, model_files_dir_name)\n\n autoencoder = aeu.BuildSimpleAutoencoder(input_size=train_data.shape[1],\n hidden_units=best_layer_type)\n\n autoencoder.train(train_data=train_data,\n save_model=True,\n test_saved_model=True,\n model_dir_path=saved_ae_network_path,\n iteration_number=i,\n print_and_plot_history=True,\n show_plots=False)\n\n # Get the scores\n ae_train_score = autoencoder.global_mse\n ae_train_scores = autoencoder.mse_per_sample\n\n ae_validate_score, ae_validate_scores = aeu.test_trained_ae_model(test_data=validation_data,\n model_dir_path=saved_ae_network_path,\n iteration_number=i)\n\n ae_tests_score, ae_tests_scores = aeu.test_trained_ae_model(test_data=abnormal_data,\n model_dir_path=saved_ae_network_path,\n iteration_number=i)\n\n # Format the test scores: [Iteration, Test_sample_No, Score]\n if i == 0:\n with open(os.path.join(data.dir_name, test_score_filename), 'wb') as score_file:\n score_file.write(b'iteration,test_sample_no,score\\n')\n\n rows_number = len(ae_tests_scores)\n test_scores_array = np.hstack((np.full((rows_number, 1), i), np.array(range(rows_number)).reshape(-1, 1),\n np.array(ae_tests_scores).reshape(-1, 1)))\n\n # Export the test scores to text file\n with open(os.path.join(data.dir_name, test_score_filename), 'ab') as score_file:\n np.savetxt(score_file, test_scores_array, delimiter=',')\n\n output_string = 'Iteration {} with layer type {}: ae_train_score = {}; ae_validation_score = {}'\\\n .format(i, best_layer_type, ae_train_score, ae_validate_score)\n\n print('\\n')\n\n # Save the result to a global summary file\n output_string += '\\n'\n\n threshold_value = ae_train_score + ae_validate_score + 3 * (np.std(ae_train_scores) + np.std(ae_validate_scores))\n\n # Summary file format: [Iteration, ae_train_score, ae_validate_score, threshold_value,\n # normal_train_ratio, normal_valid_ratio, abnormal_ratio]\n if i == 0:\n with open(os.path.join(data.dir_name, 
summary_results_filename), 'wb') as summary_file:\n summary_file.write(b'iteration,random_shuffle_seed,ae_train_score,ae_validate_score,threshold_value,'\n b'normal_train_ratio,normal_valid_ratio,abnormal_ratio\\n')\n\n normal_train_ratio = sum([score < threshold_value for score in ae_train_scores])/float(len(ae_train_scores))\n normal_valid_ratio = sum([score < threshold_value for score in ae_validate_scores])/float(len(ae_validate_scores))\n abnormal_ratio = sum([score > threshold_value for score in ae_tests_scores])/float(len(ae_tests_scores))\n\n normal_train_ratio_list.append(normal_train_ratio*100.0)\n normal_valid_ratio_list.append(normal_valid_ratio*100.0)\n abnormal_ratio_list.append(abnormal_ratio*100.0)\n\n with open(os.path.join(data.dir_name, summary_results_filename), 'ab') as summary_file:\n np.savetxt(summary_file, np.array([i,random_shuffle_seed,ae_train_score,ae_validate_score,threshold_value,\n normal_train_ratio,normal_valid_ratio,abnormal_ratio]).reshape(1, -1),\n delimiter=',')\n\n output_string += '{0:.2f}% of normal training samples are detected as normal.\\n'.format(normal_train_ratio*100.0)\n output_string += '{0:.2f}% of normal validation samples are detected as normal.\\n'.format(normal_valid_ratio*100.0)\n output_string += '{0:.2f}% of abnormal samples are detected as abnormal.\\n'.format(abnormal_ratio*100.0)\n\n print(output_string)\n print('==============================================================')\n\n# Global summary\nglobal_summary_file = open(global_summary_filename, 'w')\n\noutput_string = 'Global summary of abnormal trajectory detection with deep autoencoder\\n'\noutput_string += '---------------------------------------------------------------------\\n'\noutput_string += 'On average, using layer type {},\\n'.format(best_layer_type)\noutput_string += '\\t{0:.2f}% of normal training samples are detected as normal;\\n'.format(np.mean(\n normal_train_ratio_list))\noutput_string += '\\t{0:.2f}% of normal validation samples are detected as normal;\\n'.format(np.mean(\n normal_valid_ratio_list))\noutput_string += '\\t{0:.2f}% of abnormal samples are detected as abnormal.\\n'.format(np.mean(abnormal_ratio_list))\n\nglobal_summary_file.write(output_string)\nprint(output_string)\n\nglobal_summary_file.close()\n" ]
[ [ "numpy.min", "numpy.genfromtxt", "numpy.max", "numpy.std", "numpy.mean", "numpy.array" ], [ "numpy.full", "numpy.genfromtxt", "numpy.std", "numpy.mean", "numpy.savetxt", "numpy.array" ], [ "numpy.full", "numpy.std", "numpy.mean", "numpy.savetxt", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
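All three training scripts above derive their anomaly cutoff the same way: mean reconstruction error plus three standard deviations (the first and third also fold in the validation statistics). The decision rule itself, isolated on synthetic scores (numbers invented for the example):

import numpy as np

rng = np.random.default_rng(0)
normal_scores = rng.normal(loc=0.1, scale=0.02, size=500)   # stand-in AE reconstruction MSEs
abnormal_scores = rng.normal(loc=0.4, scale=0.05, size=50)

threshold = normal_scores.mean() + 3 * normal_scores.std()

normal_ratio = np.mean(normal_scores < threshold)      # fraction kept as normal
abnormal_ratio = np.mean(abnormal_scores > threshold)  # fraction flagged abnormal
print(threshold, normal_ratio, abnormal_ratio)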
ahmdtaha/tf_retrieval_baseline
[ "31b1588f888cecc1d4287f77bd046314956482d5" ]
[ "ranking/semi_hard_triplet.py" ]
[ "import numbers\nimport tensorflow as tf\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.framework import dtypes\n\ndef masked_minimum(data, mask, dim=1):\n \"\"\"Computes the axis wise minimum over chosen elements.\n\n Args:\n data: 2-D float `Tensor` of size [n, m].\n mask: 2-D Boolean `Tensor` of size [n, m].\n dim: The dimension over which to compute the minimum.\n\n Returns:\n masked_minimums: N-D `Tensor`.\n The minimized dimension is of size 1 after the operation.\n \"\"\"\n axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)\n masked_minimums = math_ops.reduce_min(\n math_ops.multiply(data - axis_maximums, mask), dim,\n keepdims=True) + axis_maximums\n return masked_minimums\n\n\n\ndef masked_maximum(data, mask, dim=1):\n \"\"\"Computes the axis wise maximum over chosen elements.\n\n Args:\n data: 2-D float `Tensor` of size [n, m].\n mask: 2-D Boolean `Tensor` of size [n, m].\n dim: The dimension over which to compute the maximum.\n\n Returns:\n masked_maximums: N-D `Tensor`.\n The maximized dimension is of size 1 after the operation.\n \"\"\"\n axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)\n masked_maximums = math_ops.reduce_max(\n math_ops.multiply(data - axis_minimums, mask), dim,\n keepdims=True) + axis_minimums\n return masked_maximums\n\ndef all_diffs(a, b):\n \"\"\" Returns a tensor of all combinations of a - b.\n\n Args:\n a (2D tensor): A batch of vectors shaped (B1, F).\n b (2D tensor): A batch of vectors shaped (B2, F).\n\n Returns:\n The matrix of all pairwise differences between all vectors in `a` and in\n `b`, will be of shape (B1, B2).\n\n Note:\n For convenience, if either `a` or `b` is a `Distribution` object, its\n mean is used.\n \"\"\"\n return tf.expand_dims(a, axis=1) - tf.expand_dims(b, axis=0)\n\n\ndef cdist(a, b, metric='euclidean'):\n \"\"\"Similar to scipy.spatial's cdist, but symbolic.\n\n The currently supported metrics can be listed as `cdist.supported_metrics` and are:\n - 'euclidean', although with a fudge-factor epsilon.\n - 'sqeuclidean', the squared euclidean.\n - 'cityblock', the manhattan or L1 distance.\n\n Args:\n a (2D tensor): The left-hand side, shaped (B1, F).\n b (2D tensor): The right-hand side, shaped (B2, F).\n metric (string): Which distance metric to use, see notes.\n\n Returns:\n The matrix of all pairwise distances between all vectors in `a` and in\n `b`, will be of shape (B1, B2).\n\n Note:\n When a square root is taken (such as in the Euclidean case), a small\n epsilon is added because the gradient of the square-root at zero is\n undefined. 
Thus, it will never return exact zero in these cases.\n \"\"\"\n with tf.name_scope(\"cdist\"):\n diffs = all_diffs(a, b)\n if metric == 'sqeuclidean':\n return tf.reduce_sum(tf.square(diffs), axis=-1)\n elif metric == 'euclidean':\n return tf.sqrt(tf.reduce_sum(tf.square(diffs), axis=-1) + 1e-12)\n elif metric == 'cityblock':\n return tf.reduce_sum(tf.abs(diffs), axis=-1)\n elif metric == 'cosine':\n # https://stackoverflow.com/questions/48485373/pairwise-cosine-similarity-using-tensorflow\n # normalized_input = tf.nn.l2_normalize(a, dim=1)\n # Embedding are assumed to be normalized\n prod = tf.matmul(a, b,adjoint_b=True) # transpose second matrix\n return 1 - prod\n else:\n raise NotImplementedError(\n 'The following metric is not implemented by `cdist` yet: {}'.format(metric))\n\ndef pairwise_distance(feature, squared=False):\n \"\"\"Computes the pairwise distance matrix with numerical stability.\n output[i, j] = || feature[i, :] - feature[j, :] ||_2\n Args:\n feature: 2-D Tensor of size [number of data, feature dimension].\n squared: Boolean, whether or not to square the pairwise distances.\n Returns:\n pairwise_distances: 2-D Tensor of size [number of data, number of data].\n \"\"\"\n pairwise_distances_squared = math_ops.add(\n math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),\n math_ops.reduce_sum(\n math_ops.square(array_ops.transpose(feature)),\n axis=[0],\n keepdims=True)) - 2.0 * math_ops.matmul(feature,\n array_ops.transpose(feature))\n\n # Deal with numerical inaccuracies. Set small negatives to zero.\n pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)\n # Get the mask where the zero distances are at.\n error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)\n\n # Optionally take the sqrt.\n if squared:\n pairwise_distances = pairwise_distances_squared\n else:\n pairwise_distances = math_ops.sqrt(\n pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)\n\n # Undo conditionally adding 1e-16.\n pairwise_distances = math_ops.multiply(\n pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))\n\n num_data = array_ops.shape(feature)[0]\n # Explicitly set diagonals to zero.\n mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(\n array_ops.ones([num_data]))\n pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)\n return pairwise_distances\n\ndef triplet_semihard_loss(embeddings,labels, margin=1.0):\n \"\"\"Computes the triplet loss with semi-hard negative mining.\n\n The loss encourages the positive distances (between a pair of embeddings with\n the same labels) to be smaller than the minimum negative distance among\n which are at least greater than the positive distance plus the margin constant\n (called semi-hard negative) in the mini-batch. If no such negative exists,\n uses the largest negative distance instead.\n See: https://arxiv.org/abs/1503.03832.\n\n Args:\n labels: 1-D tf.int32 `Tensor` with shape [batch_size] of\n multiclass integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. 
Embeddings should\n be l2 normalized.\n margin: Float, margin term in the loss definition.\n\n Returns:\n triplet_loss: tf.float32 scalar.\n \"\"\"\n # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.\n #pdist_matrix = cdist(embeddings, embeddings, metric=metric)\n\n lshape = array_ops.shape(labels)\n assert lshape.shape == 1\n labels = array_ops.reshape(labels, [lshape[0], 1])\n\n # Build pairwise squared distance matrix.\n pdist_matrix = pairwise_distance(embeddings, squared=True)\n # Build pairwise binary adjacency matrix.\n adjacency = math_ops.equal(labels, array_ops.transpose(labels))\n # Invert so we can select negatives only.\n adjacency_not = math_ops.logical_not(adjacency)\n\n batch_size = array_ops.size(labels)\n\n # Compute the mask.\n ## Is there any element with a different label that is farther away than me? If yes, then a semi-hard negative exists.\n pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])\n mask = math_ops.logical_and(\n array_ops.tile(adjacency_not, [batch_size, 1]),\n math_ops.greater(\n pdist_matrix_tile, array_ops.reshape(\n array_ops.transpose(pdist_matrix), [-1, 1])))\n\n mask_final = array_ops.reshape(\n math_ops.greater(\n math_ops.reduce_sum(\n tf.cast(mask, dtype=dtypes.float32), 1, keepdims=True),\n 0.0), [batch_size, batch_size])\n mask_final = array_ops.transpose(mask_final)\n\n adjacency_not = tf.cast(adjacency_not, dtype=dtypes.float32)\n\n mask = tf.cast(mask, dtype=dtypes.float32)\n\n # negatives_outside: smallest D_an where D_an > D_ap.\n negatives_outside = array_ops.reshape(\n masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])\n negatives_outside = array_ops.transpose(negatives_outside)\n\n # negatives_inside: largest D_an.\n negatives_inside = array_ops.tile(\n masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])\n\n semi_hard_negatives = array_ops.where(\n mask_final, negatives_outside, negatives_inside)\n\n if isinstance(margin, numbers.Real):\n # diff = tf.maximum(diff + margin, 0.0)\n loss_mat = pdist_matrix - semi_hard_negatives + margin\n elif margin == 'soft':\n # diff = tf.nn.softplus(diff)\n loss_mat = pdist_matrix - semi_hard_negatives\n elif margin.lower() == 'none':\n # no loss matrix is defined for 'none', so fail loudly here instead of\n # hitting a NameError further down\n raise NotImplementedError(\n \"The margin 'none' is not supported in triplet_semihard_loss\")\n else:\n raise NotImplementedError(\n 'The margin {} is not implemented in batch_hard'.format(margin))\n\n mask_positives = tf.cast(\n adjacency, dtype=dtypes.float32) - array_ops.diag(\n array_ops.ones([batch_size]))\n\n if isinstance(margin, numbers.Real):\n triplet_loss_result = math_ops.maximum(tf.boolean_mask(loss_mat, tf.cast(mask_positives, tf.bool)),\n 0.0)\n assert_op = tf.Assert(tf.equal(tf.rank(triplet_loss_result), 1), ['Rank of triplet_loss must be equal to 1.'])\n with tf.control_dependencies([assert_op]):\n triplet_loss = triplet_loss_result\n elif margin == 'soft':\n triplet_loss_result = tf.nn.softplus(tf.boolean_mask(loss_mat, tf.cast(mask_positives, tf.bool)))\n assert_op = tf.Assert(tf.equal(tf.rank(triplet_loss_result), 1), ['Rank of triplet_loss must be equal to 1.'])\n with tf.control_dependencies([assert_op]):\n triplet_loss = triplet_loss_result\n else:\n raise NotImplementedError(\n 'The margin {} is not implemented in batch_hard'.format(margin))\n\n return triplet_loss\n" ]
[ [ "tensorflow.python.ops.array_ops.shape", "tensorflow.control_dependencies", "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.cast", "tensorflow.python.ops.math_ops.to_float", "tensorflow.rank", "tensorflow.python.ops.math_ops.logical_not", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.math_ops.reduce_min", "tensorflow.python.ops.array_ops.size", "tensorflow.name_scope", "tensorflow.python.ops.array_ops.ones", "tensorflow.square", "tensorflow.matmul", "tensorflow.python.ops.array_ops.tile", "tensorflow.python.ops.math_ops.square", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.math_ops.less_equal", "tensorflow.expand_dims", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.ops.math_ops.maximum", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.0", "1.2" ] } ]
zhuang-group/Mesa
[ "8b7a0db0461de7df5c99d644a60cc7704c67a02a" ]
[ "mesa/custom_bn.py" ]
[ "# Copyright (c) 2021-present, Zhuang AI Group.\n# All rights reserved.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nif 'mesa' not in __name__:\n import custom_quant\n import packbit\n import native\nelse:\n from . import custom_quant\n from . import native\n from . import packbit\n\n\ndef SyncBatchNorm_forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size):\n if not input.is_contiguous(memory_format=torch.channels_last):\n input = input.contiguous()\n if weight is not None:\n weight = weight.contiguous()\n\n size = int(input.numel() // input.size(1))\n if size == 1 and world_size < 2:\n raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size))\n\n # calculate mean/invstd for input.\n mean, invstd = torch.batch_norm_stats(input, eps)\n \n count = torch.full((1,), input.numel() // input.size(1), dtype=mean.dtype, device=mean.device)\n \n num_channels = input.shape[1]\n # C, C, 1 -> (2C + 1)\n combined = torch.cat([mean, invstd, count], dim=0)\n # world_size * (2C + 1)\n combined_list = [ torch.empty_like(combined) for k in range(world_size) ]\n # Use allgather instead of allreduce since I don't trust in-place operations ..\n dist.all_gather(combined_list, combined, process_group, async_op=False)\n combined = torch.stack(combined_list, dim=0)\n # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1\n mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)\n \n # calculate global mean & invstd\n mean, invstd = torch.batch_norm_gather_stats_with_counts(\n input,\n mean_all,\n invstd_all,\n running_mean,\n running_var,\n momentum,\n eps,\n count_all.view(-1)\n )\n\n self.process_group = process_group\n\n # apply element-wise normalization\n out = torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)\n return out\n\ndef SyncBatchNorm_backward(saved_input, weight, mean, invstd, count_tensor, process_group, needs_input_grad, grad_output):\n if not grad_output.is_contiguous(memory_format=torch.channels_last):\n grad_output = grad_output.contiguous()\n #saved_input, weight, mean, invstd, count_tensor = self.saved_tensors\n #process_group = self.process_group\n grad_input = grad_weight = grad_bias = None\n\n # calculate local stats as well as grad_weight / grad_bias\n sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(\n grad_output,\n saved_input,\n mean,\n invstd,\n weight,\n True,\n needs_input_grad[0],\n needs_input_grad[1]\n )\n\n if True:\n # synchronizing stats used to calculate input gradient.\n num_channels = sum_dy.shape[0]\n combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)\n torch.distributed.all_reduce(\n combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)\n sum_dy, sum_dy_xmu = torch.split(combined, num_channels)\n\n # backward pass for gradient calculation\n grad_input = torch.batch_norm_backward_elemt(\n grad_output,\n saved_input,\n mean,\n invstd,\n weight,\n sum_dy,\n sum_dy_xmu,\n count_tensor\n )\n\n return grad_input, grad_weight, grad_bias #, None, None, None, None, None, None\n\ndef bn_pre_forward(self, input):\n self._check_input_dim(input)\n\n if self.momentum is None:\n exponential_average_factor = 0.0\n else:\n exponential_average_factor = self.momentum\n \n if self.training and self.track_running_stats:\n # TODO: if statement only here to tell the jit to skip emitting this when it is None\n if self.num_batches_tracked is not None: # type: ignore\n 
self.num_batches_tracked = self.num_batches_tracked + 1 # type: ignore\n if self.momentum is None: # use cumulative moving average\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else: # use exponential moving average\n exponential_average_factor = self.momentum\n\n if self.training:\n bn_training = True\n else:\n bn_training = (self.running_mean is None) and (self.running_var is None)\n \n assert self.running_mean is None or isinstance(self.running_mean, torch.Tensor)\n assert self.running_var is None or isinstance(self.running_var, torch.Tensor)\n running_mean = self.running_mean if not self.training or self.track_running_stats else None\n running_var = self.running_var if not self.training or self.track_running_stats else None\n\n need_sync = bn_training and input.is_cuda and hasattr(self, 'process_group')\n process_group = None\n world_size = 1\n if need_sync:\n process_group = torch.distributed.group.WORLD\n if self.process_group:\n process_group = self.process_group\n try:\n world_size = torch.distributed.get_world_size(process_group)\n except AssertionError:\n world_size = 1\n need_sync = world_size > 1\n\n # fallback to framework BN when synchronization is not necessary\n if need_sync:\n if not self.ddp_gpu_size:\n raise AttributeError('SyncBatchNorm is only supported within torch.nn.parallel.DistributedDataParallel')\n \n return exponential_average_factor, bn_training, running_mean, running_var, need_sync, process_group, world_size\n\nclass batchnorm2d(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, weight, bias, mean, var, average_factor, training, need_sync, process_group, world_size, eps,\n clip_val, level, iteration, ema_decay, quant_groups, shift):\n if need_sync:\n # distributed sync path (not fully supported yet)\n output = SyncBatchNorm_forward(ctx, input, weight, bias, mean, var, eps, average_factor, process_group, world_size)\n else:\n output, save_mean, save_var, reverse = native.batch_norm_forward(input, weight, bias, mean, var, training, average_factor, eps)\n if training:\n ctx.bn_parameter = (weight, bias, mean, var, save_mean, save_var, reverse, eps)\n custom_quant.Quant.forward(ctx, input, clip_val, level, iteration, ema_decay, quant_groups, shift)\n if training:\n ctx.need_sync = need_sync\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.need_sync:\n # the sync forward does not save the stats (input, mean, invstd, counts)\n # that a synchronized backward would need, so this path cannot run yet\n raise NotImplementedError('SyncBatchNorm backward is not supported yet')\n else:\n weight, bias, running_mean, running_var, save_mean, save_var, reverse, eps = ctx.bn_parameter\n # input = ctx.bn_input\n input = custom_quant.Quant.restore(ctx)\n grad_input, grad_weight, grad_bias = native.batch_norm_backward(input, grad_output, weight, running_mean, running_var, \\\n save_mean, save_var, 0, reverse)\n ctx.bn_input = None\n ctx.bn_parameter = None\n ctx.need_sync = None\n\n return grad_input, grad_weight, grad_bias, None, None, None, None, None, None, None, None, None, None, None, None, None, None\n\nclass BatchNorm2d(nn.BatchNorm2d, custom_quant.Quant):\n def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, args=None, logger=None, quant_groups=1):\n super(BatchNorm2d, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)\n self.repr = super(BatchNorm2d, self).__repr__()\n custom_quant.Quant.__init__(self, args=args, logger=logger, 
quant_groups=quant_groups)\n self.tag = 'bn'\n\n def __repr__(self):\n return self.__str__()\n\n def forward(self, x):\n if self.enable and self.training:\n assert x.is_cuda, \"CPU mode is not supported yet\"\n average_factor, training, mean, var, need_sync, process_group, world_size = bn_pre_forward(self, x)\n y = batchnorm2d.apply(x, self.weight, self.bias, mean, var, average_factor, training, need_sync, process_group, world_size, self.eps,\n self.clip_val, self.level, self.iteration, self.ema_decay, self.quant_groups, self.shift)\n else:\n y = super().forward(x)\n return y\n\nif __name__ == \"__main__\":\n model = BatchNorm2d(64, args=None)\n input = torch.randn(4, 64, 35, 45)\n\n from test import test\n test(model)\n\n" ]
[ [ "torch.batch_norm_backward_elemt", "torch.empty_like", "torch.distributed.all_reduce", "torch.cat", "torch.randn", "torch.batch_norm_elemt", "torch.split", "torch.stack", "torch.distributed.get_world_size", "torch.batch_norm_backward_reduce", "torch.batch_norm_stats" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shyhuai/kfac_pytorch
[ "f5a99366fa94345697432a8aabdc5d370f68d06f" ]
[ "kfac/autograd_hacks.py" ]
[ "\"\"\"\nLibrary for extracting interesting quantites from autograd, see README.md\n\nNot thread-safe because of module-level variables\n\nNotation:\no: number of output classes (exact Hessian), number of Hessian samples (sampled Hessian)\nn: batch-size\ndo: output dimension (output channels for convolution)\ndi: input dimension (input channels for convolution)\nHi: per-example Hessian of matmul, shaped as matrix of [dim, dim], indices have been row-vectorized\nHi_bias: per-example Hessian of bias\nOh, Ow: output height, output width (convolution)\nKh, Kw: kernel height, kernel width (convolution)\n\nJb: batch output Jacobian of matmul, output sensitivity for example,class pair, [o, n, ....]\nJb_bias: as above, but for bias\n\nA, activations: inputs into current layer\nB, backprops: backprop values (aka Lop aka Jacobian-vector product) observed at current layer\n\n\"\"\"\n\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n_supported_layers = ['Linear', 'Conv2d'] # Supported layer class types\n_hooks_disabled: bool = False # work-around for https://github.com/pytorch/pytorch/issues/25723\n_enforce_fresh_backprop: bool = False # global switch to catch double backprop errors on Hessian computation\n\n\ndef add_hooks(model: nn.Module) -> None:\n \"\"\"\n Adds hooks to model to save activations and backprop values.\n\n The hooks will\n 1. save activations into param.activations during forward pass\n 2. append backprops to params.backprops_list during backward pass.\n\n Call \"remove_hooks(model)\" to disable this.\n\n Args:\n model:\n \"\"\"\n\n global _hooks_disabled\n _hooks_disabled = False\n\n handles = []\n for layer in model.modules():\n if _layer_type(layer) in _supported_layers:\n handles.append(layer.register_forward_hook(_capture_activations))\n handles.append(layer.register_backward_hook(_capture_backprops))\n\n model.__dict__.setdefault('autograd_hacks_hooks', []).extend(handles)\n\n\ndef remove_hooks(model: nn.Module) -> None:\n \"\"\"\n Remove hooks added by add_hooks(model)\n \"\"\"\n\n assert model == 0, \"not working, remove this after fix to https://github.com/pytorch/pytorch/issues/25723\"\n\n if not hasattr(model, 'autograd_hacks_hooks'):\n print(\"Warning, asked to remove hooks, but no hooks found\")\n else:\n for handle in model.autograd_hacks_hooks:\n handle.remove()\n del model.autograd_hacks_hooks\n\n\ndef disable_hooks() -> None:\n \"\"\"\n Globally disable all hooks installed by this library.\n \"\"\"\n\n global _hooks_disabled\n _hooks_disabled = True\n\n\ndef enable_hooks() -> None:\n \"\"\"the opposite of disable_hooks()\"\"\"\n\n global _hooks_disabled\n _hooks_disabled = False\n\n\ndef is_supported(layer: nn.Module) -> bool:\n \"\"\"Check if this layer is supported\"\"\"\n\n return _layer_type(layer) in _supported_layers\n\n\ndef _layer_type(layer: nn.Module) -> str:\n return layer.__class__.__name__\n\n\ndef _capture_activations(layer: nn.Module, input: List[torch.Tensor], output: torch.Tensor):\n \"\"\"Save activations into layer.activations in forward pass\"\"\"\n\n if _hooks_disabled:\n return\n assert _layer_type(layer) in _supported_layers, \"Hook installed on unsupported layer, this shouldn't happen\"\n setattr(layer, \"activations\", input[0].detach())\n\n\ndef _capture_backprops(layer: nn.Module, _input, output):\n \"\"\"Append backprop to layer.backprops_list in backward pass.\"\"\"\n global _enforce_fresh_backprop\n\n if _hooks_disabled:\n return\n\n if _enforce_fresh_backprop:\n assert not 
hasattr(layer, 'backprops_list'), \"Seeing result of previous backprop, use clear_backprops(model) to clear\"\n _enforce_fresh_backprop = False\n\n if not hasattr(layer, 'backprops_list'):\n setattr(layer, 'backprops_list', [])\n layer.backprops_list.append(output[0].detach())\n\n\ndef clear_backprops(model: nn.Module) -> None:\n \"\"\"Delete layer.backprops_list in every layer.\"\"\"\n for layer in model.modules():\n if hasattr(layer, 'backprops_list'):\n del layer.backprops_list\n\n\ndef compute_grad1(model: nn.Module, loss_type: str = 'mean') -> None:\n \"\"\"\n Compute per-example gradients and save them under 'param.grad1'. Must be called after loss.backprop()\n\n Args:\n model:\n loss_type: either \"mean\" or \"sum\" depending whether backpropped loss was averaged or summed over batch\n \"\"\"\n\n assert loss_type in ('sum', 'mean')\n for layer in model.modules():\n layer_type = _layer_type(layer)\n if layer_type not in _supported_layers:\n continue\n assert hasattr(layer, 'activations'), \"No activations detected, run forward after add_hooks(model)\"\n assert hasattr(layer, 'backprops_list'), \"No backprops detected, run backward after add_hooks(model)\"\n assert len(layer.backprops_list) == 1, \"Multiple backprops detected, make sure to call clear_backprops(model)\"\n\n A = layer.activations\n n = A.shape[0]\n if loss_type == 'mean':\n B = layer.backprops_list[0] * n\n else: # loss_type == 'sum':\n B = layer.backprops_list[0]\n\n if layer_type == 'Linear':\n setattr(layer.weight, 'grad1', torch.einsum('ni,nj->nij', B, A))\n if layer.bias is not None:\n setattr(layer.bias, 'grad1', B)\n\n elif layer_type == 'Conv2d':\n A = torch.nn.functional.unfold(A, layer.kernel_size, dilation=layer.dilation, padding=layer.padding, stride=layer.stride)\n #A = torch.nn.functional.unfold(A, layer.kernel_size)\n B = B.reshape(n, -1, A.shape[-1])\n grad1 = torch.einsum('ijk,ilk->ijl', B, A)\n shape = [n] + list(layer.weight.shape)\n setattr(layer.weight, 'grad1', grad1.reshape(shape))\n if layer.bias is not None:\n setattr(layer.bias, 'grad1', torch.sum(B, dim=2))\n\n\ndef compute_hess(model: nn.Module,) -> None:\n \"\"\"Save Hessian under param.hess for each param in the model\"\"\"\n\n for layer in model.modules():\n layer_type = _layer_type(layer)\n if layer_type not in _supported_layers:\n continue\n assert hasattr(layer, 'activations'), \"No activations detected, run forward after add_hooks(model)\"\n assert hasattr(layer, 'backprops_list'), \"No backprops detected, run backward after add_hooks(model)\"\n\n if layer_type == 'Linear':\n A = layer.activations\n B = torch.stack(layer.backprops_list)\n\n n = A.shape[0]\n o = B.shape[0]\n\n A = torch.stack([A] * o)\n Jb = torch.einsum(\"oni,onj->onij\", B, A).reshape(n*o, -1)\n H = torch.einsum('ni,nj->ij', Jb, Jb) / n\n\n setattr(layer.weight, 'hess', H)\n\n if layer.bias is not None:\n setattr(layer.bias, 'hess', torch.einsum('oni,onj->ij', B, B)/n)\n\n elif layer_type == 'Conv2d':\n Kh, Kw = layer.kernel_size\n di, do = layer.in_channels, layer.out_channels\n\n A = layer.activations.detach()\n A = torch.nn.functional.unfold(A, (Kh, Kw)) # n, di * Kh * Kw, Oh * Ow\n n = A.shape[0]\n B = torch.stack([Bt.reshape(n, do, -1) for Bt in layer.backprops_list]) # o, n, do, Oh*Ow\n o = B.shape[0]\n\n A = torch.stack([A] * o) # o, n, di * Kh * Kw, Oh*Ow\n Jb = torch.einsum('onij,onkj->onik', B, A) # o, n, do, di * Kh * Kw\n\n Hi = torch.einsum('onij,onkl->nijkl', Jb, Jb) # n, do, di*Kh*Kw, do, di*Kh*Kw\n Jb_bias = torch.einsum('onij->oni', B)\n Hi_bias = 
torch.einsum('oni,onj->nij', Jb_bias, Jb_bias)\n\n setattr(layer.weight, 'hess', Hi.mean(dim=0))\n if layer.bias is not None:\n setattr(layer.bias, 'hess', Hi_bias.mean(dim=0))\n\n\ndef backprop_hess(output: torch.Tensor, hess_type: str) -> None:\n \"\"\"\n Call backward one or more times to get values needed for Hessian computation.\n\n Args:\n output: prediction of neural network (i.e., input of nn.CrossEntropyLoss())\n hess_type: type of Hessian propagation, \"CrossEntropy\" results in exact Hessian for CrossEntropy\n\n Returns:\n\n \"\"\"\n\n assert hess_type in ('LeastSquares', 'CrossEntropy')\n global _enforce_fresh_backprop\n n, o = output.shape\n\n _enforce_fresh_backprop = True\n\n if hess_type == 'CrossEntropy':\n batch = F.softmax(output, dim=1)\n\n mask = torch.eye(o).expand(n, o, o)\n diag_part = batch.unsqueeze(2).expand(n, o, o) * mask\n outer_prod_part = torch.einsum('ij,ik->ijk', batch, batch)\n hess = diag_part - outer_prod_part\n assert hess.shape == (n, o, o)\n\n for i in range(n):\n hess[i, :, :] = symsqrt(hess[i, :, :])\n hess = hess.transpose(0, 1)\n\n elif hess_type == 'LeastSquares':\n hess = []\n assert len(output.shape) == 2\n batch_size, output_size = output.shape\n\n id_mat = torch.eye(output_size)\n for out_idx in range(output_size):\n hess.append(torch.stack([id_mat[out_idx]] * batch_size))\n\n # avoid shadowing o with the loop variable\n for i in range(o):\n output.backward(hess[i], retain_graph=True)\n\n\ndef symsqrt(a, cond=None, return_rank=False, dtype=torch.float32):\n \"\"\"Symmetric square root of a positive semi-definite matrix.\n See https://github.com/pytorch/pytorch/issues/25481\"\"\"\n\n s, u = torch.symeig(a, eigenvectors=True)\n cond_dict = {torch.float32: 1e3 * 1.1920929e-07, torch.float64: 1E6 * 2.220446049250313e-16}\n\n if cond in [None, -1]:\n cond = cond_dict[dtype]\n\n above_cutoff = (abs(s) > cond * torch.max(abs(s)))\n\n psigma_diag = torch.sqrt(s[above_cutoff])\n u = u[:, above_cutoff]\n\n B = u @ torch.diag(psigma_diag) @ u.t()\n if return_rank:\n return B, len(psigma_diag)\n else:\n return B\n" ]
[ [ "torch.nn.functional.softmax", "torch.sqrt", "torch.einsum", "torch.eye", "torch.sum", "torch.symeig", "torch.diag", "torch.stack", "torch.nn.functional.unfold" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tyuyoshi/pandas
[ "4e034ec0006b6c05160ce67ea1420ce28f295c91" ]
[ "pandas/io/sql.py" ]
[ "\"\"\"\nCollection of query wrappers / abstractions to both facilitate data\nretrieval and to reduce dependency on DB-specific API.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom contextlib import contextmanager\nfrom datetime import (\n date,\n datetime,\n time,\n)\nfrom functools import partial\nimport re\nfrom typing import (\n Any,\n Iterator,\n Sequence,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.lib as lib\nfrom pandas._typing import DtypeArg\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_datetime64tz_dtype,\n is_dict_like,\n is_list_like,\n)\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas import get_option\nfrom pandas.core.api import (\n DataFrame,\n Series,\n)\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.tools.datetimes import to_datetime\nfrom pandas.util.version import Version\n\n\nclass DatabaseError(OSError):\n pass\n\n\n# -----------------------------------------------------------------------------\n# -- Helper functions\n\n\ndef _gt14() -> bool:\n \"\"\"\n Check if sqlalchemy.__version__ is at least 1.4.0, when several\n deprecations were made.\n \"\"\"\n import sqlalchemy\n\n return Version(sqlalchemy.__version__) >= Version(\"1.4.0\")\n\n\ndef _convert_params(sql, params):\n \"\"\"Convert SQL and params args to DBAPI2.0 compliant format.\"\"\"\n args = [sql]\n if params is not None:\n if hasattr(params, \"keys\"): # test if params is a mapping\n args += [params]\n else:\n args += [list(params)]\n return args\n\n\ndef _process_parse_dates_argument(parse_dates):\n \"\"\"Process parse_dates argument for read_sql functions\"\"\"\n # handle non-list entries for parse_dates gracefully\n if parse_dates is True or parse_dates is None or parse_dates is False:\n parse_dates = []\n\n elif not hasattr(parse_dates, \"__iter__\"):\n parse_dates = [parse_dates]\n return parse_dates\n\n\ndef _handle_date_column(\n col, utc: bool | None = None, format: str | dict[str, Any] | None = None\n):\n if isinstance(format, dict):\n # GH35185 Allow custom error values in parse_dates argument of\n # read_sql like functions.\n # Format can take on custom to_datetime argument values such as\n # {\"errors\": \"coerce\"} or {\"dayfirst\": True}\n error = format.pop(\"errors\", None) or \"ignore\"\n return to_datetime(col, errors=error, **format)\n else:\n # Allow passing of formatting string for integers\n # GH17855\n if format is None and (\n issubclass(col.dtype.type, np.floating)\n or issubclass(col.dtype.type, np.integer)\n ):\n format = \"s\"\n if format in [\"D\", \"d\", \"h\", \"m\", \"s\", \"ms\", \"us\", \"ns\"]:\n return to_datetime(col, errors=\"coerce\", unit=format, utc=utc)\n elif is_datetime64tz_dtype(col.dtype):\n # coerce to UTC timezone\n # GH11216\n return to_datetime(col, utc=True)\n else:\n return to_datetime(col, errors=\"coerce\", format=format, utc=utc)\n\n\ndef _parse_date_columns(data_frame, parse_dates):\n \"\"\"\n Force non-datetime columns to be read as such.\n Supports both string formatted and integer timestamp columns.\n \"\"\"\n parse_dates = _process_parse_dates_argument(parse_dates)\n\n # we want to coerce datetime64_tz dtypes for now to UTC\n # we could in theory do a 'nice' conversion from a FixedOffset tz\n # GH11216\n for col_name, 
df_col in data_frame.items():\n if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:\n try:\n fmt = parse_dates[col_name]\n except TypeError:\n fmt = None\n data_frame[col_name] = _handle_date_column(df_col, format=fmt)\n\n return data_frame\n\n\ndef _wrap_result(\n data,\n columns,\n index_col=None,\n coerce_float: bool = True,\n parse_dates=None,\n dtype: DtypeArg | None = None,\n):\n \"\"\"Wrap result set of query in a DataFrame.\"\"\"\n frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)\n\n if dtype:\n frame = frame.astype(dtype)\n\n frame = _parse_date_columns(frame, parse_dates)\n\n if index_col is not None:\n frame.set_index(index_col, inplace=True)\n\n return frame\n\n\ndef execute(sql, con, params=None):\n \"\"\"\n Execute the given SQL query using the provided connection object.\n\n Parameters\n ----------\n sql : string\n SQL query to be executed.\n con : SQLAlchemy connectable(engine/connection) or sqlite3 connection\n Using SQLAlchemy makes it possible to use any DB supported by the\n library.\n If a DBAPI2 object, only sqlite3 is supported.\n params : list or tuple, optional, default: None\n List of parameters to pass to execute method.\n\n Returns\n -------\n Results Iterable\n \"\"\"\n pandas_sql = pandasSQL_builder(con)\n args = _convert_params(sql, params)\n return pandas_sql.execute(*args)\n\n\n# -----------------------------------------------------------------------------\n# -- Read and write to DataFrames\n\n\n@overload\ndef read_sql_table(\n table_name,\n con,\n schema=...,\n index_col=...,\n coerce_float=...,\n parse_dates=...,\n columns=...,\n chunksize: None = ...,\n) -> DataFrame:\n ...\n\n\n@overload\ndef read_sql_table(\n table_name,\n con,\n schema=...,\n index_col=...,\n coerce_float=...,\n parse_dates=...,\n columns=...,\n chunksize: int = ...,\n) -> Iterator[DataFrame]:\n ...\n\n\ndef read_sql_table(\n table_name: str,\n con,\n schema: str | None = None,\n index_col: str | Sequence[str] | None = None,\n coerce_float: bool = True,\n parse_dates=None,\n columns=None,\n chunksize: int | None = None,\n) -> DataFrame | Iterator[DataFrame]:\n \"\"\"\n Read SQL database table into a DataFrame.\n\n Given a table name and a SQLAlchemy connectable, returns a DataFrame.\n This function does not support DBAPI connections.\n\n Parameters\n ----------\n table_name : str\n Name of SQL table in database.\n con : SQLAlchemy connectable or str\n A database URI could be provided as str.\n SQLite DBAPI connection mode not supported.\n schema : str, default None\n Name of SQL schema in database to query (if database flavor\n supports this). Uses default schema if None (default).\n index_col : str or list of str, optional, default: None\n Column(s) to set as index(MultiIndex).\n coerce_float : bool, default True\n Attempts to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point. 
Can result in loss of Precision.\n parse_dates : list or dict, default None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict corresponds\n to the keyword arguments of :func:`pandas.to_datetime`\n Especially useful with databases without native Datetime support,\n such as SQLite.\n columns : list, default None\n List of column names to select from SQL table.\n chunksize : int, default None\n If specified, returns an iterator where `chunksize` is the number of\n rows to include in each chunk.\n\n Returns\n -------\n DataFrame or Iterator[DataFrame]\n A SQL table is returned as two-dimensional data structure with labeled\n axes.\n\n See Also\n --------\n read_sql_query : Read SQL query into a DataFrame.\n read_sql : Read SQL query or database table into a DataFrame.\n\n Notes\n -----\n Any datetime values with time zone information will be converted to UTC.\n\n Examples\n --------\n >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP\n \"\"\"\n pandas_sql = pandasSQL_builder(con, schema=schema)\n if not pandas_sql.has_table(table_name):\n raise ValueError(f\"Table {table_name} not found\")\n\n table = pandas_sql.read_table(\n table_name,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n columns=columns,\n chunksize=chunksize,\n )\n\n if table is not None:\n return table\n else:\n raise ValueError(f\"Table {table_name} not found\", con)\n\n\n@overload\ndef read_sql_query(\n sql,\n con,\n index_col=...,\n coerce_float=...,\n params=...,\n parse_dates=...,\n chunksize: None = ...,\n dtype: DtypeArg | None = ...,\n) -> DataFrame:\n ...\n\n\n@overload\ndef read_sql_query(\n sql,\n con,\n index_col=...,\n coerce_float=...,\n params=...,\n parse_dates=...,\n chunksize: int = ...,\n dtype: DtypeArg | None = ...,\n) -> Iterator[DataFrame]:\n ...\n\n\ndef read_sql_query(\n sql,\n con,\n index_col=None,\n coerce_float: bool = True,\n params=None,\n parse_dates=None,\n chunksize: int | None = None,\n dtype: DtypeArg | None = None,\n) -> DataFrame | Iterator[DataFrame]:\n \"\"\"\n Read SQL query into a DataFrame.\n\n Returns a DataFrame corresponding to the result set of the query\n string. Optionally provide an `index_col` parameter to use one of the\n columns as the index, otherwise default integer index will be used.\n\n Parameters\n ----------\n sql : str SQL query or SQLAlchemy Selectable (select or text object)\n SQL query to be executed.\n con : SQLAlchemy connectable, str, or sqlite3 connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. If a DBAPI2 object, only sqlite3 is supported.\n index_col : str or list of str, optional, default: None\n Column(s) to set as index(MultiIndex).\n coerce_float : bool, default True\n Attempts to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point. Useful for SQL result sets.\n params : list, tuple or dict, optional, default: None\n List of parameters to pass to execute method. The syntax used\n to pass parameters is database driver dependent. Check your\n database driver documentation for which of the five syntax styles,\n described in PEP 249's paramstyle, is supported.\n Eg. 
for psycopg2, uses %(name)s so use params={'name' : 'value'}.\n parse_dates : list or dict, default: None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict corresponds\n to the keyword arguments of :func:`pandas.to_datetime`\n Especially useful with databases without native Datetime support,\n such as SQLite.\n chunksize : int, default None\n If specified, return an iterator where `chunksize` is the number of\n rows to include in each chunk.\n dtype : Type name or dict of columns\n Data type for data or columns. E.g. np.float64 or\n {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}.\n\n .. versionadded:: 1.3.0\n\n Returns\n -------\n DataFrame or Iterator[DataFrame]\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n read_sql : Read SQL query or database table into a DataFrame.\n\n Notes\n -----\n Any datetime values with time zone information parsed via the `parse_dates`\n parameter will be converted to UTC.\n \"\"\"\n pandas_sql = pandasSQL_builder(con)\n return pandas_sql.read_query(\n sql,\n index_col=index_col,\n params=params,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n chunksize=chunksize,\n dtype=dtype,\n )\n\n\n@overload\ndef read_sql(\n sql,\n con,\n index_col=...,\n coerce_float=...,\n params=...,\n parse_dates=...,\n columns=...,\n chunksize: None = ...,\n) -> DataFrame:\n ...\n\n\n@overload\ndef read_sql(\n sql,\n con,\n index_col=...,\n coerce_float=...,\n params=...,\n parse_dates=...,\n columns=...,\n chunksize: int = ...,\n) -> Iterator[DataFrame]:\n ...\n\n\ndef read_sql(\n sql,\n con,\n index_col: str | Sequence[str] | None = None,\n coerce_float: bool = True,\n params=None,\n parse_dates=None,\n columns=None,\n chunksize: int | None = None,\n) -> DataFrame | Iterator[DataFrame]:\n \"\"\"\n Read SQL query or database table into a DataFrame.\n\n This function is a convenience wrapper around ``read_sql_table`` and\n ``read_sql_query`` (for backward compatibility). It will delegate\n to the specific function depending on the provided input. A SQL query\n will be routed to ``read_sql_query``, while a database table name will\n be routed to ``read_sql_table``. Note that the delegated function might\n have more specific notes about their functionality not listed here.\n\n Parameters\n ----------\n sql : str or SQLAlchemy Selectable (select or text object)\n SQL query to be executed or a table name.\n con : SQLAlchemy connectable, str, or sqlite3 connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible\n for engine disposal and connection closure for the SQLAlchemy connectable; str\n connections are closed automatically. See\n `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.\n index_col : str or list of str, optional, default: None\n Column(s) to set as index(MultiIndex).\n coerce_float : bool, default True\n Attempts to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets.\n params : list, tuple or dict, optional, default: None\n List of parameters to pass to execute method. The syntax used\n to pass parameters is database driver dependent. 
Check your\n database driver documentation for which of the five syntax styles,\n described in PEP 249's paramstyle, is supported.\n Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.\n parse_dates : list or dict, default: None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict corresponds\n to the keyword arguments of :func:`pandas.to_datetime`\n Especially useful with databases without native Datetime support,\n such as SQLite.\n columns : list, default: None\n List of column names to select from SQL table (only used when reading\n a table).\n chunksize : int, default None\n If specified, return an iterator where `chunksize` is the\n number of rows to include in each chunk.\n\n Returns\n -------\n DataFrame or Iterator[DataFrame]\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n read_sql_query : Read SQL query into a DataFrame.\n\n Examples\n --------\n Read data from SQL via either a SQL query or a SQL tablename.\n When using a SQLite database only SQL queries are accepted,\n providing only the SQL tablename will result in an error.\n\n >>> from sqlite3 import connect\n >>> conn = connect(':memory:')\n >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],\n ... columns=['int_column', 'date_column'])\n >>> df.to_sql('test_data', conn)\n 2\n\n >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)\n int_column date_column\n 0 0 10/11/12\n 1 1 12/11/10\n\n >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP\n\n Apply date parsing to columns through the ``parse_dates`` argument\n\n >>> pd.read_sql('SELECT int_column, date_column FROM test_data',\n ... conn,\n ... parse_dates=[\"date_column\"])\n int_column date_column\n 0 0 2012-10-11\n 1 1 2010-12-11\n\n The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.\n Custom argument values for applying ``pd.to_datetime`` on a column are specified\n via a dictionary format:\n 1. Ignore errors while parsing the values of \"date_column\"\n\n >>> pd.read_sql('SELECT int_column, date_column FROM test_data',\n ... conn,\n ... parse_dates={\"date_column\": {\"errors\": \"ignore\"}})\n int_column date_column\n 0 0 2012-10-11\n 1 1 2010-12-11\n\n 2. Apply a dayfirst date parsing order on the values of \"date_column\"\n\n >>> pd.read_sql('SELECT int_column, date_column FROM test_data',\n ... conn,\n ... parse_dates={\"date_column\": {\"dayfirst\": True}})\n int_column date_column\n 0 0 2012-11-10\n 1 1 2010-11-12\n\n 3. Apply custom formatting when date parsing the values of \"date_column\"\n\n >>> pd.read_sql('SELECT int_column, date_column FROM test_data',\n ... conn,\n ... 
parse_dates={\"date_column\": {\"format\": \"%d/%m/%y\"}})\n int_column date_column\n 0 0 2012-11-10\n 1 1 2010-11-12\n \"\"\"\n pandas_sql = pandasSQL_builder(con)\n\n if isinstance(pandas_sql, SQLiteDatabase):\n return pandas_sql.read_query(\n sql,\n index_col=index_col,\n params=params,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n chunksize=chunksize,\n )\n\n try:\n _is_table_name = pandas_sql.has_table(sql)\n except Exception:\n # using generic exception to catch errors from sql drivers (GH24988)\n _is_table_name = False\n\n if _is_table_name:\n pandas_sql.meta.reflect(bind=pandas_sql.connectable, only=[sql])\n return pandas_sql.read_table(\n sql,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n columns=columns,\n chunksize=chunksize,\n )\n else:\n return pandas_sql.read_query(\n sql,\n index_col=index_col,\n params=params,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n chunksize=chunksize,\n )\n\n\ndef to_sql(\n frame,\n name: str,\n con,\n schema: str | None = None,\n if_exists: str = \"fail\",\n index: bool = True,\n index_label=None,\n chunksize: int | None = None,\n dtype: DtypeArg | None = None,\n method: str | None = None,\n engine: str = \"auto\",\n **engine_kwargs,\n) -> int | None:\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Parameters\n ----------\n frame : DataFrame, Series\n name : str\n Name of SQL table.\n con : SQLAlchemy connectable(engine/connection) or database string URI\n or sqlite3 DBAPI2 connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library.\n If a DBAPI2 object, only sqlite3 is supported.\n schema : str, optional\n Name of SQL schema in database to write to (if database flavor\n supports this). If None, use default schema (default).\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n - fail: If table exists, do nothing.\n - replace: If table exists, drop it, recreate it, and insert data.\n - append: If table exists, insert data. Create if does not exist.\n index : bool, default True\n Write DataFrame index as a column.\n index_label : str or sequence, optional\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Specify the number of rows in each batch to be written at a time.\n By default, all rows will be written at once.\n dtype : dict or scalar, optional\n Specifying the datatype for columns. If a dictionary is used, the\n keys should be the column names and the values should be the\n SQLAlchemy types or strings for the sqlite3 fallback mode. If a\n scalar is provided, it will be applied to all columns.\n method : {None, 'multi', callable}, optional\n Controls the SQL insertion clause used:\n\n - None : Uses standard SQL ``INSERT`` clause (one per row).\n - ``'multi'``: Pass multiple values in a single ``INSERT`` clause.\n - callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n engine : {'auto', 'sqlalchemy'}, default 'auto'\n SQL engine library to use. If 'auto', then the option\n ``io.sql.engine`` is used. The default ``io.sql.engine``\n behavior is 'sqlalchemy'\n\n .. versionadded:: 1.3.0\n\n **engine_kwargs\n Any additional kwargs are passed to the engine.\n\n Returns\n -------\n None or int\n Number of rows affected by to_sql. 
None is returned if the callable\n passed into ``method`` does not return the number of rows.\n\n .. versionadded:: 1.4.0\n\n Notes\n -----\n The returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor``\n or SQLAlchemy connectable. The returned value may not reflect the exact number of written\n rows as stipulated in the\n `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or\n `SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__\n \"\"\" # noqa:E501\n if if_exists not in (\"fail\", \"replace\", \"append\"):\n raise ValueError(f\"'{if_exists}' is not valid for if_exists\")\n\n pandas_sql = pandasSQL_builder(con, schema=schema)\n\n if isinstance(frame, Series):\n frame = frame.to_frame()\n elif not isinstance(frame, DataFrame):\n raise NotImplementedError(\n \"'frame' argument should be either a Series or a DataFrame\"\n )\n\n return pandas_sql.to_sql(\n frame,\n name,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n schema=schema,\n chunksize=chunksize,\n dtype=dtype,\n method=method,\n engine=engine,\n **engine_kwargs,\n )\n\n\ndef has_table(table_name: str, con, schema: str | None = None):\n \"\"\"\n Check if DataBase has named table.\n\n Parameters\n ----------\n table_name: string\n Name of SQL table.\n con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library.\n If a DBAPI2 object, only sqlite3 is supported.\n schema : string, default None\n Name of SQL schema in database to write to (if database flavor supports\n this). If None, use default schema (default).\n\n Returns\n -------\n boolean\n \"\"\"\n pandas_sql = pandasSQL_builder(con, schema=schema)\n return pandas_sql.has_table(table_name)\n\n\ntable_exists = has_table\n\n\ndef pandasSQL_builder(con, schema: str | None = None):\n \"\"\"\n Convenience function to return the correct PandasSQL subclass based on the\n provided parameters.\n \"\"\"\n import sqlite3\n\n if isinstance(con, sqlite3.Connection) or con is None:\n return SQLiteDatabase(con)\n\n sqlalchemy = import_optional_dependency(\"sqlalchemy\")\n\n if isinstance(con, str):\n con = sqlalchemy.create_engine(con)\n\n if isinstance(con, sqlalchemy.engine.Connectable):\n return SQLDatabase(con, schema=schema)\n\n raise ValueError(\n \"pandas only support SQLAlchemy connectable(engine/connection) or\"\n \"database string URI or sqlite3 DBAPI2 connection\"\n )\n\n\nclass SQLTable(PandasObject):\n \"\"\"\n For mapping Pandas tables to SQL tables.\n Uses fact that table is reflected by SQLAlchemy to\n do better type conversions.\n Also holds various flags needed to avoid having to\n pass them between functions all the time.\n \"\"\"\n\n # TODO: support for multiIndex\n\n def __init__(\n self,\n name: str,\n pandas_sql_engine,\n frame=None,\n index=True,\n if_exists=\"fail\",\n prefix=\"pandas\",\n index_label=None,\n schema=None,\n keys=None,\n dtype: DtypeArg | None = None,\n ):\n self.name = name\n self.pd_sql = pandas_sql_engine\n self.prefix = prefix\n self.frame = frame\n self.index = self._index_name(index, index_label)\n self.schema = schema\n self.if_exists = if_exists\n self.keys = keys\n self.dtype = dtype\n\n if frame is not None:\n # We want to initialize based on a dataframe\n self.table = self._create_table_setup()\n else:\n # no data provided, read-only mode\n self.table = self.pd_sql.get_table(self.name, self.schema)\n\n if 
self.table is None:\n raise ValueError(f\"Could not init table '{name}'\")\n\n def exists(self):\n return self.pd_sql.has_table(self.name, self.schema)\n\n def sql_schema(self):\n from sqlalchemy.schema import CreateTable\n\n return str(CreateTable(self.table).compile(self.pd_sql.connectable))\n\n def _execute_create(self):\n # Inserting table into database, add to MetaData object\n if _gt14():\n self.table = self.table.to_metadata(self.pd_sql.meta)\n else:\n self.table = self.table.tometadata(self.pd_sql.meta)\n self.table.create(bind=self.pd_sql.connectable)\n\n def create(self):\n if self.exists():\n if self.if_exists == \"fail\":\n raise ValueError(f\"Table '{self.name}' already exists.\")\n elif self.if_exists == \"replace\":\n self.pd_sql.drop_table(self.name, self.schema)\n self._execute_create()\n elif self.if_exists == \"append\":\n pass\n else:\n raise ValueError(f\"'{self.if_exists}' is not valid for if_exists\")\n else:\n self._execute_create()\n\n def _execute_insert(self, conn, keys: list[str], data_iter) -> int:\n \"\"\"\n Execute SQL statement inserting data\n\n Parameters\n ----------\n conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection\n keys : list of str\n Column names\n data_iter : generator of list\n Each item contains a list of values to be inserted\n \"\"\"\n data = [dict(zip(keys, row)) for row in data_iter]\n result = conn.execute(self.table.insert(), data)\n return result.rowcount\n\n def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int:\n \"\"\"\n Alternative to _execute_insert for DBs support multivalue INSERT.\n\n Note: multi-value insert is usually faster for analytics DBs\n and tables containing a few columns\n but performance degrades quickly with increase of columns.\n \"\"\"\n\n from sqlalchemy import insert\n\n data = [dict(zip(keys, row)) for row in data_iter]\n stmt = insert(self.table).values(data)\n result = conn.execute(stmt)\n return result.rowcount\n\n def insert_data(self):\n if self.index is not None:\n temp = self.frame.copy()\n temp.index.names = self.index\n try:\n temp.reset_index(inplace=True)\n except ValueError as err:\n raise ValueError(f\"duplicate name in index/columns: {err}\") from err\n else:\n temp = self.frame\n\n column_names = list(map(str, temp.columns))\n ncols = len(column_names)\n data_list = [None] * ncols\n\n for i, (_, ser) in enumerate(temp.items()):\n vals = ser._values\n if vals.dtype.kind == \"M\":\n d = vals.to_pydatetime()\n elif vals.dtype.kind == \"m\":\n # store as integers, see GH#6921, GH#7076\n d = vals.view(\"i8\").astype(object)\n else:\n d = vals.astype(object)\n\n assert isinstance(d, np.ndarray), type(d)\n\n if ser._can_hold_na:\n # Note: this will miss timedeltas since they are converted to int\n mask = isna(d)\n d[mask] = None\n\n # error: No overload variant of \"__setitem__\" of \"list\" matches\n # argument types \"int\", \"ndarray\"\n data_list[i] = d # type: ignore[call-overload]\n\n return column_names, data_list\n\n def insert(\n self, chunksize: int | None = None, method: str | None = None\n ) -> int | None:\n\n # set insert method\n if method is None:\n exec_insert = self._execute_insert\n elif method == \"multi\":\n exec_insert = self._execute_insert_multi\n elif callable(method):\n exec_insert = partial(method, self)\n else:\n raise ValueError(f\"Invalid parameter `method`: {method}\")\n\n keys, data_list = self.insert_data()\n\n nrows = len(self.frame)\n\n if nrows == 0:\n return 0\n\n if chunksize is None:\n chunksize = nrows\n elif chunksize == 0:\n raise 
ValueError(\"chunksize argument should be non-zero\")\n\n chunks = (nrows // chunksize) + 1\n total_inserted = 0\n with self.pd_sql.run_transaction() as conn:\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, nrows)\n if start_i >= end_i:\n break\n\n chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list))\n num_inserted = exec_insert(conn, keys, chunk_iter)\n if num_inserted is None:\n total_inserted = None\n else:\n total_inserted += num_inserted\n return total_inserted\n\n def _query_iterator(\n self,\n result,\n chunksize: str | None,\n columns,\n coerce_float: bool = True,\n parse_dates=None,\n ):\n \"\"\"Return generator through chunked result set.\"\"\"\n has_read_data = False\n while True:\n data = result.fetchmany(chunksize)\n if not data:\n if not has_read_data:\n yield DataFrame.from_records(\n [], columns=columns, coerce_float=coerce_float\n )\n break\n else:\n has_read_data = True\n self.frame = DataFrame.from_records(\n data, columns=columns, coerce_float=coerce_float\n )\n\n self._harmonize_columns(parse_dates=parse_dates)\n\n if self.index is not None:\n self.frame.set_index(self.index, inplace=True)\n\n yield self.frame\n\n def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None):\n from sqlalchemy import select\n\n if columns is not None and len(columns) > 0:\n cols = [self.table.c[n] for n in columns]\n if self.index is not None:\n for idx in self.index[::-1]:\n cols.insert(0, self.table.c[idx])\n sql_select = select(*cols) if _gt14() else select(cols)\n else:\n sql_select = select(self.table) if _gt14() else self.table.select()\n\n result = self.pd_sql.execute(sql_select)\n column_names = result.keys()\n\n if chunksize is not None:\n return self._query_iterator(\n result,\n chunksize,\n column_names,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n )\n else:\n data = result.fetchall()\n self.frame = DataFrame.from_records(\n data, columns=column_names, coerce_float=coerce_float\n )\n\n self._harmonize_columns(parse_dates=parse_dates)\n\n if self.index is not None:\n self.frame.set_index(self.index, inplace=True)\n\n return self.frame\n\n def _index_name(self, index, index_label):\n # for writing: index=True to include index in sql table\n if index is True:\n nlevels = self.frame.index.nlevels\n # if index_label is specified, set this as index name(s)\n if index_label is not None:\n if not isinstance(index_label, list):\n index_label = [index_label]\n if len(index_label) != nlevels:\n raise ValueError(\n \"Length of 'index_label' should match number of \"\n f\"levels, which is {nlevels}\"\n )\n else:\n return index_label\n # return the used column labels for the index columns\n if (\n nlevels == 1\n and \"index\" not in self.frame.columns\n and self.frame.index.name is None\n ):\n return [\"index\"]\n else:\n return com.fill_missing_names(self.frame.index.names)\n\n # for reading: index=(list of) string to specify column to set as index\n elif isinstance(index, str):\n return [index]\n elif isinstance(index, list):\n return index\n else:\n return None\n\n def _get_column_names_and_types(self, dtype_mapper):\n column_names_and_types = []\n if self.index is not None:\n for i, idx_label in enumerate(self.index):\n idx_type = dtype_mapper(self.frame.index._get_level_values(i))\n column_names_and_types.append((str(idx_label), idx_type, True))\n\n column_names_and_types += [\n (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)\n for i in range(len(self.frame.columns))\n ]\n\n 
return column_names_and_types\n\n def _create_table_setup(self):\n from sqlalchemy import (\n Column,\n PrimaryKeyConstraint,\n Table,\n )\n from sqlalchemy.schema import MetaData\n\n column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)\n\n columns = [\n Column(name, typ, index=is_index)\n for name, typ, is_index in column_names_and_types\n ]\n\n if self.keys is not None:\n if not is_list_like(self.keys):\n keys = [self.keys]\n else:\n keys = self.keys\n pkc = PrimaryKeyConstraint(*keys, name=self.name + \"_pk\")\n columns.append(pkc)\n\n schema = self.schema or self.pd_sql.meta.schema\n\n # At this point, attach to new metadata, only attach to self.meta\n # once table is created.\n meta = MetaData()\n return Table(self.name, meta, *columns, schema=schema)\n\n def _harmonize_columns(self, parse_dates=None):\n \"\"\"\n Make the DataFrame's column types align with the SQL table\n column types.\n Need to work around limited NA value support. Floats are always\n fine, ints must always be floats if there are Null values.\n Booleans are hard because converting bool column with None replaces\n all Nones with false. Therefore only convert bool if there are no\n NA values.\n Datetimes should already be converted to np.datetime64 if supported,\n but here we also force conversion if required.\n \"\"\"\n parse_dates = _process_parse_dates_argument(parse_dates)\n\n for sql_col in self.table.columns:\n col_name = sql_col.name\n try:\n df_col = self.frame[col_name]\n\n # Handle date parsing upfront; don't try to convert columns\n # twice\n if col_name in parse_dates:\n try:\n fmt = parse_dates[col_name]\n except TypeError:\n fmt = None\n self.frame[col_name] = _handle_date_column(df_col, format=fmt)\n continue\n\n # the type the dataframe column should have\n col_type = self._get_dtype(sql_col.type)\n\n if (\n col_type is datetime\n or col_type is date\n or col_type is DatetimeTZDtype\n ):\n # Convert tz-aware Datetime SQL columns to UTC\n utc = col_type is DatetimeTZDtype\n self.frame[col_name] = _handle_date_column(df_col, utc=utc)\n elif col_type is float:\n # floats support NA, can always convert!\n self.frame[col_name] = df_col.astype(col_type, copy=False)\n\n elif len(df_col) == df_col.count():\n # No NA values, can convert ints and bools\n if col_type is np.dtype(\"int64\") or col_type is bool:\n self.frame[col_name] = df_col.astype(col_type, copy=False)\n except KeyError:\n pass # this column not in results\n\n def _sqlalchemy_type(self, col):\n\n dtype: DtypeArg = self.dtype or {}\n if is_dict_like(dtype):\n dtype = cast(dict, dtype)\n if col.name in dtype:\n return dtype[col.name]\n\n # Infer type of column, while ignoring missing values.\n # Needed for inserting typed data containing NULLs, GH 8778.\n col_type = lib.infer_dtype(col, skipna=True)\n\n from sqlalchemy.types import (\n TIMESTAMP,\n BigInteger,\n Boolean,\n Date,\n DateTime,\n Float,\n Integer,\n SmallInteger,\n Text,\n Time,\n )\n\n if col_type == \"datetime64\" or col_type == \"datetime\":\n # GH 9086: TIMESTAMP is the suggested type if the column contains\n # timezone information\n try:\n if col.dt.tz is not None:\n return TIMESTAMP(timezone=True)\n except AttributeError:\n # The column is actually a DatetimeIndex\n # GH 26761 or an Index with date-like data e.g. 
9999-01-01\n if getattr(col, \"tz\", None) is not None:\n return TIMESTAMP(timezone=True)\n return DateTime\n if col_type == \"timedelta64\":\n warnings.warn(\n \"the 'timedelta' type is not supported, and will be \"\n \"written as integer values (ns frequency) to the database.\",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n return BigInteger\n elif col_type == \"floating\":\n if col.dtype == \"float32\":\n return Float(precision=23)\n else:\n return Float(precision=53)\n elif col_type == \"integer\":\n # GH35076 Map pandas integer to optimal SQLAlchemy integer type\n if col.dtype.name.lower() in (\"int8\", \"uint8\", \"int16\"):\n return SmallInteger\n elif col.dtype.name.lower() in (\"uint16\", \"int32\"):\n return Integer\n elif col.dtype.name.lower() == \"uint64\":\n raise ValueError(\"Unsigned 64 bit integer datatype is not supported\")\n else:\n return BigInteger\n elif col_type == \"boolean\":\n return Boolean\n elif col_type == \"date\":\n return Date\n elif col_type == \"time\":\n return Time\n elif col_type == \"complex\":\n raise ValueError(\"Complex datatypes not supported\")\n\n return Text\n\n def _get_dtype(self, sqltype):\n from sqlalchemy.types import (\n TIMESTAMP,\n Boolean,\n Date,\n DateTime,\n Float,\n Integer,\n )\n\n if isinstance(sqltype, Float):\n return float\n elif isinstance(sqltype, Integer):\n # TODO: Refine integer size.\n return np.dtype(\"int64\")\n elif isinstance(sqltype, TIMESTAMP):\n # we have a timezone capable type\n if not sqltype.timezone:\n return datetime\n return DatetimeTZDtype\n elif isinstance(sqltype, DateTime):\n # Caution: np.datetime64 is also a subclass of np.number.\n return datetime\n elif isinstance(sqltype, Date):\n return date\n elif isinstance(sqltype, Boolean):\n return bool\n return object\n\n\nclass PandasSQL(PandasObject):\n \"\"\"\n Subclasses Should define read_sql and to_sql.\n \"\"\"\n\n def read_sql(self, *args, **kwargs):\n raise ValueError(\n \"PandasSQL must be created with an SQLAlchemy \"\n \"connectable or sqlite connection\"\n )\n\n def to_sql(\n self,\n frame,\n name,\n if_exists=\"fail\",\n index=True,\n index_label=None,\n schema=None,\n chunksize=None,\n dtype: DtypeArg | None = None,\n method=None,\n ) -> int | None:\n raise ValueError(\n \"PandasSQL must be created with an SQLAlchemy \"\n \"connectable or sqlite connection\"\n )\n\n\nclass BaseEngine:\n def insert_records(\n self,\n table: SQLTable,\n con,\n frame,\n name,\n index=True,\n schema=None,\n chunksize=None,\n method=None,\n **engine_kwargs,\n ) -> int | None:\n \"\"\"\n Inserts data into already-prepared table\n \"\"\"\n raise AbstractMethodError(self)\n\n\nclass SQLAlchemyEngine(BaseEngine):\n def __init__(self):\n import_optional_dependency(\n \"sqlalchemy\", extra=\"sqlalchemy is required for SQL support.\"\n )\n\n def insert_records(\n self,\n table: SQLTable,\n con,\n frame,\n name,\n index=True,\n schema=None,\n chunksize=None,\n method=None,\n **engine_kwargs,\n ) -> int | None:\n from sqlalchemy import exc\n\n try:\n return table.insert(chunksize=chunksize, method=method)\n except exc.SQLAlchemyError as err:\n # GH34431\n # https://stackoverflow.com/a/67358288/6067848\n msg = r\"\"\"(\\(1054, \"Unknown column 'inf(e0)?' 
in 'field list'\"\\))(?#\n )|inf can not be used with MySQL\"\"\"\n err_text = str(err.orig)\n if re.search(msg, err_text):\n raise ValueError(\"inf cannot be used with MySQL\") from err\n else:\n raise err\n\n\ndef get_engine(engine: str) -> BaseEngine:\n \"\"\"return our implementation\"\"\"\n if engine == \"auto\":\n engine = get_option(\"io.sql.engine\")\n\n if engine == \"auto\":\n # try engines in this order\n engine_classes = [SQLAlchemyEngine]\n\n error_msgs = \"\"\n for engine_class in engine_classes:\n try:\n return engine_class()\n except ImportError as err:\n error_msgs += \"\\n - \" + str(err)\n\n raise ImportError(\n \"Unable to find a usable engine; \"\n \"tried using: 'sqlalchemy'.\\n\"\n \"A suitable version of \"\n \"sqlalchemy is required for sql I/O \"\n \"support.\\n\"\n \"Trying to import the above resulted in these errors:\"\n f\"{error_msgs}\"\n )\n\n elif engine == \"sqlalchemy\":\n return SQLAlchemyEngine()\n\n raise ValueError(\"engine must be one of 'auto', 'sqlalchemy'\")\n\n\nclass SQLDatabase(PandasSQL):\n \"\"\"\n This class enables conversion between DataFrame and SQL databases\n using SQLAlchemy to handle DataBase abstraction.\n\n Parameters\n ----------\n engine : SQLAlchemy connectable\n Connectable to connect with the database. Using SQLAlchemy makes it\n possible to use any DB supported by that library.\n schema : string, default None\n Name of SQL schema in database to write to (if database flavor\n supports this). If None, use default schema (default).\n\n \"\"\"\n\n def __init__(self, engine, schema: str | None = None):\n from sqlalchemy.schema import MetaData\n\n self.connectable = engine\n self.meta = MetaData(schema=schema)\n\n @contextmanager\n def run_transaction(self):\n from sqlalchemy.engine import Engine\n\n if isinstance(self.connectable, Engine):\n with self.connectable.connect() as conn:\n with conn.begin():\n yield conn\n else:\n yield self.connectable\n\n def execute(self, *args, **kwargs):\n \"\"\"Simple passthrough to SQLAlchemy connectable\"\"\"\n return self.connectable.execution_options().execute(*args, **kwargs)\n\n def read_table(\n self,\n table_name: str,\n index_col: str | Sequence[str] | None = None,\n coerce_float: bool = True,\n parse_dates=None,\n columns=None,\n schema: str | None = None,\n chunksize: int | None = None,\n ):\n \"\"\"\n Read SQL database table into a DataFrame.\n\n Parameters\n ----------\n table_name : str\n Name of SQL table in database.\n index_col : string, optional, default: None\n Column to set as index.\n coerce_float : bool, default True\n Attempts to convert values of non-string, non-numeric objects\n (like decimal.Decimal) to floating point. This can result in\n loss of precision.\n parse_dates : list or dict, default: None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg}``, where the arg corresponds\n to the keyword arguments of :func:`pandas.to_datetime`.\n Especially useful with databases without native Datetime support,\n such as SQLite.\n columns : list, default: None\n List of column names to select from SQL table.\n schema : string, default None\n Name of SQL schema in database to query (if database flavor\n supports this). 
If specified, this overwrites the default\n schema of the SQL database object.\n chunksize : int, default None\n If specified, return an iterator where `chunksize` is the number\n of rows to include in each chunk.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n pandas.read_sql_table\n SQLDatabase.read_query\n\n \"\"\"\n table = SQLTable(table_name, self, index=index_col, schema=schema)\n return table.read(\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n columns=columns,\n chunksize=chunksize,\n )\n\n @staticmethod\n def _query_iterator(\n result,\n chunksize: int,\n columns,\n index_col=None,\n coerce_float=True,\n parse_dates=None,\n dtype: DtypeArg | None = None,\n ):\n \"\"\"Return generator through chunked result set\"\"\"\n has_read_data = False\n while True:\n data = result.fetchmany(chunksize)\n if not data:\n if not has_read_data:\n yield _wrap_result(\n [],\n columns,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n )\n break\n else:\n has_read_data = True\n yield _wrap_result(\n data,\n columns,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n dtype=dtype,\n )\n\n def read_query(\n self,\n sql: str,\n index_col: str | None = None,\n coerce_float: bool = True,\n parse_dates=None,\n params=None,\n chunksize: int | None = None,\n dtype: DtypeArg | None = None,\n ):\n \"\"\"\n Read SQL query into a DataFrame.\n\n Parameters\n ----------\n sql : str\n SQL query to be executed.\n index_col : string, optional, default: None\n Column name to use as index for the returned DataFrame object.\n coerce_float : bool, default True\n Attempt to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets.\n params : list, tuple or dict, optional, default: None\n List of parameters to pass to execute method. The syntax used\n to pass parameters is database driver dependent. Check your\n database driver documentation for which of the five syntax styles,\n described in PEP 249's paramstyle, is supported.\n Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}\n parse_dates : list or dict, default: None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict\n corresponds to the keyword arguments of\n :func:`pandas.to_datetime` Especially useful with databases\n without native Datetime support, such as SQLite.\n chunksize : int, default None\n If specified, return an iterator where `chunksize` is the number\n of rows to include in each chunk.\n dtype : Type name or dict of columns\n Data type for data or columns. E.g. np.float64 or\n {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}\n\n .. 
versionadded:: 1.3.0\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n read_sql\n\n \"\"\"\n args = _convert_params(sql, params)\n\n result = self.execute(*args)\n columns = result.keys()\n\n if chunksize is not None:\n return self._query_iterator(\n result,\n chunksize,\n columns,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n dtype=dtype,\n )\n else:\n data = result.fetchall()\n frame = _wrap_result(\n data,\n columns,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n dtype=dtype,\n )\n return frame\n\n read_sql = read_query\n\n def prep_table(\n self,\n frame,\n name,\n if_exists=\"fail\",\n index=True,\n index_label=None,\n schema=None,\n dtype: DtypeArg | None = None,\n ) -> SQLTable:\n \"\"\"\n Prepares table in the database for data insertion. Creates it if needed, etc.\n \"\"\"\n if dtype:\n if not is_dict_like(dtype):\n # error: Value expression in dictionary comprehension has incompatible\n # type \"Union[ExtensionDtype, str, dtype[Any], Type[object],\n # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],\n # Type[str], Type[float], Type[int], Type[complex], Type[bool],\n # Type[object]]]]\"; expected type \"Union[ExtensionDtype, str,\n # dtype[Any], Type[object]]\"\n dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]\n else:\n dtype = cast(dict, dtype)\n\n from sqlalchemy.types import (\n TypeEngine,\n to_instance,\n )\n\n for col, my_type in dtype.items():\n if not isinstance(to_instance(my_type), TypeEngine):\n raise ValueError(f\"The type of {col} is not a SQLAlchemy type\")\n\n table = SQLTable(\n name,\n self,\n frame=frame,\n index=index,\n if_exists=if_exists,\n index_label=index_label,\n schema=schema,\n dtype=dtype,\n )\n table.create()\n return table\n\n def check_case_sensitive(\n self,\n name,\n schema,\n ):\n \"\"\"\n Checks table name for issues with case-sensitivity.\n Method is called after data is inserted.\n \"\"\"\n if not name.isdigit() and not name.islower():\n # check for potentially case sensitivity issues (GH7815)\n # Only check when name is not a number and name is not lower case\n engine = self.connectable.engine\n with self.connectable.connect() as conn:\n if _gt14():\n from sqlalchemy import inspect\n\n insp = inspect(conn)\n table_names = insp.get_table_names(\n schema=schema or self.meta.schema\n )\n else:\n table_names = engine.table_names(\n schema=schema or self.meta.schema, connection=conn\n )\n if name not in table_names:\n msg = (\n f\"The provided table name '{name}' is not found exactly as \"\n \"such in the database after writing the table, possibly \"\n \"due to case sensitivity issues. Consider using lower \"\n \"case table names.\"\n )\n warnings.warn(msg, UserWarning)\n\n def to_sql(\n self,\n frame,\n name,\n if_exists=\"fail\",\n index=True,\n index_label=None,\n schema=None,\n chunksize=None,\n dtype: DtypeArg | None = None,\n method=None,\n engine=\"auto\",\n **engine_kwargs,\n ) -> int | None:\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Parameters\n ----------\n frame : DataFrame\n name : string\n Name of SQL table.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n - fail: If table exists, do nothing.\n - replace: If table exists, drop it, recreate it, and insert data.\n - append: If table exists, insert data. 
Create if does not exist.\n index : boolean, default True\n Write DataFrame index as a column.\n index_label : string or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n schema : string, default None\n Name of SQL schema in database to write to (if database flavor\n supports this). If specified, this overwrites the default\n schema of the SQLDatabase object.\n chunksize : int, default None\n If not None, then rows will be written in batches of this size at a\n time. If None, all rows will be written at once.\n dtype : single type or dict of column name to SQL type, default None\n Optional specifying the datatype for columns. The SQL type should\n be a SQLAlchemy type. If all columns are of the same type, one\n single value can be used.\n method : {None', 'multi', callable}, default None\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n engine : {'auto', 'sqlalchemy'}, default 'auto'\n SQL engine library to use. If 'auto', then the option\n ``io.sql.engine`` is used. The default ``io.sql.engine``\n behavior is 'sqlalchemy'\n\n .. versionadded:: 1.3.0\n\n **engine_kwargs\n Any additional kwargs are passed to the engine.\n \"\"\"\n sql_engine = get_engine(engine)\n\n table = self.prep_table(\n frame=frame,\n name=name,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n schema=schema,\n dtype=dtype,\n )\n\n total_inserted = sql_engine.insert_records(\n table=table,\n con=self.connectable,\n frame=frame,\n name=name,\n index=index,\n schema=schema,\n chunksize=chunksize,\n method=method,\n **engine_kwargs,\n )\n\n self.check_case_sensitive(name=name, schema=schema)\n return total_inserted\n\n @property\n def tables(self):\n return self.meta.tables\n\n def has_table(self, name: str, schema: str | None = None):\n if _gt14():\n from sqlalchemy import inspect\n\n insp = inspect(self.connectable)\n return insp.has_table(name, schema or self.meta.schema)\n else:\n return self.connectable.run_callable(\n self.connectable.dialect.has_table, name, schema or self.meta.schema\n )\n\n def get_table(self, table_name: str, schema: str | None = None):\n from sqlalchemy import (\n Numeric,\n Table,\n )\n\n schema = schema or self.meta.schema\n tbl = Table(\n table_name, self.meta, autoload_with=self.connectable, schema=schema\n )\n for column in tbl.columns:\n if isinstance(column.type, Numeric):\n column.type.asdecimal = False\n return tbl\n\n def drop_table(self, table_name: str, schema: str | None = None):\n schema = schema or self.meta.schema\n if self.has_table(table_name, schema):\n self.meta.reflect(bind=self.connectable, only=[table_name], schema=schema)\n self.get_table(table_name, schema).drop(bind=self.connectable)\n self.meta.clear()\n\n def _create_sql_schema(\n self,\n frame: DataFrame,\n table_name: str,\n keys: list[str] | None = None,\n dtype: DtypeArg | None = None,\n schema: str | None = None,\n ):\n table = SQLTable(\n table_name,\n self,\n frame=frame,\n index=False,\n keys=keys,\n dtype=dtype,\n schema=schema,\n )\n return str(table.sql_schema())\n\n\n# ---- SQL without SQLAlchemy ---\n# sqlite-specific sql strings and 
handler class\n# dictionary used for readability purposes\n_SQL_TYPES = {\n \"string\": \"TEXT\",\n \"floating\": \"REAL\",\n \"integer\": \"INTEGER\",\n \"datetime\": \"TIMESTAMP\",\n \"date\": \"DATE\",\n \"time\": \"TIME\",\n \"boolean\": \"INTEGER\",\n}\n\n\ndef _get_unicode_name(name):\n try:\n uname = str(name).encode(\"utf-8\", \"strict\").decode(\"utf-8\")\n except UnicodeError as err:\n raise ValueError(f\"Cannot convert identifier to UTF-8: '{name}'\") from err\n return uname\n\n\ndef _get_valid_sqlite_name(name):\n # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\\\n # -for-sqlite-table-column-names-in-python\n # Ensure the string can be encoded as UTF-8.\n # Ensure the string does not include any NUL characters.\n # Replace all \" with \"\".\n # Wrap the entire thing in double quotes.\n\n uname = _get_unicode_name(name)\n if not len(uname):\n raise ValueError(\"Empty table or column name specified\")\n\n nul_index = uname.find(\"\\x00\")\n if nul_index >= 0:\n raise ValueError(\"SQLite identifier cannot contain NULs\")\n return '\"' + uname.replace('\"', '\"\"') + '\"'\n\n\nclass SQLiteTable(SQLTable):\n \"\"\"\n Patch the SQLTable for fallback support.\n Instead of a table variable just use the Create Table statement.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # GH 8341\n # register an adapter callable for datetime.time object\n import sqlite3\n\n # this will transform time(12,34,56,789) into '12:34:56.000789'\n # (this is what sqlalchemy does)\n sqlite3.register_adapter(time, lambda _: _.strftime(\"%H:%M:%S.%f\"))\n super().__init__(*args, **kwargs)\n\n def sql_schema(self):\n return str(\";\\n\".join(self.table))\n\n def _execute_create(self):\n with self.pd_sql.run_transaction() as conn:\n for stmt in self.table:\n conn.execute(stmt)\n\n def insert_statement(self, *, num_rows: int):\n names = list(map(str, self.frame.columns))\n wld = \"?\" # wildcard char\n escape = _get_valid_sqlite_name\n\n if self.index is not None:\n for idx in self.index[::-1]:\n names.insert(0, idx)\n\n bracketed_names = [escape(column) for column in names]\n col_names = \",\".join(bracketed_names)\n\n row_wildcards = \",\".join([wld] * len(names))\n wildcards = \",\".join([f\"({row_wildcards})\" for _ in range(num_rows)])\n insert_statement = (\n f\"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}\"\n )\n return insert_statement\n\n def _execute_insert(self, conn, keys, data_iter) -> int:\n data_list = list(data_iter)\n conn.executemany(self.insert_statement(num_rows=1), data_list)\n return conn.rowcount\n\n def _execute_insert_multi(self, conn, keys, data_iter) -> int:\n data_list = list(data_iter)\n flattened_data = [x for row in data_list for x in row]\n conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)\n return conn.rowcount\n\n def _create_table_setup(self):\n \"\"\"\n Return a list of SQL statements that creates a table reflecting the\n structure of a DataFrame. 
The first entry will be a CREATE TABLE\n statement while the rest will be CREATE INDEX statements.\n \"\"\"\n column_names_and_types = self._get_column_names_and_types(self._sql_type_name)\n escape = _get_valid_sqlite_name\n\n create_tbl_stmts = [\n escape(cname) + \" \" + ctype for cname, ctype, _ in column_names_and_types\n ]\n\n if self.keys is not None and len(self.keys):\n if not is_list_like(self.keys):\n keys = [self.keys]\n else:\n keys = self.keys\n cnames_br = \", \".join([escape(c) for c in keys])\n create_tbl_stmts.append(\n f\"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})\"\n )\n if self.schema:\n schema_name = self.schema + \".\"\n else:\n schema_name = \"\"\n create_stmts = [\n \"CREATE TABLE \"\n + schema_name\n + escape(self.name)\n + \" (\\n\"\n + \",\\n \".join(create_tbl_stmts)\n + \"\\n)\"\n ]\n\n ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]\n if len(ix_cols):\n cnames = \"_\".join(ix_cols)\n cnames_br = \",\".join([escape(c) for c in ix_cols])\n create_stmts.append(\n \"CREATE INDEX \"\n + escape(\"ix_\" + self.name + \"_\" + cnames)\n + \"ON \"\n + escape(self.name)\n + \" (\"\n + cnames_br\n + \")\"\n )\n\n return create_stmts\n\n def _sql_type_name(self, col):\n dtype: DtypeArg = self.dtype or {}\n if is_dict_like(dtype):\n dtype = cast(dict, dtype)\n if col.name in dtype:\n return dtype[col.name]\n\n # Infer type of column, while ignoring missing values.\n # Needed for inserting typed data containing NULLs, GH 8778.\n col_type = lib.infer_dtype(col, skipna=True)\n\n if col_type == \"timedelta64\":\n warnings.warn(\n \"the 'timedelta' type is not supported, and will be \"\n \"written as integer values (ns frequency) to the database.\",\n UserWarning,\n stacklevel=find_stack_level(),\n )\n col_type = \"integer\"\n\n elif col_type == \"datetime64\":\n col_type = \"datetime\"\n\n elif col_type == \"empty\":\n col_type = \"string\"\n\n elif col_type == \"complex\":\n raise ValueError(\"Complex datatypes not supported\")\n\n if col_type not in _SQL_TYPES:\n col_type = \"string\"\n\n return _SQL_TYPES[col_type]\n\n\nclass SQLiteDatabase(PandasSQL):\n \"\"\"\n Version of SQLDatabase to support SQLite connections (fallback without\n SQLAlchemy). 
This should only be used internally.\n\n Parameters\n ----------\n con : sqlite connection object\n\n \"\"\"\n\n def __init__(self, con):\n self.con = con\n\n @contextmanager\n def run_transaction(self):\n cur = self.con.cursor()\n try:\n yield cur\n self.con.commit()\n except Exception:\n self.con.rollback()\n raise\n finally:\n cur.close()\n\n def execute(self, *args, **kwargs):\n cur = self.con.cursor()\n try:\n cur.execute(*args, **kwargs)\n return cur\n except Exception as exc:\n try:\n self.con.rollback()\n except Exception as inner_exc: # pragma: no cover\n ex = DatabaseError(\n f\"Execution failed on sql: {args[0]}\\n{exc}\\nunable to rollback\"\n )\n raise ex from inner_exc\n\n ex = DatabaseError(f\"Execution failed on sql '{args[0]}': {exc}\")\n raise ex from exc\n\n @staticmethod\n def _query_iterator(\n cursor,\n chunksize: int,\n columns,\n index_col=None,\n coerce_float: bool = True,\n parse_dates=None,\n dtype: DtypeArg | None = None,\n ):\n \"\"\"Return generator through chunked result set\"\"\"\n has_read_data = False\n while True:\n data = cursor.fetchmany(chunksize)\n if type(data) == tuple:\n data = list(data)\n if not data:\n cursor.close()\n if not has_read_data:\n yield DataFrame.from_records(\n [], columns=columns, coerce_float=coerce_float\n )\n break\n else:\n has_read_data = True\n yield _wrap_result(\n data,\n columns,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n dtype=dtype,\n )\n\n def read_query(\n self,\n sql,\n index_col=None,\n coerce_float: bool = True,\n params=None,\n parse_dates=None,\n chunksize: int | None = None,\n dtype: DtypeArg | None = None,\n ):\n\n args = _convert_params(sql, params)\n cursor = self.execute(*args)\n columns = [col_desc[0] for col_desc in cursor.description]\n\n if chunksize is not None:\n return self._query_iterator(\n cursor,\n chunksize,\n columns,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n dtype=dtype,\n )\n else:\n data = self._fetchall_as_list(cursor)\n cursor.close()\n\n frame = _wrap_result(\n data,\n columns,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n dtype=dtype,\n )\n return frame\n\n def _fetchall_as_list(self, cur):\n result = cur.fetchall()\n if not isinstance(result, list):\n result = list(result)\n return result\n\n def to_sql(\n self,\n frame,\n name,\n if_exists=\"fail\",\n index=True,\n index_label=None,\n schema=None,\n chunksize=None,\n dtype: DtypeArg | None = None,\n method=None,\n **kwargs,\n ) -> int | None:\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Parameters\n ----------\n frame: DataFrame\n name: string\n Name of SQL table.\n if_exists: {'fail', 'replace', 'append'}, default 'fail'\n fail: If table exists, do nothing.\n replace: If table exists, drop it, recreate it, and insert data.\n append: If table exists, insert data. Create if it does not exist.\n index : bool, default True\n Write DataFrame index as a column\n index_label : string or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n schema : string, default None\n Ignored parameter included for compatibility with SQLAlchemy\n version of ``to_sql``.\n chunksize : int, default None\n If not None, then rows will be written in batches of this\n size at a time. 
If None, all rows will be written at once.\n dtype : single type or dict of column name to SQL type, default None\n Optional specifying the datatype for columns. The SQL type should\n be a string. If all columns are of the same type, one single value\n can be used.\n method : {None, 'multi', callable}, default None\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n \"\"\"\n if dtype:\n if not is_dict_like(dtype):\n # error: Value expression in dictionary comprehension has incompatible\n # type \"Union[ExtensionDtype, str, dtype[Any], Type[object],\n # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],\n # Type[str], Type[float], Type[int], Type[complex], Type[bool],\n # Type[object]]]]\"; expected type \"Union[ExtensionDtype, str,\n # dtype[Any], Type[object]]\"\n dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]\n else:\n dtype = cast(dict, dtype)\n\n for col, my_type in dtype.items():\n if not isinstance(my_type, str):\n raise ValueError(f\"{col} ({my_type}) not a string\")\n\n table = SQLiteTable(\n name,\n self,\n frame=frame,\n index=index,\n if_exists=if_exists,\n index_label=index_label,\n dtype=dtype,\n )\n table.create()\n return table.insert(chunksize, method)\n\n def has_table(self, name: str, schema: str | None = None):\n\n wld = \"?\"\n query = f\"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};\"\n\n return len(self.execute(query, [name]).fetchall()) > 0\n\n def get_table(self, table_name: str, schema: str | None = None):\n return None # not supported in fallback mode\n\n def drop_table(self, name: str, schema: str | None = None):\n drop_sql = f\"DROP TABLE {_get_valid_sqlite_name(name)}\"\n self.execute(drop_sql)\n\n def _create_sql_schema(\n self,\n frame,\n table_name: str,\n keys=None,\n dtype: DtypeArg | None = None,\n schema: str | None = None,\n ):\n table = SQLiteTable(\n table_name,\n self,\n frame=frame,\n index=False,\n keys=keys,\n dtype=dtype,\n schema=schema,\n )\n return str(table.sql_schema())\n\n\ndef get_schema(\n frame,\n name: str,\n keys=None,\n con=None,\n dtype: DtypeArg | None = None,\n schema: str | None = None,\n):\n \"\"\"\n Get the SQL db table schema for the given frame.\n\n Parameters\n ----------\n frame : DataFrame\n name : str\n name of SQL table\n keys : string or sequence, default: None\n columns to use a primary key\n con: an open SQL database connection object or a SQLAlchemy connectable\n Using SQLAlchemy makes it possible to use any DB supported by that\n library, default: None\n If a DBAPI2 object, only sqlite3 is supported.\n dtype : dict of column name to SQL type, default None\n Optional specifying the datatype for columns. The SQL type should\n be a SQLAlchemy type, or a string for sqlite3 fallback connection.\n schema: str, default: None\n Optional specifying the schema to be used in creating the table.\n\n .. versionadded:: 1.2.0\n \"\"\"\n pandas_sql = pandasSQL_builder(con=con)\n return pandas_sql._create_sql_schema(\n frame, name, keys=keys, dtype=dtype, schema=schema\n )\n" ]
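# --- Editor's illustrative sketch (not part of the record above) ---
# A minimal, hedged reconstruction of how SQLiteTable.insert_statement builds a
# parameterised multi-row INSERT: one "?" wildcard per column, one "(...)"
# group per row, identifiers double-quoted in the style of
# _get_valid_sqlite_name. The helper names quote_ident and build_insert are
# hypothetical, not pandas API.
import sqlite3

def quote_ident(name):
    # Double any embedded quotes and wrap the identifier, SQLite style.
    return '"' + name.replace('"', '""') + '"'

def build_insert(table, columns, num_rows):
    col_names = ",".join(quote_ident(c) for c in columns)
    row = "(" + ",".join(["?"] * len(columns)) + ")"
    values = ",".join([row] * num_rows)
    return f"INSERT INTO {quote_ident(table)} ({col_names}) VALUES {values}"

con = sqlite3.connect(":memory:")
con.execute('CREATE TABLE "t" ("a" INTEGER, "b" TEXT)')
rows = [(1, "x"), (2, "y")]
flat = [v for r in rows for v in r]  # the multi-row path flattens the data
con.execute(build_insert("t", ["a", "b"], num_rows=len(rows)), flat)
print(con.execute('SELECT * FROM "t"').fetchall())  # [(1, 'x'), (2, 'y')]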
[ [ "pandas.util._exceptions.find_stack_level", "pandas.core.dtypes.common.is_list_like", "pandas.core.api.DataFrame.from_records", "pandas.core.common.fill_missing_names", "pandas.errors.AbstractMethodError", "pandas.compat._optional.import_optional_dependency", "pandas.core.dtypes.common.is_datetime64tz_dtype", "numpy.dtype", "pandas.util.version.Version", "pandas.core.dtypes.missing.isna", "pandas.core.dtypes.common.is_dict_like", "pandas.get_option", "pandas._libs.lib.infer_dtype", "pandas.core.tools.datetimes.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Albert-GM/TFM
[ "2574f7cd1411ff253d045c7dff894de36659aae8" ]
[ "src/data/modelling_movement.py" ]
[ "# =============================================================================\n# Makes a model simulating the movement of people between countries.\n# =============================================================================\n\n\nimport pandas as pd\nimport numpy as np\nimport networkx as nx\nimport os\nimport re\nroot_project = re.findall(r'(^\\S*TFM)', os.getcwd())[0]\n\ndf = pd.read_pickle(f\"{root_project}/data/interim/country_info_nonans.pickle\")\n\ndf_move = df.loc[:, ['country_name', 'country_code']]\n# Arrivals and departures by country and day\ndf_move['arrivals/day'] = df['arrivals'] / 365\ndf_move['departures/day'] = df['departures'] / 365\n# Ratio of arrivals to total by country\ndf_move['prop_arrivals'] = df_move['arrivals/day'] / \\\n np.sum(df_move['arrivals/day'])\n\ncountrycode_to_proparriv = pd.Series(\n df_move['prop_arrivals'].values, index=df_move['country_code']).to_dict()\n\ncountrycode_to_departures = pd.Series(\n df_move['departures/day'].values, index=df_move['country_code']).to_dict()\n\n\n# Add to the dataframe a column with info about the number of people going from\n# one country to another\nl_people = []\ndf_people = df.copy()\n\nfor country in df.iterrows():\n # Possibles destinations of country\n country_destinations = country[1]['destinations']\n # Compute probabilities of going to each of destinations\n prob = {x: countrycode_to_proparriv[x] for x in country_destinations}\n sum_prob = np.sum(list(prob.values()))\n # Probabilities of going to each of destinations normalized. sum=1\n prob = {k: v / sum_prob for k, v in prob.items()}\n # Compute individuals going from country to destinations\n people = {k: int(round(\n v * countrycode_to_departures[country[1]['country_code']], 0))\n for k, v in prob.items()}\n l_people.append(people)\n\n\ndf['departures/day'] = l_people\ndf.drop('destinations', axis=1, inplace=True)\n\n# Make origin-destination matrix from graph\nH = nx.DiGraph()\n\nfor index, country in df.iterrows():\n destinations = country['departures/day']\n for k, v in destinations.items():\n H.add_edge(country['country_code'], k, people=v)\n\nOD_matrix = nx.attr_matrix(H, edge_attr='people', rc_order=df['country_code'])\n\n# Uncomment to save new data\n# df.to_pickle(f\"{root_project}/data/interim/country_info_final.pickle\")\n# np.save(f\"{root_project}/data/interim/od_matrix.npy\", OD_matrix)\n" ]
[ [ "pandas.read_pickle", "numpy.sum", "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
mtoqeerpk/geemap
[ "70ebe305d25a7a5a5191b24595b4180fb7962f52" ]
[ "geemap/geemap.py" ]
[ "\"\"\"Main module for interactive mapping using Google Earth Engine Python API and ipyleaflet.\nKeep in mind that Earth Engine functions use both camel case and snake case, such as setOptions(), setCenter(), centerObject(), addLayer().\nipyleaflet functions use snake case, such as add_tile_layer(), add_wms_layer(), add_minimap().\n\"\"\"\n\nimport colour\nimport ee\nimport geocoder\nimport ipyleaflet\nimport math\nimport os\nimport time\nimport ipywidgets as widgets\nfrom bqplot import pyplot as plt\nfrom ipyfilechooser import FileChooser\nfrom ipyleaflet import *\nfrom ipytree import Tree, Node\nfrom IPython.display import display\nfrom .basemaps import ee_basemaps\nfrom .conversion import *\nfrom .legends import builtin_legends\n\n\ndef ee_initialize(token_name='EARTHENGINE_TOKEN'):\n \"\"\"Authenticates Earth Engine and initialize an Earth Engine session\n\n \"\"\"\n try:\n ee_token = os.environ.get(token_name)\n if ee_token is not None:\n credential = '{\"refresh_token\":\"%s\"}' % ee_token\n credential_file_path = os.path.expanduser(\"~/.config/earthengine/\")\n os.makedirs(credential_file_path, exist_ok=True)\n with open(credential_file_path + 'credentials', 'w') as file:\n file.write(credential)\n elif in_colab_shell():\n if credentials_in_drive() and (not credentials_in_colab()):\n copy_credentials_to_colab()\n elif not credentials_in_colab:\n ee.Authenticate()\n if is_drive_mounted() and (not credentials_in_drive()):\n copy_credentials_to_drive()\n else:\n if is_drive_mounted():\n copy_credentials_to_drive()\n\n ee.Initialize()\n except:\n ee.Authenticate()\n ee.Initialize()\n\n\nclass Map(ipyleaflet.Map):\n \"\"\"The Map class inherits from ipyleaflet.Map. The arguments you can pass to the Map can be found at https://ipyleaflet.readthedocs.io/en/latest/api_reference/map.html. By default, the Map will add Google Maps as the basemap. 
Set add_google_map = False to use OpenStreetMap as the basemap.\n\n Returns:\n object: ipyleaflet map object.\n \"\"\"\n\n def __init__(self, **kwargs):\n\n # Authenticates Earth Engine and initializes an Earth Engine session\n ee_initialize()\n\n # Default map center location and zoom level\n latlon = [40, -100]\n zoom = 4\n\n # Interchangeable parameters between ipyleaflet and folium\n if 'location' in kwargs.keys():\n kwargs['center'] = kwargs['location']\n kwargs.pop('location')\n if 'center' in kwargs.keys():\n latlon = kwargs['center']\n else:\n kwargs['center'] = latlon\n\n if 'zoom_start' in kwargs.keys():\n kwargs['zoom'] = kwargs['zoom_start']\n kwargs.pop('zoom_start')\n if 'zoom' in kwargs.keys():\n zoom = kwargs['zoom']\n else:\n kwargs['zoom'] = zoom\n\n if 'add_google_map' not in kwargs.keys():\n kwargs['add_google_map'] = True\n\n if 'show_attribution' not in kwargs.keys():\n kwargs['show_attribution'] = True\n\n if 'scroll_wheel_zoom' not in kwargs.keys():\n kwargs['scroll_wheel_zoom'] = True\n\n if 'zoom_control' not in kwargs.keys():\n kwargs['zoom_control'] = True\n\n if 'height' not in kwargs.keys():\n kwargs['height'] = '550px'\n\n # Inherits the ipyleaflet Map class\n super().__init__(**kwargs)\n self.layout.height = kwargs['height']\n\n self.clear_controls()\n\n self.draw_count = 0 # The number of shapes drawn by the user using the DrawControl\n # The list of Earth Engine Geometry objects converted from geojson\n self.draw_features = []\n # The Earth Engine Geometry object converted from the last drawn feature\n self.draw_last_feature = None\n self.draw_layer = None\n self.draw_last_json = None\n self.draw_last_bounds = None\n self.user_roi = None\n self.user_rois = None\n\n self.roi_start = False\n self.roi_end = False\n self.roi_reducer = ee.Reducer.mean()\n self.roi_reducer_scale = None\n\n # List for storing pixel values and locations based on user-drawn geometries.\n self.chart_points = []\n self.chart_values = []\n self.chart_labels = None\n\n self.plot_widget = None # The plot widget for plotting Earth Engine data\n self.plot_control = None # The plot control for interacting plotting\n self.random_marker = None\n\n self.legend_widget = None\n self.legend_control = None\n\n self.ee_layers = []\n self.ee_layer_names = []\n self.ee_raster_layers = []\n self.ee_raster_layer_names = []\n self.ee_layer_dict = {}\n\n self.search_locations = None\n self.search_loc_marker = None\n self.search_loc_geom = None\n self.search_datasets = None\n self.screenshot = None\n self.toolbar = None\n self.toolbar_button = None\n\n # Adds search button and search box\n search_button = widgets.ToggleButton(\n value=False,\n tooltip='Search location/data',\n icon='globe'\n )\n search_button.layout.width = '36px'\n\n search_type = widgets.ToggleButtons(\n options=['name/address', 'lat-lon', 'data'],\n tooltips=['Search by place name or address',\n 'Search by lat-lon coordinates', 'Search Earth Engine data catalog']\n )\n search_type.style.button_width = '110px'\n\n search_box = widgets.Text(\n placeholder='Search by place name or address',\n tooltip='Search location',\n )\n search_box.layout.width = '340px'\n\n search_output = widgets.Output(\n layout={'max_width': '340px', 'max_height': '250px', 'overflow': 'scroll'})\n\n search_results = widgets.RadioButtons()\n\n assets_dropdown = widgets.Dropdown()\n assets_dropdown.layout.min_width = '279px'\n assets_dropdown.layout.max_width = '279px'\n assets_dropdown.options = []\n\n import_btn = widgets.Button(\n description='import',\n 
button_style='primary',\n tooltip='Click to import the selected asset',\n )\n import_btn.layout.min_width = '57px'\n import_btn.layout.max_width = '57px'\n\n def import_btn_clicked(b):\n if assets_dropdown.value != '':\n datasets = self.search_datasets\n dataset = datasets[assets_dropdown.index]\n dataset_uid = 'dataset_' + random_string(string_length=3)\n line1 = '{} = {}\\n'.format(\n dataset_uid, dataset['ee_id_snippet'])\n line2 = 'Map.addLayer(' + dataset_uid + \\\n ', {}, \"' + dataset['id'] + '\")'\n contents = ''.join([line1, line2])\n create_code_cell(contents)\n\n import_btn.on_click(import_btn_clicked)\n\n html_widget = widgets.HTML()\n\n def dropdown_change(change):\n dropdown_index = assets_dropdown.index\n if dropdown_index is not None and dropdown_index >= 0:\n with search_output:\n search_output.clear_output(wait=True)\n print('Loading ...')\n datasets = self.search_datasets\n dataset = datasets[dropdown_index]\n dataset_html = ee_data_html(dataset)\n html_widget.value = dataset_html\n search_output.clear_output(wait=True)\n display(html_widget)\n\n assets_dropdown.observe(dropdown_change, names='value')\n\n assets_combo = widgets.HBox()\n assets_combo.children = [import_btn, assets_dropdown]\n\n def search_result_change(change):\n result_index = search_results.index\n locations = self.search_locations\n location = locations[result_index]\n latlon = (location.lat, location.lng)\n self.search_loc_geom = ee.Geometry.Point(\n location.lng, location.lat)\n marker = self.search_loc_marker\n marker.location = latlon\n self.center = latlon\n\n search_results.observe(search_result_change, names='value')\n\n def search_btn_click(change):\n if change['new']:\n search_widget.children = [search_button, search_result_widget]\n else:\n search_widget.children = [search_button]\n search_result_widget.children = [search_type, search_box]\n\n search_button.observe(search_btn_click, 'value')\n\n def search_type_changed(change):\n search_box.value = ''\n search_output.clear_output()\n if change['new'] == 'name/address':\n search_box.placeholder = 'Search by place name or address, e.g., Paris'\n assets_dropdown.options = []\n search_result_widget.children = [\n search_type, search_box, search_output]\n elif change['new'] == 'lat-lon':\n search_box.placeholder = 'Search by lat-lon, e.g., 40, -100'\n assets_dropdown.options = []\n search_result_widget.children = [\n search_type, search_box, search_output]\n elif change['new'] == 'data':\n search_box.placeholder = 'Search GEE data catalog by keywords, e.g., elevation'\n search_result_widget.children = [\n search_type, search_box, assets_combo, search_output]\n\n search_type.observe(search_type_changed, names='value')\n\n def search_box_callback(text):\n\n if text.value != '':\n\n if search_type.value == 'name/address':\n g = geocode(text.value)\n elif search_type.value == 'lat-lon':\n g = geocode(text.value, reverse=True)\n if g is None and latlon_from_text(text.value):\n search_output.clear_output()\n latlon = latlon_from_text(text.value)\n self.search_loc_geom = ee.Geometry.Point(\n latlon[1], latlon[0])\n if self.search_loc_marker is None:\n marker = Marker(\n location=latlon, draggable=False, name='Search location')\n self.search_loc_marker = marker\n self.add_layer(marker)\n self.center = latlon\n else:\n marker = self.search_loc_marker\n marker.location = latlon\n self.center = latlon\n with search_output:\n print('No address found for {}'.format(latlon))\n return\n elif search_type.value == 'data':\n search_output.clear_output()\n with 
search_output:\n print('Searching ...')\n self.default_style = {'cursor': 'wait'}\n ee_assets = search_ee_data(text.value)\n self.search_datasets = ee_assets\n asset_titles = [x['title'] for x in ee_assets]\n assets_dropdown.options = asset_titles\n search_output.clear_output()\n if len(ee_assets) > 0:\n html_widget.value = ee_data_html(ee_assets[0])\n with search_output:\n display(html_widget)\n\n self.default_style = {'cursor': 'default'}\n\n return\n\n self.search_locations = g\n if g is not None and len(g) > 0:\n top_loc = g[0]\n latlon = (top_loc.lat, top_loc.lng)\n self.search_loc_geom = ee.Geometry.Point(\n top_loc.lng, top_loc.lat)\n if self.search_loc_marker is None:\n marker = Marker(\n location=latlon, draggable=False, name='Search location')\n self.search_loc_marker = marker\n self.add_layer(marker)\n self.center = latlon\n else:\n marker = self.search_loc_marker\n marker.location = latlon\n self.center = latlon\n search_results.options = [x.address for x in g]\n search_result_widget.children = [\n search_type, search_box, search_output]\n with search_output:\n search_output.clear_output(wait=True)\n display(search_results)\n else:\n with search_output:\n search_output.clear_output()\n print('No results could be found.')\n\n search_box.on_submit(search_box_callback)\n\n search_result_widget = widgets.VBox()\n search_result_widget.children = [search_type, search_box]\n\n search_widget = widgets.HBox()\n search_widget.children = [search_button]\n data_control = WidgetControl(\n widget=search_widget, position='topleft')\n\n self.add_control(control=data_control)\n\n search_marker = Marker(icon=AwesomeIcon(\n name=\"check\", marker_color='green', icon_color='darkgreen'))\n search = SearchControl(position=\"topleft\",\n url='https://nominatim.openstreetmap.org/search?format=json&q={s}',\n zoom=5,\n property_name='display_name',\n marker=search_marker\n )\n self.add_control(search)\n\n if kwargs['zoom_control']:\n self.add_control(ZoomControl(position='topleft'))\n\n layer_control = LayersControl(position='topright')\n self.add_control(layer_control)\n self.layer_control = layer_control\n\n scale = ScaleControl(position='bottomleft')\n self.add_control(scale)\n self.scale_control = scale\n\n fullscreen = FullScreenControl()\n self.add_control(fullscreen)\n self.fullscreen_control = fullscreen\n\n measure = MeasureControl(\n position='bottomleft',\n active_color='orange',\n primary_length_unit='kilometers'\n )\n self.add_control(measure)\n self.measure_control = measure\n\n if kwargs.get('add_google_map'):\n self.add_layer(ee_basemaps['ROADMAP'])\n\n if kwargs.get('show_attribution'):\n self.add_control(AttributionControl(position='bottomright'))\n\n draw_control = DrawControl(marker={'shapeOptions': {'color': '#0000FF'}},\n rectangle={'shapeOptions': {\n 'color': '#0000FF'}},\n circle={'shapeOptions': {\n 'color': '#0000FF'}},\n circlemarker={},\n )\n\n draw_control_lite = DrawControl(marker={},\n rectangle={'shapeOptions': {\n 'color': '#0000FF'}},\n circle={'shapeOptions': {\n 'color': '#0000FF'}},\n circlemarker={},\n polyline={},\n polygon={}\n )\n # Handles draw events\n\n def handle_draw(target, action, geo_json):\n try:\n # print(geo_json)\n # geo_json = adjust_longitude(geo_json)\n # print(geo_json)\n self.roi_start = True\n self.draw_count += 1\n geom = geojson_to_ee(geo_json, False)\n self.user_roi = geom\n feature = ee.Feature(geom)\n self.draw_last_json = geo_json\n self.draw_last_bounds = minimum_bounding_box(geo_json)\n self.draw_last_feature = feature\n 
self.draw_features.append(feature)\n collection = ee.FeatureCollection(self.draw_features)\n self.user_rois = collection\n ee_draw_layer = ee_tile_layer(\n collection, {'color': 'blue'}, 'Drawn Features', True, 0.5)\n\n if self.draw_count == 1:\n self.add_layer(ee_draw_layer)\n self.draw_layer = ee_draw_layer\n else:\n self.substitute_layer(self.draw_layer, ee_draw_layer)\n self.draw_layer = ee_draw_layer\n\n draw_control.clear()\n\n self.roi_end = True\n self.roi_start = False\n except Exception as e:\n print(e)\n print(\"There was an error creating Earth Engine Feature.\")\n self.draw_count = 0\n self.draw_features = []\n self.draw_last_feature = None\n self.draw_layer = None\n self.user_roi = None\n self.roi_start = False\n self.roi_end = False\n\n draw_control.on_draw(handle_draw)\n self.add_control(draw_control)\n self.draw_control = draw_control\n self.draw_control_lite = draw_control_lite\n\n # Dropdown widget for plotting\n self.plot_dropdown_control = None\n self.plot_dropdown_widget = None\n self.plot_options = {}\n\n self.plot_marker_cluster = MarkerCluster(name=\"Marker Cluster\")\n self.plot_coordinates = []\n self.plot_markers = []\n self.plot_last_click = []\n self.plot_all_clicks = []\n\n # Adds Inspector widget\n inspector_checkbox = widgets.Checkbox(\n value=False,\n description='Inspector',\n indent=False,\n layout=widgets.Layout(height='18px')\n )\n inspector_checkbox.layout.width = '13ex'\n\n # Adds Plot widget\n plot_checkbox = widgets.Checkbox(\n value=False,\n description='Plotting',\n indent=False,\n )\n plot_checkbox.layout.width = '13ex'\n self.plot_checkbox = plot_checkbox\n\n vb = widgets.VBox(children=[inspector_checkbox, plot_checkbox])\n\n chk_control = WidgetControl(widget=vb, position='topright')\n self.add_control(chk_control)\n self.inspector_control = chk_control\n\n self.inspector_checked = inspector_checkbox.value\n self.plot_checked = plot_checkbox.value\n\n def inspect_chk_changed(b):\n self.inspector_checked = inspector_checkbox.value\n if not self.inspector_checked:\n output.clear_output()\n inspector_checkbox.observe(inspect_chk_changed)\n\n output = widgets.Output(layout={'border': '1px solid black'})\n output_control = WidgetControl(widget=output, position='topright')\n self.add_control(output_control)\n\n def plot_chk_changed(button):\n\n if button['name'] == 'value' and button['new']:\n self.plot_checked = True\n plot_dropdown_widget = widgets.Dropdown(\n options=list(self.ee_raster_layer_names),\n )\n plot_dropdown_widget.layout.width = '18ex'\n self.plot_dropdown_widget = plot_dropdown_widget\n plot_dropdown_control = WidgetControl(\n widget=plot_dropdown_widget, position='topright')\n self.plot_dropdown_control = plot_dropdown_control\n self.add_control(plot_dropdown_control)\n self.remove_control(self.draw_control)\n self.add_control(self.draw_control_lite)\n elif button['name'] == 'value' and (not button['new']):\n self.plot_checked = False\n plot_dropdown_widget = self.plot_dropdown_widget\n plot_dropdown_control = self.plot_dropdown_control\n self.remove_control(plot_dropdown_control)\n del plot_dropdown_widget\n del plot_dropdown_control\n if self.plot_control in self.controls:\n plot_control = self.plot_control\n plot_widget = self.plot_widget\n self.remove_control(plot_control)\n self.plot_control = None\n self.plot_widget = None\n del plot_control\n del plot_widget\n if self.plot_marker_cluster is not None and self.plot_marker_cluster in self.layers:\n self.remove_layer(self.plot_marker_cluster)\n 
self.remove_control(self.draw_control_lite)\n self.add_control(self.draw_control)\n\n plot_checkbox.observe(plot_chk_changed)\n\n tool_output = widgets.Output()\n tool_output.clear_output(wait=True)\n\n save_map_widget = widgets.VBox()\n\n save_type = widgets.ToggleButtons(\n options=['HTML', 'PNG', 'JPG'],\n tooltips=['Save the map as an HTML file',\n 'Take a screenshot and save as a PNG file',\n 'Take a screenshot and save as a JPG file']\n )\n\n # download_dir = os.getcwd()\n file_chooser = FileChooser(os.getcwd())\n file_chooser.default_filename = 'my_map.html'\n file_chooser.use_dir_icons = False\n\n ok_cancel = widgets.ToggleButtons(\n options=['OK', 'Cancel'],\n tooltips=['OK', 'Cancel'],\n button_style='primary'\n )\n ok_cancel.value = None\n\n def save_type_changed(change):\n ok_cancel.value = None\n # file_chooser.reset()\n file_chooser.default_path = os.getcwd()\n if change['new'] == 'HTML':\n file_chooser.default_filename = 'my_map.html'\n elif change['new'] == 'PNG':\n file_chooser.default_filename = 'my_map.png'\n elif change['new'] == 'JPG':\n file_chooser.default_filename = 'my_map.jpg'\n save_map_widget.children = [save_type, file_chooser]\n\n def chooser_callback(chooser):\n # file_chooser.default_path = os.getcwd()\n save_map_widget.children = [save_type, file_chooser, ok_cancel]\n\n def ok_cancel_clicked(change):\n if change['new'] == 'OK':\n file_path = file_chooser.selected\n ext = os.path.splitext(file_path)[1]\n if save_type.value == 'HTML' and ext.upper() == '.HTML':\n tool_output.clear_output()\n self.to_html(file_path)\n elif save_type.value == 'PNG' and ext.upper() == '.PNG':\n tool_output.clear_output()\n self.toolbar_button.value = False\n time.sleep(1)\n screen_capture(outfile=file_path)\n elif save_type.value == 'JPG' and ext.upper() == '.JPG':\n tool_output.clear_output()\n self.toolbar_button.value = False\n time.sleep(1)\n screen_capture(outfile=file_path)\n else:\n label = widgets.Label(\n value=\"The selected file extension does not match the selected exporting type.\")\n save_map_widget.children = [save_type, file_chooser, label]\n self.toolbar_reset()\n elif change['new'] == 'Cancel':\n tool_output.clear_output()\n self.toolbar_reset()\n save_type.observe(save_type_changed, names='value')\n ok_cancel.observe(ok_cancel_clicked, names='value')\n\n file_chooser.register_callback(chooser_callback)\n\n save_map_widget.children = [save_type, file_chooser]\n\n tools = {\n 'mouse-pointer': 'pointer',\n 'camera': 'to_image',\n 'info': 'identify',\n 'map-marker': 'plotting'\n }\n icons = ['mouse-pointer', 'camera', 'info', 'map-marker']\n tooltips = ['Default pointer',\n 'Save map as HTML or image', 'Inspector', 'Plotting']\n icon_width = '42px'\n icon_height = '40px'\n n_cols = 2\n n_rows = math.ceil(len(icons) / n_cols)\n\n toolbar_grid = widgets.GridBox(children=[widgets.ToggleButton(layout=widgets.Layout(width='auto', height='auto'),\n button_style='primary', icon=icons[i], tooltip=tooltips[i]) for i in range(len(icons))],\n layout=widgets.Layout(\n width='90px',\n grid_template_columns=(icon_width + ' ') * 2,\n grid_template_rows=(icon_height + ' ') * n_rows,\n grid_gap='1px 1px')\n )\n self.toolbar = toolbar_grid\n\n def tool_callback(change):\n if change['new']:\n current_tool = change['owner']\n for tool in toolbar_grid.children:\n if not tool is current_tool:\n tool.value = False\n tool = change['owner']\n if tools[tool.icon] == 'to_image':\n with tool_output:\n tool_output.clear_output()\n display(save_map_widget)\n else:\n 
tool_output.clear_output()\n save_map_widget.children = [save_type, file_chooser]\n\n for tool in toolbar_grid.children:\n tool.observe(tool_callback, 'value')\n\n toolbar_button = widgets.ToggleButton(\n value=False,\n tooltip='Toolbar',\n icon='wrench'\n )\n toolbar_button.layout.width = '37px'\n self.toolbar_button = toolbar_button\n\n def toolbar_btn_click(change):\n if change['new']:\n toolbar_widget.children = [toolbar_button, toolbar_grid]\n else:\n toolbar_widget.children = [toolbar_button]\n tool_output.clear_output()\n self.toolbar_reset()\n\n toolbar_button.observe(toolbar_btn_click, 'value')\n\n toolbar_widget = widgets.VBox()\n toolbar_widget.children = [toolbar_button]\n toolbar_control = WidgetControl(\n widget=toolbar_widget, position='topright')\n self.add_control(toolbar_control)\n\n tool_output_control = WidgetControl(\n widget=tool_output, position='topright')\n self.add_control(tool_output_control)\n\n def handle_interaction(**kwargs):\n latlon = kwargs.get('coordinates')\n if kwargs.get('type') == 'click' and self.inspector_checked:\n self.default_style = {'cursor': 'wait'}\n\n sample_scale = self.getScale()\n layers = self.ee_layers\n\n with output:\n\n output.clear_output(wait=True)\n for index, ee_object in enumerate(layers):\n xy = ee.Geometry.Point(latlon[::-1])\n layer_names = self.ee_layer_names\n layer_name = layer_names[index]\n object_type = ee_object.__class__.__name__\n\n try:\n if isinstance(ee_object, ee.ImageCollection):\n ee_object = ee_object.mosaic()\n elif isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) \\\n or isinstance(ee_object, ee.featurecollection.FeatureCollection):\n ee_object = ee.FeatureCollection(ee_object)\n\n if isinstance(ee_object, ee.Image):\n item = ee_object.reduceRegion(\n ee.Reducer.first(), xy, sample_scale).getInfo()\n b_name = 'band'\n if len(item) > 1:\n b_name = 'bands'\n print(\"{}: {} ({} {})\".format(\n layer_name, object_type, len(item), b_name))\n keys = item.keys()\n for key in keys:\n print(\" {}: {}\".format(key, item[key]))\n elif isinstance(ee_object, ee.FeatureCollection):\n filtered = ee_object.filterBounds(xy)\n size = filtered.size().getInfo()\n if size > 0:\n first = filtered.first()\n props = first.toDictionary().getInfo()\n b_name = 'property'\n if len(props) > 1:\n b_name = 'properties'\n print(\"{}: Feature ({} {})\".format(\n layer_name, len(props), b_name))\n keys = props.keys()\n for key in keys:\n print(\" {}: {}\".format(\n key, props[key]))\n except Exception as e:\n print(e)\n\n self.default_style = {'cursor': 'crosshair'}\n if kwargs.get('type') == 'click' and self.plot_checked and len(self.ee_raster_layers) > 0:\n plot_layer_name = self.plot_dropdown_widget.value\n layer_names = self.ee_raster_layer_names\n layers = self.ee_raster_layers\n index = layer_names.index(plot_layer_name)\n ee_object = layers[index]\n\n if isinstance(ee_object, ee.ImageCollection):\n ee_object = ee_object.mosaic()\n\n try:\n self.default_style = {'cursor': 'wait'}\n plot_options = self.plot_options\n sample_scale = self.getScale()\n if'sample_scale' in plot_options.keys() and (plot_options['sample_scale'] is not None):\n sample_scale = plot_options['sample_scale']\n if 'title' not in plot_options.keys():\n plot_options['title'] = plot_layer_name\n if ('add_marker_cluster' in plot_options.keys()) and plot_options['add_marker_cluster']:\n plot_coordinates = self.plot_coordinates\n markers = self.plot_markers\n marker_cluster = self.plot_marker_cluster\n 
plot_coordinates.append(latlon)\n self.plot_last_click = latlon\n self.plot_all_clicks = plot_coordinates\n markers.append(Marker(location=latlon))\n marker_cluster.markers = markers\n self.plot_marker_cluster = marker_cluster\n\n band_names = ee_object.bandNames().getInfo()\n self.chart_labels = band_names\n\n if self.roi_end:\n if self.roi_reducer_scale is None:\n scale = ee_object.select(\n 0).projection().nominalScale()\n else:\n scale = self.roi_reducer_scale\n dict_values = ee_object.reduceRegion(\n reducer=self.roi_reducer, geometry=self.user_roi, scale=scale, bestEffort=True).getInfo()\n self.chart_points.append(\n self.user_roi.centroid(1).coordinates().getInfo())\n else:\n xy = ee.Geometry.Point(latlon[::-1])\n dict_values = ee_object.sample(\n xy, scale=sample_scale).first().toDictionary().getInfo()\n self.chart_points.append(xy.coordinates().getInfo())\n band_values = list(dict_values.values())\n self.chart_values.append(band_values)\n self.plot(band_names, band_values, **plot_options)\n if plot_options['title'] == plot_layer_name:\n del plot_options['title']\n self.default_style = {'cursor': 'crosshair'}\n self.roi_end = False\n except Exception as e:\n if self.plot_widget is not None:\n with self.plot_widget:\n self.plot_widget.clear_output()\n print(\"No data for the clicked location.\")\n else:\n print(e)\n self.default_style = {'cursor': 'crosshair'}\n self.roi_end = False\n self.on_interaction(handle_interaction)\n\n def set_options(self, mapTypeId='HYBRID', styles=None, types=None):\n \"\"\"Adds Google basemap and controls to the ipyleaflet map.\n\n Args:\n mapTypeId (str, optional): A mapTypeId to set the basemap to. Can be one of \"ROADMAP\", \"SATELLITE\", \"HYBRID\" or \"TERRAIN\" to select one of the standard Google Maps API map types. Defaults to 'HYBRID'.\n styles (object, optional): A dictionary of custom MapTypeStyle objects keyed with a name that will appear in the map's Map Type Controls. Defaults to None.\n types (list, optional): A list of mapTypeIds to make available. If omitted, but opt_styles is specified, appends all of the style keys to the standard Google Maps API map types.. Defaults to None.\n \"\"\"\n self.clear_layers()\n self.clear_controls()\n self.scroll_wheel_zoom = True\n self.add_control(ZoomControl(position='topleft'))\n self.add_control(LayersControl(position='topright'))\n self.add_control(ScaleControl(position='bottomleft'))\n self.add_control(FullScreenControl())\n self.add_control(DrawControl())\n\n measure = MeasureControl(\n position='bottomleft',\n active_color='orange',\n primary_length_unit='kilometers'\n )\n self.add_control(measure)\n\n try:\n self.add_layer(ee_basemaps[mapTypeId])\n except Exception as e:\n print(e)\n print(\n 'Google basemaps can only be one of \"ROADMAP\", \"SATELLITE\", \"HYBRID\" or \"TERRAIN\".')\n\n setOptions = set_options\n\n def add_ee_layer(self, ee_object, vis_params={}, name=None, shown=True, opacity=1.0):\n \"\"\"Adds a given EE object to the map as a layer.\n\n Args:\n ee_object (Collection|Feature|Image|MapId): The object to add to the map.\n vis_params (dict, optional): The visualization parameters. Defaults to {}.\n name (str, optional): The name of the layer. Defaults to 'Layer N'.\n shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.\n opacity (float, optional): The layer's opacity represented as a number between 0 and 1. 
Defaults to 1.\n \"\"\"\n image = None\n if name is None:\n layer_count = len(self.layers)\n name = 'Layer ' + str(layer_count + 1)\n\n if not isinstance(ee_object, ee.Image) and not isinstance(ee_object, ee.ImageCollection) and not isinstance(ee_object, ee.FeatureCollection) and not isinstance(ee_object, ee.Feature) and not isinstance(ee_object, ee.Geometry):\n err_str = \"\\n\\nThe image argument in 'addLayer' function must be an instace of one of ee.Image, ee.Geometry, ee.Feature or ee.FeatureCollection.\"\n raise AttributeError(err_str)\n\n if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):\n features = ee.FeatureCollection(ee_object)\n\n width = 2\n\n if 'width' in vis_params:\n width = vis_params['width']\n\n color = '000000'\n\n if 'color' in vis_params:\n color = vis_params['color']\n\n image_fill = features.style(\n **{'fillColor': color}).updateMask(ee.Image.constant(0.5))\n image_outline = features.style(\n **{'color': color, 'fillColor': '00000000', 'width': width})\n\n image = image_fill.blend(image_outline)\n elif isinstance(ee_object, ee.image.Image):\n image = ee_object\n elif isinstance(ee_object, ee.imagecollection.ImageCollection):\n image = ee_object.mosaic()\n\n map_id_dict = ee.Image(image).getMapId(vis_params)\n tile_layer = ipyleaflet.TileLayer(\n url=map_id_dict['tile_fetcher'].url_format,\n attribution='Google Earth Engine',\n name=name,\n opacity=opacity,\n visible=True\n # visible=shown\n )\n\n layer = self.find_layer(name=name)\n if layer is not None:\n\n existing_object = self.ee_layer_dict[name]['ee_object']\n\n if isinstance(existing_object, ee.Image) or isinstance(existing_object, ee.ImageCollection):\n self.ee_raster_layers.remove(existing_object)\n self.ee_raster_layer_names.remove(name)\n if self.plot_dropdown_widget is not None:\n self.plot_dropdown_widget.options = list(\n self.ee_raster_layer_names)\n\n self.ee_layers.remove(existing_object)\n self.ee_layer_names.remove(name)\n self.remove_layer(layer)\n\n self.ee_layers.append(ee_object)\n self.ee_layer_names.append(name)\n self.ee_layer_dict[name] = {\n 'ee_object': ee_object, 'ee_layer': tile_layer}\n\n self.add_layer(tile_layer)\n\n if isinstance(ee_object, ee.Image) or isinstance(ee_object, ee.ImageCollection):\n self.ee_raster_layers.append(ee_object)\n self.ee_raster_layer_names.append(name)\n if self.plot_dropdown_widget is not None:\n self.plot_dropdown_widget.options = list(\n self.ee_raster_layer_names)\n\n addLayer = add_ee_layer\n\n def set_center(self, lon, lat, zoom=None):\n \"\"\"Centers the map view at a given coordinates with the given zoom level.\n\n Args:\n lon (float): The longitude of the center, in degrees.\n lat (float): The latitude of the center, in degrees.\n zoom (int, optional): The zoom level, from 1 to 24. Defaults to None.\n \"\"\"\n self.center = (lat, lon)\n if zoom is not None:\n self.zoom = zoom\n\n setCenter = set_center\n\n def center_object(self, ee_object, zoom=None):\n \"\"\"Centers the map view on a given object.\n\n Args:\n ee_object (Element|Geometry): An Earth Engine object to center on - a geometry, image or feature.\n zoom (int, optional): The zoom level, from 1 to 24. 
Defaults to None.\n \"\"\"\n lat = 0\n lon = 0\n bounds = [[lat, lon], [lat, lon]]\n if isinstance(ee_object, ee.geometry.Geometry):\n centroid = ee_object.centroid(1)\n lon, lat = centroid.getInfo()['coordinates']\n bounds = [[lat, lon], [lat, lon]]\n elif isinstance(ee_object, ee.feature.Feature):\n centroid = ee_object.geometry().centroid(1)\n lon, lat = centroid.getInfo()['coordinates']\n bounds = [[lat, lon], [lat, lon]]\n elif isinstance(ee_object, ee.featurecollection.FeatureCollection):\n centroid = ee_object.geometry().centroid()\n lon, lat = centroid.getInfo()['coordinates']\n bounds = [[lat, lon], [lat, lon]]\n elif isinstance(ee_object, ee.image.Image):\n geometry = ee_object.geometry()\n coordinates = geometry.getInfo()['coordinates'][0]\n bounds = [coordinates[0][::-1], coordinates[2][::-1]]\n elif isinstance(ee_object, ee.imagecollection.ImageCollection):\n geometry = ee_object.geometry()\n coordinates = geometry.getInfo()['coordinates'][0]\n bounds = [coordinates[0][::-1], coordinates[2][::-1]]\n else:\n bounds = [[0, 0], [0, 0]]\n\n lat = bounds[0][0]\n lon = bounds[0][1]\n\n self.setCenter(lon, lat, zoom)\n\n centerObject = center_object\n\n def get_scale(self):\n \"\"\"Returns the approximate pixel scale of the current map view, in meters.\n\n Returns:\n float: Map resolution in meters.\n \"\"\"\n zoom_level = self.zoom\n # Reference: https://blogs.bing.com/maps/2006/02/25/map-control-zoom-levels-gt-resolution\n resolution = 156543.04 * math.cos(0) / math.pow(2, zoom_level)\n return resolution\n\n getScale = get_scale\n\n def add_basemap(self, basemap='HYBRID'):\n \"\"\"Adds a basemap to the map.\n\n Args:\n basemap (str, optional): Can be one of string from ee_basemaps. Defaults to 'HYBRID'.\n \"\"\"\n try:\n self.add_layer(ee_basemaps[basemap])\n except Exception as e:\n print(e)\n print('Basemap can only be one of the following:\\n {}'.format(\n '\\n '.join(ee_basemaps.keys())))\n\n def find_layer(self, name):\n \"\"\"Finds layer by name\n\n Args:\n name (str): Name of the layer to find.\n\n Returns:\n object: ipyleaflet layer object.\n \"\"\"\n layers = self.layers\n\n for layer in layers:\n if layer.name == name:\n return layer\n\n return None\n\n def layer_opacity(self, name, value=1.0):\n \"\"\"Changes layer opacity.\n\n Args:\n name (str): The name of the layer to change opacity.\n value (float, optional): The opacity value to set. Defaults to 1.0.\n \"\"\"\n layer = self.find_layer(name)\n try:\n layer.opacity = value\n # layer.interact(opacity=(0, 1, 0.1)) # to change layer opacity interactively\n except Exception as e:\n print(e)\n\n def add_wms_layer(self, url, layers, name=None, attribution='', format='image/jpeg', transparent=False, opacity=1.0, shown=True):\n \"\"\"Add a WMS layer to the map.\n\n Args:\n url (str): The URL of the WMS web service.\n layers (str): Comma-separated list of WMS layers to show. \n name (str, optional): The layer name to use on the layer control. Defaults to None.\n attribution (str, optional): The attribution of the data layer. Defaults to ''.\n format (str, optional): WMS image format (use ‘image/png’ for layers with transparency). Defaults to 'image/jpeg'.\n transparent (bool, optional): If True, the WMS service will return images with transparency. Defaults to False.\n opacity (float, optional): The opacity of the layer. Defaults to 1.0.\n shown (bool, optional): A flag indicating whether the layer should be on by default. 
Defaults to True.\n \"\"\"\n\n if name is None:\n name = str(layers)\n\n try:\n wms_layer = ipyleaflet.WMSLayer(\n url=url,\n layers=layers,\n name=name,\n attribution=attribution,\n format=format,\n transparent=transparent,\n opacity=opacity,\n visible=True\n # visible=shown\n )\n self.add_layer(wms_layer)\n except Exception as e:\n print(e)\n print(\"Failed to add the specified WMS TileLayer.\")\n\n def add_tile_layer(self, url='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', name=None, attribution='', opacity=1.0, shown=True):\n \"\"\"Adds a TileLayer to the map.\n\n Args:\n url (str, optional): The URL of the tile layer. Defaults to 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'.\n name (str, optional): The layer name to use for the layer. Defaults to None.\n attribution (str, optional): The attribution to use. Defaults to ''.\n opacity (float, optional): The opacity of the layer. Defaults to 1.0.\n shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.\n \"\"\"\n try:\n tile_layer = ipyleaflet.TileLayer(\n url=url,\n name=name,\n attribution=attribution,\n opacity=opacity,\n visible=True\n # visible=shown\n )\n self.add_layer(tile_layer)\n except Exception as e:\n print(e)\n print(\"Failed to add the specified TileLayer.\")\n\n def add_minimap(self, zoom=5, position=\"bottomright\"):\n \"\"\"Adds a minimap (overview) to the ipyleaflet map.\n\n Args:\n zoom (int, optional): Initial map zoom level. Defaults to 5.\n position (str, optional): Position of the minimap. Defaults to \"bottomright\".\n \"\"\"\n minimap = ipyleaflet.Map(\n zoom_control=False, attribution_control=False,\n zoom=zoom, center=self.center, layers=[ee_basemaps['ROADMAP']]\n )\n minimap.layout.width = '150px'\n minimap.layout.height = '150px'\n link((minimap, 'center'), (self, 'center'))\n minimap_control = WidgetControl(widget=minimap, position=position)\n self.add_control(minimap_control)\n\n def marker_cluster(self):\n \"\"\"Adds a marker cluster to the map and returns a list of ee.Feature, which can be accessed using Map.ee_markers.\n\n Returns:\n object: a list of ee.Feature\n \"\"\"\n coordinates = []\n markers = []\n marker_cluster = MarkerCluster(name=\"Marker Cluster\")\n self.last_click = []\n self.all_clicks = []\n self.ee_markers = []\n self.add_layer(marker_cluster)\n\n def handle_interaction(**kwargs):\n latlon = kwargs.get('coordinates')\n if kwargs.get('type') == 'click':\n coordinates.append(latlon)\n geom = ee.Geometry.Point(latlon[1], latlon[0])\n feature = ee.Feature(geom)\n self.ee_markers.append(feature)\n self.last_click = latlon\n self.all_clicks = coordinates\n markers.append(Marker(location=latlon))\n marker_cluster.markers = markers\n elif kwargs.get('type') == 'mousemove':\n pass\n # cursor style: https://www.w3schools.com/cssref/pr_class_cursor.asp\n self.default_style = {'cursor': 'crosshair'}\n self.on_interaction(handle_interaction)\n\n def set_plot_options(self, add_marker_cluster=False, sample_scale=None, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):\n \"\"\"Sets plotting options.\n\n Args:\n add_marker_cluster (bool, optional): Whether to add a marker cluster. Defaults to False.\n sample_scale (float, optional): A nominal scale in meters of the projection to sample in. Defaults to None.\n plot_type (str, optional): The plot type can be one of \"None\", \"bar\", \"scatter\" or \"hist\". 
Defaults to None.\n overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.\n position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.\n min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.\n max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.\n min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.\n max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.\n\n \"\"\"\n plot_options_dict = {}\n plot_options_dict['add_marker_cluster'] = add_marker_cluster\n plot_options_dict['sample_scale'] = sample_scale\n plot_options_dict['plot_type'] = plot_type\n plot_options_dict['overlay'] = overlay\n plot_options_dict['position'] = position\n plot_options_dict['min_width'] = min_width\n plot_options_dict['max_width'] = max_width\n plot_options_dict['min_height'] = min_height\n plot_options_dict['max_height'] = max_height\n\n for key in kwargs.keys():\n plot_options_dict[key] = kwargs[key]\n\n self.plot_options = plot_options_dict\n\n if add_marker_cluster and (self.plot_marker_cluster not in self.layers):\n self.add_layer(self.plot_marker_cluster)\n\n def plot(self, x, y, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):\n \"\"\"Creates a plot based on x-array and y-array data.\n\n Args:\n x (numpy.ndarray or list): The x-coordinates of the plotted line.\n y (numpy.ndarray or list): The y-coordinates of the plotted line.\n plot_type (str, optional): The plot type can be one of \"None\", \"bar\", \"scatter\" or \"hist\". Defaults to None.\n overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.\n position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.\n min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.\n max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.\n min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.\n max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None. 
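\n\n Example (a minimal, hypothetical usage sketch; m is an existing Map instance, and the band names and values are made-up placeholders):\n\n m.plot(['B1', 'B2', 'B3', 'B4'], [0.08, 0.12, 0.20, 0.35], plot_type='bar', max_width=400)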
\n\n \"\"\"\n if self.plot_widget is not None:\n plot_widget = self.plot_widget\n else:\n plot_widget = widgets.Output(layout={'border': '1px solid black'})\n plot_control = WidgetControl(widget=plot_widget, position=position, min_width=min_width,\n max_width=max_width, min_height=min_height, max_height=max_height)\n self.plot_widget = plot_widget\n self.plot_control = plot_control\n self.add_control(plot_control)\n\n if max_width is None:\n max_width = 500\n if max_height is None:\n max_height = 300\n\n if (plot_type is None) and ('markers' not in kwargs.keys()):\n kwargs['markers'] = 'circle'\n\n with plot_widget:\n try:\n fig = plt.figure(1, **kwargs)\n if max_width is not None:\n fig.layout.width = str(max_width) + 'px'\n if max_height is not None:\n fig.layout.height = str(max_height) + 'px'\n\n plot_widget.clear_output(wait=True)\n if not overlay:\n plt.clear()\n\n if plot_type is None:\n if 'marker' not in kwargs.keys():\n kwargs['marker'] = 'circle'\n plt.plot(x, y, **kwargs)\n elif plot_type == 'bar':\n plt.bar(x, y, **kwargs)\n elif plot_type == 'scatter':\n plt.scatter(x, y, **kwargs)\n elif plot_type == 'hist':\n plt.hist(y, **kwargs)\n plt.show()\n\n except Exception as e:\n print(e)\n print(\"Failed to create plot.\")\n\n def plot_demo(self, iterations=20, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):\n \"\"\"A demo of interactive plotting using random pixel coordinates.\n\n Args:\n iterations (int, optional): How many iterations to run for the demo. Defaults to 20.\n plot_type (str, optional): The plot type can be one of \"None\", \"bar\", \"scatter\" or \"hist\". Defaults to None.\n overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.\n position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.\n min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.\n max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.\n min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.\n max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None. 
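\n\n Example (a hypothetical sketch; m is an existing Map instance, and the demo itself loads the LE7_TOA_5YEAR composite and samples random points):\n\n m.plot_demo(iterations=5, plot_type='bar')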
\n \"\"\"\n\n import numpy as np\n import time\n\n if self.random_marker is not None:\n self.remove_layer(self.random_marker)\n\n image = ee.Image('LE7_TOA_5YEAR/1999_2003').select([0, 1, 2, 3, 4, 6])\n self.addLayer(\n image, {'bands': ['B4', 'B3', 'B2'], 'gamma': 1.4}, \"LE7_TOA_5YEAR/1999_2003\")\n self.setCenter(-50.078877, 25.190030, 3)\n band_names = image.bandNames().getInfo()\n band_count = len(band_names)\n\n latitudes = np.random.uniform(30, 48, size=iterations)\n longitudes = np.random.uniform(-121, -76, size=iterations)\n\n marker = Marker(location=(0, 0))\n self.random_marker = marker\n self.add_layer(marker)\n\n for i in range(iterations):\n try:\n coordinate = ee.Geometry.Point([longitudes[i], latitudes[i]])\n dict_values = image.sample(\n coordinate).first().toDictionary().getInfo()\n band_values = list(dict_values.values())\n title = '{}/{}: Spectral signature at ({}, {})'.format(i+1, iterations,\n round(latitudes[i], 2), round(longitudes[i], 2))\n marker.location = (latitudes[i], longitudes[i])\n self.plot(band_names, band_values, plot_type=plot_type, overlay=overlay,\n min_width=min_width, max_width=max_width, min_height=min_height, max_height=max_height, title=title, **kwargs)\n time.sleep(0.3)\n except Exception as e:\n print(e)\n\n def plot_raster(self, ee_object=None, sample_scale=None, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):\n \"\"\"Interactive plotting of Earth Engine data by clicking on the map.\n\n Args:\n ee_object (object, optional): The ee.Image or ee.ImageCollection to sample. Defaults to None.\n sample_scale (float, optional): A nominal scale in meters of the projection to sample in. Defaults to None.\n plot_type (str, optional): The plot type can be one of \"None\", \"bar\", \"scatter\" or \"hist\". Defaults to None.\n overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.\n position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.\n min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.\n max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.\n min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.\n max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None. 
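\n\n Example (a hypothetical sketch; m is an existing Map instance, and the asset ID is the same composite used by plot_demo):\n\n image = ee.Image('LE7_TOA_5YEAR/1999_2003')\n m.addLayer(image, {'bands': ['B4', 'B3', 'B2']}, 'Landsat composite')\n m.plot_raster(ee_object=image, sample_scale=30, plot_type='bar')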
\n\n \"\"\"\n if self.plot_control is not None:\n del self.plot_widget\n self.remove_control(self.plot_control)\n\n if self.random_marker is not None:\n self.remove_layer(self.random_marker)\n\n plot_widget = widgets.Output(layout={'border': '1px solid black'})\n plot_control = WidgetControl(widget=plot_widget, position=position, min_width=min_width,\n max_width=max_width, min_height=min_height, max_height=max_height)\n self.plot_widget = plot_widget\n self.plot_control = plot_control\n self.add_control(plot_control)\n\n self.default_style = {'cursor': 'crosshair'}\n msg = \"The plot function can only be used on ee.Image or ee.ImageCollection with more than one band.\"\n if (ee_object is None) and len(self.ee_raster_layers) > 0:\n ee_object = self.ee_raster_layers[-1]\n if isinstance(ee_object, ee.ImageCollection):\n ee_object = ee_object.mosaic()\n elif isinstance(ee_object, ee.ImageCollection):\n ee_object = ee_object.mosaic()\n elif not isinstance(ee_object, ee.Image):\n print(msg)\n return\n\n if sample_scale is None:\n sample_scale = self.getScale()\n\n if max_width is None:\n max_width = 500\n\n band_names = ee_object.bandNames().getInfo()\n\n coordinates = []\n markers = []\n marker_cluster = MarkerCluster(name=\"Marker Cluster\")\n self.last_click = []\n self.all_clicks = []\n self.add_layer(marker_cluster)\n\n def handle_interaction(**kwargs2):\n latlon = kwargs2.get('coordinates')\n\n if kwargs2.get('type') == 'click':\n try:\n coordinates.append(latlon)\n self.last_click = latlon\n self.all_clicks = coordinates\n markers.append(Marker(location=latlon))\n marker_cluster.markers = markers\n self.default_style = {'cursor': 'wait'}\n xy = ee.Geometry.Point(latlon[::-1])\n dict_values = ee_object.sample(\n xy, scale=sample_scale).first().toDictionary().getInfo()\n band_values = list(dict_values.values())\n self.plot(band_names, band_values, plot_type=plot_type, overlay=overlay,\n min_width=min_width, max_width=max_width, min_height=min_height, max_height=max_height, **kwargs)\n self.default_style = {'cursor': 'crosshair'}\n except Exception as e:\n if self.plot_widget is not None:\n with self.plot_widget:\n self.plot_widget.clear_output()\n print(\"No data for the clicked location.\")\n else:\n print(e)\n self.default_style = {'cursor': 'crosshair'}\n\n self.on_interaction(handle_interaction)\n\n def add_marker_cluster(self, event='click', add_marker=True):\n \"\"\"Captures user inputs and adds markers to the map.\n\n Args:\n event (str, optional): The event type to listen for, e.g., 'click'. Defaults to 'click'.\n add_marker (bool, optional): If True, add markers to the map. 
Defaults to True.\n\n Returns:\n object: a marker cluster.\n \"\"\"\n coordinates = []\n markers = []\n marker_cluster = MarkerCluster(name=\"Marker Cluster\")\n self.last_click = []\n self.all_clicks = []\n if add_marker:\n self.add_layer(marker_cluster)\n\n def handle_interaction(**kwargs):\n latlon = kwargs.get('coordinates')\n\n if event == 'click' and kwargs.get('type') == 'click':\n coordinates.append(latlon)\n self.last_click = latlon\n self.all_clicks = coordinates\n if add_marker:\n markers.append(Marker(location=latlon))\n marker_cluster.markers = markers\n elif kwargs.get('type') == 'mousemove':\n pass\n # cursor style: https://www.w3schools.com/cssref/pr_class_cursor.asp\n self.default_style = {'cursor': 'crosshair'}\n self.on_interaction(handle_interaction)\n\n def set_control_visibility(self, layerControl=True, fullscreenControl=True, latLngPopup=True):\n \"\"\"Sets the visibility of the controls on the map.\n\n Args:\n layerControl (bool, optional): Whether to show the control that allows the user to toggle layers on/off. Defaults to True.\n fullscreenControl (bool, optional): Whether to show the control that allows the user to make the map full-screen. Defaults to True.\n latLngPopup (bool, optional): Whether to show the control that pops up the Lat/lon when the user clicks on the map. Defaults to True.\n \"\"\"\n pass\n\n setControlVisibility = set_control_visibility\n\n def add_layer_control(self):\n \"\"\"Adds the layer control to the map.\n \"\"\"\n pass\n\n addLayerControl = add_layer_control\n\n def split_map(self, left_layer='HYBRID', right_layer='ESRI'):\n \"\"\"Adds a split map.\n\n Args:\n left_layer (str, optional): The left tile layer. Defaults to 'HYBRID'.\n right_layer (str, optional): The right tile layer. Defaults to 'ESRI'.\n \"\"\"\n try:\n self.remove_control(self.layer_control)\n self.remove_control(self.inspector_control)\n if left_layer in ee_basemaps.keys():\n left_layer = ee_basemaps[left_layer]\n\n if right_layer in ee_basemaps.keys():\n right_layer = ee_basemaps[right_layer]\n\n control = ipyleaflet.SplitMapControl(\n left_layer=left_layer, right_layer=right_layer)\n self.add_control(control)\n\n except Exception as e:\n print(e)\n print('The provided layers are invalid!')\n\n def ts_inspector(self, left_ts, right_ts, left_names, right_names, left_vis={}, right_vis={}):\n \"\"\"Creates a split-panel map for inspecting timeseries images.\n\n Args:\n left_ts (object): An ee.ImageCollection to show on the left panel.\n right_ts (object): An ee.ImageCollection to show on the right panel.\n left_names (list): A list of names to show under the left dropdown.\n right_names (list): A list of names to show under the right dropdown.\n left_vis (dict, optional): Visualization parameters for the left layer. Defaults to {}.\n right_vis (dict, optional): Visualization parameters for the right layer. 
Defaults to {}.\n \"\"\"\n left_count = int(left_ts.size().getInfo())\n right_count = int(right_ts.size().getInfo())\n\n if left_count != len(left_names):\n print(\n 'The number of images in left_ts must match the number of layer names in left_names.')\n return\n if right_count != len(right_names):\n print(\n 'The number of images in right_ts must match the number of layer names in right_names.')\n return\n\n left_layer = TileLayer(\n url='https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}',\n attribution='Google',\n name='Google Maps'\n )\n right_layer = TileLayer(\n url='https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}',\n attribution='Google',\n name='Google Maps'\n )\n\n self.clear_controls()\n left_dropdown = widgets.Dropdown(options=left_names, value=None)\n right_dropdown = widgets.Dropdown(options=right_names, value=None)\n left_dropdown.layout.max_width = '130px'\n right_dropdown.layout.max_width = '130px'\n\n left_control = WidgetControl(widget=left_dropdown, position='topleft')\n right_control = WidgetControl(\n widget=right_dropdown, position='topright')\n\n self.add_control(control=left_control)\n self.add_control(control=right_control)\n\n self.add_control(ZoomControl(position='topleft'))\n self.add_control(ScaleControl(position='bottomleft'))\n self.add_control(FullScreenControl())\n\n def left_dropdown_change(change):\n left_dropdown_index = left_dropdown.index\n if left_dropdown_index is not None and left_dropdown_index >= 0:\n try:\n if isinstance(left_ts, ee.ImageCollection):\n left_image = left_ts.toList(\n left_ts.size()).get(left_dropdown_index)\n elif isinstance(left_ts, ee.List):\n left_image = left_ts.get(left_dropdown_index)\n else:\n print('The left_ts argument must be an ee.ImageCollection or ee.List.')\n return\n\n if isinstance(left_image, ee.ImageCollection):\n left_image = ee.Image(left_image.mosaic())\n elif isinstance(left_image, ee.Image):\n pass\n else:\n left_image = ee.Image(left_image)\n\n left_image = ee_tile_layer(\n left_image, left_vis, left_names[left_dropdown_index])\n left_layer.url = left_image.url\n except Exception as e:\n print(e)\n return\n\n left_dropdown.observe(left_dropdown_change, names='value')\n\n def right_dropdown_change(change):\n right_dropdown_index = right_dropdown.index\n if right_dropdown_index is not None and right_dropdown_index >= 0:\n try:\n if isinstance(right_ts, ee.ImageCollection):\n right_image = right_ts.toList(\n right_ts.size()).get(right_dropdown_index)\n elif isinstance(right_ts, ee.List):\n right_image = right_ts.get(right_dropdown_index)\n else:\n print('The right_ts argument must be an ee.ImageCollection or ee.List.')\n return\n\n if isinstance(right_image, ee.ImageCollection):\n right_image = ee.Image(right_image.mosaic())\n elif isinstance(right_image, ee.Image):\n pass\n else:\n right_image = ee.Image(right_image)\n\n right_image = ee_tile_layer(\n right_image, right_vis, right_names[right_dropdown_index])\n right_layer.url = right_image.url\n except Exception as e:\n print(e)\n return\n\n right_dropdown.observe(right_dropdown_change, names='value')\n\n try:\n\n split_control = ipyleaflet.SplitMapControl(\n left_layer=left_layer, right_layer=right_layer)\n self.add_control(split_control)\n\n except Exception as e:\n print(e)\n\n def basemap_demo(self):\n \"\"\"A demo for using geemap basemaps.\n\n \"\"\"\n dropdown = widgets.Dropdown(\n options=list(ee_basemaps.keys()),\n value='HYBRID',\n description='Basemaps'\n )\n\n def on_click(change):\n basemap_name = change['new']\n old_basemap = self.layers[-1]\n 
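# swap the current basemap (the most recently added layer) for the newly selected one in place\n 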
self.substitute_layer(old_basemap, ee_basemaps[basemap_name])\n\n dropdown.observe(on_click, 'value')\n basemap_control = WidgetControl(widget=dropdown, position='topright')\n self.remove_control(self.inspector_control)\n # self.remove_control(self.layer_control)\n self.add_control(basemap_control)\n\n def add_legend(self, legend_title='Legend', legend_dict=None, legend_keys=None, legend_colors=None, position='bottomright', builtin_legend=None, **kwargs):\n \"\"\"Adds a customized legend to the map.\n\n Args:\n legend_title (str, optional): Title of the legend. Defaults to 'Legend'.\n legend_dict (dict, optional): A dictionary containing legend items as keys and color as values. If provided, legend_keys and legend_colors will be ignored. Defaults to None.\n legend_keys (list, optional): A list of legend keys. Defaults to None.\n legend_colors (list, optional): A list of legend colors. Defaults to None.\n position (str, optional): Position of the legend. Defaults to 'bottomright'.\n builtin_legend (str, optional): Name of the builtin legend to add to the map. Defaults to None.\n\n \"\"\"\n import pkg_resources\n from IPython.display import display\n pkg_dir = os.path.dirname(\n pkg_resources.resource_filename(\"geemap\", \"geemap.py\"))\n legend_template = os.path.join(pkg_dir, 'data/template/legend.html')\n\n # print(kwargs['min_height'])\n\n if 'min_width' not in kwargs.keys():\n min_width = None\n else:\n min_width = kwargs['min_width']\n if 'max_width' not in kwargs.keys():\n max_width = None\n else:\n max_width = kwargs['max_width']\n if 'min_height' not in kwargs.keys():\n min_height = None\n else:\n min_height = kwargs['min_height']\n if 'max_height' not in kwargs.keys():\n max_height = None\n else:\n max_height = kwargs['max_height']\n if 'height' not in kwargs.keys():\n height = None\n else:\n height = kwargs['height']\n if 'width' not in kwargs.keys():\n width = None\n else:\n width = kwargs['width']\n\n if width is None:\n max_width = '300px'\n if height is None:\n max_height = '400px'\n\n if not os.path.exists(legend_template):\n print('The legend template does not exist.')\n return\n\n if legend_keys is not None:\n if not isinstance(legend_keys, list):\n print('The legend keys must be a list.')\n return\n else:\n legend_keys = ['One', 'Two', 'Three', 'Four', 'etc']\n\n if legend_colors is not None:\n if not isinstance(legend_colors, list):\n print('The legend colors must be a list.')\n return\n elif all(isinstance(item, tuple) for item in legend_colors):\n try:\n legend_colors = [rgb_to_hex(x) for x in legend_colors]\n except Exception as e:\n print(e)\n elif all((item.startswith('#') and len(item) == 7) for item in legend_colors):\n pass\n elif all((len(item) == 6) for item in legend_colors):\n pass\n else:\n print('The legend colors must be a list of tuples.')\n return\n else:\n legend_colors = ['#8DD3C7', '#FFFFB3',\n '#BEBADA', '#FB8072', '#80B1D3']\n\n if len(legend_keys) != len(legend_colors):\n print('The legend keys and values must be the same length.')\n return\n\n allowed_builtin_legends = builtin_legends.keys()\n if builtin_legend is not None:\n # builtin_legend = builtin_legend.upper()\n if builtin_legend not in allowed_builtin_legends:\n print('The builtin legend must be one of the following: {}'.format(\n ', '.join(allowed_builtin_legends)))\n return\n else:\n legend_dict = builtin_legends[builtin_legend]\n legend_keys = list(legend_dict.keys())\n legend_colors = list(legend_dict.values())\n\n if legend_dict is not None:\n if not isinstance(legend_dict, dict):\n 
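# reject anything that is not a plain {label: color} dictionary\n 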
print('The legend dict must be a dictionary.')\n return\n else:\n legend_keys = list(legend_dict.keys())\n legend_colors = list(legend_dict.values())\n if all(isinstance(item, tuple) for item in legend_colors):\n try:\n legend_colors = [rgb_to_hex(x) for x in legend_colors]\n except Exception as e:\n print(e)\n\n allowed_positions = ['topleft', 'topright',\n 'bottomleft', 'bottomright']\n if position not in allowed_positions:\n print('The position must be one of the following: {}'.format(\n ', '.join(allowed_positions)))\n return\n\n header = []\n content = []\n footer = []\n\n with open(legend_template) as f:\n lines = f.readlines()\n lines[3] = lines[3].replace('Legend', legend_title)\n header = lines[:6]\n footer = lines[11:]\n\n for index, key in enumerate(legend_keys):\n color = legend_colors[index]\n if not color.startswith('#'):\n color = '#' + color\n item = \" <li><span style='background:{};'></span>{}</li>\\n\".format(\n color, key)\n content.append(item)\n\n legend_html = header + content + footer\n legend_text = ''.join(legend_html)\n\n try:\n if self.legend_control is not None:\n legend_widget = self.legend_widget\n legend_widget.close()\n self.remove_control(self.legend_control)\n\n legend_output_widget = widgets.Output(\n layout={'border': '1px solid black', 'max_width': max_width, 'min_width': min_width, 'max_height': max_height,\n 'min_height': min_height, 'height': height, 'width': width, 'overflow': 'scroll'})\n legend_control = WidgetControl(\n widget=legend_output_widget, position=position)\n legend_widget = widgets.HTML(value=legend_text)\n with legend_output_widget:\n display(legend_widget)\n\n self.legend_widget = legend_output_widget\n self.legend_control = legend_control\n self.add_control(legend_control)\n\n except Exception as e:\n print(e)\n\n def image_overlay(self, url, bounds, name):\n \"\"\"Overlays an image from the Internet or locally on the map.\n\n Args:\n url (str): http URL or local file path to the image.\n bounds (tuple): bounding box of the image in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -100)).\n name (str): name of the layer to show on the layer control.\n \"\"\"\n from base64 import b64encode\n from PIL import Image, ImageSequence\n from io import BytesIO\n try:\n if not url.startswith('http'):\n\n if not os.path.exists(url):\n print('The provided file does not exist.')\n return\n\n ext = os.path.splitext(url)[1][1:] # file extension\n image = Image.open(url)\n\n f = BytesIO()\n if ext.lower() == 'gif':\n frames = []\n # Loop over each frame in the animated image\n for frame in ImageSequence.Iterator(image):\n frame = frame.convert('RGBA')\n b = BytesIO()\n frame.save(b, format=\"gif\")\n frame = Image.open(b)\n frames.append(frame)\n frames[0].save(f, format='GIF', save_all=True,\n append_images=frames[1:], loop=0)\n else:\n image.save(f, ext)\n\n data = b64encode(f.getvalue())\n data = data.decode('ascii')\n url = 'data:image/{};base64,'.format(ext) + data\n img = ipyleaflet.ImageOverlay(url=url, bounds=bounds, name=name)\n self.add_layer(img)\n except Exception as e:\n print(e)\n \n def video_overlay(self, url, bounds, name):\n \"\"\"Overlays a video from the Internet on the map.\n\n Args:\n url (str): http URL of the video, such as \"https://www.mapbox.com/bites/00188/patricia_nasa.webm\"\n bounds (tuple): bounding box of the video in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -100)).\n name (str): name of the layer to show on the layer control.\n 
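\n Example (a hypothetical usage sketch, reusing the sample URL and bounds from above; m is an existing Map instance):\n\n m.video_overlay(url='https://www.mapbox.com/bites/00188/patricia_nasa.webm', bounds=((13, -130), (32, -100)), name='Hurricane Patricia')\n 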
\"\"\"\n try:\n video = ipyleaflet.VideoOverlay(url=url, bounds=bounds, name=name)\n self.add_layer(video)\n except Exception as e:\n print(e)\n\n def add_landsat_ts_gif(self, layer_name='Timelapse', roi=None, label=None, start_year=1984, end_year=2019, start_date='06-10', end_date='09-20', bands=['NIR', 'Red', 'Green'], vis_params=None, dimensions=768, frames_per_second=10, font_size=30, font_color='white', add_progress_bar=True, progress_bar_color='white', progress_bar_height=5, out_gif=None, download=False, apply_fmask=True, nd_bands=None, nd_threshold=0, nd_palette=['black', 'blue']):\n \"\"\"Adds a Landsat timelapse to the map.\n\n Args:\n layer_name (str, optional): Layer name to show under the layer control. Defaults to 'Timelapse'.\n roi (object, optional): Region of interest to create the timelapse. Defaults to None.\n label (str, optional): A label to show on the GIF, such as a place name. Defaults to None.\n start_year (int, optional): Starting year for the timelapse. Defaults to 1984.\n end_year (int, optional): Ending year for the timelapse. Defaults to 2019.\n start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.\n end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '09-20'.\n bands (list, optional): Three bands selected from ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']. Defaults to ['NIR', 'Red', 'Green'].\n vis_params (dict, optional): Visualization parameters. Defaults to None.\n dimensions (int, optional): A number or pair of numbers (in the format WIDTHxHEIGHT) giving the maximum dimensions of the thumbnail to render, in pixels. If only one number is passed, it is used as the maximum, and the other dimension is computed by proportional scaling. Defaults to 768.\n frames_per_second (int, optional): Animation speed. Defaults to 10.\n font_size (int, optional): Font size of the animated text and label. Defaults to 30.\n font_color (str, optional): Font color of the animated text and label. Defaults to 'white'.\n add_progress_bar (bool, optional): Whether to add a progress bar at the bottom of the GIF. Defaults to True.\n progress_bar_color (str, optional): Color for the progress bar. Defaults to 'white'.\n progress_bar_height (int, optional): Height of the progress bar. Defaults to 5.\n out_gif (str, optional): File path to the output animated GIF. Defaults to None.\n download (bool, optional): Whether to download the gif. Defaults to False.\n apply_fmask (bool, optional): Whether to apply Fmask (Function of mask) for automated clouds, cloud shadows, snow, and water masking. Defaults to True.\n nd_bands (list, optional): A list of names specifying the bands to use, e.g., ['Green', 'SWIR1']. The normalized difference is computed as (first − second) / (first + second). Note that negative input values are forced to 0 so that the result is confined to the range (-1, 1). Defaults to None.\n nd_threshold (float, optional): The threshold for extracting pixels from the normalized difference band. Defaults to 0.\n nd_palette (list, optional): The color palette to use for displaying the normalized difference band. Defaults to ['black', 'blue']. 
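\n\n Example (a minimal, hypothetical sketch; m is an existing Map instance, and the rectangle roughly matches the default ROI used when none is drawn):\n\n roi = ee.Geometry.Rectangle([-115.47, 35.89, -114.27, 36.41])\n m.add_landsat_ts_gif(roi=roi, label='Las Vegas, NV', start_year=1985, end_year=2010, bands=['SWIR1', 'NIR', 'Red'], frames_per_second=5)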
\n\n \"\"\"\n try:\n\n if roi is None:\n if self.draw_last_feature is not None:\n feature = self.draw_last_feature\n roi = feature.geometry()\n else:\n roi = ee.Geometry.Polygon(\n [[[-115.471773, 35.892718],\n [-115.471773, 36.409454],\n [-114.271283, 36.409454],\n [-114.271283, 35.892718],\n [-115.471773, 35.892718]]], None, False)\n elif isinstance(roi, ee.Feature) or isinstance(roi, ee.FeatureCollection):\n roi = roi.geometry()\n elif isinstance(roi, ee.Geometry):\n pass\n else:\n print('The provided roi is invalid. It must be an ee.Geometry')\n return\n\n geojson = ee_to_geojson(roi)\n bounds = minimum_bounding_box(geojson)\n geojson = adjust_longitude(geojson)\n roi = ee.Geometry(geojson)\n\n in_gif = landsat_ts_gif(roi=roi, out_gif=out_gif, start_year=start_year, end_year=end_year, start_date=start_date,\n end_date=end_date, bands=bands, vis_params=vis_params, dimensions=dimensions, frames_per_second=frames_per_second, apply_fmask=apply_fmask, nd_bands=nd_bands, nd_threshold=nd_threshold, nd_palette=nd_palette)\n in_nd_gif = in_gif.replace('.gif', '_nd.gif')\n\n print('Adding animated text to GIF ...')\n add_text_to_gif(in_gif, in_gif, xy=('2%', '2%'), text_sequence=start_year,\n font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)\n if nd_bands is not None:\n add_text_to_gif(in_nd_gif, in_nd_gif, xy=('2%', '2%'), text_sequence=start_year,\n font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)\n\n if label is not None:\n add_text_to_gif(in_gif, in_gif, xy=('2%', '90%'), text_sequence=label,\n font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)\n # if nd_bands is not None:\n # add_text_to_gif(in_nd_gif, in_nd_gif, xy=('2%', '90%'), text_sequence=label,\n # font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)\n\n if is_tool('ffmpeg'):\n reduce_gif_size(in_gif)\n if nd_bands is not None:\n reduce_gif_size(in_nd_gif)\n\n print('Adding GIF to the map ...')\n self.image_overlay(url=in_gif, bounds=bounds, name=layer_name)\n if nd_bands is not None:\n self.image_overlay(\n url=in_nd_gif, bounds=bounds, name=layer_name+' ND')\n print('The timelapse has been added to the map.')\n\n if download:\n link = create_download_link(\n in_gif, title=\"Click here to download the timelapse: \")\n display(link)\n\n except Exception as e:\n print(e)\n\n def to_html(self, outfile, title='My Map', width='100%', height='880px'):\n \"\"\"Saves the map as a HTML file.\n\n Args:\n outfile (str): The output file path to the HTML file.\n title (str, optional): The title of the HTML file. Defaults to 'My Map'.\n width (str, optional): The width of the map in pixels or percentage. Defaults to '100%'.\n height (str, optional): The height of the map in pixels. 
Defaults to '880px'.\n \"\"\"\n try:\n\n if not outfile.endswith('.html'):\n print('The output file must end with .html')\n return\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n before_width = self.layout.width\n before_height = self.layout.height\n\n if not isinstance(width, str):\n print(\"width must be a string.\")\n return\n elif width.endswith('px') or width.endswith('%'):\n pass\n else:\n print('width must end with px or %')\n return\n\n if not isinstance(height, str):\n print(\"height must be a string.\")\n return\n elif not height.endswith('px'):\n print('height must end with px')\n return\n\n self.layout.width = width\n self.layout.height = height\n\n self.save(outfile, title=title)\n\n self.layout.width = before_width\n self.layout.height = before_height\n\n except Exception as e:\n print(e)\n\n def to_image(self, outfile=None, monitor=1):\n \"\"\"Saves the map as a PNG or JPG image.\n\n Args:\n outfile (str, optional): The output file path to the image. Defaults to None.\n monitor (int, optional): The monitor to take the screenshot. Defaults to 1.\n \"\"\"\n if outfile is None:\n outfile = os.path.join(os.getcwd(), 'my_map.png')\n\n if outfile.endswith('.png') or outfile.endswith('.jpg'):\n pass\n else:\n print('The output file must be a PNG or JPG image.')\n return\n\n work_dir = os.path.dirname(outfile)\n if not os.path.exists(work_dir):\n os.makedirs(work_dir)\n\n screenshot = screen_capture(outfile, monitor)\n self.screenshot = screenshot\n\n def toolbar_reset(self):\n \"\"\"Resets the toolbar so that no tool is selected.\n \"\"\"\n toolbar_grid = self.toolbar\n for tool in toolbar_grid.children:\n tool.value = False\n\n def add_raster(self, image, bands=None, layer_name=None, colormap=None, x_dim='x', y_dim='y'):\n \"\"\"Adds a local raster dataset to the map.\n\n Args:\n image (str): The image file path.\n bands (int or list, optional): The image bands to use. It can be either a number (e.g., 1) or a list (e.g., [3, 2, 1]). Defaults to None.\n layer_name (str, optional): The layer name to use for the raster. Defaults to None.\n colormap (str, optional): The name of the colormap to use for the raster, such as 'gray' and 'terrain'. More can be found at https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html. Defaults to None.\n x_dim (str, optional): The x dimension. Defaults to 'x'.\n y_dim (str, optional): The y dimension. Defaults to 'y'.\n \"\"\"\n try:\n import xarray_leaflet\n\n except ImportError:\n # import platform\n # if platform.system() != \"Windows\":\n # # install_from_github(\n # # url='https://github.com/davidbrochart/xarray_leaflet')\n # check_install('xarray_leaflet')\n # import xarray_leaflet\n # else:\n print(\n 'You need to install xarray_leaflet first. 
See https://github.com/davidbrochart/xarray_leaflet')\n print(\n 'Try the following to install xarray_leaflet: \\n\\nconda install -c conda-forge xarray_leaflet')\n return\n\n import warnings\n import numpy as np\n import rioxarray\n import xarray as xr\n import matplotlib.pyplot as plt\n\n warnings.simplefilter('ignore')\n\n if not os.path.exists(image):\n print('The image file does not exist.')\n return\n\n if colormap is None:\n colormap = plt.cm.inferno\n\n if layer_name is None:\n layer_name = 'Layer_' + random_string()\n\n if isinstance(colormap, str):\n colormap = plt.cm.get_cmap(name=colormap)\n\n da = rioxarray.open_rasterio(image, masked=True)\n\n # print(da.rio.nodata)\n\n multi_band = False\n if len(da.band) > 1:\n multi_band = True\n if bands is None:\n bands = [3, 2, 1]\n else:\n bands = 1\n\n if multi_band:\n da = da.rio.write_nodata(0)\n else:\n da = da.rio.write_nodata(np.nan)\n da = da.sel(band=bands)\n\n # crs = da.rio.crs\n # nan = da.attrs['nodatavals'][0]\n # da = da / da.max()\n # # if multi_band:\n # da = xr.where(da == nan, np.nan, da)\n # da = da.rio.write_nodata(0)\n # da = da.rio.write_crs(crs)\n\n if multi_band:\n layer = da.leaflet.plot(\n self, x_dim=x_dim, y_dim=y_dim, rgb_dim='band')\n else:\n layer = da.leaflet.plot(\n self, x_dim=x_dim, y_dim=y_dim, colormap=colormap)\n\n layer.name = layer_name\n\n def remove_drawn_features(self):\n \"\"\"Removes user-drawn geometries from the map\n \"\"\"\n if self.draw_layer is not None:\n self.remove_layer(self.draw_layer)\n self.draw_count = 0\n self.draw_features = []\n self.draw_last_feature = None\n self.draw_layer = None\n self.draw_last_json = None\n self.draw_last_bounds = None\n self.user_roi = None\n self.user_rois = None\n self.chart_values = []\n self.chart_points = []\n self.chart_labels = None\n\n def extract_values_to_points(self, filename):\n \"\"\"Exports pixel values to a csv file based on user-drawn geometries.\n\n Args:\n filename (str): The output file path to the csv file or shapefile.\n \"\"\"\n import csv\n\n filename = os.path.abspath(filename)\n allowed_formats = ['csv', 'shp']\n ext = filename[-3:]\n\n if ext not in allowed_formats:\n print('The output file must be one of the following: {}'.format(\n ', '.join(allowed_formats)))\n return\n\n out_dir = os.path.dirname(filename)\n out_csv = filename[:-3] + 'csv'\n out_shp = filename[:-3] + 'shp'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n count = len(self.chart_points)\n out_list = []\n if count > 0:\n header = ['id', 'longitude', 'latitude'] + self.chart_labels\n out_list.append(header)\n\n for i in range(0, count):\n id = i + 1\n line = [id] + self.chart_points[i] + self.chart_values[i]\n out_list.append(line)\n\n with open(out_csv, \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(out_list)\n\n if ext == 'csv':\n print('The csv file has been saved to: {}'.format(out_csv))\n else:\n csv_to_shp(out_csv, out_shp)\n print('The shapefile has been saved to: {}'.format(out_shp))\n\n\n# The functions below are outside the Map class.\n\ndef screen_capture(outfile, monitor=1):\n \"\"\"Takes a full screenshot of the selected monitor.\n\n Args:\n outfile (str): The output file path to the screenshot.\n monitor (int, optional): The monitor to take the screenshot. 
Defaults to 1.\n \"\"\"\n from mss import mss\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n if not isinstance(monitor, int):\n print('The monitor number must be an integer.')\n return\n\n try:\n with mss() as sct:\n sct.shot(output=outfile, mon=monitor)\n return outfile\n\n except Exception as e:\n print(e)\n\n\ndef install_from_github(url):\n \"\"\"Install a package from a GitHub repository.\n\n Args:\n url (str): The URL of the GitHub repository.\n \"\"\"\n\n try:\n download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')\n if not os.path.exists(download_dir):\n os.makedirs(download_dir)\n\n repo_name = os.path.basename(url)\n zip_url = os.path.join(url, 'archive/master.zip')\n filename = repo_name + '-master.zip'\n download_from_url(url=zip_url, out_file_name=filename,\n out_dir=download_dir, unzip=True)\n\n pkg_dir = os.path.join(download_dir, repo_name + '-master')\n pkg_name = os.path.basename(url)\n work_dir = os.getcwd()\n os.chdir(pkg_dir)\n print('Installing {}...'.format(pkg_name))\n cmd = 'pip install .'\n os.system(cmd)\n os.chdir(work_dir)\n print('{} has been installed successfully.'.format(pkg_name))\n # print(\"\\nPlease comment out 'install_from_github()' and restart the kernel to take effect:\\nJupyter menu -> Kernel -> Restart & Clear Output\")\n\n except Exception as e:\n print(e)\n\n\ndef rgb_to_hex(rgb=(255, 255, 255)):\n \"\"\"Converts RGB to hex color. In RGB color R stands for Red, G stands for Green, and B stands for Blue, and it ranges from the decimal value of 0 – 255.\n\n Args:\n rgb (tuple, optional): RGB color code as a tuple of (red, green, blue). Defaults to (255, 255, 255).\n\n Returns:\n str: hex color code\n \"\"\"\n return '%02x%02x%02x' % rgb\n\n\ndef hex_to_rgb(value='FFFFFF'):\n \"\"\"Converts hex color to RGB color. \n\n Args:\n value (str, optional): Hex color code as a string. Defaults to 'FFFFFF'.\n\n Returns:\n tuple: RGB color as a tuple.\n \"\"\"\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i+lv//3], 16) for i in range(0, lv, lv//3))\n\n\ndef check_color(in_color):\n \"\"\"Checks the input color and returns the corresponding hex color code.\n\n Args:\n in_color (str or tuple): It can be a string (e.g., 'red', '#ffff00') or tuple (e.g., (255, 127, 0)).\n\n Returns:\n str: A hex color code.\n \"\"\"\n out_color = '#000000' # default black color\n if isinstance(in_color, tuple) and len(in_color) == 3:\n if all(isinstance(item, int) for item in in_color):\n rescaled_color = [x / 255.0 for x in in_color]\n out_color = colour.Color(rgb=tuple(rescaled_color))\n return out_color.hex_l\n else:\n print(\n 'RGB color must be a tuple with three integer values ranging from 0 to 255.')\n return\n else:\n try:\n out_color = colour.Color(in_color)\n return out_color.hex_l\n except Exception as e:\n print('The provided color is invalid. Using the default black color.')\n print(e)\n return out_color\n\n\ndef system_fonts(show_full_path=False):\n \"\"\"Gets a list of system fonts.\n\n # Common font locations:\n # Linux: /usr/share/fonts/TTF/\n # Windows: C:\\Windows\\Fonts\n # macOS: System > Library > Fonts\n\n Args:\n show_full_path (bool, optional): Whether to show the full path of each system font. 
Defaults to False.\n\n Returns:\n list: A list of system fonts.\n \"\"\"\n try:\n import matplotlib.font_manager\n\n font_list = matplotlib.font_manager.findSystemFonts(\n fontpaths=None, fontext='ttf')\n font_list.sort()\n\n font_names = [os.path.basename(f) for f in font_list]\n font_names.sort()\n\n if show_full_path:\n return font_list\n else:\n return font_names\n\n except Exception as e:\n print(e)\n\n\ndef add_text_to_gif(in_gif, out_gif, xy=None, text_sequence=None, font_type=\"arial.ttf\", font_size=20, font_color='#000000', add_progress_bar=True, progress_bar_color='white', progress_bar_height=5, duration=100, loop=0):\n \"\"\"Adds animated text to a GIF image.\n\n Args:\n in_gif (str): The file path to the input GIF image.\n out_gif (str): The file path to the output GIF image.\n xy (tuple, optional): Top left corner of the text. It can be formatted like this: (10, 10) or ('15%', '25%'). Defaults to None.\n text_sequence (int, str, list, optional): Text to be drawn. It can be an integer number, a string, or a list of strings. Defaults to None.\n font_type (str, optional): Font type. Defaults to \"arial.ttf\".\n font_size (int, optional): Font size. Defaults to 20.\n font_color (str, optional): Font color. It can be a string (e.g., 'red'), rgb tuple (e.g., (255, 127, 0)), or hex code (e.g., '#ff00ff'). Defaults to '#000000'.\n add_progress_bar (bool, optional): Whether to add a progress bar at the bottom of the GIF. Defaults to True.\n progress_bar_color (str, optional): Color for the progress bar. Defaults to 'white'.\n progress_bar_height (int, optional): Height of the progress bar. Defaults to 5.\n duration (int, optional): Controls how long each frame will be displayed for, in milliseconds. It is the inverse of the frame rate. Setting it to 100 milliseconds gives 10 frames per second. You can decrease the duration to give a smoother animation. Defaults to 100.\n loop (int, optional): Controls how many times the animation repeats. A value of 1 means the animation plays once and then stops (displaying the last frame); a value of 0 means it repeats forever. Defaults to 0.\n\n \"\"\"\n import io\n import pkg_resources\n import warnings\n from PIL import Image, ImageDraw, ImageSequence, ImageFont\n\n warnings.simplefilter('ignore')\n pkg_dir = os.path.dirname(\n pkg_resources.resource_filename(\"geemap\", \"geemap.py\"))\n default_font = os.path.join(pkg_dir, 'data/fonts/arial.ttf')\n\n in_gif = os.path.abspath(in_gif)\n out_gif = os.path.abspath(out_gif)\n\n if not os.path.exists(in_gif):\n print('The input gif file does not exist.')\n return\n\n if not os.path.exists(os.path.dirname(out_gif)):\n os.makedirs(os.path.dirname(out_gif))\n\n if font_type == 'arial.ttf':\n font = ImageFont.truetype(default_font, font_size)\n else:\n try:\n font_list = system_fonts(show_full_path=True)\n font_names = [os.path.basename(f) for f in font_list]\n if (font_type in font_list) or (font_type in font_names):\n font = ImageFont.truetype(font_type, font_size)\n else:\n print(\n 'The specified font type could not be found on your system. 
Using the default font instead.')\n font = ImageFont.truetype(default_font, font_size)\n except Exception as e:\n print(e)\n font = ImageFont.truetype(default_font, font_size)\n\n color = check_color(font_color)\n progress_bar_color = check_color(progress_bar_color)\n\n try:\n image = Image.open(in_gif)\n except Exception as e:\n print('An error occurred while opening the gif.')\n print(e)\n return\n\n count = image.n_frames\n W, H = image.size\n progress_bar_widths = [i * 1.0 / count * W for i in range(1, count + 1)]\n progress_bar_shapes = [[(0, H - progress_bar_height), (x, H)]\n for x in progress_bar_widths]\n\n if xy is None:\n # default text location is 5% width and 5% height of the image.\n xy = (int(0.05 * W), int(0.05 * H))\n elif (xy is not None) and (not isinstance(xy, tuple)) and (len(xy) == 2):\n print(\"xy must be a tuple, e.g., (10, 10), ('10%', '10%')\")\n return\n elif all(isinstance(item, int) for item in xy) and (len(xy) == 2):\n x, y = xy\n if (x > 0) and (x < W) and (y > 0) and (y < H):\n pass\n else:\n print(\n 'xy is out of bounds. x must be within [0, {}], and y must be within [0, {}]'.format(W, H))\n return\n elif all(isinstance(item, str) for item in xy) and (len(xy) == 2):\n x, y = xy\n if ('%' in x) and ('%' in y):\n try:\n x = int(float(x.replace('%', '')) / 100.0 * W)\n y = int(float(y.replace('%', '')) / 100.0 * H)\n xy = (x, y)\n except Exception as e:\n print(\n \"The specified xy is invalid. It must be formatted like this ('10%', '10%')\")\n return\n else:\n print(\"The specified xy is invalid. It must be formatted like this: (10, 10) or ('10%', '10%')\")\n return\n\n if text_sequence is None:\n text = [str(x) for x in range(1, count + 1)]\n elif isinstance(text_sequence, int):\n text = [str(x) for x in range(\n text_sequence, text_sequence + count + 1)]\n elif isinstance(text_sequence, str):\n try:\n text_sequence = int(text_sequence)\n text = [str(x) for x in range(\n text_sequence, text_sequence + count + 1)]\n except Exception as e:\n text = [text_sequence] * count\n elif isinstance(text_sequence, list) and len(text_sequence) != count:\n print('The length of the text sequence must be equal to the number ({}) of frames in the gif.'.format(count))\n return\n else:\n text = [str(x) for x in text_sequence]\n\n try:\n\n frames = []\n # Loop over each frame in the animated image\n for index, frame in enumerate(ImageSequence.Iterator(image)):\n # Draw the text on the frame\n frame = frame.convert('RGB')\n draw = ImageDraw.Draw(frame)\n # w, h = draw.textsize(text[index])\n draw.text(xy, text[index], font=font, fill=color)\n if add_progress_bar:\n draw.rectangle(\n progress_bar_shapes[index], fill=progress_bar_color)\n del draw\n\n b = io.BytesIO()\n frame.save(b, format=\"GIF\")\n frame = Image.open(b)\n\n frames.append(frame)\n # https://www.pythoninformer.com/python-libraries/pillow/creating-animated-gif/\n # Save the frames as a new image\n\n frames[0].save(out_gif, save_all=True,\n append_images=frames[1:], duration=duration, loop=loop, optimize=True)\n except Exception as e:\n print(e)\n\n\ndef open_image_from_url(url):\n \"\"\"Loads an image from the specified URL.\n\n Args:\n url (str): URL of the image.\n\n Returns:\n object: Image object.\n \"\"\"\n from PIL import Image\n import requests\n from io import BytesIO\n from urllib.parse import urlparse\n\n try:\n\n # if url.endswith('.gif'):\n # out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')\n # if not os.path.exists(out_dir):\n # os.makedirs(out_dir)\n # a = urlparse(url)\n # out_name = 
os.path.basename(a.path)\n # out_path = os.path.join(out_dir, out_name)\n # download_from_url(url, out_name, out_dir, unzip=False)\n # img = Image.open(out_path)\n # else:\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n return img\n except Exception as e:\n print(e)\n\n\ndef has_transparency(img):\n \"\"\"Checks whether an image has transparency.\n\n Args:\n img (object): a PIL Image object.\n\n Returns:\n bool: True if it has transparency, False otherwise.\n \"\"\"\n\n if img.mode == \"P\":\n transparent = img.info.get(\"transparency\", -1)\n for _, index in img.getcolors():\n if index == transparent:\n return True\n elif img.mode == \"RGBA\":\n extrema = img.getextrema()\n if extrema[3][0] < 255:\n return True\n\n return False\n\n\ndef add_image_to_gif(in_gif, out_gif, in_image, xy=None, image_size=(80, 80), circle_mask=False):\n \"\"\"Adds an image logo to a GIF image.\n\n Args:\n in_gif (str): Input file path to the GIF image.\n out_gif (str): Output file path to the GIF image.\n in_image (str): Input file path to the image.\n xy (tuple, optional): Top left corner of the text. It can be formatted like this: (10, 10) or ('15%', '25%'). Defaults to None.\n image_size (tuple, optional): Resize image. Defaults to (80, 80).\n circle_mask (bool, optional): Whether to apply a circle mask to the image. This only works with non-png images. Defaults to False.\n \"\"\"\n import io\n import warnings\n from PIL import Image, ImageDraw, ImageSequence, ImageFilter\n\n warnings.simplefilter('ignore')\n\n in_gif = os.path.abspath(in_gif)\n\n is_url = False\n if in_image.startswith('http'):\n is_url = True\n\n if not os.path.exists(in_gif):\n print('The input gif file does not exist.')\n return\n\n if (not is_url) and (not os.path.exists(in_image)):\n print('The provided logo file does not exist.')\n return\n\n if not os.path.exists(os.path.dirname(out_gif)):\n os.makedirs(os.path.dirname(out_gif))\n\n try:\n image = Image.open(in_gif)\n except Exception as e:\n print('An error occurred while opening the image.')\n print(e)\n return\n\n try:\n if in_image.startswith('http'):\n logo_raw_image = open_image_from_url(in_image)\n else:\n in_image = os.path.abspath(in_image)\n logo_raw_image = Image.open(in_image)\n except Exception as e:\n print(e)\n\n logo_raw_size = logo_raw_image.size\n image_size = min(logo_raw_size[0], image_size[0]), min(\n logo_raw_size[1], image_size[1])\n\n logo_image = logo_raw_image.convert('RGBA')\n logo_image.thumbnail(image_size, Image.ANTIALIAS)\n\n W, H = image.size\n mask_im = None\n\n if circle_mask:\n mask_im = Image.new(\"L\", image_size, 0)\n draw = ImageDraw.Draw(mask_im)\n draw.ellipse((0, 0, image_size[0], image_size[1]), fill=255)\n\n if has_transparency(logo_raw_image):\n mask_im = logo_image.copy()\n\n if xy is None:\n # default logo location is 5% width and 5% height of the image.\n xy = (int(0.05 * W), int(0.05 * H))\n elif (xy is not None) and (not isinstance(xy, tuple)) and (len(xy) == 2):\n print(\"xy must be a tuple, e.g., (10, 10), ('10%', '10%')\")\n return\n elif all(isinstance(item, int) for item in xy) and (len(xy) == 2):\n x, y = xy\n if (x > 0) and (x < W) and (y > 0) and (y < H):\n pass\n else:\n print(\n 'xy is out of bounds. 
x must be within [0, {}], and y must be within [0, {}]'.format(W, H))\n return\n elif all(isinstance(item, str) for item in xy) and (len(xy) == 2):\n x, y = xy\n if ('%' in x) and ('%' in y):\n try:\n x = int(float(x.replace('%', '')) / 100.0 * W)\n y = int(float(y.replace('%', '')) / 100.0 * H)\n xy = (x, y)\n except Exception as e:\n print(\n \"The specified xy is invalid. It must be formatted like this ('10%', '10%')\")\n return\n else:\n print(\"The specified xy is invalid. It must be formatted like this: (10, 10) or ('10%', '10%')\")\n return\n\n try:\n\n frames = []\n for index, frame in enumerate(ImageSequence.Iterator(image)):\n frame = frame.convert('RGBA')\n frame.paste(logo_image, xy, mask_im)\n\n b = io.BytesIO()\n frame.save(b, format=\"GIF\")\n frame = Image.open(b)\n frames.append(frame)\n\n frames[0].save(out_gif, save_all=True, append_images=frames[1:])\n except Exception as e:\n print(e)\n\n\ndef show_image(img_path, width=None, height=None):\n \"\"\"Shows an image within Jupyter notebook.\n\n Args:\n img_path (str): The image file path.\n width (int, optional): Width of the image in pixels. Defaults to None.\n height (int, optional): Height of the image in pixels. Defaults to None.\n\n \"\"\"\n from IPython.display import display\n\n try:\n out = widgets.Output()\n # layout={'border': '1px solid black'})\n # layout={'border': '1px solid black', 'width': str(width + 20) + 'px', 'height': str(height + 10) + 'px'},)\n out.clear_output(wait=True)\n display(out)\n with out:\n file = open(img_path, \"rb\")\n image = file.read()\n if (width is None) and (height is None):\n display(widgets.Image(value=image))\n elif (width is not None) and (height is not None):\n display(widgets.Image(value=image, width=width, height=height))\n else:\n print('You need to set both width and height.')\n return\n except Exception as e:\n print(e)\n\n\ndef legend_from_ee(ee_class_table):\n \"\"\"Extract legend from an Earth Engine class table on the Earth Engine Data Catalog page\n such as https://developers.google.com/earth-engine/datasets/catalog/MODIS_051_MCD12Q1\n\n Value\tColor\tDescription\n 0\t1c0dff\tWater\n 1\t05450a\tEvergreen needleleaf forest\n 2\t086a10\tEvergreen broadleaf forest\n 3\t54a708\tDeciduous needleleaf forest\n 4\t78d203\tDeciduous broadleaf forest\n 5\t009900\tMixed forest\n 6\tc6b044\tClosed shrublands\n 7\tdcd159\tOpen shrublands\n 8\tdade48\tWoody savannas\n 9\tfbff13\tSavannas\n 10\tb6ff05\tGrasslands\n 11\t27ff87\tPermanent wetlands\n 12\tc24f44\tCroplands\n 13\ta5a5a5\tUrban and built-up\n 14\tff6d4c\tCropland/natural vegetation mosaic\n 15\t69fff8\tSnow and ice\n 16\tf9ffa4\tBarren or sparsely vegetated\n 254\tffffff\tUnclassified\n\n Args:\n ee_class_table (str): An Earth Engine class table with triple quotes.\n\n Returns:\n dict: Returns a legend dictionary that can be used to create a legend.\n \"\"\"\n try:\n ee_class_table = ee_class_table.strip()\n lines = ee_class_table.split('\\n')[1:]\n\n if lines[0] == 'Value\\tColor\\tDescription':\n lines = lines[1:]\n\n legend_dict = {}\n for index, line in enumerate(lines):\n items = line.split(\"\\t\")\n items = [item.strip() for item in items]\n color = items[1]\n key = items[0] + \" \" + items[2]\n legend_dict[key] = color\n\n return legend_dict\n\n except Exception as e:\n print(e)\n\n\ndef ee_tile_layer(ee_object, vis_params={}, name='Layer untitled', shown=True, opacity=1.0):\n \"\"\"Converts an Earth Engine object to an ipyleaflet TileLayer.\n\n Args:\n ee_object (Collection|Feature|Image|MapId): The object to add to 
the map.\n vis_params (dict, optional): The visualization parameters. Defaults to {}.\n name (str, optional): The name of the layer. Defaults to 'Layer untitled'.\n shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.\n opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.\n \"\"\"\n # ee_initialize()\n\n image = None\n\n if not isinstance(ee_object, ee.Image) and not isinstance(ee_object, ee.ImageCollection) and not isinstance(ee_object, ee.FeatureCollection) and not isinstance(ee_object, ee.Feature) and not isinstance(ee_object, ee.Geometry):\n err_str = \"\\n\\nThe image argument in 'addLayer' function must be an instace of one of ee.Image, ee.Geometry, ee.Feature or ee.FeatureCollection.\"\n raise AttributeError(err_str)\n\n if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):\n features = ee.FeatureCollection(ee_object)\n\n width = 2\n\n if 'width' in vis_params:\n width = vis_params['width']\n\n color = '000000'\n\n if 'color' in vis_params:\n color = vis_params['color']\n\n image_fill = features.style(\n **{'fillColor': color}).updateMask(ee.Image.constant(0.5))\n image_outline = features.style(\n **{'color': color, 'fillColor': '00000000', 'width': width})\n\n image = image_fill.blend(image_outline)\n elif isinstance(ee_object, ee.image.Image):\n image = ee_object\n elif isinstance(ee_object, ee.imagecollection.ImageCollection):\n image = ee_object.mosaic()\n\n map_id_dict = ee.Image(image).getMapId(vis_params)\n tile_layer = ipyleaflet.TileLayer(\n url=map_id_dict['tile_fetcher'].url_format,\n attribution='Google Earth Engine',\n name=name,\n opacity=opacity,\n visible=True\n # visible=shown\n )\n return tile_layer\n\n\ndef geojson_to_ee(geo_json, geodesic=True):\n \"\"\"Converts a geojson to ee.Geometry()\n\n Args:\n geo_json (dict): A geojson geometry dictionary or file path.\n\n Returns:\n ee_object: An ee.Geometry object\n \"\"\"\n # ee_initialize()\n\n try:\n\n import json\n\n if not isinstance(geo_json, dict) and os.path.isfile(geo_json):\n with open(os.path.abspath(geo_json)) as f:\n geo_json = json.load(f)\n\n if geo_json['type'] == 'FeatureCollection':\n features = ee.FeatureCollection(geo_json['features'])\n return features\n elif geo_json['type'] == 'Feature':\n geom = None\n keys = geo_json['properties']['style'].keys()\n if 'radius' in keys: # Checks whether it is a circle\n geom = ee.Geometry(geo_json['geometry'])\n radius = geo_json['properties']['style']['radius']\n geom = geom.buffer(radius)\n elif geo_json['geometry']['type'] == 'Point': # Checks whether it is a point\n coordinates = geo_json['geometry']['coordinates']\n longitude = coordinates[0]\n latitude = coordinates[1]\n geom = ee.Geometry.Point(longitude, latitude)\n else:\n geom = ee.Geometry(geo_json['geometry'], \"\", geodesic)\n return geom\n else:\n print(\"Could not convert the geojson to ee.Geometry()\")\n\n except Exception as e:\n print(\"Could not convert the geojson to ee.Geometry()\")\n print(e)\n\n\ndef ee_to_geojson(ee_object, out_json=None):\n \"\"\"Converts Earth Engine object to geojson.\n\n Args:\n ee_object (object): An Earth Engine object.\n\n Returns:\n object: GeoJSON object.\n \"\"\"\n from json import dumps\n # ee_initialize()\n\n try:\n if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, 
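\n\n\n# Example (a minimal, hypothetical sketch; assumes ee.Initialize() has been called and\n# the inline feature dictionary below is illustrative only):\n#\n# point_feature = {'type': 'Feature', 'properties': {'style': {}},\n# 'geometry': {'type': 'Point', 'coordinates': [-100.0, 40.0]}}\n# geom = geojson_to_ee(point_feature) # returns an ee.Geometry.Point\n\n\ndef ee_to_geojson(ee_object, out_json=None):\n \"\"\"Converts Earth Engine object to geojson.\n\n Args:\n ee_object (object): An Earth Engine object.\n\n Returns:\n object: GeoJSON object.\n \"\"\"\n from json import dumps\n # ee_initialize()\n\n try:\n if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, 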
ee.featurecollection.FeatureCollection):\n json_object = ee_object.getInfo()\n if out_json is not None:\n out_json = os.path.abspath(out_json)\n if not os.path.exists(os.path.dirname(out_json)):\n os.makedirs(os.path.dirname(out_json))\n geojson = open(out_json, \"w\")\n geojson.write(\n dumps({\"type\": \"FeatureCollection\", \"features\": json_object}, indent=2) + \"\\n\")\n geojson.close()\n return json_object\n else:\n print(\"Could not convert the Earth Engine object to geojson\")\n except Exception as e:\n print(e)\n\n\ndef open_github(subdir=None):\n \"\"\"Opens the GitHub repository for this package.\n\n Args:\n subdir (str, optional): Sub-directory of the repository. Defaults to None.\n \"\"\"\n import webbrowser\n\n url = 'https://github.com/giswqs/geemap'\n\n if subdir == 'source':\n url += '/tree/master/geemap/'\n elif subdir == 'examples':\n url += '/tree/master/examples'\n elif subdir == 'tutorials':\n url += '/tree/master/tutorials'\n\n webbrowser.open_new_tab(url)\n\n\ndef clone_repo(out_dir='.', unzip=True):\n \"\"\"Clones the geemap GitHub repository.\n\n Args:\n out_dir (str, optional): Output folder for the repo. Defaults to '.'.\n unzip (bool, optional): Whether to unzip the repository. Defaults to True.\n \"\"\"\n url = 'https://github.com/giswqs/geemap/archive/master.zip'\n filename = 'geemap-master.zip'\n download_from_url(url, out_file_name=filename,\n out_dir=out_dir, unzip=unzip)\n\n\ndef open_youtube():\n \"\"\"Opens the YouTube tutorials for geemap.\n \"\"\"\n import webbrowser\n\n url = 'https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPccOFv1dCwvGI6TYnirRTg3'\n webbrowser.open_new_tab(url)\n\n\ndef api_docs():\n \"\"\"Open a browser and navigate to the geemap API documentation.\n \"\"\"\n import webbrowser\n\n url = 'https://giswqs.github.io/geemap/geemap'\n webbrowser.open_new_tab(url)\n\n\ndef show_youtube(id='h0pz3S6Tvx0'):\n \"\"\"Displays a YouTube video within Jupyter notebooks.\n\n Args:\n id (str, optional): Unique ID of the video. Defaults to 'h0pz3S6Tvx0'.\n\n \"\"\"\n from IPython.display import YouTubeVideo, display\n try:\n out = widgets.Output(\n layout={'width': '815px'})\n # layout={'border': '1px solid black', 'width': '815px'})\n out.clear_output(wait=True)\n display(out)\n with out:\n display(YouTubeVideo(id, width=800, height=450))\n except Exception as e:\n print(e)\n\n\ndef check_install(package):\n \"\"\"Checks whether a package is installed. If not, it will install the package.\n\n Args:\n package (str): The name of the package to check.\n \"\"\"\n import subprocess\n\n try:\n __import__(package)\n # print('{} is already installed.'.format(package))\n except ImportError:\n print('{} is not installed. 
Installing ...'.format(package))\n try:\n subprocess.check_call([\"python\", '-m', 'pip', 'install', package])\n print(\"{} has been installed successfully.\".format(package))\n except Exception as e:\n print('Failed to install {}'.format(package))\n print(e)\n\n\ndef update_package():\n \"\"\"Updates the geemap package from the geemap GitHub repository without the need to use pip or conda.\n In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.\n\n \"\"\"\n import shutil\n try:\n download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')\n if not os.path.exists(download_dir):\n os.makedirs(download_dir)\n clone_repo(out_dir=download_dir)\n\n pkg_dir = os.path.join(download_dir, 'geemap-master')\n work_dir = os.getcwd()\n os.chdir(pkg_dir)\n\n if shutil.which('pip') is None:\n cmd = 'pip3 install .'\n else:\n cmd = 'pip install .'\n\n os.system(cmd)\n os.chdir(work_dir)\n\n print(\"\\nPlease comment out 'geemap.update_package()' and restart the kernel to take effect:\\nJupyter menu -> Kernel -> Restart & Clear Output\")\n\n except Exception as e:\n print(e)\n\n\ndef csv_to_shp(in_csv, out_shp, longitude='longitude', latitude='latitude'):\n \"\"\"Converts a csv file with latlon info to a point shapefile.\n\n Args:\n in_csv (str): The input csv file containing longitude and latitude columns.\n out_shp (str): The file path to the output shapefile.\n longitude (str, optional): The column name of the longitude column. Defaults to 'longitude'.\n latitude (str, optional): The column name of the latitude column. Defaults to 'latitude'.\n \"\"\"\n import csv\n import shapefile as shp\n\n if not os.path.exists(in_csv):\n print('The provided CSV file does not exist.')\n return\n\n if not in_csv.endswith('.csv'):\n print('The input file must end with .csv')\n return\n\n out_dir = os.path.dirname(out_shp)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n try:\n points = shp.Writer(out_shp, shapeType=shp.POINT)\n with open(in_csv) as csvfile:\n csvreader = csv.DictReader(csvfile)\n header = csvreader.fieldnames\n [points.field(field) for field in header]\n for row in csvreader:\n points.point((float(row[longitude])), (float(row[latitude])))\n points.record(*tuple([row[f] for f in header]))\n\n out_prj = out_shp.replace('.shp', '.prj')\n with open(out_prj, 'w') as f:\n prj_str = 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"Degree\",0.0174532925199433]] '\n f.write(prj_str)\n\n except Exception as e:\n print(e)
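\n\n\n# Example (a minimal, hypothetical sketch -- 'data/points.csv' is a placeholder for a CSV\n# file with 'lon' and 'lat' columns; adjust the paths and column names for your own data):\n#\n# csv_to_shp('data/points.csv', 'data/points.shp', longitude='lon', latitude='lat')\n\n\ndef shp_to_geojson(in_shp, out_json=None):\n \"\"\"Converts a shapefile to GeoJSON.\n\n Args:\n in_shp (str): File path of the input shapefile.\n out_json (str, optional): File path of the output GeoJSON. 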
Defaults to None.\n\n Returns:\n object: The json object representing the shapefile.\n \"\"\"\n # check_install('pyshp')\n # ee_initialize()\n try:\n import json\n import shapefile\n in_shp = os.path.abspath(in_shp)\n\n if out_json is None:\n out_json = os.path.splitext(in_shp)[0] + \".json\"\n\n if os.path.exists(out_json):\n out_json = out_json.replace('.json', '_bk.json')\n\n elif not os.path.exists(os.path.dirname(out_json)):\n os.makedirs(os.path.dirname(out_json))\n\n reader = shapefile.Reader(in_shp)\n fields = reader.fields[1:]\n field_names = [field[0] for field in fields]\n buffer = []\n for sr in reader.shapeRecords():\n atr = dict(zip(field_names, sr.record))\n geom = sr.shape.__geo_interface__\n buffer.append(dict(type=\"Feature\", geometry=geom, properties=atr))\n\n from json import dumps\n geojson = open(out_json, \"w\")\n geojson.write(dumps({\"type\": \"FeatureCollection\",\n \"features\": buffer}, indent=2) + \"\\n\")\n geojson.close()\n\n with open(out_json) as f:\n json_data = json.load(f)\n\n return json_data\n\n except Exception as e:\n print(e)\n\n\ndef shp_to_ee(in_shp):\n \"\"\"Converts a shapefile to Earth Engine objects.\n\n Args:\n in_shp (str): File path to a shapefile.\n\n Returns:\n object: Earth Engine objects representing the shapefile.\n \"\"\"\n # ee_initialize()\n try:\n json_data = shp_to_geojson(in_shp)\n ee_object = geojson_to_ee(json_data)\n return ee_object\n except Exception as e:\n print(e)\n\n\ndef filter_polygons(ftr):\n \"\"\"Converts GeometryCollection to Polygon/MultiPolygon\n\n Args:\n ftr (object): ee.Feature\n\n Returns:\n object: ee.Feature\n \"\"\"\n # ee_initialize()\n geometries = ftr.geometry().geometries()\n geometries = geometries.map(lambda geo: ee.Feature(\n ee.Geometry(geo)).set('geoType', ee.Geometry(geo).type()))\n\n polygons = ee.FeatureCollection(geometries).filter(\n ee.Filter.eq('geoType', 'Polygon')).geometry()\n return ee.Feature(polygons).copyProperties(ftr)\n\n\ndef ee_export_vector(ee_object, filename, selectors=None):\n \"\"\"Exports Earth Engine FeatureCollection to other formats, including shp, csv, json, kml, and kmz.\n\n Args:\n ee_object (object): ee.FeatureCollection to export.\n filename (str): Output file name.\n selectors (list, optional): A list of attributes to export. Defaults to None.\n \"\"\"\n import requests\n import zipfile\n # ee_initialize()\n\n if not isinstance(ee_object, ee.FeatureCollection):\n raise ValueError('ee_object must be an ee.FeatureCollection')\n\n allowed_formats = ['csv', 'geojson', 'kml', 'kmz', 'shp']\n # allowed_formats = ['csv', 'kml', 'kmz']\n filename = os.path.abspath(filename)\n basename = os.path.basename(filename)\n name = os.path.splitext(basename)[0]\n filetype = os.path.splitext(basename)[1][1:].lower()\n\n if filetype == 'shp':\n filename = filename.replace('.shp', '.zip')\n\n if not (filetype.lower() in allowed_formats):\n print('The file type must be one of the following: {}'.format(\n ', '.join(allowed_formats)))\n print('Earth Engine no longer supports downloading featureCollection as shapefile or json. 
\nPlease use geemap.ee_export_vector_to_drive() to export featureCollection to Google Drive.')\n raise ValueError\n\n if selectors is None:\n selectors = ee_object.first().propertyNames().getInfo()\n if filetype == 'csv':\n # remove .geo coordinate field\n ee_object = ee_object.select([\".*\"], None, False)\n\n if filetype == 'geojson':\n selectors = ['.geo'] + selectors\n\n elif not isinstance(selectors, list):\n raise ValueError(\n \"selectors must be a list, such as ['attribute1', 'attribute2']\")\n else:\n allowed_attributes = ee_object.first().propertyNames().getInfo()\n for attribute in selectors:\n if not (attribute in allowed_attributes):\n raise ValueError('Attributes must be one chosen from: {} '.format(\n ', '.join(allowed_attributes)))\n\n try:\n print('Generating URL ...')\n url = ee_object.getDownloadURL(\n filetype=filetype, selectors=selectors, filename=name)\n print('Downloading data from {}\\nPlease wait ...'.format(url))\n r = requests.get(url, stream=True)\n\n if r.status_code != 200:\n print('An error occurred while downloading. \\n Retrying ...')\n try:\n new_ee_object = ee_object.map(filter_polygons)\n print('Generating URL ...')\n url = new_ee_object.getDownloadURL(\n filetype=filetype, selectors=selectors, filename=name)\n print('Downloading data from {}\\nPlease wait ...'.format(url))\n r = requests.get(url, stream=True)\n except Exception as e:\n print(e)\n raise ValueError\n\n with open(filename, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=1024):\n fd.write(chunk)\n except Exception as e:\n print('An error occurred while downloading.')\n raise ValueError(e)\n\n try:\n if filetype == 'shp':\n z = zipfile.ZipFile(filename)\n z.extractall(os.path.dirname(filename))\n os.remove(filename)\n filename = filename.replace('.zip', '.shp')\n\n print('Data downloaded to {}'.format(filename))\n except Exception as e:\n raise ValueError(e)\n\n\ndef ee_export_vector_to_drive(ee_object, description, folder, file_format='shp', selectors=None):\n \"\"\"Exports an Earth Engine FeatureCollection to Google Drive in one of several formats, including shp, csv, json, kml, and kmz.\n\n Args:\n ee_object (object): ee.FeatureCollection to export.\n description (str): File name of the output file.\n folder (str): Folder name within Google Drive to save the exported file.\n file_format (str, optional): Supported file formats include shp, csv, json, kml, kmz, and TFRecord. Defaults to 'shp'.\n selectors (list, optional): The list of attributes to export. Defaults to None.\n \"\"\"\n if not isinstance(ee_object, ee.FeatureCollection):\n print('The ee_object must be an ee.FeatureCollection.')\n return\n\n allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp', 'tfrecord']\n if not (file_format.lower() in allowed_formats):\n print('The file type must be one of the following: {}'.format(\n ', '.join(allowed_formats)))\n return\n\n task_config = {\n 'folder': folder,\n 'fileFormat': file_format,\n }\n\n if selectors is not None:\n task_config['selectors'] = selectors\n elif (selectors is None) and (file_format.lower() == 'csv'):\n # remove .geo coordinate field\n ee_object = ee_object.select([\".*\"], None, False)\n\n print('Exporting {}...'.format(description))\n task = ee.batch.Export.table.toDrive(ee_object, description, **task_config)\n task.start()
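\n\n\n# Example (a minimal, hypothetical sketch; assumes ee.Initialize() has been called and you\n# are signed in to the Google account that owns the Drive folder; names are placeholders):\n#\n# fc = ee.FeatureCollection('TIGER/2018/States')\n# ee_export_vector_to_drive(fc, description='us_states', folder='export', file_format='shp')\n\n\ndef ee_export_geojson(ee_object, filename=None, selectors=None):\n \"\"\"Exports Earth Engine FeatureCollection to geojson.\n\n Args:\n ee_object (object): ee.FeatureCollection to export.\n filename (str, optional): Output file name. 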
Defaults to None.\n selectors (list, optional): A list of attributes to export. Defaults to None.\n \"\"\"\n import requests\n import zipfile\n # ee_initialize()\n\n if not isinstance(ee_object, ee.FeatureCollection):\n print('The ee_object must be an ee.FeatureCollection.')\n return\n\n if filename is None:\n out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')\n filename = os.path.join(out_dir, random_string(6) + '.geojson')\n\n allowed_formats = ['geojson']\n filename = os.path.abspath(filename)\n basename = os.path.basename(filename)\n name = os.path.splitext(basename)[0]\n filetype = os.path.splitext(basename)[1][1:].lower()\n\n if not (filetype.lower() in allowed_formats):\n print('The output file type must be geojson.')\n return\n\n if selectors is None:\n selectors = ee_object.first().propertyNames().getInfo()\n selectors = ['.geo'] + selectors\n\n elif not isinstance(selectors, list):\n print(\"selectors must be a list, such as ['attribute1', 'attribute2']\")\n return\n else:\n allowed_attributes = ee_object.first().propertyNames().getInfo()\n for attribute in selectors:\n if not (attribute in allowed_attributes):\n print('Attributes must be one chosen from: {} '.format(\n ', '.join(allowed_attributes)))\n return\n\n try:\n # print('Generating URL ...')\n url = ee_object.getDownloadURL(\n filetype=filetype, selectors=selectors, filename=name)\n # print('Downloading data from {}\\nPlease wait ...'.format(url))\n r = requests.get(url, stream=True)\n\n if r.status_code != 200:\n print('An error occurred while downloading. \\n Retrying ...')\n try:\n new_ee_object = ee_object.map(filter_polygons)\n print('Generating URL ...')\n url = new_ee_object.getDownloadURL(\n filetype=filetype, selectors=selectors, filename=name)\n print('Downloading data from {}\\nPlease wait ...'.format(url))\n r = requests.get(url, stream=True)\n except Exception as e:\n print(e)\n\n with open(filename, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=1024):\n fd.write(chunk)\n except Exception as e:\n print('An error occurred while downloading.')\n print(e)\n return\n\n with open(filename) as f:\n geojson = f.read()\n\n return geojson\n\n\ndef ee_to_shp(ee_object, filename, selectors=None):\n \"\"\"Downloads an ee.FeatureCollection as a shapefile.\n\n Args:\n ee_object (object): ee.FeatureCollection\n filename (str): The output filepath of the shapefile.\n selectors (list, optional): A list of attributes to export. Defaults to None.\n \"\"\"\n # ee_initialize()\n try:\n if filename.lower().endswith('.shp'):\n ee_export_vector(ee_object=ee_object,\n filename=filename, selectors=selectors)\n else:\n print('The filename must end with .shp')\n\n except Exception as e:\n print(e)\n\n\ndef ee_to_csv(ee_object, filename, selectors=None):\n \"\"\"Downloads an ee.FeatureCollection as a CSV file.\n\n Args:\n ee_object (object): ee.FeatureCollection\n filename (str): The output filepath of the CSV file.\n selectors (list, optional): A list of attributes to export. 
Defaults to None.\n \"\"\"\n # ee_initialize()\n try:\n if filename.lower().endswith('.csv'):\n ee_export_vector(ee_object=ee_object,\n filename=filename, selectors=selectors)\n else:\n print('The filename must end with .csv')\n\n except Exception as e:\n print(e)\n\n\ndef ee_export_image(ee_object, filename, scale=None, crs=None, region=None, file_per_band=False):\n \"\"\"Exports an ee.Image as a GeoTIFF.\n\n Args:\n ee_object (object): The ee.Image to download.\n filename (str): Output filename for the exported image.\n scale (float, optional): A default scale to use for any bands that do not specify one; ignored if crs and crs_transform are specified. Defaults to None.\n crs (str, optional): A default CRS string to use for any bands that do not explicitly specify one. Defaults to None.\n region (object, optional): A polygon specifying a region to download; ignored if crs and crs_transform are specified. Defaults to None.\n file_per_band (bool, optional): Whether to produce a different GeoTIFF per band. Defaults to False.\n \"\"\"\n import requests\n import zipfile\n # ee_initialize()\n\n if not isinstance(ee_object, ee.Image):\n print('The ee_object must be an ee.Image.')\n return\n\n filename = os.path.abspath(filename)\n basename = os.path.basename(filename)\n name = os.path.splitext(basename)[0]\n filetype = os.path.splitext(basename)[1][1:].lower()\n filename_zip = filename.replace('.tif', '.zip')\n\n if filetype != 'tif':\n print('The filename must end with .tif')\n return\n\n try:\n print('Generating URL ...')\n params = {'name': name, 'filePerBand': file_per_band}\n if scale is None:\n scale = ee_object.projection().nominalScale().multiply(10)\n params['scale'] = scale\n if region is None:\n region = ee_object.geometry()\n params['region'] = region\n if crs is not None:\n params['crs'] = crs\n\n url = ee_object.getDownloadURL(params)\n print('Downloading data from {}\\nPlease wait ...'.format(url))\n r = requests.get(url, stream=True)\n\n if r.status_code != 200:\n print('An error occurred while downloading.')\n return\n\n with open(filename_zip, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=1024):\n fd.write(chunk)\n\n except Exception as e:\n print('An error occurred while downloading.')\n print(e)\n return\n\n try:\n z = zipfile.ZipFile(filename_zip)\n z.extractall(os.path.dirname(filename))\n z.close()\n os.remove(filename_zip)\n\n if file_per_band:\n print('Data downloaded to {}'.format(os.path.dirname(filename)))\n else:\n print('Data downloaded to {}'.format(filename))\n except Exception as e:\n print(e)
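\n\n\n# Example (a minimal, hypothetical sketch; assumes ee.Initialize() has been called.\n# SRTM is a real public asset, but the region and output path are placeholders):\n#\n# dem = ee.Image('USGS/SRTMGL1_003')\n# region = ee.Geometry.Rectangle([-115.47, 35.89, -114.27, 36.41])\n# ee_export_image(dem, filename='dem.tif', scale=90, region=region)\n\n\ndef ee_export_image_collection(ee_object, out_dir, scale=None, crs=None, region=None, file_per_band=False):\n \"\"\"Exports an ImageCollection as GeoTIFFs.\n\n Args:\n ee_object (object): The ee.ImageCollection to download.\n out_dir (str): The output directory for the exported images.\n scale (float, optional): A default scale to use for any bands that do not specify one; ignored if crs and crs_transform are specified. Defaults to None.\n crs (str, optional): A default CRS string to use for any bands that do not explicitly specify one. Defaults to None.\n region (object, optional): A polygon specifying a region to download; ignored if crs and crs_transform are specified. Defaults to None.\n file_per_band (bool, optional): Whether to produce a different GeoTIFF per band. 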
Defaults to False.\n \"\"\"\n\n import requests\n import zipfile\n # ee_initialize()\n\n if not isinstance(ee_object, ee.ImageCollection):\n print('The ee_object must be an ee.ImageCollection.')\n return\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n try:\n\n count = int(ee_object.size().getInfo())\n print(\"Total number of images: {}\\n\".format(count))\n\n for i in range(0, count):\n image = ee.Image(ee_object.toList(count).get(i))\n name = image.get('system:index').getInfo() + '.tif'\n filename = os.path.join(os.path.abspath(out_dir), name)\n print('Exporting {}/{}: {}'.format(i+1, count, name))\n ee_export_image(image, filename=filename, scale=scale,\n crs=crs, region=region, file_per_band=file_per_band)\n print('\\n')\n\n except Exception as e:\n print(e)\n\n\ndef ee_export_image_to_drive(ee_object, description, folder=None, region=None, scale=None, crs=None, max_pixels=1.0E13, file_format='GeoTIFF'):\n \"\"\"Creates a batch task to export an Image as a raster to Google Drive.\n\n Args:\n ee_object (object): The image to export.\n description (str): A human-readable name of the task. \n folder (str, optional): The Google Drive Folder that the export will reside in. Defaults to None.\n region (object, optional): A LinearRing, Polygon, or coordinates representing region to export. These may be specified as the Geometry objects or coordinates serialized as a string. If not specified, the region defaults to the viewport at the time of invocation. Defaults to None.\n scale (float, optional): Resolution in meters per pixel. Defaults to 10 times of the image resolution.\n crs (str, optional): CRS to use for the exported image.. Defaults to None.\n max_pixels (int, optional): Restrict the number of pixels in the export. Defaults to 1.0E13.\n file_format (str, optional): The string file format to which the image is exported. Currently only 'GeoTIFF' and 'TFRecord' are supported. Defaults to 'GeoTIFF'.\n \"\"\"\n # ee_initialize()\n\n if not isinstance(ee_object, ee.Image):\n print('The ee_object must be an ee.Image.')\n return\n\n try:\n params = {}\n\n if folder is not None:\n params['driveFolder'] = folder\n if region is not None:\n params['region'] = region\n if scale is None:\n scale = ee_object.projection().nominalScale().multiply(10)\n params['scale'] = scale\n if crs is not None:\n params['crs'] = crs\n params['maxPixels'] = max_pixels\n params['fileFormat'] = file_format\n\n task = ee.batch.Export.image(ee_object, description, params)\n task.start()\n\n print('Exporting {} ...'.format(description))\n\n except Exception as e:\n print(e)\n\n\ndef ee_export_image_collection_to_drive(ee_object, descriptions=None, folder=None, region=None, scale=None, crs=None, max_pixels=1.0E13, file_format='GeoTIFF'):\n \"\"\"Creates a batch task to export an ImageCollection as raster images to Google Drive.\n\n Args:\n ee_object (object): The image to export.\n descriptions (list): A list of human-readable names of the tasks. \n folder (str, optional): The Google Drive Folder that the export will reside in. Defaults to None.\n region (object, optional): A LinearRing, Polygon, or coordinates representing region to export. These may be specified as the Geometry objects or coordinates serialized as a string. If not specified, the region defaults to the viewport at the time of invocation. Defaults to None.\n scale (float, optional): Resolution in meters per pixel. Defaults to 10 times of the image resolution.\n crs (str, optional): CRS to use for the exported image.. 
Defaults to None.\n max_pixels (int, optional): Restrict the number of pixels in the export. Defaults to 1.0E13.\n file_format (str, optional): The string file format to which the image is exported. Currently only 'GeoTIFF' and 'TFRecord' are supported. Defaults to 'GeoTIFF'.\n \"\"\"\n # ee_initialize()\n\n if not isinstance(ee_object, ee.ImageCollection):\n print('The ee_object must be an ee.ImageCollection.')\n return\n\n try:\n count = int(ee_object.size().getInfo())\n print(\"Total number of images: {}\\n\".format(count))\n\n if (descriptions is not None) and (len(descriptions) != count):\n print('The number of descriptions is not equal to the number of images.')\n return\n\n if descriptions is None:\n descriptions = ee_object.aggregate_array('system:index').getInfo()\n\n images = ee_object.toList(count)\n\n for i in range(0, count):\n image = ee.Image(images.get(i))\n name = descriptions[i]\n ee_export_image_to_drive(\n image, name, folder, region, scale, crs, max_pixels, file_format)\n\n except Exception as e:\n print(e)\n\n\ndef ee_to_numpy(ee_object, bands=None, region=None, properties=None, default_value=None):\n \"\"\"Extracts a rectangular region of pixels from an image into a 2D numpy array per band.\n\n Args:\n ee_object (object): The image to sample.\n bands (list, optional): The list of band names to extract. Please make sure that all bands have the same spatial resolution. Defaults to None. \n region (object, optional): The region whose projected bounding box is used to sample the image. The maximum number of pixels you can export is 262,144. Resampling and reprojecting all bands to a fixed scale can be useful. Defaults to the footprint in each band.\n properties (list, optional): The properties to copy over from the sampled image. Defaults to all non-system properties.\n default_value (float, optional): A default value used when a sampled pixel is masked or outside a band's footprint. 
Defaults to None.\n\n Returns:\n array: A 3D numpy array.\n \"\"\"\n import numpy as np\n if not isinstance(ee_object, ee.Image):\n print('The input must be an ee.Image.')\n return\n\n if region is None:\n region = ee_object.geometry()\n\n try:\n\n if bands is not None:\n ee_object = ee_object.select(bands)\n else:\n bands = ee_object.bandNames().getInfo()\n\n band_count = len(bands)\n band_arrs = ee_object.sampleRectangle(\n region=region, properties=properties, defaultValue=default_value)\n band_values = []\n\n for band in bands:\n band_arr = band_arrs.get(band).getInfo()\n band_value = np.array(band_arr)\n band_values.append(band_value)\n\n image = np.dstack(band_values)\n return image\n\n except Exception as e:\n print(e)\n\n\ndef download_ee_video(collection, video_args, out_gif):\n \"\"\"Downloads a video thumbnail as a GIF image from Earth Engine.\n\n Args:\n collection (object): An ee.ImageCollection.\n video_args (object): Parameters for expring the video thumbnail.\n out_gif (str): File path to the output GIF.\n \"\"\"\n import requests\n\n out_gif = os.path.abspath(out_gif)\n if not out_gif.endswith(\".gif\"):\n print('The output file must have an extension of .gif.')\n return\n\n if not os.path.exists(os.path.dirname(out_gif)):\n os.makedirs(os.path.dirname(out_gif))\n\n if 'region' in video_args.keys():\n roi = video_args['region']\n\n if not isinstance(roi, ee.Geometry):\n\n try:\n roi = roi.geometry()\n except Exception as e:\n print('Could not convert the provided roi to ee.Geometry')\n print(e)\n return\n\n video_args['region'] = roi\n\n try:\n print('Generating URL...')\n url = collection.getVideoThumbURL(video_args)\n\n print('Downloading GIF image from {}\\nPlease wait ...'.format(url))\n r = requests.get(url, stream=True)\n\n if r.status_code != 200:\n print('An error occurred while downloading.')\n return\n else:\n with open(out_gif, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=1024):\n fd.write(chunk)\n print('The GIF image has been saved to: {}'.format(out_gif))\n except Exception as e:\n print(e)\n\n\ndef zonal_statistics(in_value_raster, in_zone_vector, out_file_path, statistics_type='MEAN', scale=None, crs=None, tile_scale=1.0, **kwargs):\n \"\"\"Summarizes the values of a raster within the zones of another dataset and exports the results as a csv, shp, json, kml, or kmz.\n\n Args:\n in_value_raster (object): An ee.Image that contains the values on which to calculate a statistic.\n in_zone_vector (object): An ee.FeatureCollection that defines the zones.\n out_file_path (str): Output file path that will contain the summary of the values in each zone. The file type can be: csv, shp, json, kml, kmz\n statistics_type (str, optional): Statistic type to be calculated. Defaults to 'MEAN'. For 'HIST', you can provide three parameters: max_buckets, min_bucket_width, and max_raw. For 'FIXED_HIST', you must provide three parameters: hist_min, hist_max, and hist_steps.\n scale (float, optional): A nominal scale in meters of the projection to work in. Defaults to None.\n crs (str, optional): The projection to work in. If unspecified, the projection of the image's first band is used. If specified in addition to scale, rescaled to the specified scale. Defaults to None.\n tile_scale (float, optional): A scaling factor used to reduce aggregation tile size; using a larger tileScale (e.g. 2 or 4) may enable computations that run out of memory with the default. 
Defaults to 1.0.\n \"\"\"\n\n if not isinstance(in_value_raster, ee.Image):\n print('The input raster must be an ee.Image.')\n return\n\n if not isinstance(in_zone_vector, ee.FeatureCollection):\n print('The input zone data must be an ee.FeatureCollection.')\n return\n\n allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']\n filename = os.path.abspath(out_file_path)\n basename = os.path.basename(filename)\n name = os.path.splitext(basename)[0]\n filetype = os.path.splitext(basename)[1][1:].lower()\n\n if not (filetype in allowed_formats):\n print('The file type must be one of the following: {}'.format(\n ', '.join(allowed_formats)))\n return\n\n # Parameters for histogram\n # The maximum number of buckets to use when building a histogram; will be rounded up to a power of 2.\n max_buckets = None\n # The minimum histogram bucket width, or null to allow any power of 2.\n min_bucket_width = None\n # The number of values to accumulate before building the initial histogram.\n max_raw = None\n hist_min = 1.0 # The lower (inclusive) bound of the first bucket.\n hist_max = 100.0 # The upper (exclusive) bound of the last bucket.\n hist_steps = 10 # The number of buckets to use.\n\n if 'max_buckets' in kwargs.keys():\n max_buckets = kwargs['max_buckets']\n if 'min_bucket_width' in kwargs.keys():\n min_bucket_width = kwargs['min_bucket_width']\n if 'max_raw' in kwargs.keys():\n max_raw = kwargs['max_raw']\n\n if statistics_type.upper() == 'FIXED_HIST' and ('hist_min' in kwargs.keys()) and ('hist_max' in kwargs.keys()) and ('hist_steps' in kwargs.keys()):\n hist_min = kwargs['hist_min']\n hist_max = kwargs['hist_max']\n hist_steps = kwargs['hist_steps']\n elif statistics_type.upper() == 'FIXED_HIST':\n print('To use fixedHistogram, please provide these three parameters: hist_min, hist_max, and hist_steps.')\n return\n\n allowed_statistics = {\n 'MEAN': ee.Reducer.mean(),\n 'MAXIMUM': ee.Reducer.max(),\n 'MEDIAN': ee.Reducer.median(),\n 'MINIMUM': ee.Reducer.min(),\n 'STD': ee.Reducer.stdDev(),\n 'MIN_MAX': ee.Reducer.minMax(),\n 'SUM': ee.Reducer.sum(),\n 'VARIANCE': ee.Reducer.variance(),\n 'HIST': ee.Reducer.histogram(maxBuckets=max_buckets, minBucketWidth=min_bucket_width, maxRaw=max_raw),\n 'FIXED_HIST': ee.Reducer.fixedHistogram(hist_min, hist_max, hist_steps)\n }\n\n if not (statistics_type.upper() in allowed_statistics.keys()):\n print('The statistics type must be one of the following: {}'.format(\n ', '.join(list(allowed_statistics.keys()))))\n return\n\n if scale is None:\n scale = in_value_raster.projection().nominalScale().multiply(10)\n\n try:\n print('Computing statistics ...')\n result = in_value_raster.reduceRegions(\n collection=in_zone_vector, reducer=allowed_statistics[statistics_type], scale=scale, crs=crs, tileScale=tile_scale)\n ee_export_vector(result, filename)\n except Exception as e:\n print(e)
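\n\n\n# Example (a minimal, hypothetical sketch; assumes ee.Initialize() has been called.\n# SRTM and the TIGER states table are real public assets; the output path is a placeholder):\n#\n# dem = ee.Image('USGS/SRTMGL1_003')\n# states = ee.FeatureCollection('TIGER/2018/States')\n# zonal_statistics(dem, states, 'mean_elevation.csv', statistics_type='MEAN', scale=1000)\n\n\ndef zonal_statistics_by_group(in_value_raster, in_zone_vector, out_file_path, statistics_type='SUM', decimal_places=0, denominator=1.0, scale=None, crs=None, tile_scale=1.0):\n \"\"\"Summarizes the area or percentage of a raster by group within the zones of another dataset and exports the results as a csv, shp, json, kml, or kmz.\n\n Args:\n in_value_raster (object): An integer Image that contains the values on which to calculate area/percentage.\n in_zone_vector (object): An ee.FeatureCollection that defines the zones.\n out_file_path (str): Output file path that will contain the summary of the values in each zone. 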
The file type can be: csv, shp, json, kml, kmz\n statistics_type (str, optional): Can be either 'SUM' or 'PERCENTAGE' . Defaults to 'SUM'.\n decimal_places (int, optional): The number of decimal places to use. Defaults to 0.\n denominator (float, optional): To covert area units (e.g., from square meters to square kilometers). Defaults to 1.0.\n scale (float, optional): A nominal scale in meters of the projection to work in. Defaults to None.\n crs (str, optional): The projection to work in. If unspecified, the projection of the image's first band is used. If specified in addition to scale, rescaled to the specified scale. Defaults to None.\n tile_scale (float, optional): A scaling factor used to reduce aggregation tile size; using a larger tileScale (e.g. 2 or 4) may enable computations that run out of memory with the default. Defaults to 1.0.\n\n \"\"\"\n if not isinstance(in_value_raster, ee.Image):\n print('The input raster must be an ee.Image.')\n return\n\n band_count = in_value_raster.bandNames().size().getInfo()\n\n band_name = ''\n if band_count == 1:\n band_name = in_value_raster.bandNames().get(0)\n else:\n print('The input image can only have one band.')\n return\n\n band_types = in_value_raster.bandTypes().get(band_name).getInfo()\n band_type = band_types.get('precision')\n if band_type != 'int':\n print('The input image band must be integer type.')\n return\n\n if not isinstance(in_zone_vector, ee.FeatureCollection):\n print('The input zone data must be an ee.FeatureCollection.')\n return\n\n allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']\n filename = os.path.abspath(out_file_path)\n basename = os.path.basename(filename)\n name = os.path.splitext(basename)[0]\n filetype = os.path.splitext(basename)[1][1:]\n\n if not (filetype.lower() in allowed_formats):\n print('The file type must be one of the following: {}'.format(\n ', '.join(allowed_formats)))\n return\n\n out_dir = os.path.dirname(filename)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n allowed_statistics = ['SUM', 'PERCENTAGE']\n if not (statistics_type.upper() in allowed_statistics):\n print('The statistics type can only be one of {}'.format(\n ', '.join(allowed_statistics)))\n return\n\n if scale is None:\n scale = in_value_raster.projection().nominalScale().multiply(10)\n\n try:\n\n print('Computing ... 
')\n geometry = in_zone_vector.geometry()\n\n hist = in_value_raster.reduceRegion(ee.Reducer.frequencyHistogram(\n ), geometry=geometry, bestEffort=True, scale=scale)\n class_values = ee.Dictionary(hist.get(band_name)).keys().map(\n lambda v: ee.Number.parse(v)).sort()\n\n class_names = class_values.map(\n lambda c: ee.String('Class_').cat(ee.Number(c).format()))\n\n class_count = class_values.size().getInfo()\n dataset = ee.Image.pixelArea().divide(denominator).addBands(in_value_raster)\n\n init_result = dataset.reduceRegions(**{\n 'collection': in_zone_vector,\n 'reducer': ee.Reducer.sum().group(**{\n 'groupField': 1,\n 'groupName': 'group',\n }),\n 'scale': scale\n })\n\n def build_dict(input_list):\n\n decimal_format = '%.{}f'.format(decimal_places)\n in_dict = input_list.map(lambda x: ee.Dictionary().set(ee.String('Class_').cat(\n ee.Number(ee.Dictionary(x).get('group')).format()), ee.Number.parse(ee.Number(ee.Dictionary(x).get('sum')).format(decimal_format))))\n return in_dict\n\n def get_keys(input_list):\n return input_list.map(lambda x: ee.String('Class_').cat(ee.Number(ee.Dictionary(x).get('group')).format()))\n\n def get_values(input_list):\n decimal_format = '%.{}f'.format(decimal_places)\n return input_list.map(lambda x: ee.Number.parse(ee.Number(ee.Dictionary(x).get('sum')).format(decimal_format)))\n\n def set_attribute(f):\n groups = ee.List(f.get('groups'))\n keys = get_keys(groups)\n values = get_values(groups)\n total_area = ee.List(values).reduce(ee.Reducer.sum())\n\n def get_class_values(x):\n cls_value = ee.Algorithms.If(\n keys.contains(x), values.get(keys.indexOf(x)), 0)\n cls_value = ee.Algorithms.If(ee.String(statistics_type).compareTo(ee.String(\n 'SUM')), ee.Number(cls_value).divide(ee.Number(total_area)), cls_value)\n return cls_value\n\n full_values = class_names.map(lambda x: get_class_values(x))\n attr_dict = ee.Dictionary.fromLists(class_names, full_values)\n attr_dict = attr_dict.set('Class_sum', total_area)\n\n return f.set(attr_dict).set('groups', None)\n\n final_result = init_result.map(set_attribute)\n ee_export_vector(final_result, filename)\n\n except Exception as e:\n print(e)\n\n\ndef create_colorbar(width=150, height=30, palette=['blue', 'green', 'red'], add_ticks=True, add_labels=True, labels=None, vertical=False, out_file=None, font_type='arial.ttf', font_size=12, font_color='black', add_outline=True, outline_color='black'):\n \"\"\"Creates a colorbar based on the provided palette.\n\n Args:\n width (int, optional): Width of the colorbar in pixels. Defaults to 150.\n height (int, optional): Height of the colorbar in pixels. Defaults to 30.\n palette (list, optional): Palette for the colorbar. Each color can be provided as a string (e.g., 'red'), a hex string (e.g., '#ff0000'), or an RGB tuple (255, 0, 255). Defaults to ['blue', 'green', 'red'].\n add_ticks (bool, optional): Whether to add tick markers to the colorbar. Defaults to True.\n add_labels (bool, optional): Whether to add labels to the colorbar. Defaults to True.\n labels (list, optional): A list of labels to add to the colorbar. Defaults to None.\n vertical (bool, optional): Whether to rotate the colorbar vertically. Defaults to False.\n out_file (str, optional): File path to the output colorbar in png format. Defaults to None.\n font_type (str, optional): Font type to use for labels. Defaults to 'arial.ttf'.\n font_size (int, optional): Font size to use for labels. Defaults to 12.\n font_color (str, optional): Font color to use for labels. 
Defaults to 'black'.\n add_outline (bool, optional): Whether to add an outline to the colorbar. Defaults to True.\n outline_color (str, optional): Color for the outline of the colorbar. Defaults to 'black'.\n\n Returns:\n str: File path of the output colorbar in png format.\n\n \"\"\"\n import decimal\n import io\n import pkg_resources\n import warnings\n from colour import Color\n from PIL import Image, ImageDraw, ImageFont\n\n warnings.simplefilter('ignore')\n pkg_dir = os.path.dirname(\n pkg_resources.resource_filename(\"geemap\", \"geemap.py\"))\n\n if out_file is None:\n filename = 'colorbar_' + random_string() + '.png'\n out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')\n out_file = os.path.join(out_dir, filename)\n elif not out_file.endswith('.png'):\n print('The output file must end with .png')\n return\n else:\n out_file = os.path.abspath(out_file)\n\n if not os.path.exists(os.path.dirname(out_file)):\n os.makedirs(os.path.dirname(out_file))\n\n im = Image.new('RGBA', (width, height))\n ld = im.load()\n\n def float_range(start, stop, step):\n while start < stop:\n yield float(start)\n start += decimal.Decimal(step)\n\n n_colors = len(palette)\n decimal_places = 2\n rgb_colors = [Color(check_color(c)).rgb for c in palette]\n keys = [round(c, decimal_places)\n for c in list(float_range(0, 1.0001, 1.0/(n_colors - 1)))]\n\n heatmap = []\n for index, item in enumerate(keys):\n pair = [item, rgb_colors[index]]\n heatmap.append(pair)\n\n def gaussian(x, a, b, c, d=0):\n return a * math.exp(-(x - b)**2 / (2 * c**2)) + d\n\n def pixel(x, width=100, map=[], spread=1):\n width = float(width)\n r = sum([gaussian(x, p[1][0], p[0] * width, width/(spread*len(map)))\n for p in map])\n g = sum([gaussian(x, p[1][1], p[0] * width, width/(spread*len(map)))\n for p in map])\n b = sum([gaussian(x, p[1][2], p[0] * width, width/(spread*len(map)))\n for p in map])\n return min(1.0, r), min(1.0, g), min(1.0, b)\n\n for x in range(im.size[0]):\n r, g, b = pixel(x, width=width, map=heatmap)\n r, g, b = [int(256*v) for v in (r, g, b)]\n for y in range(im.size[1]):\n ld[x, y] = r, g, b\n\n if add_outline:\n draw = ImageDraw.Draw(im)\n draw.rectangle([(0, 0), (width-1, height-1)],\n outline=check_color(outline_color))\n del draw\n\n if add_ticks:\n tick_length = height * 0.1\n x = [key * width for key in keys]\n y_top = height - tick_length\n y_bottom = height\n draw = ImageDraw.Draw(im)\n for i in x:\n shape = [(i, y_top), (i, y_bottom)]\n draw.line(shape, fill='black', width=0)\n del draw\n\n if vertical:\n im = im.transpose(Image.ROTATE_90)\n\n width, height = im.size\n\n if labels is None:\n labels = [str(c) for c in keys]\n elif len(labels) == 2:\n try:\n lowerbound = float(labels[0])\n upperbound = float(labels[1])\n step = (upperbound - lowerbound) / (len(palette) - 1)\n labels = [str(lowerbound + c * step)\n for c in range(0, len(palette))]\n except Exception as e:\n print(e)\n print('The labels are invalid.')\n return\n elif len(labels) == len(palette):\n labels = [str(c) for c in labels]\n else:\n print('The labels must have the same length as the palette.')\n return\n\n if add_labels:\n\n default_font = os.path.join(pkg_dir, 'data/fonts/arial.ttf')\n if font_type == 'arial.ttf':\n font = ImageFont.truetype(default_font, font_size)\n else:\n try:\n font_list = system_fonts(show_full_path=True)\n font_names = [os.path.basename(f) for f in font_list]\n if (font_type in font_list) or (font_type in font_names):\n font = ImageFont.truetype(font_type, font_size)\n else:\n print(\n 'The specified 
font type could not be found on your system. Using the default font instead.')\n font = ImageFont.truetype(default_font, font_size)\n except Exception as e:\n print(e)\n font = ImageFont.truetype(default_font, font_size)\n\n font_color = check_color(font_color)\n\n draw = ImageDraw.Draw(im)\n w, h = draw.textsize(labels[0], font=font)\n\n for label in labels:\n w_tmp, h_tmp = draw.textsize(label, font)\n if w_tmp > w:\n w = w_tmp\n if h_tmp > h:\n h = h_tmp\n\n W, H = width + w * 2, height + h * 2\n background = Image.new('RGBA', (W, H))\n draw = ImageDraw.Draw(background)\n\n if vertical:\n xy = (0, h)\n else:\n xy = (w, 0)\n background.paste(im, xy, im)\n\n for index, label in enumerate(labels):\n\n w_tmp, h_tmp = draw.textsize(label, font)\n\n if vertical:\n spacing = 5\n x = width + spacing\n y = int(height + h - keys[index] * height - h_tmp / 2 - 1)\n draw.text((x, y), label, font=font, fill=font_color)\n\n else:\n x = int(keys[index] * width + w - w_tmp / 2)\n spacing = int(h * 0.05)\n y = height + spacing\n draw.text((x, y), label, font=font, fill=font_color)\n\n im = background.copy()\n\n im.save(out_file)\n return out_file\n\n\ndef naip_timeseries(roi=None, start_year=2009, end_year=2018):\n \"\"\"Creates NAIP annual timeseries\n\n Args:\n roi (object, optional): An ee.Geometry representing the region of interest. Defaults to None.\n start_year (int, optional): Starting year for the timeseries. Defaults to2009.\n end_year (int, optional): Ending year for the timeseries. Defaults to 2018.\n\n Returns:\n object: An ee.ImageCollection representing annual NAIP imagery.\n \"\"\"\n # ee_initialize()\n try:\n\n def get_annual_NAIP(year):\n try:\n collection = ee.ImageCollection('USDA/NAIP/DOQQ')\n if roi is not None:\n collection = collection.filterBounds(roi)\n start_date = ee.Date.fromYMD(year, 1, 1)\n end_date = ee.Date.fromYMD(year, 12, 31)\n naip = collection.filterDate(start_date, end_date) \\\n .filter(ee.Filter.listContains(\"system:band_names\", \"N\"))\n naip = ee.Image(ee.ImageCollection(naip).mosaic())\n return naip\n except Exception as e:\n print(e)\n\n years = ee.List.sequence(start_year, end_year)\n collection = years.map(get_annual_NAIP)\n return collection\n\n except Exception as e:\n print(e)\n\n\ndef sentinel2_timeseries(roi=None, start_year=2015, end_year=2019, start_date='01-01', end_date='12-31'):\n \"\"\"Generates an annual Sentinel 2 ImageCollection. This algorithm is adapted from https://gist.github.com/jdbcode/76b9ac49faf51627ebd3ff988e10adbc. A huge thank you to Justin Braaten for sharing his fantastic work.\n Images include both level 1C and level 2A imagery.\n Args:\n\n roi (object, optional): Region of interest to create the timelapse. Defaults to None.\n start_year (int, optional): Starting year for the timelapse. Defaults to 2015.\n end_year (int, optional): Ending year for the timelapse. Defaults to 2019.\n start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '01-01'.\n end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. 
Defaults to '12-31'.\n Returns:\n object: Returns an ImageCollection containing annual Sentinel 2 images.\n \"\"\"\n ################################################################################\n\n ################################################################################\n # Input and output parameters.\n import re\n import datetime\n\n # ee_initialize()\n\n if roi is None:\n # roi = ee.Geometry.Polygon(\n # [[[-180, -80],\n # [-180, 80],\n # [180, 80],\n # [180, -80],\n # [-180, -80]]], None, False)\n roi = ee.Geometry.Polygon(\n [[[-115.471773, 35.892718],\n [-115.471773, 36.409454],\n [-114.271283, 36.409454],\n [-114.271283, 35.892718],\n [-115.471773, 35.892718]]], None, False)\n\n if not isinstance(roi, ee.Geometry):\n\n try:\n roi = roi.geometry()\n except Exception as e:\n print('Could not convert the provided roi to ee.Geometry')\n print(e)\n return\n\n # Adjusts longitudes less than -180 degrees or greater than 180 degrees.\n geojson = ee_to_geojson(roi)\n geojson = adjust_longitude(geojson)\n roi = ee.Geometry(geojson)\n\n ################################################################################\n # Setup vars to get dates.\n if isinstance(start_year, int) and (start_year >= 2015) and (start_year <= 2020):\n pass\n else:\n print('The start year must be an integer >= 2015.')\n return\n\n if isinstance(end_year, int) and (end_year >= 2015) and (end_year <= 2020):\n pass\n else:\n print('The end year must be an integer <= 2020.')\n return\n\n if re.match(\"[0-9]{2}\\-[0-9]{2}\", start_date) and re.match(\"[0-9]{2}\\-[0-9]{2}\", end_date):\n pass\n else:\n print('The start data and end date must be month-day, such as 06-10, 09-20')\n return\n\n try:\n datetime.datetime(int(start_year), int(\n start_date[:2]), int(start_date[3:5]))\n datetime.datetime(int(end_year), int(end_date[:2]), int(end_date[3:5]))\n except Exception as e:\n print('The input dates are invalid.')\n print(e)\n return\n\n try:\n start_test = datetime.datetime(int(start_year), int(\n start_date[:2]), int(start_date[3:5]))\n end_test = datetime.datetime(\n int(end_year), int(end_date[:2]), int(end_date[3:5]))\n if start_test > end_test:\n raise ValueError('Start date must be prior to end date')\n except Exception as e:\n print(e)\n return\n\n def days_between(d1, d2):\n d1 = datetime.datetime.strptime(d1, \"%Y-%m-%d\")\n d2 = datetime.datetime.strptime(d2, \"%Y-%m-%d\")\n return abs((d2 - d1).days)\n\n n_days = days_between(str(start_year) + '-' + start_date,\n str(start_year) + '-' + end_date)\n start_month = int(start_date[:2])\n start_day = int(start_date[3:5])\n start_date = str(start_year) + '-' + start_date\n end_date = str(end_year) + '-' + end_date\n\n # Define a collection filter by date, bounds, and quality.\n def colFilter(col, aoi): # , startDate, endDate):\n return(col.filterBounds(aoi))\n\n # Get Sentinel 2 collections, both Level-1C (top of atmophere) and Level-2A (surface reflectance)\n MSILCcol = ee.ImageCollection('COPERNICUS/S2')\n MSI2Acol = ee.ImageCollection('COPERNICUS/S2_SR')\n\n # Define a collection filter by date, bounds, and quality.\n def colFilter(col, roi, start_date, end_date):\n return(col\n .filterBounds(roi)\n .filterDate(start_date, end_date))\n # .filter('CLOUD_COVER < 5')\n # .filter('GEOMETRIC_RMSE_MODEL < 15')\n # .filter('IMAGE_QUALITY == 9 || IMAGE_QUALITY_OLI == 9'))\n\n # Function to get and rename bands of interest from MSI\n def renameMSI(img):\n return(img.select(\n ['B2', 'B3', 'B4', 'B5', 'B6', 'B7',\n 'B8', 'B8A', 'B11', 'B12', 'QA60'],\n ['Blue', 
'Green', 'Red', 'Red Edge 1', 'Red Edge 2', 'Red Edge 3', 'NIR', 'Red Edge 4', 'SWIR1', 'SWIR2', 'QA60']))\n\n # Add NBR for LandTrendr segmentation.\n\n def calcNbr(img):\n return(img.addBands(img.normalizedDifference(['NIR', 'SWIR2'])\n .multiply(-10000).rename('NBR')).int16())\n\n # Define function to mask out clouds and cloud shadows in images.\n # Use CFmask band included in USGS Landsat SR image product.\n\n def fmask(img):\n cloudOpaqueBitMask = 1 << 10\n cloudCirrusBitMask = 1 << 11\n qa = img.select('QA60')\n mask = qa.bitwiseAnd(cloudOpaqueBitMask).eq(0) \\\n .And(qa.bitwiseAnd(cloudCirrusBitMask).eq(0))\n return(img.updateMask(mask))\n\n # Define function to prepare MSI images.\n def prepMSI(img):\n orig = img\n img = renameMSI(img)\n img = fmask(img)\n return(ee.Image(img.copyProperties(orig, orig.propertyNames()))\n .resample('bicubic'))\n\n # Get annual median collection.\n def getAnnualComp(y):\n startDate = ee.Date.fromYMD(\n ee.Number(y), ee.Number(start_month), ee.Number(start_day))\n endDate = startDate.advance(ee.Number(n_days), 'day')\n\n # Filter collections and prepare them for merging.\n MSILCcoly = colFilter(MSILCcol, roi, startDate, endDate).map(prepMSI)\n MSI2Acoly = colFilter(MSI2Acol, roi, startDate, endDate).map(prepMSI)\n\n # Merge the collections.\n col = MSILCcoly.merge(MSI2Acoly)\n\n yearImg = col.median()\n nBands = yearImg.bandNames().size()\n yearImg = ee.Image(ee.Algorithms.If(\n nBands,\n yearImg,\n dummyImg))\n return(calcNbr(yearImg)\n .set({'year': y, 'system:time_start': startDate.millis(), 'nBands': nBands}))\n\n ################################################################################\n\n # Make a dummy image for missing years.\n bandNames = ee.List(['Blue', 'Green', 'Red', 'Red Edge 1',\n 'Red Edge 2', 'Red Edge 3', 'NIR',\n 'Red Edge 4', 'SWIR1', 'SWIR2', 'QA60'])\n fillerValues = ee.List.repeat(0, bandNames.size())\n dummyImg = ee.Image.constant(fillerValues).rename(bandNames) \\\n .selfMask().int16()\n\n ################################################################################\n # Get a list of years\n years = ee.List.sequence(start_year, end_year)\n\n ################################################################################\n # Make list of annual image composites.\n imgList = years.map(getAnnualComp)\n\n # Convert image composite list to collection\n imgCol = ee.ImageCollection.fromImages(imgList)\n\n imgCol = imgCol.map(lambda img: img.clip(roi))\n\n return imgCol\n\n\ndef landsat_timeseries(roi=None, start_year=1984, end_year=2020, start_date='06-10', end_date='09-20', apply_fmask=True):\n \"\"\"Generates an annual Landsat ImageCollection. This algorithm is adapted from https://gist.github.com/jdbcode/76b9ac49faf51627ebd3ff988e10adbc. A huge thank you to Justin Braaten for sharing his fantastic work.\n\n Args:\n roi (object, optional): Region of interest to create the timelapse. Defaults to None.\n start_year (int, optional): Starting year for the timelapse. Defaults to 1984.\n end_year (int, optional): Ending year for the timelapse. Defaults to 2020.\n start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.\n end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. 
Defaults to '09-20'.\n apply_fmask (bool, optional): Whether to apply Fmask (Function of mask) for automated clouds, cloud shadows, snow, and water masking.\n Returns:\n object: Returns an ImageCollection containing annual Landsat images.\n \"\"\"\n\n ################################################################################\n # Input and output parameters.\n import re\n import datetime\n\n if roi is None:\n roi = ee.Geometry.Polygon(\n [[[-115.471773, 35.892718],\n [-115.471773, 36.409454],\n [-114.271283, 36.409454],\n [-114.271283, 35.892718],\n [-115.471773, 35.892718]]], None, False)\n\n if not isinstance(roi, ee.Geometry):\n\n try:\n roi = roi.geometry()\n except Exception as e:\n print('Could not convert the provided roi to ee.Geometry')\n print(e)\n return\n\n ################################################################################\n\n # Setup vars to get dates.\n if isinstance(start_year, int) and (start_year >= 1984) and (start_year < 2020):\n pass\n else:\n print('The start year must be an integer >= 1984.')\n return\n\n if isinstance(end_year, int) and (end_year > 1984) and (end_year <= 2020):\n pass\n else:\n print('The end year must be an integer <= 2020.')\n return\n\n if re.match(\"[0-9]{2}\\-[0-9]{2}\", start_date) and re.match(\"[0-9]{2}\\-[0-9]{2}\", end_date):\n pass\n else:\n print('The start date and end date must be month-day, such as 06-10, 09-20')\n return\n\n try:\n datetime.datetime(int(start_year), int(\n start_date[:2]), int(start_date[3:5]))\n datetime.datetime(int(end_year), int(end_date[:2]), int(end_date[3:5]))\n except Exception as e:\n print('The input dates are invalid.')\n return\n\n def days_between(d1, d2):\n d1 = datetime.datetime.strptime(d1, \"%Y-%m-%d\")\n d2 = datetime.datetime.strptime(d2, \"%Y-%m-%d\")\n return abs((d2 - d1).days)\n\n n_days = days_between(str(start_year) + '-' + start_date,\n str(start_year) + '-' + end_date)\n start_month = int(start_date[:2])\n start_day = int(start_date[3:5])\n start_date = str(start_year) + '-' + start_date\n end_date = str(end_year) + '-' + end_date\n\n # Define a collection filter by date, bounds, and quality.\n def colFilter(col, aoi): # , startDate, endDate):\n return(col.filterBounds(aoi))\n\n # Landsat collection preprocessingEnabled\n # Get Landsat surface reflectance collections for OLI, ETM+ and TM sensors.\n LC08col = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')\n LE07col = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR')\n LT05col = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR')\n LT04col = ee.ImageCollection('LANDSAT/LT04/C01/T1_SR')\n\n # Define a collection filter by date, bounds, and quality.\n def colFilter(col, roi, start_date, end_date):\n return(col\n .filterBounds(roi)\n .filterDate(start_date, end_date))\n # .filter('CLOUD_COVER < 5')\n # .filter('GEOMETRIC_RMSE_MODEL < 15')\n # .filter('IMAGE_QUALITY == 9 || IMAGE_QUALITY_OLI == 9'))\n\n # Function to get and rename bands of interest from OLI.\n def renameOli(img):\n return(img.select(\n ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa'],\n ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']))\n\n # Function to get and rename bands of interest from ETM+.\n def renameEtm(img):\n return(img.select(\n ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa'],\n ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']))\n\n # Add NBR for LandTrendr segmentation.\n def calcNbr(img):\n return(img.addBands(img.normalizedDifference(['NIR', 'SWIR2'])\n .multiply(-10000).rename('NBR')).int16())\n\n # Define function to mask out 
clouds and cloud shadows in images.\n # Use CFmask band included in USGS Landsat SR image product.\n def fmask(img):\n cloudShadowBitMask = 1 << 3\n cloudsBitMask = 1 << 5\n qa = img.select('pixel_qa')\n mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0) \\\n .And(qa.bitwiseAnd(cloudsBitMask).eq(0))\n return(img.updateMask(mask))\n\n # Define function to prepare OLI images.\n def prepOli(img):\n orig = img\n img = renameOli(img)\n if apply_fmask:\n img = fmask(img)\n return (ee.Image(img.copyProperties(orig, orig.propertyNames()))\n .resample('bicubic'))\n\n # Define function to prepare ETM+ images.\n def prepEtm(img):\n orig = img\n img = renameEtm(img)\n if apply_fmask:\n img = fmask(img)\n return(ee.Image(img.copyProperties(orig, orig.propertyNames()))\n .resample('bicubic'))\n\n # Get annual median collection.\n def getAnnualComp(y):\n startDate = ee.Date.fromYMD(\n ee.Number(y), ee.Number(start_month), ee.Number(start_day))\n endDate = startDate.advance(ee.Number(n_days), 'day')\n\n # Filter collections and prepare them for merging.\n LC08coly = colFilter(LC08col, roi, startDate, endDate).map(prepOli)\n LE07coly = colFilter(LE07col, roi, startDate, endDate).map(prepEtm)\n LT05coly = colFilter(LT05col, roi, startDate, endDate).map(prepEtm)\n LT04coly = colFilter(LT04col, roi, startDate, endDate).map(prepEtm)\n\n # Merge the collections.\n col = LC08coly.merge(LE07coly).merge(LT05coly).merge(LT04coly)\n\n yearImg = col.median()\n nBands = yearImg.bandNames().size()\n yearImg = ee.Image(ee.Algorithms.If(\n nBands,\n yearImg,\n dummyImg))\n return(calcNbr(yearImg)\n .set({'year': y, 'system:time_start': startDate.millis(), 'nBands': nBands}))\n\n ################################################################################\n\n # Make a dummy image for missing years.\n bandNames = ee.List(['Blue', 'Green', 'Red', 'NIR',\n 'SWIR1', 'SWIR2', 'pixel_qa'])\n fillerValues = ee.List.repeat(0, bandNames.size())\n dummyImg = ee.Image.constant(fillerValues).rename(bandNames) \\\n .selfMask().int16()\n\n ################################################################################\n # Get a list of years\n years = ee.List.sequence(start_year, end_year)\n\n ################################################################################\n # Make list of annual image composites.\n imgList = years.map(getAnnualComp)\n\n # Convert image composite list to collection\n imgCol = ee.ImageCollection.fromImages(imgList)\n\n imgCol = imgCol.map(lambda img: img.clip(\n roi).set({'coordinates': roi.coordinates()}))\n\n return imgCol\n\n # ################################################################################\n # # Run LandTrendr.\n # lt = ee.Algorithms.TemporalSegmentation.LandTrendr(\n # timeSeries=imgCol.select(['NBR', 'SWIR1', 'NIR', 'Green']),\n # maxSegments=10,\n # spikeThreshold=0.7,\n # vertexCountOvershoot=3,\n # preventOneYearRecovery=True,\n # recoveryThreshold=0.5,\n # pvalThreshold=0.05,\n # bestModelProportion=0.75,\n # minObservationsNeeded=6)\n\n # ################################################################################\n # # Get fitted imagery. 
This starts export tasks.\n # def getYearStr(year):\n # return(ee.String('yr_').cat(ee.Algorithms.String(year).slice(0,4)))\n\n # yearsStr = years.map(getYearStr)\n\n # r = lt.select(['SWIR1_fit']).arrayFlatten([yearsStr]).toShort()\n # g = lt.select(['NIR_fit']).arrayFlatten([yearsStr]).toShort()\n # b = lt.select(['Green_fit']).arrayFlatten([yearsStr]).toShort()\n\n # for i, c in zip([r, g, b], ['r', 'g', 'b']):\n # descr = 'mamore-river-'+c\n # name = 'users/user/'+descr\n # print(name)\n # task = ee.batch.Export.image.toAsset(\n # image=i,\n # region=roi.getInfo()['coordinates'],\n # assetId=name,\n # description=descr,\n # scale=30,\n # crs='EPSG:3857',\n # maxPixels=1e13)\n # task.start()\n\n\ndef landsat_ts_gif(roi=None, out_gif=None, start_year=1984, end_year=2019, start_date='06-10', end_date='09-20', bands=['NIR', 'Red', 'Green'], vis_params=None, dimensions=768, frames_per_second=10, apply_fmask=True, nd_bands=None, nd_threshold=0, nd_palette=['black', 'blue']):\n \"\"\"Generates a Landsat timelapse GIF image. This function is adapted from https://emaprlab.users.earthengine.app/view/lt-gee-time-series-animator. A huge thank you to Justin Braaten for sharing his fantastic work.\n\n Args:\n roi (object, optional): Region of interest to create the timelapse. Defaults to None.\n out_gif (str, optional): File path to the output animated GIF. Defaults to None.\n start_year (int, optional): Starting year for the timelapse. Defaults to 1984.\n end_year (int, optional): Ending year for the timelapse. Defaults to 2019.\n start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.\n end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '09-20'.\n bands (list, optional): Three bands selected from ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']. Defaults to ['NIR', 'Red', 'Green'].\n vis_params (dict, optional): Visualization parameters. Defaults to None.\n dimensions (int, optional): A number or pair of numbers (in format WIDTHxHEIGHT) giving the maximum dimensions of the thumbnail to render, in pixels. If only one number is passed, it is used as the maximum, and the other dimension is computed by proportional scaling. Defaults to 768.\n frames_per_second (int, optional): Animation speed. Defaults to 10.\n apply_fmask (bool, optional): Whether to apply Fmask (Function of mask) for automated clouds, cloud shadows, snow, and water masking. Defaults to True.\n nd_bands (list, optional): A list of names specifying the bands to use, e.g., ['Green', 'SWIR1']. The normalized difference is computed as (first − second) / (first + second). Note that negative input values are forced to 0 so that the result is confined to the range (-1, 1).\n nd_threshold (float, optional): The threshold for extracting pixels from the normalized difference band. Defaults to 0.\n nd_palette (list, optional): The color palette to use for displaying the normalized difference band. Defaults to ['black', 'blue'].\n\n Returns:\n str: File path to the output GIF image.\n \"\"\"\n\n # ee_initialize()\n\n if roi is None:\n roi = ee.Geometry.Polygon(\n [[[-115.471773, 35.892718],\n [-115.471773, 36.409454],\n [-114.271283, 36.409454],\n [-114.271283, 35.892718],\n [-115.471773, 35.892718]]], None, False)\n elif isinstance(roi, ee.Feature) or isinstance(roi, ee.FeatureCollection):\n roi = roi.geometry()\n elif isinstance(roi, ee.Geometry):\n pass\n else:\n print('The provided roi is invalid. 
It must be an ee.Geometry')\n return\n\n if out_gif is None:\n out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')\n filename = 'landsat_ts_' + random_string() + '.gif'\n out_gif = os.path.join(out_dir, filename)\n elif not out_gif.endswith('.gif'):\n print('The output file must end with .gif')\n return\n # elif not os.path.isfile(out_gif):\n # print('The output file must be a file')\n # return\n else:\n out_gif = os.path.abspath(out_gif)\n out_dir = os.path.dirname(out_gif)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n allowed_bands = ['Blue', 'Green', 'Red',\n 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']\n\n if len(bands) == 3 and all(x in allowed_bands for x in bands):\n pass\n else:\n raise Exception('You can only select 3 bands from the following: {}'.format(\n ', '.join(allowed_bands)))\n\n if nd_bands is not None:\n if len(nd_bands) == 2 and all(x in allowed_bands[:-1] for x in nd_bands):\n pass\n else:\n raise Exception('You can only select two bands from the following: {}'.format(\n ', '.join(allowed_bands[:-1])))\n\n try:\n col = landsat_timeseries(\n roi, start_year, end_year, start_date, end_date, apply_fmask)\n\n if vis_params is None:\n vis_params = {}\n vis_params['bands'] = bands\n vis_params['min'] = 0\n vis_params['max'] = 4000\n vis_params['gamma'] = [1, 1, 1]\n\n video_args = vis_params.copy()\n video_args['dimensions'] = dimensions\n video_args['region'] = roi\n video_args['framesPerSecond'] = frames_per_second\n video_args['crs'] = 'EPSG:3857'\n\n if 'bands' not in video_args.keys():\n video_args['bands'] = bands\n\n if 'min' not in video_args.keys():\n video_args['min'] = 0\n\n if 'max' not in video_args.keys():\n video_args['max'] = 4000\n\n if 'gamma' not in video_args.keys():\n video_args['gamma'] = [1, 1, 1]\n\n download_ee_video(col, video_args, out_gif)\n\n if nd_bands is not None:\n nd_images = landsat_ts_norm_diff(\n col, bands=nd_bands, threshold=nd_threshold)\n out_nd_gif = out_gif.replace('.gif', '_nd.gif')\n landsat_ts_norm_diff_gif(nd_images, out_gif=out_nd_gif, vis_params=None,\n palette=nd_palette, dimensions=dimensions, frames_per_second=frames_per_second)\n\n return out_gif\n\n except Exception as e:\n print(e)\n\n\ndef minimum_bounding_box(geojson):\n \"\"\"Gets the minimum bounding box for a geojson polygon.\n\n Args:\n geojson (dict): A geojson dictionary.\n\n Returns:\n tuple: Returns a tuple containing the minimum bounding box in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -120)).\n \"\"\"\n coordinates = []\n try:\n if 'geometry' in geojson.keys():\n coordinates = geojson['geometry']['coordinates'][0]\n else:\n coordinates = geojson['coordinates'][0]\n\n lower_left = min([x[1] for x in coordinates]), min(\n [x[0] for x in coordinates]) # (lat, lon)\n upper_right = max([x[1] for x in coordinates]), max([x[0]\n for x in coordinates]) # (lat, lon)\n bounds = (lower_left, upper_right)\n return bounds\n except Exception as e:\n # print(e)\n return None\n\n\ndef geocode(location, max_rows=10, reverse=False):\n \"\"\"Search location by address and lat/lon coordinates.\n\n Args:\n location (str): Place name or address\n max_rows (int, optional): Maximum number of records to return. Defaults to 10.\n reverse (bool, optional): Search place based on coordinates. 
Defaults to False.\n\n Returns:\n list: Returns a list of locations.\n \"\"\"\n if not isinstance(location, str):\n print('The location must be a string.')\n return None\n\n if not reverse:\n\n locations = []\n addresses = set()\n g = geocoder.arcgis(location, maxRows=max_rows)\n\n for result in g:\n address = result.address\n if address not in addresses:\n addresses.add(address)\n locations.append(result)\n\n if len(locations) > 0:\n return locations\n else:\n return None\n\n else:\n try:\n if ',' in location:\n latlon = [float(x) for x in location.split(',')]\n elif ' ' in location:\n latlon = [float(x) for x in location.split(' ')]\n else:\n print(\n 'The lat-lon coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')\n return\n g = geocoder.arcgis(latlon, method='reverse')\n locations = []\n addresses = set()\n\n for result in g:\n address = result.address\n if address not in addresses:\n addresses.add(address)\n locations.append(result)\n\n if len(locations) > 0:\n return locations\n else:\n return None\n\n except Exception as e:\n print(e)\n return None\n\n\ndef is_latlon_valid(location):\n \"\"\"Checks whether a pair of coordinates is valid.\n\n Args:\n location (str): A pair of latlon coordinates separated by comma or space.\n\n Returns:\n bool: Returns True if valid.\n \"\"\"\n latlon = []\n if ',' in location:\n latlon = [float(x) for x in location.split(',')]\n elif ' ' in location:\n latlon = [float(x) for x in location.split(' ')]\n else:\n print(\n 'The coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')\n return False\n\n try:\n lat, lon = float(latlon[0]), float(latlon[1])\n if lat >= -90 and lat <= 90 and lon >= -180 and lon <= 180:\n return True\n else:\n return False\n except Exception as e:\n print(e)\n return False\n\n\ndef latlon_from_text(location):\n \"\"\"Extracts latlon from text.\n\n Args:\n location (str): A pair of latlon coordinates separated by comma or space.\n\n Returns:\n tuple: Returns (lat, lon) if valid, otherwise None.\n \"\"\"\n latlon = []\n try:\n if ',' in location:\n latlon = [float(x) for x in location.split(',')]\n elif ' ' in location:\n latlon = [float(x) for x in location.split(' ')]\n else:\n print(\n 'The lat-lon coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')\n return None\n\n lat, lon = latlon[0], latlon[1]\n if lat >= -90 and lat <= 90 and lon >= -180 and lon <= 180:\n return lat, lon\n else:\n return None\n\n except Exception as e:\n print(e)\n print('The lat-lon coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')\n return None\n\n\ndef search_ee_data(keywords):\n \"\"\"Searches the Earth Engine data catalog.\n\n Args:\n keywords (str): Keywords to search for, which can be an id, provider, tag, and so on.\n\n Returns:\n list: Returns a list of assets.\n \"\"\"\n try:\n cmd = 'geeadd search --keywords \"{}\"'.format(str(keywords))\n output = os.popen(cmd).read()\n start_index = output.index('[')\n assets = eval(output[start_index:])\n\n results = []\n for asset in assets:\n asset_dates = asset['start_date'] + ' - ' + asset['end_date']\n asset_snippet = asset['ee_id_snippet']\n start_index = asset_snippet.index(\"'\") + 1\n end_index = asset_snippet.index(\"'\", start_index)\n asset_id = asset_snippet[start_index:end_index]\n\n asset['dates'] = asset_dates\n asset['id'] = asset_id\n asset['uid'] = asset_id.replace('/', '_')\n # asset['url'] = 'https://developers.google.com/earth-engine/datasets/catalog/' + 
asset['uid']\n # asset['thumbnail'] = 'https://mw1.google.com/ges/dd/images/{}_sample.png'.format(\n # asset['uid'])\n results.append(asset)\n\n return results\n\n except Exception as e:\n print(e)\n \n\ndef ee_data_thumbnail(asset_id):\n \"\"\"Retrieves the thumbnail URL of an Earth Engine asset.\n\n Args:\n asset_id (str): An Earth Engine asset id.\n\n Returns:\n str: An http url of the thumbnail.\n \"\"\"\n import requests\n import urllib\n from bs4 import BeautifulSoup\n\n asset_uid = asset_id.replace('/', '_')\n asset_url = \"https://developers.google.com/earth-engine/datasets/catalog/{}\".format(\n asset_uid)\n thumbnail_url = 'https://mw1.google.com/ges/dd/images/{}_sample.png'.format(\n asset_uid)\n\n r = requests.get(thumbnail_url)\n\n try:\n if r.status_code != 200:\n html_page = urllib.request.urlopen(asset_url)\n soup = BeautifulSoup(html_page, features=\"html.parser\")\n\n for img in soup.findAll('img'):\n if 'sample.png' in img.get('src'):\n thumbnail_url = img.get('src')\n return thumbnail_url\n\n return thumbnail_url\n except Exception as e:\n print(e)\n\n\ndef ee_data_html(asset):\n \"\"\"Generates HTML from an asset to be used in the HTML widget.\n\n Args:\n asset (dict): A dictionary containing an Earth Engine asset.\n\n Returns:\n str: A string containing HTML.\n \"\"\"\n template = '''\n <html>\n <body>\n <h3>asset_title</h3>\n <h4>Dataset Availability</h4>\n <p style=\"margin-left: 40px\">asset_dates</p>\n <h4>Earth Engine Snippet</h4>\n <p style=\"margin-left: 40px\">ee_id_snippet</p>\n <h4>Earth Engine Data Catalog</h4>\n <p style=\"margin-left: 40px\"><a href=\"asset_url\" target=\"_blank\">asset_id</a></p>\n <h4>Dataset Thumbnail</h4>\n <img src=\"thumbnail_url\">\n </body>\n </html>\n '''\n\n try:\n\n text = template.replace('asset_title', asset['title'])\n text = text.replace('asset_dates', asset['dates'])\n text = text.replace('ee_id_snippet', asset['ee_id_snippet'])\n text = text.replace('asset_id', asset['id'])\n text = text.replace('asset_url', asset['asset_url'])\n # asset['thumbnail'] = ee_data_thumbnail(asset['id'])\n text = text.replace('thumbnail_url', asset['thumbnail_url'])\n\n return text\n\n except Exception as e:\n print(e)\n\n\ndef create_code_cell(code='', where='below'):\n \"\"\"Creates a code cell in the IPython Notebook.\n\n Args:\n code (str, optional): Code to fill the new code cell with. Defaults to ''.\n where (str, optional): Where to add the new code cell. It can be one of the following: above, below, at_bottom. Defaults to 'below'.\n \"\"\"\n\n import base64\n from IPython.display import Javascript, display\n encoded_code = (base64.b64encode(str.encode(code))).decode()\n display(Javascript(\"\"\"\n var code = IPython.notebook.insert_cell_{0}('code');\n code.set_text(atob(\"{1}\"));\n \"\"\".format(where, encoded_code)))\n\n\ndef ee_api_to_csv(outfile=None):\n \"\"\"Extracts Earth Engine API documentation from https://developers.google.com/earth-engine/api_docs as a csv file.\n\n Args:\n outfile (str, optional): The output file path to a csv file. 
Defaults to None.\n \"\"\"\n import csv\n import requests\n from bs4 import BeautifulSoup\n\n pkg_dir = os.path.dirname(\n pkg_resources.resource_filename(\"geemap\", \"geemap.py\"))\n data_dir = os.path.join(pkg_dir, 'data')\n template_dir = os.path.join(data_dir, 'template')\n csv_file = os.path.join(template_dir, 'ee_api_docs.csv')\n\n if outfile is None:\n outfile = csv_file\n else:\n if not outfile.endswith('.csv'):\n print('The output file must end with .csv')\n return\n else:\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n url = 'https://developers.google.com/earth-engine/api_docs'\n\n try:\n\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n\n names = []\n descriptions = []\n functions = []\n returns = []\n arguments = []\n types = []\n details = []\n\n names = [h2.text for h2 in soup.find_all('h2')]\n descriptions = [\n h2.next_sibling.next_sibling.text for h2 in soup.find_all('h2')]\n func_tables = soup.find_all('table', class_='blue')\n functions = [func_table.find(\n 'code').text for func_table in func_tables]\n returns = [func_table.find_all(\n 'td')[1].text for func_table in func_tables]\n\n detail_tables = []\n tables = soup.find_all('table', class_='blue')\n\n for table in tables:\n item = table.next_sibling\n if item.attrs == {'class': ['details']}:\n detail_tables.append(item)\n else:\n detail_tables.append(\"\")\n\n for detail_table in detail_tables:\n if detail_table != '':\n items = [item.text for item in detail_table.find_all('code')]\n else:\n items = \"\"\n arguments.append(items)\n\n for detail_table in detail_tables:\n if detail_table != '':\n items = [item.text for item in detail_table.find_all('td')]\n items = items[1::3]\n else:\n items = \"\"\n types.append(items)\n\n for detail_table in detail_tables:\n if detail_table != '':\n items = [item.text for item in detail_table.find_all('p')]\n else:\n items = \"\"\n details.append(items)\n\n csv_file = open(outfile, 'w', encoding='utf-8')\n csv_writer = csv.writer(csv_file, delimiter='\\t')\n\n csv_writer.writerow(\n ['name', 'description', 'function', 'returns', 'argument', 'type', 'details'])\n\n for i in range(len(names)):\n name = names[i]\n description = descriptions[i]\n function = functions[i]\n return_type = returns[i]\n argument = '|'.join(arguments[i])\n argu_type = '|'.join(types[i])\n detail = '|'.join(details[i])\n\n csv_writer.writerow(\n [name, description, function, return_type, argument, argu_type, detail])\n\n csv_file.close()\n\n except Exception as e:\n print(e)\n\n\ndef read_api_csv():\n \"\"\"Extracts Earth Engine API from a csv file and returns a dictionary containing information about each function.\n\n Returns:\n dict: The dictionary containing information about each function, including name, description, function form, return type, arguments, html. 
\n \"\"\"\n import copy\n import csv\n\n pkg_dir = os.path.dirname(\n pkg_resources.resource_filename(\"geemap\", \"geemap.py\"))\n data_dir = os.path.join(pkg_dir, 'data')\n template_dir = os.path.join(data_dir, 'template')\n csv_file = os.path.join(template_dir, 'ee_api_docs.csv')\n html_file = os.path.join(template_dir, 'ee_api_docs.html')\n\n with open(html_file) as f:\n in_html_lines = f.readlines()\n\n api_dict = {}\n\n with open(csv_file, 'r', encoding='utf-8') as f:\n csv_reader = csv.DictReader(f, delimiter='\\t')\n\n for line in csv_reader:\n\n out_html_lines = copy.copy(in_html_lines)\n out_html_lines[65] = in_html_lines[65].replace(\n 'function_name', line['name'])\n out_html_lines[66] = in_html_lines[66].replace(\n 'function_description', line.get('description'))\n out_html_lines[74] = in_html_lines[74].replace(\n 'function_usage', line.get('function'))\n out_html_lines[75] = in_html_lines[75].replace(\n 'function_returns', line.get('returns'))\n\n arguments = line.get('argument')\n types = line.get('type')\n details = line.get('details')\n\n if '|' in arguments:\n argument_items = arguments.split('|')\n else:\n argument_items = [arguments]\n\n if '|' in types:\n types_items = types.split('|')\n else:\n types_items = [types]\n\n if '|' in details:\n details_items = details.split('|')\n else:\n details_items = [details]\n\n out_argument_lines = []\n\n for index in range(len(argument_items)):\n in_argument_lines = in_html_lines[87:92]\n in_argument_lines[1] = in_argument_lines[1].replace(\n 'function_argument', argument_items[index])\n in_argument_lines[2] = in_argument_lines[2].replace(\n 'function_type', types_items[index])\n in_argument_lines[3] = in_argument_lines[3].replace(\n 'function_details', details_items[index])\n out_argument_lines.append(\"\".join(in_argument_lines))\n\n out_html_lines = out_html_lines[:87] + \\\n out_argument_lines + out_html_lines[92:]\n\n contents = ''.join(out_html_lines)\n\n api_dict[line['name']] = {\n 'description': line.get('description'),\n 'function': line.get('function'),\n 'returns': line.get('returns'),\n 'argument': line.get('argument'),\n 'type': line.get('type'),\n 'details': line.get('details'),\n 'html': contents\n }\n\n return api_dict\n\n\ndef ee_function_tree(name):\n \"\"\"Construct the tree structure based on an Earth Engine function. For example, the function \"ee.Algorithms.FMask.matchClouds\" will return a list [\"ee.Algorithms\", \"ee.Algorithms.FMask\", \"ee.Algorithms.FMask.matchClouds\"]\n\n Args:\n name (str): The name of the Earth Engine function\n\n Returns:\n list: The list for parent functions.\n \"\"\"\n func_list = []\n try:\n items = name.split('.')\n if items[0] == 'ee':\n for i in range(2, len(items) + 1):\n func_list.append('.'.join(items[0:i]))\n else:\n for i in range(1, len(items) + 1):\n func_list.append('.'.join(items[0:i]))\n\n return func_list\n except Exception as e:\n print(e)\n print('The provided function name is invalid.')\n\n\ndef build_api_tree(api_dict, output_widget, layout_width='100%'):\n \"\"\"Builds an Earth Engine API tree view.\n\n Args:\n api_dict (dict): The dictionary containing information about each Earth Engine API function.\n output_widget (object): An Output widget.\n layout_width (str, optional): The percentage width of the widget. 
Defaults to '100%'.\n\n Returns:\n tuple: Returns a tuple containing two items: a tree Output widget and a tree dictionary.\n \"\"\"\n import warnings\n warnings.filterwarnings('ignore')\n\n tree = Tree()\n tree_dict = {}\n\n names = api_dict.keys()\n\n def handle_click(event):\n if event['new']:\n name = event['owner'].name\n values = api_dict[name]\n\n with output_widget:\n output_widget.clear_output()\n html_widget = widgets.HTML(value=values['html'])\n display(html_widget)\n\n for name in names:\n func_list = ee_function_tree(name)\n first = func_list[0]\n\n if first not in tree_dict.keys():\n tree_dict[first] = Node(first)\n tree_dict[first].opened = False\n tree.add_node(tree_dict[first])\n\n for index, func in enumerate(func_list):\n if index > 0:\n if func not in tree_dict.keys():\n node = tree_dict[func_list[index - 1]]\n node.opened = False\n tree_dict[func] = Node(func)\n node.add_node(tree_dict[func])\n\n if index == len(func_list) - 1:\n node = tree_dict[func_list[index]]\n node.icon = 'file'\n node.observe(handle_click, 'selected')\n\n return tree, tree_dict\n\n\ndef search_api_tree(keywords, api_tree):\n \"\"\"Search Earth Engine API and return functions containing the specified keywords\n\n Args:\n keywords (str): The keywords to search for.\n api_tree (dict): The dictionary containing the Earth Engine API tree.\n\n Returns:\n object: An ipytree object/widget.\n \"\"\"\n import warnings\n warnings.filterwarnings('ignore')\n\n sub_tree = Tree()\n\n for key in api_tree.keys():\n if keywords in key:\n sub_tree.add_node(api_tree[key])\n\n return sub_tree\n\n\ndef ee_search(asset_limit=100):\n \"\"\"Search Earth Engine API and user assets. If you received a warning (IOPub message rate exceeded) in Jupyter notebook, you can relaunch Jupyter notebook using the following command:\n jupyter notebook --NotebookApp.iopub_msg_rate_limit=10000\n\n Args:\n asset_limit (int, optional): The number of assets to display for each asset type, i.e., Image, ImageCollection, and FeatureCollection. 
Defaults to 100.\n \"\"\"\n\n import warnings\n warnings.filterwarnings('ignore')\n\n class Flags:\n def __init__(self, repos=None, docs=None, assets=None, docs_dict=None, asset_dict=None, asset_import=None):\n self.repos = repos\n self.docs = docs\n self.assets = assets\n self.docs_dict = docs_dict\n self.asset_dict = asset_dict\n self.asset_import = asset_import\n\n flags = Flags()\n\n search_type = widgets.ToggleButtons(\n options=['Scripts', 'Docs', 'Assets'],\n tooltips=['Search Earth Engine Scripts',\n 'Search Earth Engine API', 'Search Earth Engine Assets'],\n button_style='primary'\n )\n search_type.style.button_width = '100px'\n\n search_box = widgets.Text(\n placeholder='Filter scripts...', value='Loading...')\n search_box.layout.width = '310px'\n\n tree_widget = widgets.Output()\n\n left_widget = widgets.VBox()\n right_widget = widgets.VBox()\n output_widget = widgets.Output()\n output_widget.layout.max_width = '650px'\n\n search_widget = widgets.HBox()\n search_widget.children = [left_widget, right_widget]\n display(search_widget)\n\n repo_tree, repo_output, _ = build_repo_tree()\n left_widget.children = [search_type, repo_tree]\n right_widget.children = [repo_output]\n\n flags.repos = repo_tree\n search_box.value = ''\n\n def search_type_changed(change):\n search_box.value = ''\n\n output_widget.clear_output()\n tree_widget.clear_output()\n if change['new'] == 'Scripts':\n search_box.placeholder = 'Filter scripts...'\n left_widget.children = [search_type, repo_tree]\n right_widget.children = [repo_output]\n elif change['new'] == 'Docs':\n search_box.placeholder = 'Filter methods...'\n search_box.value = 'Loading...'\n left_widget.children = [search_type, search_box, tree_widget]\n right_widget.children = [output_widget]\n if flags.docs is None:\n api_dict = read_api_csv()\n ee_api_tree, tree_dict = build_api_tree(\n api_dict, output_widget)\n flags.docs = ee_api_tree\n flags.docs_dict = tree_dict\n else:\n ee_api_tree = flags.docs\n with tree_widget:\n tree_widget.clear_output()\n display(ee_api_tree)\n right_widget.children = [output_widget]\n search_box.value = ''\n elif change['new'] == 'Assets':\n search_box.placeholder = 'Filter assets...'\n left_widget.children = [search_type, search_box, tree_widget]\n right_widget.children = [output_widget]\n search_box.value = 'Loading...'\n if flags.assets is None:\n asset_tree, asset_widget, asset_dict = build_asset_tree(\n limit=asset_limit)\n flags.assets = asset_tree\n flags.asset_dict = asset_dict\n flags.asset_import = asset_widget\n\n with tree_widget:\n tree_widget.clear_output()\n display(flags.assets)\n right_widget.children = [flags.asset_import]\n search_box.value = ''\n\n search_type.observe(search_type_changed, names='value')\n\n def search_box_callback(text):\n\n if search_type.value == 'Docs':\n with tree_widget:\n if text.value == '':\n print('Loading...')\n tree_widget.clear_output(wait=True)\n display(flags.docs)\n else:\n tree_widget.clear_output()\n print('Searching...')\n tree_widget.clear_output(wait=True)\n sub_tree = search_api_tree(text.value, flags.docs_dict)\n display(sub_tree)\n elif search_type.value == 'Assets':\n with tree_widget:\n if text.value == '':\n print('Loading...')\n tree_widget.clear_output(wait=True)\n display(flags.assets)\n else:\n tree_widget.clear_output()\n print('Searching...')\n tree_widget.clear_output(wait=True)\n sub_tree = search_api_tree(text.value, flags.asset_dict)\n display(sub_tree)\n\n search_box.on_submit(search_box_callback)\n\n\ndef ee_user_id():\n \"\"\"Gets Earth 
Engine account user id.\n\n Returns:\n str: A string containing the user id.\n \"\"\"\n # ee_initialize()\n roots = ee.data.getAssetRoots()\n if len(roots) == 0:\n return None\n else:\n root = ee.data.getAssetRoots()[0]\n user_id = root['id'].replace(\"projects/earthengine-legacy/assets/\", \"\")\n return user_id\n\n\ndef build_asset_tree(limit=100):\n\n import warnings\n import geeadd.ee_report as geeadd\n warnings.filterwarnings('ignore')\n\n # ee_initialize()\n\n tree = Tree(multiple_selection=False)\n tree_dict = {}\n asset_types = {}\n\n asset_icons = {\n 'FOLDER': 'folder',\n 'TABLE': 'table',\n 'IMAGE': 'image',\n 'IMAGE_COLLECTION': 'file'\n }\n\n info_widget = widgets.HBox()\n\n import_btn = widgets.Button(\n description='import',\n button_style='primary',\n tooltip='Click to import the selected asset',\n disabled=True\n )\n import_btn.layout.min_width = '57px'\n import_btn.layout.max_width = '57px'\n\n path_widget = widgets.Text()\n path_widget.layout.min_width = '500px'\n # path_widget.disabled = True\n\n info_widget.children = [import_btn, path_widget]\n\n user_id = ee_user_id()\n if user_id is None:\n print('Your GEE account does not have any assets. Please create a repository at https://code.earthengine.google.com')\n return\n\n user_path = 'projects/earthengine-legacy/assets/' + user_id\n root_node = Node(user_id)\n root_node.opened = True\n tree_dict[user_id] = root_node\n tree.add_node(root_node)\n\n collection_list, table_list, image_list, folder_paths = geeadd.fparse(\n user_path)\n collection_list = collection_list[:limit]\n table_list = table_list[:limit]\n image_list = image_list[:limit]\n folder_paths = folder_paths[:limit]\n folders = [p[35:] for p in folder_paths[1:]]\n\n asset_type = 'FOLDER'\n for folder in folders:\n bare_folder = folder.replace(user_id + '/', '')\n if folder not in tree_dict.keys():\n node = Node(bare_folder)\n node.opened = False\n node.icon = asset_icons[asset_type]\n root_node.add_node(node)\n tree_dict[folder] = node\n asset_types[folder] = asset_type\n\n def import_btn_clicked(b):\n if path_widget.value != '':\n dataset_uid = 'dataset_' + random_string(string_length=3)\n layer_name = path_widget.value.split('/')[-1][:-2:]\n line1 = '{} = {}\\n'.format(\n dataset_uid, path_widget.value)\n line2 = 'Map.addLayer(' + dataset_uid + \\\n ', {}, \"' + layer_name + '\")'\n contents = ''.join([line1, line2])\n create_code_cell(contents)\n\n import_btn.on_click(import_btn_clicked)\n\n def handle_click(event):\n if event['new']:\n cur_node = event['owner']\n for key in tree_dict.keys():\n if cur_node is tree_dict[key]:\n if asset_types[key] == 'IMAGE':\n path_widget.value = \"ee.Image('{}')\".format(key)\n elif asset_types[key] == 'IMAGE_COLLECTION':\n path_widget.value = \"ee.ImageCollection('{}')\".format(\n key)\n elif asset_types[key] == 'TABLE':\n path_widget.value = \"ee.FeatureCollection('{}')\".format(\n key)\n if import_btn.disabled:\n import_btn.disabled = False\n break\n\n assets = [collection_list, image_list, table_list]\n for index, asset_list in enumerate(assets):\n if index == 0:\n asset_type = 'IMAGE_COLLECTION'\n elif index == 1:\n asset_type = 'IMAGE'\n else:\n asset_type = 'TABLE'\n\n for asset in asset_list:\n items = asset.split('/')\n parent = '/'.join(items[:-1])\n child = items[-1]\n parent_node = tree_dict[parent]\n child_node = Node(child)\n child_node.icon = asset_icons[asset_type]\n parent_node.add_node(child_node)\n tree_dict[asset] = child_node\n asset_types[asset] = asset_type\n child_node.observe(handle_click, 
'selected')\n\n return tree, info_widget, tree_dict\n\n\ndef build_repo_tree(out_dir=None, name='gee_repos'):\n \"\"\"Builds a repo tree for GEE account.\n\n Args:\n out_dir (str): The output directory for the repos. Defaults to None.\n name (str, optional): The output name for the repo directory. Defaults to 'gee_repos'.\n\n Returns:\n tuple: Returns a tuple containing a tree widget, an output widget, and a tree dictionary containing nodes.\n \"\"\"\n import warnings\n warnings.filterwarnings('ignore')\n\n if out_dir is None:\n out_dir = os.path.join(os.path.expanduser('~'))\n\n repo_dir = os.path.join(out_dir, name)\n if not os.path.exists(repo_dir):\n os.makedirs(repo_dir)\n\n URLs = {\n # 'Owner': 'https://earthengine.googlesource.com/{}/default'.format(ee_user_id()),\n 'Writer': '',\n 'Reader': 'https://github.com/giswqs/geemap',\n 'Examples': 'https://github.com/giswqs/earthengine-py-examples',\n 'Archive': 'https://earthengine.googlesource.com/EGU2017-EE101'\n }\n\n user_id = ee_user_id()\n if user_id is not None:\n URLs['Owner'] = 'https://earthengine.googlesource.com/{}/default'.format(\n ee_user_id())\n\n path_widget = widgets.Text(\n placeholder='Enter the link to a Git repository here...')\n path_widget.layout.width = '475px'\n clone_widget = widgets.Button(\n description='Clone', button_style='primary', tooltip='Clone the repository to folder.')\n info_widget = widgets.HBox()\n\n groups = ['Owner', 'Writer', 'Reader', 'Examples', 'Archive']\n for group in groups:\n group_dir = os.path.join(repo_dir, group)\n if not os.path.exists(group_dir):\n os.makedirs(group_dir)\n\n example_dir = os.path.join(repo_dir, 'Examples/earthengine-py-examples')\n if not os.path.exists(example_dir):\n clone_github_repo(URLs['Examples'], out_dir=example_dir)\n\n left_widget, right_widget, tree_dict = file_browser(\n in_dir=repo_dir, add_root_node=False, search_description='Filter scripts...', use_import=True, return_sep_widgets=True)\n info_widget.children = [right_widget]\n\n def handle_folder_click(event):\n if event['new']:\n url = ''\n selected = event['owner']\n if selected.name in URLs.keys():\n url = URLs[selected.name]\n\n path_widget.value = url\n clone_widget.disabled = False\n info_widget.children = [path_widget, clone_widget]\n else:\n info_widget.children = [right_widget]\n\n for group in groups:\n dirname = os.path.join(repo_dir, group)\n node = tree_dict[dirname]\n node.observe(handle_folder_click, 'selected')\n\n def handle_clone_click(b):\n\n url = path_widget.value\n default_dir = os.path.join(repo_dir, 'Examples')\n if url == '':\n path_widget.value = 'Please enter a valid URL to the repository.'\n else:\n for group in groups:\n key = os.path.join(repo_dir, group)\n node = tree_dict[key]\n if node.selected:\n default_dir = key\n try:\n path_widget.value = 'Cloning...'\n clone_dir = os.path.join(default_dir, os.path.basename(url))\n if 'github.com' in url:\n clone_github_repo(url, out_dir=clone_dir)\n elif 'googlesource' in url:\n clone_google_repo(url, out_dir=clone_dir)\n path_widget.value = 'Cloned to {}'.format(clone_dir)\n clone_widget.disabled = True\n except Exception as e:\n path_widget.value = 'An error occurred when trying to clone the repository ' + \\\n str(e)\n clone_widget.disabled = True\n\n clone_widget.on_click(handle_clone_click)\n\n return left_widget, info_widget, tree_dict\n\n\ndef file_browser(in_dir=None, show_hidden=False, add_root_node=True, search_description=None, use_import=False, return_sep_widgets=False):\n \"\"\"Creates a simple file browser and text 
editor.\n\n Args:\n in_dir (str, optional): The input directory. Defaults to None, which will use the current working directory.\n show_hidden (bool, optional): Whether to show hidden files/folders. Defaults to False.\n add_root_node (bool, optional): Whether to add the input directory as a root node. Defaults to True.\n search_description (str, optional): The description of the search box. Defaults to None.\n use_import (bool, optional): Whether to show the import button. Defaults to False.\n return_sep_widgets (bool, optional): Whether to return the results as separate widgets. Defaults to False.\n\n Returns:\n object: An ipywidget.\n \"\"\"\n import platform\n if in_dir is None:\n in_dir = os.getcwd()\n\n if not os.path.exists(in_dir):\n print('The provided directory does not exist.')\n return\n elif not os.path.isdir(in_dir):\n print('The provided path is not a valid directory.')\n return\n\n sep = '/'\n if platform.system() == \"Windows\":\n sep = '\\\\'\n\n if in_dir.endswith(sep):\n in_dir = in_dir[:-1]\n\n full_widget = widgets.HBox()\n left_widget = widgets.VBox()\n\n right_widget = widgets.VBox()\n\n import_btn = widgets.Button(\n description='import', button_style='primary', tooltip='import the content to a new cell', disabled=True)\n import_btn.layout.width = '70px'\n path_widget = widgets.Text()\n path_widget.layout.min_width = '400px'\n # path_widget.layout.max_width = '400px'\n save_widget = widgets.Button(\n description='Save', button_style='primary', tooltip='Save edits to file.', disabled=True)\n info_widget = widgets.HBox()\n info_widget.children = [path_widget, save_widget]\n if use_import:\n info_widget.children = [import_btn, path_widget, save_widget]\n\n text_widget = widgets.Textarea()\n text_widget.layout.width = '630px'\n text_widget.layout.height = '600px'\n\n right_widget.children = [info_widget, text_widget]\n full_widget.children = [left_widget]\n\n if search_description is None:\n search_description = 'Search files/folders...'\n search_box = widgets.Text(placeholder=search_description)\n search_box.layout.width = '310px'\n tree_widget = widgets.Output()\n tree_widget.layout.max_width = '310px'\n tree_widget.overflow = 'auto'\n\n left_widget.children = [search_box, tree_widget]\n\n tree = Tree(multiple_selection=False)\n tree_dict = {}\n\n def on_button_clicked(b):\n content = text_widget.value\n out_file = path_widget.value\n\n out_dir = os.path.dirname(out_file)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n with open(out_file, 'w') as f:\n f.write(content)\n\n text_widget.disabled = True\n text_widget.value = 'The content has been saved successfully.'\n save_widget.disabled = True\n path_widget.disabled = True\n\n if (out_file not in tree_dict.keys()) and (out_dir in tree_dict.keys()):\n node = Node(os.path.basename(out_file))\n tree_dict[out_file] = node\n parent_node = tree_dict[out_dir]\n parent_node.add_node(node)\n\n save_widget.on_click(on_button_clicked)\n\n def import_btn_clicked(b):\n if (text_widget.value != '') and (path_widget.value.endswith('.py')):\n create_code_cell(text_widget.value)\n\n import_btn.on_click(import_btn_clicked)\n\n def search_box_callback(text):\n\n with tree_widget:\n if text.value == '':\n print('Loading...')\n tree_widget.clear_output(wait=True)\n display(tree)\n else:\n tree_widget.clear_output()\n print('Searching...')\n tree_widget.clear_output(wait=True)\n sub_tree = search_api_tree(text.value, tree_dict)\n display(sub_tree)\n search_box.on_submit(search_box_callback)\n\n def handle_file_click(event):\n 
if event['new']:\n cur_node = event['owner']\n for key in tree_dict.keys():\n if (cur_node is tree_dict[key]) and (os.path.isfile(key)):\n if key.endswith('.py'):\n import_btn.disabled = False\n else:\n import_btn.disabled = True\n try:\n with open(key) as f:\n content = f.read()\n text_widget.value = content\n text_widget.disabled = False\n path_widget.value = key\n path_widget.disabled = False\n save_widget.disabled = False\n full_widget.children = [left_widget, right_widget]\n except Exception as e:\n path_widget.value = key\n path_widget.disabled = True\n save_widget.disabled = True\n text_widget.disabled = True\n text_widget.value = 'Failed to open {}.'.format(\n cur_node.name) + '\\n\\n' + str(e)\n full_widget.children = [left_widget, right_widget]\n return\n break\n\n def handle_folder_click(event):\n if event['new']:\n full_widget.children = [left_widget]\n text_widget.value = ''\n\n if add_root_node:\n root_name = in_dir.split(sep)[-1]\n root_node = Node(root_name)\n tree_dict[in_dir] = root_node\n tree.add_node(root_node)\n root_node.observe(handle_folder_click, 'selected')\n\n for root, d_names, f_names in os.walk(in_dir):\n\n if not show_hidden:\n folders = root.split(sep)\n for folder in folders:\n if folder.startswith('.'):\n continue\n for d_name in d_names:\n if d_name.startswith('.'):\n d_names.remove(d_name)\n for f_name in f_names:\n if f_name.startswith('.'):\n f_names.remove(f_name)\n\n d_names.sort()\n f_names.sort()\n\n if (not add_root_node) and (root == in_dir):\n for d_name in d_names:\n node = Node(d_name)\n tree_dict[os.path.join(in_dir, d_name)] = node\n tree.add_node(node)\n node.opened = False\n node.observe(handle_folder_click, 'selected')\n\n if (root != in_dir) and (root not in tree_dict.keys()):\n name = root.split(sep)[-1]\n dir_name = os.path.dirname(root)\n parent_node = tree_dict[dir_name]\n node = Node(name)\n tree_dict[root] = node\n parent_node.add_node(node)\n node.observe(handle_folder_click, 'selected')\n\n if len(f_names) > 0:\n parent_node = tree_dict[root]\n parent_node.opened = False\n for f_name in f_names:\n node = Node(f_name)\n node.icon = 'file'\n full_path = os.path.join(root, f_name)\n tree_dict[full_path] = node\n parent_node.add_node(node)\n node.observe(handle_file_click, 'selected')\n\n with tree_widget:\n tree_widget.clear_output()\n display(tree)\n\n if return_sep_widgets:\n return left_widget, right_widget, tree_dict\n else:\n return full_widget\n\n\ndef check_git_install():\n \"\"\"Checks if Git is installed.\n\n Returns:\n bool: Returns True if Git is installed, otherwise returns False.\n \"\"\"\n import webbrowser\n\n cmd = 'git --version'\n output = os.popen(cmd).read()\n\n if 'git version' in output:\n return True\n else:\n url = 'https://git-scm.com/downloads'\n print(\n \"Git is not installed. Please download Git from {} and install it.\".format(url))\n webbrowser.open_new_tab(url)\n return False\n\n\ndef clone_github_repo(url, out_dir):\n \"\"\"Clones a GitHub repository.\n\n Args:\n url (str): The link to the GitHub repository\n out_dir (str): The output directory for the cloned repository. \n \"\"\"\n\n import zipfile\n\n repo_name = os.path.basename(url)\n # url_zip = os.path.join(url, 'archive/master.zip')\n url_zip = url + '/archive/master.zip'\n\n if os.path.exists(out_dir):\n print(\n 'The specified output directory already exists. 
Please choose a new directory.')\n return\n\n parent_dir = os.path.dirname(out_dir)\n out_file_path = os.path.join(parent_dir, repo_name + '.zip')\n\n try:\n urllib.request.urlretrieve(url_zip, out_file_path)\n except:\n print(\"The provided URL is invalid. Please double check the URL.\")\n return\n\n with zipfile.ZipFile(out_file_path, \"r\") as zip_ref:\n zip_ref.extractall(parent_dir)\n\n src = out_file_path.replace('.zip', '-master')\n os.rename(src, out_dir)\n os.remove(out_file_path)\n\n\ndef clone_github_repo2(url, out_dir=None):\n \"\"\"Clones a GitHub repository.\n\n Args:\n url (str): The link to the GitHub repository\n out_dir (str, optional): The output directory for the cloned repository. Defaults to None.\n \"\"\"\n check_install('dulwich')\n from dulwich import porcelain\n\n repo_name = os.path.basename(url)\n\n if out_dir is None:\n out_dir = os.path.join(os.getcwd(), repo_name)\n\n if not os.path.exists(os.path.dirname(out_dir)):\n os.makedirs(os.path.dirname(out_dir))\n\n if os.path.exists(out_dir):\n print(\n 'The specified output directory already exists. Please choose a new directory.')\n return\n\n try:\n porcelain.clone(url, out_dir)\n except Exception as e:\n print('Failed to clone the repository.')\n print(e)\n\n\ndef clone_google_repo(url, out_dir=None):\n \"\"\"Clones an Earth Engine repository from https://earthengine.googlesource.com, such as https://earthengine.googlesource.com/users/google/datasets\n\n Args:\n url (str): The link to the Earth Engine repository\n out_dir (str, optional): The output directory for the cloned repository. Defaults to None.\n \"\"\"\n repo_name = os.path.basename(url)\n\n if out_dir is None:\n out_dir = os.path.join(os.getcwd(), repo_name)\n\n if not os.path.exists(os.path.dirname(out_dir)):\n os.makedirs(os.path.dirname(out_dir))\n\n if os.path.exists(out_dir):\n print(\n 'The specified output directory already exists. Please choose a new directory.')\n return\n\n if check_git_install():\n\n cmd = 'git clone \"{}\" \"{}\"'.format(url, out_dir)\n os.popen(cmd).read()\n\n\ndef reduce_gif_size(in_gif, out_gif=None):\n \"\"\"Reduces a GIF image using ffmpeg.\n\n Args:\n in_gif (str): The input file path to the GIF image.\n out_gif (str, optional): The output file path to the GIF image. 
Defaults to None.\n \"\"\"\n import ffmpeg\n import shutil\n\n if not is_tool('ffmpeg'):\n print('ffmpeg is not installed on your computer.')\n return\n\n if not os.path.exists(in_gif):\n print('The input gif file does not exist.')\n return\n\n if out_gif is None:\n out_gif = in_gif\n elif not os.path.exists(os.path.dirname(out_gif)):\n os.makedirs(os.path.dirname(out_gif))\n\n if in_gif == out_gif:\n tmp_gif = in_gif.replace('.gif', '_tmp.gif')\n shutil.copyfile(in_gif, tmp_gif)\n stream = ffmpeg.input(tmp_gif)\n stream = ffmpeg.output(stream, in_gif).overwrite_output()\n ffmpeg.run(stream)\n os.remove(tmp_gif)\n\n else:\n stream = ffmpeg.input(in_gif)\n stream = ffmpeg.output(stream, out_gif).overwrite_output()\n ffmpeg.run(stream)\n\n\ndef upload_to_imgur(in_gif):\n \"\"\"Uploads an image to imgur.com\n\n Args:\n in_gif (str): The file path to the image.\n \"\"\"\n import subprocess\n\n pkg_name = 'imgur-uploader'\n if not is_tool(pkg_name):\n check_install(pkg_name)\n\n try:\n IMGUR_API_ID = os.environ.get('IMGUR_API_ID', None)\n IMGUR_API_SECRET = os.environ.get('IMGUR_API_SECRET', None)\n credentials_path = os.path.join(os.path.expanduser(\n '~'), '.config/imgur_uploader/uploader.cfg')\n\n if ((IMGUR_API_ID is not None) and (IMGUR_API_SECRET is not None)) or os.path.exists(credentials_path):\n\n proc = subprocess.Popen(\n ['imgur-uploader', in_gif], stdout=subprocess.PIPE)\n for i in range(0, 2):\n line = proc.stdout.readline()\n print(line.rstrip().decode(\"utf-8\"))\n # while True:\n # line = proc.stdout.readline()\n # if not line:\n # break\n # print(line.rstrip().decode(\"utf-8\"))\n else:\n print('Imgur API credentials could not be found. Please check https://pypi.org/project/imgur-uploader/ for instructions on how to get Imgur API credentials')\n return\n\n except Exception as e:\n print(e)\n\n\ndef is_tool(name):\n \"\"\"Check whether `name` is on PATH and marked as executable.\"\"\"\n\n from shutil import which\n\n return which(name) is not None\n\n\ndef image_props(img, date_format='YYYY-MM-dd'):\n \"\"\"Gets image properties.\n\n Args:\n img (ee.Image): The input image.\n date_format (str, optional): The output date format. 
Defaults to 'YYYY-MM-dd'.\n\n Returns:\n ee.Dictionary: The dictionary containing image properties.\n \"\"\"\n if not isinstance(img, ee.Image):\n print('The input object must be an ee.Image')\n return\n\n keys = img.propertyNames().remove('system:footprint').remove('system:bands')\n values = keys.map(lambda p: img.get(p))\n\n bands = img.bandNames()\n scales = bands.map(lambda b: img.select([b]).projection().nominalScale())\n scale = ee.Algorithms.If(scales.distinct().size().gt(\n 1), ee.Dictionary.fromLists(bands.getInfo(), scales), scales.get(0))\n image_date = ee.Date(img.get('system:time_start')).format(date_format)\n time_start = ee.Date(img.get('system:time_start')\n ).format('YYYY-MM-dd HH:mm:ss')\n # time_end = ee.Date(img.get('system:time_end')).format('YYYY-MM-dd HH:mm:ss')\n time_end = ee.Algorithms.If(ee.List(img.propertyNames()).contains('system:time_end'), ee.Date(\n img.get('system:time_end')).format('YYYY-MM-dd HH:mm:ss'), time_start)\n asset_size = ee.Number(img.get('system:asset_size')).divide(\n 1e6).format().cat(ee.String(' MB'))\n\n props = ee.Dictionary.fromLists(keys, values)\n props = props.set('system:time_start', time_start)\n props = props.set('system:time_end', time_end)\n props = props.set('system:asset_size', asset_size)\n props = props.set('NOMINAL_SCALE', scale)\n props = props.set('IMAGE_DATE', image_date)\n\n return props\n\n\ndef image_stats(img, region=None, scale=None):\n \"\"\"Gets image descriptive statistics.\n\n Args:\n img (ee.Image): The input image to calculate descriptive statistics.\n region (object, optional): The region over which to reduce data. Defaults to the footprint of the image's first band.\n scale (float, optional): A nominal scale in meters of the projection to work in. Defaults to None.\n\n Returns:\n ee.Dictionary: A dictionary containing the descriptive statistics of the input image.\n \"\"\"\n import geemap.utils as utils\n\n if not isinstance(img, ee.Image):\n print('The input object must be an ee.Image')\n return\n\n stat_types = ['min', 'max', 'mean', 'std', 'sum']\n\n image_min = utils.image_min_value(img, region, scale)\n image_max = utils.image_max_value(img, region, scale)\n image_mean = utils.image_mean_value(img, region, scale)\n image_std = utils.image_std_value(img, region, scale)\n image_sum = utils.image_sum_value(img, region, scale)\n\n stat_results = ee.List(\n [image_min, image_max, image_mean, image_std, image_sum])\n\n stats = ee.Dictionary.fromLists(stat_types, stat_results)\n\n return stats\n\n\ndef date_sequence(start, end, unit, date_format='YYYY-MM-dd'):\n \"\"\"Creates a date sequence.\n\n Args:\n start (str): The start date, e.g., '2000-01-01'.\n end (str): The end date, e.g., '2000-12-31'.\n unit (str): One of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'.\n date_format (str, optional): A pattern, as described at http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html. 
Defaults to 'YYYY-MM-dd'.\n\n Returns:\n ee.List: A list of date sequence.\n \"\"\"\n start_date = ee.Date(start)\n end_date = ee.Date(end)\n count = ee.Number(end_date.difference(start_date, unit)).toInt()\n num_seq = ee.List.sequence(0, count)\n date_seq = num_seq.map(\n lambda d: start_date.advance(d, unit).format(date_format))\n return date_seq\n\n\ndef adjust_longitude(in_fc):\n \"\"\"Adjusts longitude if it is less than -180 or greater than 180.\n\n Args:\n in_fc (dict): The input dictionary containing coordinates.\n\n Returns:\n dict: A dictionary containing the converted longitudes\n \"\"\"\n try:\n\n keys = in_fc.keys()\n\n if 'geometry' in keys:\n\n coordinates = in_fc['geometry']['coordinates']\n\n if in_fc['geometry']['type'] == 'Point':\n longitude = coordinates[0]\n if longitude < - 180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc['geometry']['coordinates'][0] = longitude\n\n elif in_fc['geometry']['type'] == 'Polygon':\n for index1, item in enumerate(coordinates):\n for index2, element in enumerate(item):\n longitude = element[0]\n if longitude < - 180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc['geometry']['coordinates'][index1][index2][0] = longitude\n\n elif in_fc['geometry']['type'] == 'LineString':\n for index, element in enumerate(coordinates):\n longitude = element[0]\n if longitude < - 180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc['geometry']['coordinates'][index][0] = longitude\n\n elif 'type' in keys:\n\n coordinates = in_fc['coordinates']\n\n if in_fc['type'] == 'Point':\n longitude = coordinates[0]\n if longitude < - 180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc['coordinates'][0] = longitude\n\n elif in_fc['type'] == 'Polygon':\n for index1, item in enumerate(coordinates):\n for index2, element in enumerate(item):\n longitude = element[0]\n if longitude < - 180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc['coordinates'][index1][index2][0] = longitude\n\n elif in_fc['type'] == 'LineString':\n for index, element in enumerate(coordinates):\n longitude = element[0]\n if longitude < - 180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc['coordinates'][index][0] = longitude\n\n return in_fc\n\n except Exception as e:\n print(e)\n return None\n\n\ndef set_proxy(port=1080, ip='http://127.0.0.1'):\n \"\"\"Sets proxy if needed. This is only needed for countries where Google services are not available.\n\n Args:\n port (int, optional): The proxy port number. Defaults to 1080.\n ip (str, optional): The IP address. Defaults to 'http://127.0.0.1'.\n \"\"\"\n import os\n import requests\n\n try:\n\n if not ip.startswith('http'):\n ip = 'http://' + ip\n proxy = '{}:{}'.format(ip, port)\n\n os.environ['HTTP_PROXY'] = proxy\n os.environ['HTTPS_PROXY'] = proxy\n\n a = requests.get('https://earthengine.google.com/')\n\n if a.status_code != 200:\n print(\n 'Failed to connect to Earth Engine. 
Please double check the port number and ip address.')\n\n except Exception as e:\n print(e)\n\n\ndef in_colab_shell():\n \"\"\"Tests if the code is being executed within Google Colab.\"\"\"\n try:\n import google.colab # pylint: disable=unused-variable\n return True\n except ImportError:\n return False\n\n\ndef is_drive_mounted():\n \"\"\"Checks whether Google Drive is mounted in Google Colab.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n \"\"\"\n drive_path = '/content/drive/My Drive'\n if os.path.exists(drive_path):\n return True\n else:\n return False\n\n\ndef credentials_in_drive():\n \"\"\"Checks if the ee credentials file exists in Google Drive.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n \"\"\"\n credentials_path = '/content/drive/My Drive/.config/earthengine/credentials'\n if os.path.exists(credentials_path):\n return True\n else:\n return False\n\n\ndef credentials_in_colab():\n \"\"\"Checks if the ee credentials file exists in Google Colab.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n \"\"\"\n credentials_path = '/root/.config/earthengine/credentials'\n if os.path.exists(credentials_path):\n return True\n else:\n return False\n\n\ndef copy_credentials_to_drive():\n \"\"\"Copies ee credentials from Google Colab to Google Drive.\n \"\"\"\n import shutil\n src = '/root/.config/earthengine/credentials'\n dst = '/content/drive/My Drive/.config/earthengine/credentials'\n\n wd = os.path.dirname(dst)\n if not os.path.exists(wd):\n os.makedirs(wd)\n\n shutil.copyfile(src, dst)\n\n\ndef copy_credentials_to_colab():\n \"\"\"Copies ee credentials from Google Drive to Google Colab.\n \"\"\"\n import shutil\n src = '/content/drive/My Drive/.config/earthengine/credentials'\n dst = '/root/.config/earthengine/credentials'\n\n wd = os.path.dirname(dst)\n if not os.path.exists(wd):\n os.makedirs(wd)\n\n shutil.copyfile(src, dst)\n\n\ndef create_download_link(filename, title=\"Click here to download: \"):\n \"\"\"Downloads a file from voila. Adopted from https://github.com/voila-dashboards/voila/issues/578\n\n Args:\n filename (str): The file path to the file to download\n title (str, optional): str. Defaults to \"Click here to download: \".\n\n Returns:\n str: HTML download URL.\n \"\"\"\n import base64\n from IPython.display import HTML\n data = open(filename, \"rb\").read()\n b64 = base64.b64encode(data)\n payload = b64.decode()\n basename = os.path.basename(filename)\n html = '<a download=\"{filename}\" href=\"data:text/csv;base64,{payload}\" style=\"color:#0000FF;\" target=\"_blank\">{title}</a>'\n html = html.format(payload=payload, title=title +\n f' {basename}', filename=basename)\n return HTML(html)\n\n\ndef edit_download_html(htmlWidget, filename, title=\"Click here to download: \"):\n \"\"\"Downloads a file from voila. Adopted from https://github.com/voila-dashboards/voila/issues/578#issuecomment-617668058\n\n Args:\n htmlWidget (object): The HTML widget to display the URL.\n filename (str): File path to download. \n title (str, optional): Download description. 
Defaults to \"Click here to download: \".\n \"\"\"\n\n from IPython.display import HTML\n import ipywidgets as widgets\n import base64\n\n # Change widget html temperarily to a font-awesome spinner\n htmlWidget.value = \"<i class=\\\"fa fa-spinner fa-spin fa-2x fa-fw\\\"></i><span class=\\\"sr-only\\\">Loading...</span>\"\n\n # Process raw data\n data = open(filename, \"rb\").read()\n b64 = base64.b64encode(data)\n payload = b64.decode()\n\n basename = os.path.basename(filename)\n\n # Create and assign html to widget\n html = '<a download=\"{filename}\" href=\"data:text/csv;base64,{payload}\" target=\"_blank\">{title}</a>'\n htmlWidget.value = html.format(\n payload=payload, title=title+basename, filename=basename)\n\n # htmlWidget = widgets.HTML(value = '')\n # htmlWidget\n\n\ndef load_GeoTIFF(URL):\n \"\"\"Loads a Cloud Optimized GeoTIFF (COG) as an Image. Only Google Cloud Storage is supported. The URL can be one of the following formats:\n Option 1: gs://pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif\n Option 2: https://storage.googleapis.com/pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif\n Option 3: https://storage.cloud.google.com/gcp-public-data-landsat/LC08/01/044/034/LC08_L1TP_044034_20131228_20170307_01_T1/LC08_L1TP_044034_20131228_20170307_01_T1_B5.TIF\n\n Args:\n URL (str): The Cloud Storage URL of the GeoTIFF to load.\n\n Returns:\n ee.Image: an Earth Engine image.\n \"\"\"\n\n uri = URL.strip()\n\n if uri.startswith('https://storage.googleapis.com/'):\n uri = uri.replace('https://storage.googleapis.com/', 'gs://')\n elif uri.startswith('https://storage.cloud.google.com/'):\n uri = uri.replace('https://storage.cloud.google.com/', 'gs://')\n\n if not uri.startswith('gs://'):\n raise Exception(\n 'Invalid GCS URL: {}. Expected something of the form \"gs://bucket/path/to/object.tif\".'.format(uri))\n\n if not uri.lower().endswith('.tif'):\n raise Exception(\n 'Invalid GCS URL: {}. Expected something of the form \"gs://bucket/path/to/object.tif\".'.format(uri))\n\n cloud_image = ee.Image.loadGeoTIFF(uri)\n return cloud_image\n\n\ndef load_GeoTIFFs(URLs):\n \"\"\"Loads a list of Cloud Optimized GeoTIFFs (COG) as an ImageCollection. URLs is a list of URL, which can be one of the following formats:\n Option 1: gs://pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif\n Option 2: https://storage.googleapis.com/pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif\n Option 3: https://storage.cloud.google.com/gcp-public-data-landsat/LC08/01/044/034/LC08_L1TP_044034_20131228_20170307_01_T1/LC08_L1TP_044034_20131228_20170307_01_T1_B5.TIF\n\n Args:\n URLs (list): A list of Cloud Storage URL of the GeoTIFF to load.\n\n Returns:\n ee.ImageCollection: An Earth Engine ImageCollection.\n \"\"\"\n\n if not isinstance(URLs, list):\n raise Exception('The URLs argument must be a list.')\n\n URIs = []\n for URL in URLs:\n uri = URL.strip()\n\n if uri.startswith('https://storage.googleapis.com/'):\n uri = uri.replace('https://storage.googleapis.com/', 'gs://')\n elif uri.startswith('https://storage.cloud.google.com/'):\n uri = uri.replace('https://storage.cloud.google.com/', 'gs://')\n\n if not uri.startswith('gs://'):\n raise Exception(\n 'Invalid GCS URL: {}. Expected something of the form \"gs://bucket/path/to/object.tif\".'.format(uri))\n\n if not uri.lower().endswith('.tif'):\n raise Exception(\n 'Invalid GCS URL: {}. 
Expected something of the form \"gs://bucket/path/to/object.tif\".'.format(uri))\n\n        URIs.append(uri)\n\n    URIs = ee.List(URIs)\n    collection = URIs.map(lambda uri: ee.Image.loadGeoTIFF(uri))\n    return ee.ImageCollection(collection)\n\n\ndef landsat_ts_norm_diff(collection, bands=['Green', 'SWIR1'], threshold=0):\n    \"\"\"Computes a normalized difference index based on a Landsat timeseries.\n\n    Args:\n        collection (ee.ImageCollection): A Landsat timeseries.\n        bands (list, optional): The bands to use for computing normalized difference. Defaults to ['Green', 'SWIR1'].\n        threshold (float, optional): The threshold to extract features. Defaults to 0.\n\n    Returns:\n        ee.ImageCollection: An ImageCollection containing images with values greater than the specified threshold. \n    \"\"\"\n    nd_images = collection.map(lambda img: img.normalizedDifference(\n        bands).gt(threshold).copyProperties(img, img.propertyNames()))\n    return nd_images\n\n\ndef landsat_ts_norm_diff_gif(collection, out_gif=None, vis_params=None, palette=['black', 'blue'], dimensions=768, frames_per_second=10):\n    \"\"\"Creates an animated GIF from a normalized-difference Landsat timeseries.\n\n    Args:\n        collection (ee.ImageCollection): The normalized difference Landsat timeseries.\n        out_gif (str, optional): File path to the output animated GIF. Defaults to None.\n        vis_params (dict, optional): Visualization parameters. Defaults to None.\n        palette (list, optional): The palette to use for visualizing the timelapse. Defaults to ['black', 'blue']. The first color in the list is the background color.\n        dimensions (int, optional): A number or a pair of numbers (in the format WIDTHxHEIGHT) giving the maximum dimensions of the thumbnail to render, in pixels. If only one number is passed, it is used as the maximum, and the other dimension is computed by proportional scaling. Defaults to 768.\n        frames_per_second (int, optional): Animation speed. Defaults to 10.\n\n    Returns:\n        str: File path to the output animated GIF.\n    \"\"\"\n    coordinates = ee.Image(collection.first()).get('coordinates')\n    roi = ee.Geometry.Polygon(coordinates, None, False)\n\n    if out_gif is None:\n        out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')\n        filename = 'landsat_ts_nd_' + random_string() + '.gif'\n        out_gif = os.path.join(out_dir, filename)\n    elif not out_gif.endswith('.gif'):\n        raise Exception('The output file must end with .gif')\n\n    bands = ['nd']\n    if vis_params is None:\n        vis_params = {}\n        vis_params['bands'] = bands\n        vis_params['palette'] = palette\n\n    video_args = vis_params.copy()\n    video_args['dimensions'] = dimensions\n    video_args['region'] = roi\n    video_args['framesPerSecond'] = frames_per_second\n    video_args['crs'] = 'EPSG:3857'\n\n    if 'bands' not in video_args.keys():\n        video_args['bands'] = bands\n\n    download_ee_video(collection, video_args, out_gif)\n\n    return out_gif\n" ]
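The URL handling in load_GeoTIFF and load_GeoTIFFs above reduces to two string rewrites (the public HTTPS prefixes collapse to the canonical gs:// form) followed by validation. A minimal standalone sketch of that rule, using nothing beyond the Python standard library (the helper name to_gcs_uri is ours, not part of the source):

    def to_gcs_uri(url):
        # Collapse both public-HTTPS forms to the canonical gs:// form,
        # mirroring the rewrites in load_GeoTIFF above.
        uri = url.strip()
        for prefix in ('https://storage.googleapis.com/',
                       'https://storage.cloud.google.com/'):
            if uri.startswith(prefix):
                uri = 'gs://' + uri[len(prefix):]
        # Same validation as the source: a gs:// URI ending in .tif/.TIF.
        if not uri.startswith('gs://') or not uri.lower().endswith('.tif'):
            raise ValueError('Expected "gs://bucket/path/to/object.tif", got: ' + url)
        return uri

    print(to_gcs_uri('https://storage.googleapis.com/pdd-stac/disasters/'
                     'hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif'))
    # gs://pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif

Normalizing everything to gs:// before calling ee.Image.loadGeoTIFF matches the source's behavior, since that Earth Engine call is handed a Cloud Storage URI.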
[ [ "matplotlib.pyplot.cm.get_cmap", "matplotlib.pyplot.scatter", "numpy.dstack", "matplotlib.pyplot.plot", "matplotlib.pyplot.clear", "matplotlib.pyplot.bar", "numpy.random.uniform", "numpy.array", "matplotlib.pyplot.hist", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
toothache/onnxruntime
[ "217b2c9f931b5b0f704df0c8336def47025d2148", "217b2c9f931b5b0f704df0c8336def47025d2148", "217b2c9f931b5b0f704df0c8336def47025d2148" ]
[ "onnxruntime/core/providers/nuphar/scripts/model_quantizer.py", "orttraining/orttraining/test/python/orttraining_test_ortmodule_autograd.py", "orttraining/orttraining/python/training/ortmodule/_graph_execution_manager.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# -*- coding: UTF-8 -*-\nimport argparse\nfrom enum import Enum\nimport json\nimport numpy as np\nimport onnx\nfrom onnx import helper, numpy_helper\nfrom .node_factory import NodeFactory, ensure_opset\n\nclass QuantizeConfig:\n def __init__(self, signed, reserved_bits, type_bits):\n self.sign_bit_ = 1 if signed else 0\n self.reserved_bits_ = reserved_bits\n self.type_bits_ = type_bits\n\n @staticmethod\n def from_dict(qcfg_dict):\n return QuantizeConfig(1 if qcfg_dict['QuantizationType'] == 'Signed' else 0,\n qcfg_dict['ReservedBit'],\n qcfg_dict['QuantizeBit'])\n\n def signed(self):\n return self.sign_bit_ == 1\n\n def usable_bits(self):\n return self.type_bits_ - self.reserved_bits_\n\n def q_max(self):\n return float((1 << (self.usable_bits() - self.sign_bit_)) - 1)\n\n def q_min(self):\n return float(-(self.q_max() + 1) if self.signed() else 0)\n\n def q_range(self):\n return self.q_max() + 0.5 if self.signed() else float(1 << self.usable_bits())\n\n def q_type(self):\n if self.type_bits_ == 8:\n return np.int8 if self.sign_bit_ else np.uint8\n else:\n assert self.type_bits_ == 16\n return np.int16 if self.sign_bit_ else np.uint16\n\n def q_type_bits(self):\n return self.type_bits_\n\n def __iter__(self): # need this to make dict for json\n return iter([('QuantizeBit', self.type_bits_),\n ('QuantizationType', 'Signed' if self.sign_bit_ else 'Unsigned'),\n ('ReservedBit', self.reserved_bits_)])\n\ndef quantize_matmul_2d_with_weight(in_node, in_graph, nf, converted_weights, quantized_inputs, qcfg_dict, update_qcfg_dict, default_qcfg, onnx_opset_ver):\n assert in_node.op_type == 'MatMul'\n\n # quantize weight\n # only handles weight being inputs[1] of MatMul/Gemm node\n fparam_name = in_node.input[1]\n\n # skip if weights shared by other nodes that's not MatMul\n # TODO: support GEMM op if needed\n other_nodes = [n for n in in_graph.node if n != in_node and fparam_name in n.input and n.op_type != 'MatMul']\n if other_nodes:\n return False\n\n if in_node.output[0] in qcfg_dict:\n node_qcfg = qcfg_dict[in_node.output[0]]\n else:\n node_qcfg = None\n if not node_qcfg:\n if not update_qcfg_dict and qcfg_dict:\n # when qcfg_dict is readonly, raise warning if qcfg is not found for this node\n print(\"Warning: qcfg is not found for node with output: \" + in_node.output[0] + \", fall back to default qcfg.\")\n node_qcfg = default_qcfg\n\n w_qcfg = QuantizeConfig.from_dict(node_qcfg['W'])\n x_qcfg = QuantizeConfig.from_dict(node_qcfg['X'])\n symmetric = node_qcfg['Symmetric']\n\n # for symmetric quantization, both weight and input should be quantized to signed\n assert not symmetric or (w_qcfg.signed() and x_qcfg.signed())\n # quantize_type should match between weight and input\n assert w_qcfg.q_type_bits() == x_qcfg.q_type_bits()\n\n if fparam_name in converted_weights:\n step, base, qparam_rowsum, qparam, w_qcfg1, symmetric1 = converted_weights[fparam_name]\n # for shared weights, node should use the same kind of quantization\n assert dict(w_qcfg1) == dict(w_qcfg)\n assert symmetric1 == symmetric\n else:\n fparam = nf.get_initializer(fparam_name)\n if fparam is None or len(fparam.shape) != 2:\n return False\n\n q_range = w_qcfg.q_range()\n if symmetric:\n fscale = np.amax(np.abs(fparam), axis=0)\n step = fscale / q_range\n base = 0\n else:\n fmin = np.amin(fparam, axis=0)\n fmax = np.amax(fparam, axis=0)\n fscale = (fmax - fmin)/(2 if w_qcfg.signed() else 1) # signed would be normalized to [-1, 
1], and unsigned to [0, 1]\n step = fscale / q_range\n base = (fmax + fmin + step) * 0.5 if w_qcfg.signed() else fmin\n\n fparam_norm = np.zeros_like(fparam)\n expand_fscale = np.expand_dims(fscale,0)\n np.divide((fparam - np.expand_dims(base,0)), expand_fscale, out=fparam_norm, where=expand_fscale!=0)\n qparam = np.round(fparam_norm * q_range)\n qparam = np.clip(qparam, w_qcfg.q_min(), w_qcfg.q_max())\n qparam_rowsum = np.sum(qparam, axis=0)\n qparam = qparam.astype(w_qcfg.q_type())\n\n # create new weights in main graph in case other Scans share via converted_weights\n nf.make_initializer(step, fparam_name + '_step', in_main_graph=True)\n nf.make_initializer(qparam, fparam_name + '_qparam', in_main_graph=True)\n step = fparam_name + '_step'\n qparam = fparam_name + '_qparam'\n if symmetric:\n # no need to compute qparam_rowsum and base for symmetric quantization\n base = None\n qparam_rowsum = None\n else:\n nf.make_initializer(base, fparam_name + '_base', in_main_graph=True)\n base = fparam_name + '_base'\n nf.make_initializer(qparam_rowsum, fparam_name + '_qparam_rowsum', in_main_graph=True)\n qparam_rowsum = fparam_name + '_qparam_rowsum'\n converted_weights[fparam_name] = (step, base, qparam_rowsum, qparam, w_qcfg, symmetric)\n nf.remove_initializer(fparam_name)\n\n # quantize input\n with nf.scoped_prefix(in_node.name) as scoped_prefix:\n input_dim = nf.get_initializer(qparam).shape[0]\n X = in_node.input[0]\n if quantized_inputs is not None:\n quantized_inputs_key = '{}_{}_{}'.format(X, symmetric, '|'.join(['{}:{}'.format(k,v) for (k, v) in x_qcfg]))\n if quantized_inputs is not None and quantized_inputs_key in quantized_inputs:\n scale_X, bias_X, Q_X, Q_X_sum_int32 = quantized_inputs[quantized_inputs_key]\n else:\n if symmetric:\n delta_X = nf.make_node('ReduceMax', nf.make_node('Abs', X), {'axes':[-1]}) # keepdims = 1\n inv_delta_X = nf.make_node('Reciprocal', delta_X)\n norm_X = nf.make_node('Mul', [X, inv_delta_X])\n bias_X = None\n assert x_qcfg.signed()\n else:\n reduce_max_X = nf.make_node('ReduceMax', X, {'axes':[-1]}) # keepdims = 1\n bias_X = nf.make_node('ReduceMin', X, {'axes':[-1]})\n delta_X = nf.make_node('Sub', [reduce_max_X, bias_X])\n inv_delta_X = nf.make_node('Reciprocal', delta_X)\n norm_X = nf.make_node('Mul', [nf.make_node('Sub', [X, bias_X]), inv_delta_X])\n\n scale_X = nf.make_node('Mul', [delta_X, np.asarray(1.0 / x_qcfg.q_range()).astype(np.float32)])\n Q_Xf = nf.make_node('Mul', [norm_X, np.asarray(x_qcfg.q_range()).astype(np.float32)])\n Q_Xf = nf.make_node('Add', [Q_Xf, np.asarray(0.5).astype(np.float32)])\n Q_Xf = nf.make_node('Floor', Q_Xf)\n if onnx_opset_ver < 11:\n Q_Xf = nf.make_node('Clip', Q_Xf, {'max':x_qcfg.q_max(), 'min':x_qcfg.q_min()})\n else:\n # Clip changed min max to inputs in opset 11\n Q_Xf = nf.make_node('Clip', [Q_Xf, np.asarray(x_qcfg.q_min()).astype(np.float32), np.asarray(x_qcfg.q_max()).astype(np.float32)])\n Q_X = nf.make_node('Cast', Q_Xf, {'to':int({np.uint8 : onnx.TensorProto.UINT8,\n np.int8 : onnx.TensorProto.INT8,\n np.uint16 : onnx.TensorProto.UINT16,\n np.int16 : onnx.TensorProto.INT16}[x_qcfg.q_type()])})\n\n if symmetric:\n Q_X_sum_int32 = None\n else:\n Q_X_sum_int32 = nf.make_node_with_axes('ReduceSum', nf.make_node('Cast', Q_X, {'to':int(onnx.TensorProto.INT32)}), [-1], onnx_opset_ver)\n\n if quantized_inputs is not None:\n quantized_inputs[quantized_inputs_key] = (scale_X, bias_X, Q_X, Q_X_sum_int32)\n\n # MatMulInteger\n if x_qcfg.q_type_bits() == 8:\n Q_Y = nf.make_node('MatMulInteger', [Q_X, qparam])\n else:\n 
Q_Y = nf.make_node('MatMulInteger16', [Q_X, qparam])\n Q_Y.domain = \"com.microsoft\"\n\n # Dequantize\n Y = in_node.output[0]\n if symmetric:\n nf.make_node('Mul',\n [nf.make_node('Mul', [step, scale_X]),\n nf.make_node('Cast', Q_Y, {'to': int(onnx.TensorProto.FLOAT)})],\n output_names=Y)\n else:\n o0 = nf.make_node('Mul', [nf.make_node('Mul', [step, scale_X]),\n nf.make_node('Cast', Q_Y, {'to': int(onnx.TensorProto.FLOAT)})])\n o1 = nf.make_node('Mul', [nf.make_node('Mul', [step, bias_X]), qparam_rowsum])\n o2 = nf.make_node('Mul', [base, nf.make_node('Mul', [scale_X, nf.make_node('Cast', Q_X_sum_int32, {'to':int(onnx.TensorProto.FLOAT)})])])\n o3 = nf.make_node('Mul', [base, nf.make_node('Mul', [bias_X, np.asarray(float(input_dim)).astype(np.float32)])])\n nf.make_node('Sum', [o3, o2, o1, o0], output_names=Y)\n\n if update_qcfg_dict:\n qcfg_dict[in_node.output[0]] = node_qcfg\n\n return True\n\ndef upgrade_op(nf, in_n):\n if in_n.op_type == 'Slice' and len(in_n.input) == 1:\n # convert opset9 Slice to opset10\n with nf.scoped_prefix(in_n.name) as scoped_prefix:\n slice_inputs = [in_n.input[0],\n np.asarray(NodeFactory.get_attribute(in_n,'starts')).astype(np.int64),\n np.asarray(NodeFactory.get_attribute(in_n,'ends')).astype(np.int64),\n np.asarray(NodeFactory.get_attribute(in_n,'axes')).astype(np.int64)]\n nf.make_node('Slice', slice_inputs, output_names=list(in_n.output))\n return True\n elif in_n.op_type == 'TopK' and len(in_n.input) == 1:\n # convert opset1 TopK to opset10\n with nf.scoped_prefix(in_n.name) as scoped_prefix:\n topk_inputs = [in_n.input[0],\n np.asarray([NodeFactory.get_attribute(in_n,'k')]).astype(np.int64)]\n nf.make_node('TopK', topk_inputs, {'axis':NodeFactory.get_attribute(in_n,'axis',-1)}, output_names=list(in_n.output))\n return True\n else:\n return False\n\n# quantize matmul to MatMulInteger using asymm uint8\ndef convert_matmul_model(input_model, output_model, only_for_scan=False, share_input_quantization=False, preset_str='asymm8_param0_input1', qcfg_json=None, export_qcfg_json=None):\n preset_qcfgs = {'asymm8_param0_input1' : {'W' : dict(QuantizeConfig(signed=1, reserved_bits=0, type_bits=8)),\n 'X' : dict(QuantizeConfig(signed=0, reserved_bits=1, type_bits=8)),\n 'Symmetric' : 0},\n 'symm16_param3_input3' : {'W' : dict(QuantizeConfig(signed=1, reserved_bits=3, type_bits=16)),\n 'X' : dict(QuantizeConfig(signed=1, reserved_bits=3, type_bits=16)),\n 'Symmetric' : 1}}\n default_qcfg = preset_qcfgs[preset_str]\n in_mp = onnx.load(input_model)\n\n qcfg_dict = {}\n if qcfg_json and not export_qcfg_json:\n with open(qcfg_json, 'r') as f:\n qcfg_dict = json.load(f)\n\n out_mp = onnx.ModelProto()\n out_mp.CopyFrom(in_mp)\n out_mp.ir_version = 5 # update ir version to avoid requirement of initializer in graph input\n onnx_opset_ver = ensure_opset(out_mp, 10) # bump up to ONNX opset 10, which is required for MatMulInteger\n ensure_opset(out_mp, 1, 'com.microsoft') # add MS domain for MatMulInteger16\n out_mp.graph.ClearField('node')\n nf = NodeFactory(out_mp.graph)\n converted_weights = {} # remember MatMul weights that have been converted, in case of sharing\n quantized_inputs = {} if share_input_quantization else None # remember quantized inputs that might be able to share between MatMuls\n for in_n in in_mp.graph.node:\n if upgrade_op(nf, in_n):\n continue\n\n if in_n.op_type == 'MatMul' and not only_for_scan:\n if quantize_matmul_2d_with_weight(in_n, in_mp.graph, nf, converted_weights, quantized_inputs, qcfg_dict, export_qcfg_json, default_qcfg, 
onnx_opset_ver):\n continue\n\n out_n = out_mp.graph.node.add()\n out_n.CopyFrom(in_n)\n if in_n.op_type == 'Scan' or in_n.op_type == 'Loop':\n in_subgraph = NodeFactory.get_attribute(in_n, 'body')\n out_subgraph = NodeFactory.get_attribute(out_n, 'body')\n out_subgraph.ClearField('node')\n scan_nf = NodeFactory(out_mp.graph, out_subgraph)\n subgraph_quantized_inputs = {} if share_input_quantization else None # remember quantized inputs that might be able to share between MatMuls\n for in_sn in in_subgraph.node:\n if in_sn.op_type == 'MatMul':\n if quantize_matmul_2d_with_weight(in_sn, in_subgraph, scan_nf, converted_weights, subgraph_quantized_inputs, qcfg_dict, export_qcfg_json, default_qcfg, onnx_opset_ver):\n continue\n\n if upgrade_op(scan_nf, in_sn):\n continue\n\n out_sn = out_subgraph.node.add()\n out_sn.CopyFrom(in_sn)\n\n onnx.save(out_mp, output_model)\n if export_qcfg_json:\n with open(qcfg_json, 'w') as f:\n f.write(json.dumps(qcfg_dict, indent=2))\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', required=True, help='The input model file')\n parser.add_argument('--output', required=True, help='The output model file')\n parser.add_argument('--default_qcfg', help='The preset of quantization of <asymm|symm><qbits>_param<reserve_bit>_input<reserve_bit>', choices=['asymm8_param0_input1', 'symm16_param3_input3'], default='asymm8_param0_input1')\n parser.add_argument('--qcfg_json', help='The quantization config json file for read or write.', default=None)\n parser.add_argument('--export_qcfg_json', help='If set, write default quantization config to qcfg_json file.', action='store_true', default=False)\n parser.add_argument('--only_for_scan', help='If set, apply quantization of MatMul only inside scan', action='store_true', default=False)\n parser.add_argument('--share_input_quantization', help='If set, allow input quantization to be shared if the same input is used in multiple MatMul', action='store_true', default=False)\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = parse_arguments()\n print('input model: ' + args.input)\n print('output model ' + args.output)\n print('Quantize MatMul to MatMulInteger...')\n assert not args.export_qcfg_json or args.qcfg_json, \"--qcfg_json must be specified when --export_qcfg_json is used\"\n convert_matmul_model(args.input, args.output, args.only_for_scan, args.share_input_quantization, args.default_qcfg, args.qcfg_json, args.export_qcfg_json)\n print('Done!')\n", "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\n# Import external libraries.\nimport onnxruntime\nimport pytest\nimport torch\nfrom torch.nn.parameter import Parameter\n\n# Import ORT modules.\nfrom _test_helpers import *\nfrom onnxruntime.training.ortmodule import ORTModule\n\ntorch.manual_seed(1)\nonnxruntime.set_seed(1)\n\n\ndef test_GeLU():\n @torch.jit.script\n def bias_gelu(bias, y):\n x = bias + y\n return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))\n\n @torch.jit.script\n def bias_gelu_backward(g, bias, y):\n x = bias + y\n tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))\n ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 +\n 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)\n return ff*g\n\n class GeLUFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, bias):\n ctx.save_for_backward(input, bias)\n return bias_gelu(bias, input)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, bias = ctx.saved_tensors\n tmp = bias_gelu_backward(grad_output, bias, input)\n return tmp, tmp\n\n class GeLUModel(torch.nn.Module):\n def __init__(self, output_size):\n super(GeLUModel, self).__init__()\n self.relu = GeLUFunction.apply\n self.bias = Parameter(torch.empty(\n output_size,\n device=torch.cuda.current_device(),\n dtype=torch.float))\n\n with torch.no_grad():\n self.bias.uniform_()\n\n def forward(self, model_input):\n out = self.relu(model_input, self.bias)\n return out\n\n output_size = 1024\n\n def model_builder():\n return GeLUModel(output_size)\n\n def input_generator():\n return torch.randn(output_size, dtype=torch.float)\n\n # generate a label that have same shape as forward output.\n label_input = torch.ones([output_size])\n\n run_training_test_and_compare(model_builder, input_generator, label_input)\n\n\ndef test_MegatronF():\n # MegatronGFunction is tested in distributed test files.\n class MegatronFFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input_):\n return input_\n\n @staticmethod\n def backward(ctx, grad_output):\n # Bypass the reduce as if we are using only 1 GPU.\n return grad_output\n\n class MegatronFModel(torch.nn.Module):\n def __init__(self, output_size):\n super(MegatronFModel, self).__init__()\n self.copy_ = MegatronFFunction.apply\n self.bias = Parameter(torch.empty(\n output_size,\n device=torch.cuda.current_device(),\n dtype=torch.float))\n\n with torch.no_grad():\n self.bias.uniform_()\n\n def forward(self, model_input):\n model_input = model_input + self.bias\n out = self.copy_(model_input)\n return out\n\n output_size = 1024\n\n def model_builder():\n return MegatronFModel(output_size)\n\n def input_generator():\n return torch.randn(output_size, dtype=torch.float)\n\n # generate a label that have same shape as forward output.\n label_input = torch.ones([output_size])\n\n run_training_test_and_compare(model_builder, input_generator, label_input)\n\n\ndef test_ScalarAndTuple():\n class ScalarAndTupleFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, alpha, beta, gamma):\n ctx.save_for_backward(input)\n ctx.alpha = alpha\n ctx.beta = beta\n ctx.gamma = gamma\n return alpha * beta[0] * beta[1] * gamma * input.clamp(min=0)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n alpha = ctx.alpha\n beta = ctx.beta\n gamma = ctx.gamma\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return alpha * beta[0] * beta[1] * gamma * grad_input, None, None, None\n\n class 
ScalarAndTupleModel(torch.nn.Module):\n def __init__(self, output_size):\n super(ScalarAndTupleModel, self).__init__()\n self.activation = ScalarAndTupleFunction.apply\n self.linear_a = torch.nn.Linear(output_size, output_size)\n self.linear_b = torch.nn.Linear(output_size, output_size)\n\n def forward(self, x):\n h = self.linear_a(x)\n h = self.activation(h, 5.0, (-1.0, 2.0), -1.0)\n h = self.linear_b(h)\n return h\n\n output_size = 2\n\n def model_builder():\n return ScalarAndTupleModel(output_size)\n\n def input_generator():\n return torch.randn(output_size, dtype=torch.float)\n\n # generate a label that have same shape as forward output.\n label_input = torch.ones([output_size])\n\n run_training_test_and_compare(model_builder, input_generator, label_input)\n\n\ndef test_ScalarAndTupleReordered():\n class ScalarAndTupleReorderedFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, alpha, beta, input, gamma):\n ctx.save_for_backward(input)\n ctx.alpha = alpha\n ctx.beta = beta\n ctx.gamma = gamma\n return alpha * beta[0] * beta[1] * gamma * input.clamp(min=0)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n alpha = ctx.alpha\n beta = ctx.beta\n gamma = ctx.gamma\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return None, None, alpha * beta[0] * beta[1] * gamma * grad_input, None\n\n class ScalarAndTupleReorderedModel(torch.nn.Module):\n def __init__(self, output_size):\n super(ScalarAndTupleReorderedModel, self).__init__()\n self.activation = ScalarAndTupleReorderedFunction.apply\n self.linear_a = torch.nn.Linear(output_size, output_size)\n self.linear_b = torch.nn.Linear(output_size, output_size)\n\n def forward(self, x):\n h = self.linear_a(x)\n h = self.activation(5.0, (-1.0, 2.0), h, -1.0)\n h = self.linear_b(h)\n return h\n\n output_size = 2\n\n def model_builder():\n return ScalarAndTupleReorderedModel(output_size)\n\n def input_generator():\n return torch.randn(output_size, dtype=torch.float)\n\n # generate a label that have same shape as forward output.\n label_input = torch.ones([output_size])\n\n run_training_test_and_compare(model_builder, input_generator, label_input)\n\n\[email protected](reason=\"This test is not correct. 
All tensors modified by in-place operations should be marked with mark_dirty(...).\")\ndef test_InplaceUpdateInputAsOutputNotRequireGrad():\n    class InplaceUpdateInputAsOutputNotRequireGradFunction(torch.autograd.Function):\n        @staticmethod\n        def forward(ctx, bias, inplace_update_input):\n            # without mark_dirty, the inner computation graph is extracted into\n            # another subgraph, which is a duplicated computation with the PythonOp.\n            # so for the weights that are used twice BUT SHOULD only be used once,\n            # the gradients are almost 2x PyTorch's grad; this is the reason we\n            # ignore the gradient compare here.\n            ctx.save_for_backward(inplace_update_input, bias)\n            return inplace_update_input.add_(3 * bias)\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            return grad_output, None\n\n    class InplaceUpdateInputAsOutputNotRequireGradModel(torch.nn.Module):\n        def __init__(self, output_size):\n            super(InplaceUpdateInputAsOutputNotRequireGradModel, self).__init__()\n            self.inplace_op = InplaceUpdateInputAsOutputNotRequireGradFunction.apply\n            self.bias = Parameter(torch.empty(\n                output_size,\n                device=torch.cuda.current_device(),\n                dtype=torch.float))\n\n            with torch.no_grad():\n                self.bias.uniform_()\n\n        def forward(self, model_input):\n            x = model_input.mul(2)\n            y1 = self.inplace_op(self.bias, x)  # x does not require grad\n            y2 = x.add(self.bias)\n            out = y1 + y2\n            return out\n\n    output_size = 1024\n\n    def model_builder():\n        return InplaceUpdateInputAsOutputNotRequireGradModel(output_size)\n\n    def input_generator():\n        return torch.randn(output_size, dtype=torch.float)\n\n    # generate a label that has the same shape as the forward output.\n    label_input = torch.ones([output_size])\n\n    # Test when input is in-place updated, but does not require gradient.\n    run_training_test_and_compare(\n        model_builder, input_generator, label_input, ignore_grad_compare=True)\n\n\[email protected](reason=\"This test is not correct. 
All tensors modified by in-place operations should be marked with mark_dirty(...).\")\ndef test_InplaceUpdateInputNotAsOutputNotRequireGrad():\n    class InplaceUpdateInputNotAsOutputNotRequireGradFunction(torch.autograd.Function):\n        @staticmethod\n        def forward(ctx, bias, inplace_update_input):\n            ctx.save_for_backward(inplace_update_input, bias)\n            inplace_update_input.add_(3 * bias)\n            return inplace_update_input * 5\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            return grad_output, None\n\n    class InplaceUpdateInputNotAsOutputNotRequireGradModel(torch.nn.Module):\n        def __init__(self, output_size):\n            super(InplaceUpdateInputNotAsOutputNotRequireGradModel, self).__init__()\n            self.inplace_op = InplaceUpdateInputNotAsOutputNotRequireGradFunction.apply\n            self.bias = Parameter(torch.empty(\n                output_size,\n                device=torch.cuda.current_device(),\n                dtype=torch.float))\n\n            with torch.no_grad():\n                self.bias.uniform_()\n\n        def forward(self, model_input):\n            x = model_input.mul(2)\n            y1 = self.inplace_op(self.bias, x)\n            y2 = x.add(self.bias)\n            out = y1 + y2\n            return out\n\n    output_size = 1024\n\n    def model_builder():\n        return InplaceUpdateInputNotAsOutputNotRequireGradModel(output_size)\n\n    def input_generator():\n        return torch.randn(output_size, dtype=torch.float)\n\n    # generate a label that has the same shape as the forward output.\n    label_input = torch.ones([output_size])\n\n    # Without mark_dirty, the inner computation graph is extracted into another subgraph, which is a duplicated computation with the PythonOp.\n    # So for the weights that are used twice BUT SHOULD only be used once, the gradients are almost 2x PyTorch's grad; this is the reason we\n    # ignore the gradient compare here.\n    run_training_test_and_compare(\n        model_builder, input_generator, label_input, ignore_grad_compare=True)\n\n\ndef test_InplaceUpdateInputAsOutputNotRequireGradWithMarkDirty():\n    class InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyFunction(torch.autograd.Function):\n        @staticmethod\n        def forward(ctx, bias, inplace_update_input):\n            ctx.save_for_backward(inplace_update_input, bias)\n            ctx.mark_dirty(inplace_update_input)\n            # Note: if we make the input dirty, we must also put the input in the outputs; otherwise, we will get such an error:\n            # \"RuntimeError: Some elements marked as dirty during the forward method were not returned as output.\n            # The inputs that are modified inplace must all be outputs of the Function.\"\n            return inplace_update_input.add_(3 * bias)\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            return grad_output, None\n\n    class InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyModel(torch.nn.Module):\n        def __init__(self, output_size):\n            super(InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyModel,\n                  self).__init__()\n            self.inplace_op = InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyFunction.apply\n            self.bias = Parameter(torch.empty(\n                output_size,\n                device=torch.cuda.current_device(),\n                dtype=torch.float))\n\n            with torch.no_grad():\n                self.bias.uniform_()\n\n        def forward(self, model_input):\n            x = model_input.mul(2)\n            y1 = self.inplace_op(self.bias, x)\n            y2 = x.add(self.bias)\n            out = y1 + y2\n            return out\n\n    output_size = 1024\n\n    def model_builder():\n        return InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyModel(output_size)\n\n    def input_generator():\n        return torch.randn(output_size, dtype=torch.float)\n\n    # generate a label that has the same shape as the forward output.\n    label_input = torch.ones([output_size])\n\n    
run_training_test_and_compare(model_builder, input_generator, label_input)\n\n\ndef test_InplaceUpdateInputAsOutputRequireGrad():\n    class InplaceUpdateInputAsOutputRequireGradFunction(torch.autograd.Function):\n        @staticmethod\n        def forward(ctx, bias, inplace_update_input):\n            ctx.save_for_backward(inplace_update_input, bias)\n            # Note: if we make the input dirty, we must also put the input in the outputs; otherwise, we will get such an error:\n            # \"RuntimeError: Some elements marked as dirty during the forward method were not returned as output. The inputs that are modified inplace must all be outputs of the Function.\"\n            return inplace_update_input.add_(3 * bias)\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            return grad_output, grad_output\n\n    class InplaceUpdateInputAsOutputRequireGradModel(torch.nn.Module):\n        def __init__(self, output_size):\n            super(InplaceUpdateInputAsOutputRequireGradModel, self).__init__()\n            self.inplace_op = InplaceUpdateInputAsOutputRequireGradFunction.apply\n            self.bias = Parameter(torch.empty(\n                output_size,\n                device=torch.cuda.current_device(),\n                dtype=torch.float))\n\n            with torch.no_grad():\n                self.bias.uniform_()\n\n        def forward(self, model_input):\n            x = model_input + self.bias\n            y1 = self.inplace_op(self.bias, x)\n            y2 = x.add(self.bias)\n            out = y1 + y2\n            return out\n\n    output_size = 1024\n\n    def model_builder():\n        return InplaceUpdateInputAsOutputRequireGradModel(output_size)\n\n    def input_generator():\n        return torch.randn(output_size, dtype=torch.float)\n\n    # generate a label that has the same shape as the forward output.\n    label_input = torch.ones([output_size])\n\n    # Test when input is in-place updated, but does require gradient.\n    #\n    # without mark_dirty, the inner computation graph is extracted into another subgraph, which is a\n    # duplicated computation with the PythonOp. 
Thus, for the weights that are used twice BUT SHOULD\n    # only be used once, the gradients are almost 2x PyTorch's grad; this is the reason we\n    # ignore the gradient compare here.\n    run_training_test_and_compare(\n        model_builder, input_generator, label_input, ignore_grad_compare=True)\n\n\ndef test_InplaceUpdateInputNotAsOutputRequireGrad():\n    class InplaceUpdateInputNotAsOutputRequireGradFunction(torch.autograd.Function):\n        # without mark_dirty, the inner computation graph is extracted into another subgraph, which is a duplicated computation with the PythonOp.\n        # so for the weights that are used twice BUT SHOULD only be used once, the gradients are almost 2x PyTorch's grad; this is the reason we\n        # ignore the gradient compare here.\n        @staticmethod\n        def forward(ctx, bias, inplace_update_input):\n            ctx.save_for_backward(inplace_update_input, bias)\n            inplace_update_input.add_(3 * bias)\n            return inplace_update_input * 5\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            return grad_output, grad_output\n\n    class InplaceUpdateInputNotAsOutputRequireGradModel(torch.nn.Module):\n        def __init__(self, output_size):\n            super(InplaceUpdateInputNotAsOutputRequireGradModel, self).__init__()\n            self.inplace_op = InplaceUpdateInputNotAsOutputRequireGradFunction.apply\n            self.bias = Parameter(torch.empty(\n                output_size,\n                device=torch.cuda.current_device(),\n                dtype=torch.float))\n\n            with torch.no_grad():\n                self.bias.uniform_()\n\n        def forward(self, model_input):\n            x = model_input + self.bias\n            y1 = self.inplace_op(self.bias, x)\n            y2 = x.add(self.bias)\n            out = y1 + y2\n            return out\n\n    output_size = 1024\n\n    def model_builder():\n        return InplaceUpdateInputNotAsOutputRequireGradModel(output_size)\n\n    def input_generator():\n        return torch.randn(output_size, dtype=torch.float)\n\n    # generate a label that has the same shape as the forward output.\n    label_input = torch.ones([output_size])\n\n    # This case is known to have a warning message: \"The output torch tensor @140214094625024, 140212816617984\n    # should reuse the input torch tensor @140214095996104, 140212816617984 but actually not.\" It seems\n    # if we don't have mark_dirty() in the autograd forward, the result is not using the input_,\n    # (maybe a view of it, because the data address is the same)\n    run_training_test_and_compare(\n        model_builder, input_generator, label_input, ignore_grad_compare=True)\n\n##########################################################################################\n\n\ndef test_InplaceUpdateInputAsOutputRequireGradWithMarkDirty():\n    class InplaceUpdateInputAsOutputRequireGradWithMarkDirtyFunction(torch.autograd.Function):\n        @staticmethod\n        def forward(ctx, bias, inplace_update_input):\n            ctx.save_for_backward(inplace_update_input, bias)\n            ctx.mark_dirty(inplace_update_input)\n            # Note: if we make the input dirty, we must also put the input in the outputs;\n            # otherwise, we will get such an error:\n            # \"RuntimeError: Some elements marked as dirty during the forward method were not returned as output.\n            # The inputs that are modified inplace must all be outputs of the Function.\"\n            return inplace_update_input.add_(3 * bias)\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            return grad_output, grad_output\n\n    class InplaceUpdateInputAsOutputRequireGradWithMarkDirtyModel(torch.nn.Module):\n        def __init__(self, output_size):\n            super(InplaceUpdateInputAsOutputRequireGradWithMarkDirtyModel,\n                  self).__init__()\n            self.inplace_op = InplaceUpdateInputAsOutputRequireGradWithMarkDirtyFunction.apply\n            self.bias = Parameter(torch.empty(\n                
output_size,\n                device=torch.cuda.current_device(),\n                dtype=torch.float))\n\n            with torch.no_grad():\n                self.bias.uniform_()\n\n        def forward(self, model_input):\n            x = model_input + self.bias\n            y1 = self.inplace_op(self.bias, x)\n            y2 = x.add(self.bias)\n            out = y1 + y2\n            return out\n\n    output_size = 1024\n\n    def model_builder():\n        return InplaceUpdateInputAsOutputRequireGradWithMarkDirtyModel(output_size)\n\n    def input_generator():\n        return torch.randn(output_size, dtype=torch.float)\n\n    # generate a label that has the same shape as the forward output.\n    label_input = torch.ones([output_size])\n\n    run_training_test_and_compare(model_builder, input_generator, label_input)\n\n\ndef test_EvalTest():\n    class EvalTestFunction(torch.autograd.Function):\n        @staticmethod\n        def forward(ctx, x):\n            ctx.save_for_backward(x)\n            return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            x = ctx.saved_tensors\n            return None\n\n    class EvalTestModel(torch.nn.Module):\n        def __init__(self, output_size):\n            super(EvalTestModel, self).__init__()\n            self.custom_fn = EvalTestFunction.apply\n            self.bias = Parameter(torch.empty(\n                output_size,\n                device=torch.cuda.current_device(),\n                dtype=torch.float))\n\n            with torch.no_grad():\n                self.bias.uniform_()\n\n        def forward(self, model_input):\n            # model_input does not require grad\n            out = self.custom_fn(model_input)\n            return out + self.bias\n\n    output_size = 1024\n\n    def model_builder():\n        return EvalTestModel(output_size)\n\n    def input_generator():\n        return torch.randn(output_size, dtype=torch.float)\n\n    # generate a label that has the same shape as the forward output.\n    label_input = torch.ones([output_size])\n\n    # Test pure inferencing scenarios, when inputs don't require grad.\n    run_evaluate_test_and_compare(model_builder, input_generator, label_input)\n\n\ndef test_TwoOutputFunction():\n    class TwoOutputFunction(torch.autograd.Function):\n        @staticmethod\n        def forward(ctx, x, y):\n            ctx.save_for_backward(x, y)\n            w = x + y\n            z = x * y\n            return w, z\n\n        @staticmethod\n        def backward(ctx, dw, dz):\n            x, y = ctx.saved_tensors\n            # Based on the chain rule, we can derive the Jacobian\n            # of this function.\n            #   dL/dx = dL/dw * dw/dx + dL/dz * dz/dx\n            # where\n            #   dw/dx = 1\n            #   dz/dx = y\n            # Thus, dL/dx can be computed using the\n            # following line. 
Note that dL is omitted \n            # for convenience.\n            dx = dw * 1.0 + dz * y\n            # Similarly, we derive and then implement\n            # the Jacobian for dy using the chain rule\n            #   dL/dy = dL/dw * dw/dy + dL/dz * dz/dy\n            # where\n            #   dw/dy = 1\n            #   dz/dy = x\n            dy = dw * 1.0 + dz * x\n            return dx, dy\n\n    class TwoOutputModel(torch.nn.Module):\n        def __init__(self, output_size):\n            super(TwoOutputModel, self).__init__()\n            self.fun = TwoOutputFunction.apply\n            self.bias = Parameter(torch.empty(\n                output_size,\n                device=torch.cuda.current_device(),\n                dtype=torch.float))\n\n            with torch.no_grad():\n                self.bias.uniform_()\n\n        def forward(self, x):\n            a, b = self.fun(x, self.bias)\n            return a + b\n\n    output_size = 2\n\n    def model_builder():\n        return TwoOutputModel(output_size)\n\n    def input_generator():\n        return torch.randn(output_size, dtype=torch.float)\n\n    # generate a label that has the same shape as the forward output.\n    label_input = torch.ones([output_size])\n\n    # Test multi-input and multi-output custom function.\n    run_training_test_and_compare(model_builder, input_generator, label_input)\n\n\ndef test_InnerModuleCall():\n    class InnerModel(torch.nn.Module):\n        def __init__(self, dim, device):\n            super(InnerModel, self).__init__()\n            self.bias = Parameter(torch.FloatTensor([1.0] * dim).to(device))\n\n        def forward(self, x):\n            z = 0.5 * x * x + self.bias\n            return z\n\n    class OuterFunction(torch.autograd.Function):\n        @staticmethod\n        def forward(ctx, x, dim, device, use_ort):\n            ctx.save_for_backward(x)\n            ctx.device = device\n            ctx.inner = InnerModel(dim, device).to(device)\n            if use_ort:\n                ctx.inner = ORTModule(ctx.inner)\n                enable_custom_autograd_function(ctx.inner)\n            z = ctx.inner(x)\n            return z\n\n        @staticmethod\n        def backward(ctx, dv):\n            x, = ctx.saved_tensors\n            y = x.detach().to(ctx.device)\n            y.requires_grad = True\n            g = None\n            with torch.enable_grad():\n                z = ctx.inner(y)\n            z.backward(dv)\n            g = y.grad.detach()\n            return g, None, None, None\n\n    class OuterModel(torch.nn.Module):\n        def __init__(self, dim, device, use_ort):\n            super(OuterModel, self).__init__()\n            self.fun = OuterFunction.apply\n            self.dim = dim\n            self.device = device\n            self.use_ort = use_ort\n            self.bias = Parameter(torch.FloatTensor([1.0] * dim).to(device))\n\n            with torch.no_grad():\n                self.bias.uniform_()\n\n        def forward(self, x):\n            z = self.fun(x + self.bias, self.dim, self.device, self.use_ort)\n            return z\n\n    def get_inner_module_call_result(x, device, use_ort):\n        torch.manual_seed(0)\n        x = x.to(device)\n        x.requires_grad = True\n        model = OuterModel(2, device, use_ort)\n        y = model(x).sum()\n        y.backward()\n        return y.detach(), x.grad.detach()\n\n    x = torch.FloatTensor([1.0, -1.0])\n\n    # Test indirect ORTModule call from custom function on CUDA\n    result_pth = get_inner_module_call_result(x.detach(), 'cuda:0', False)\n    result_ort = get_inner_module_call_result(x.detach(), 'cuda:0', True)\n    compare_tensor_list(result_ort, result_pth)\n\n    # Test indirect ORTModule call from custom function on CPU\n    result_ort = get_inner_module_call_result(x.detach(), 'cpu', True)\n    result_pth = get_inner_module_call_result(x.detach(), 'cpu', False)\n    compare_tensor_list(result_ort, result_pth)\n\n\ndef test_Share_Input():\n    class TwoOutputFunction(torch.autograd.Function):\n        @staticmethod\n        def forward(ctx, x, y):\n            ctx.save_for_backward(x, y)\n            w = x + y\n            z = x * y\n            return w, z\n\n        @staticmethod\n        def backward(ctx, dw, dz):\n            x, y = ctx.saved_tensors\n            dx = dw * 1.0 + dz * y\n            dy = dw * 1.0 + dz * x\n            return dx, dy\n\n    class TwoOutputModel(torch.nn.Module):\n        def 
__init__(self, output_size):\n super(TwoOutputModel, self).__init__()\n self.fun = TwoOutputFunction.apply\n self.bias = Parameter(torch.empty(\n output_size,\n device=torch.cuda.current_device(),\n dtype=torch.float))\n\n with torch.no_grad():\n self.bias.uniform_()\n\n def forward(self, x):\n a, b = self.fun(x, self.bias)\n c, d = self.fun(x, self.bias)\n return a + b + c + d\n\n output_size = 2\n\n def model_builder():\n return TwoOutputModel(output_size)\n\n def input_generator():\n return torch.randn(output_size, dtype=torch.float)\n\n def input_generator_with_requires_grad():\n return torch.randn(output_size, dtype=torch.float).requires_grad_()\n\n # generate a label that have same shape as forward output.\n label_input = torch.ones([output_size])\n\n # Test multi-input and multi-output custom function.\n run_training_test_and_compare(model_builder, input_generator, label_input)\n\n run_training_test_and_compare(model_builder, input_generator_with_requires_grad, label_input)\n\n\ndef test_GeLU_When_Autograd_Func_Fallback_Not_Enabled():\n @torch.jit.script\n def bias_gelu(bias, y):\n x = bias + y\n return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))\n\n @torch.jit.script\n def bias_gelu_backward(g, bias, y):\n x = bias + y\n tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))\n ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 +\n 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)\n return ff*g\n\n class GeLUFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, bias):\n ctx.save_for_backward(input, bias)\n return bias_gelu(bias, input)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, bias = ctx.saved_tensors\n tmp = bias_gelu_backward(grad_output, bias, input)\n return tmp, tmp\n\n class GeLUModel(torch.nn.Module):\n def __init__(self, output_size):\n super(GeLUModel, self).__init__()\n self.relu = GeLUFunction.apply\n self.bias = Parameter(torch.empty(\n output_size,\n device=torch.cuda.current_device(),\n dtype=torch.float))\n\n with torch.no_grad():\n self.bias.uniform_()\n\n def forward(self, model_input):\n out = self.relu(model_input, self.bias)\n return out\n\n output_size = 1024\n\n def model_builder():\n return GeLUModel(output_size)\n\n def input_generator():\n return torch.randn(output_size, dtype=torch.float)\n\n # generate a label that have same shape as forward output.\n label_input = torch.ones([output_size])\n\n m_ort = model_builder()\n x_ort = input_generator()\n\n try:\n device = torch.device(\"cpu\")\n m_ort.to(device)\n model = ORTModule(m_ort)\n model.train()\n\n inputs_on_device = [x_ort.to(device)]\n output = model(*inputs_on_device)\n except RuntimeError as e:\n assert \"Detected autograd functions usage in current model, the run will fail\" in str(e)\n\ndef test_MultipleStream_InForwardFunction():\n class MultipleStreamFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input):\n default_stream = torch.cuda.current_stream()\n ctx.save_for_backward(input)\n stream = torch.cuda.Stream()\n torch.cuda._sleep(1000 * 1000)\n input = input * 0.2\n # on different stream\n with torch.cuda.stream(stream):\n stream.wait_stream(default_stream)\n input= input * 2\n default_stream.wait_stream(stream)\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return grad_output\n\n class MultipleStreamModel(torch.nn.Module):\n def __init__(self, output_size):\n super(MultipleStreamModel, self).__init__()\n self.linear_a = 
torch.nn.Linear(output_size, output_size)\n self.relu = MultipleStreamFunction.apply\n\n def forward(self, model_input):\n model_input = model_input * 0.2\n out = self.relu(model_input)\n return out\n\n output_size = 2\n\n def model_builder():\n return MultipleStreamModel(output_size)\n\n def input_generator():\n return torch.tensor([2.8, 3.4], requires_grad=True) #torch.randn(output_size, dtype=torch.float)\n\n\n # generate a label that have same shape as forward output.\n label_input = torch.ones([output_size])\n\n # Test multi-input and multi-output custom function.\n cpu_output_list, cuda_output_list = run_training_test_and_compare(model_builder, input_generator, label_input)\n\n expected_ret_list = [torch.tensor([-0.7760, -0.7280])]\n\n compare_tensor_list(expected_ret_list, cuda_output_list)\n\n\ndef test_NonDefaultStream_InForwardFunction1():\n class MultipleStreamFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input):\n default_stream = torch.cuda.current_stream()\n stream = torch.cuda.Stream()\n # on different stream\n with torch.cuda.stream(stream):\n stream.wait_stream(default_stream)\n ctx.save_for_backward(input)\n input = input * 0.4\n\n default_stream.wait_stream(stream)\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return grad_output\n\n class MultipleStreamModel(torch.nn.Module):\n def __init__(self, output_size):\n super(MultipleStreamModel, self).__init__()\n self.linear_a = torch.nn.Linear(output_size, output_size)\n self.relu = MultipleStreamFunction.apply\n\n def forward(self, model_input):\n model_input = model_input * 0.2\n torch.cuda._sleep(1000 * 1000)\n out = self.relu(model_input)\n return out\n\n output_size = 2\n\n def model_builder():\n return MultipleStreamModel(output_size)\n\n def input_generator():\n return torch.tensor([2.8, 3.4], requires_grad=True) #torch.randn(output_size, dtype=torch.float)\n\n\n # generate a label that have same shape as forward output.\n label_input = torch.ones([output_size])\n\n # Test multi-input and multi-output custom function.\n cpu_output_list, cuda_output_list = run_training_test_and_compare(model_builder, input_generator, label_input)\n\n expected_ret_list = [torch.tensor([-0.7760, -0.7280])]\n\n compare_tensor_list(expected_ret_list, cuda_output_list)\n\n\ndef test_NonDefaultStream_InForwardFunction2():\n class MultipleStreamFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input):\n ctx.save_for_backward(input)\n torch.cuda._sleep(1000 * 1000)\n input = input * 0.4\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return grad_output\n\n class MultipleStreamModel(torch.nn.Module):\n def __init__(self, output_size):\n super(MultipleStreamModel, self).__init__()\n self.linear_a = torch.nn.Linear(output_size, output_size)\n self.relu = MultipleStreamFunction.apply\n\n def forward(self, model_input):\n model_input = model_input * 0.2\n stream = torch.cuda.Stream()\n default_stream = torch.cuda.current_stream()\n # on different stream\n with torch.cuda.stream(stream):\n stream.wait_stream(default_stream)\n out = self.relu(model_input)\n default_stream.wait_stream(stream)\n return out\n\n output_size = 2\n\n def model_builder():\n return MultipleStreamModel(output_size)\n\n def input_generator():\n return torch.tensor([2.8, 3.4], requires_grad=True) #torch.randn(output_size, dtype=torch.float)\n\n\n # generate a label that have same shape as forward output.\n label_input = 
torch.ones([output_size])\n\n # Test multi-input and multi-output custom function.\n cpu_output_list, cuda_output_list = run_training_test_and_compare(model_builder, input_generator, label_input)\n\n expected_ret_list = [torch.tensor([-0.7760, -0.7280])]\n\n compare_tensor_list(expected_ret_list, cuda_output_list)\n\ndef test_NonDefaultStreamInplaceUpdate_InForwardFunction():\n class MultipleStreamFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input):\n default_stream = torch.cuda.current_stream()\n stream = torch.cuda.Stream()\n # on different stream\n with torch.cuda.stream(stream):\n stream.wait_stream(default_stream)\n ctx.save_for_backward(input)\n input.mul_(0.4)\n\n ctx.mark_dirty(input)\n default_stream.wait_stream(stream)\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return grad_output\n\n class MultipleStreamModel(torch.nn.Module):\n def __init__(self, output_size):\n super(MultipleStreamModel, self).__init__()\n self.linear_a = torch.nn.Linear(output_size, output_size)\n self.relu = MultipleStreamFunction.apply\n\n def forward(self, model_input):\n model_input = model_input * 0.2\n torch.cuda._sleep(1000 * 1000)\n out = self.relu(model_input)\n return out\n\n output_size = 2\n\n def model_builder():\n return MultipleStreamModel(output_size)\n\n def input_generator():\n return torch.tensor([2.8, 3.4], requires_grad=True) #torch.randn(output_size, dtype=torch.float)\n\n\n # generate a label that have same shape as forward output.\n label_input = torch.ones([output_size])\n\n # Test multi-input and multi-output custom function.\n cpu_output_list, cuda_output_list = run_training_test_and_compare(model_builder, input_generator, label_input)\n\n expected_ret_list = [torch.tensor([-0.7760, -0.7280])]\n\n compare_tensor_list(expected_ret_list, cuda_output_list)\n", "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\nfrom .debug_options import DebugOptions, LogLevel\nfrom . 
import _utils, _io, _logger, torch_cpp_extensions as _cpp_ext, _onnx_models\nfrom ._custom_autograd_function_exporter import _post_process_after_export\nfrom ._graph_execution_interface import GraphExecutionInterface\nfrom onnxruntime.training.ortmodule import ONNX_OPSET_VERSION\n\nfrom onnxruntime.capi import _pybind_state as C\nfrom onnxruntime.tools.symbolic_shape_infer import SymbolicShapeInference\nfrom abc import ABC, abstractmethod\nimport copy\nimport io\nimport inspect\nimport onnx\nimport onnxruntime\nimport torch\nimport warnings\nfrom enum import IntFlag\n\nfrom torch.utils.cpp_extension import ROCM_HOME\n\n\nclass RunStateInfo(object):\n    def __init__(self, state, output_info):\n        \"\"\"\n        :param state: State of the partial run that contains intermediate tensors needed to resume the run later.\n        :param output_info: Output info.\n        \"\"\"\n        self.state = state\n        self.output_info = output_info\n\nclass _SkipCheck(IntFlag):\n    \"\"\"Enumeration to specify which checks should be skipped, allowing faster execution\"\"\"\n\n    SKIP_CHECK_DISABLED = 1\n    SKIP_CHECK_DEVICE = 2\n    SKIP_CHECK_BUILD_GRADIENT = 4\n    SKIP_CHECK_EXECUTION_AGENT = 8\n\n    def is_set(self, check):\n        \"\"\"Check whether `check` is set on the `_SkipCheck` instance\n\n        SKIP_CHECK_DISABLED implies the check will return False\n        \"\"\"\n\n        return not _SkipCheck.is_disabled(self) and check in self\n\n    def is_disabled(self):\n        \"\"\"Check whether `_SkipCheck.SKIP_CHECK_DISABLED` is set on the `_SkipCheck` instance\"\"\"\n\n        return _SkipCheck.SKIP_CHECK_DISABLED in self\n\nclass GraphExecutionManager(GraphExecutionInterface):\n    def __init__(self, module, debug_options: DebugOptions):\n        \"\"\"Manages building and execution of onnx graphs\n\n        This class is abstract and should not be instantiated directly.\n        Please use one of the concrete implementations of GraphExecutionManager.\n\n        Interacts with OrtModuleGraphBuilder to build and optimize\n        the onnx graph, and ExecutionAgent to run the onnx graph.\n        \"\"\"\n\n        super(GraphExecutionManager, self).__init__(module._original_module)\n\n        # Original and flattened (transformed) output module\n        self._flattened_module = module\n\n        # onnx models\n        self._onnx_models = _onnx_models.ONNXModels()\n\n        # Model after inference optimization or gradient building.\n        self._optimized_onnx_model = None\n        self._graph_builder = None\n        self._graph_info = None\n        self._graph_initializer_names = None\n        self._graph_initializer_names_to_train = None\n        self._graph_initializers = None\n\n        # TrainingAgent or InferenceAgent\n        self._execution_agent = None\n\n        # indicators that some logic has been executed previously and thus could be skipped for faster training\n        self._skip_check = _SkipCheck.SKIP_CHECK_DISABLED\n\n        # Debug flags\n        self._debug_options = debug_options\n\n        # Graph transformer config\n        # Specify cast propagation strategy. 
Currently three strategies are available: NONE, INSERT-AND-REDUCE and FLOOD-FILL.\n        # The default is NONE, which implies the transformer does no cast-propagation transformation.\n        self._propagate_cast_ops_strategy = C.PropagateCastOpsStrategy.NONE\n        # Optimize by moving Cast operations if propagate_cast_ops_level is non-negative.\n        # - If _propagate_cast_ops_level is set to zero, then the transformation considers only the opcodes specified by _propagate_cast_ops_allow\n        #   as \"FP16 safe\", in order to insert/(re)move cast operations before/after them to perform such operations in reduced (16-bit) precision.\n        # - If propagate_cast_ops_level is positive, 1 or 2, then in addition to the opcodes specified by propagate_cast_ops_allow, use onnxruntime's\n        #   predetermined list of opcodes considered safe to move before/after a cast operation.\n        # - Onnxruntime Level 1 predetermined \"FP16 safe\" opcodes include only opcodes that do not perform any computation, such as Transpose, Split, Reshape, etc.,\n        #   whereas Level 2 predetermined \"FP16 safe\" opcodes also include opcodes that perform computation using contrib ops: GeLU, Dropout, LayerNormalization, etc.\n        self._propagate_cast_ops_level = 1\n        # List of opcodes to be considered safe to move before/after a cast operation if propagate_cast_ops_level is zero.\n        self._propagate_cast_ops_allow = []\n        # Whether to allow fusion of the layer norm subgraph if doing so will cause modified precision.\n        self._allow_layer_norm_mod_precision = False\n\n        # Value can be either torch.onnx.TrainingMode.TRAINING or torch.onnx.TrainingMode.EVAL\n        # To be instantiated in the concrete implementation of GraphExecutionManager\n        self._export_mode = None\n\n        # Related to training graph shape inference\n        self._current_input_shape = None\n        # default execution order is priority-based for both dynamic/static shape input for now\n        # if we observe a benefit from static shape, we can expose this flag to the user\n        self._use_static_shape = False\n\n        # flag to enable symbolic shape inference for dynamic shape inputs to improve performance\n        self._run_symbolic_shape_infer = True\n\n        # A flag indicating whether custom autograd.Function should be allowed: 
True means yes, False otherwise.\n        self._enable_custom_autograd_function = False\n\n        self._input_info = None\n        self._module_output_schema = None\n\n        # TODO: Single device support for now\n        self._device = _utils.get_device_from_module(module)\n\n        self._module_parameters = inspect.signature(self._original_module.forward).parameters.values()\n\n        # TODO: remove after PyTorch ONNX exporter supports VAR_KEYWORD parameters.\n        for input_parameter in self._module_parameters:\n            if input_parameter.kind == inspect.Parameter.VAR_KEYWORD:\n                if self._debug_options.logging.log_level <= LogLevel.WARNING:\n                    warnings.warn(\"The model's forward method has **kwargs parameter which has EXPERIMENTAL support!\",\n                                  UserWarning)\n\n        self.is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)\n\n        self._use_external_gpu_allocator = True\n        # assign self._torch_alloc and self._torch_free if self._use_external_gpu_allocator is True\n        self._get_torch_gpu_allocator_function_addresses()\n\n        # WIP feature to enable caching in the gradient accumulation scenario.\n        self._enable_grad_acc_optimization = False\n\n    def _get_torch_gpu_allocator_function_addresses(self):\n        if self._use_external_gpu_allocator and torch.cuda.is_available():\n            # CPP extension to get torch GPU allocator's alloc and free function addresses\n            from onnxruntime.training.ortmodule.torch_cpp_extensions import torch_gpu_allocator\n            self._torch_alloc = torch_gpu_allocator.gpu_caching_allocator_raw_alloc_address()\n            self._torch_free = torch_gpu_allocator.gpu_caching_allocator_raw_delete_address()\n\n    def _validate_module_type(self, module):\n        \"\"\"Raises a TypeError if the module is not a torch.nn.Module\"\"\"\n\n        if not isinstance(module, torch.nn.Module):\n            raise TypeError(f\"ORTModule only supports torch.nn.Module as input. 
{type(module)} is not supported.\")\n\n    @staticmethod\n    def execution_session_run_forward(execution_session, onnx_model, device, *inputs):\n        \"\"\"Runs the forward pass on `execution_session` with given `onnx_model`, `device` and `inputs`\n\n        This is a helper that can be called by the actual `GraphExecutionManager.forward` method\n\n        Args:\n            execution_session (InferenceAgent or TrainingAgent): Agent which runs either inference or training\n            onnx_model (onnx.ModelProto): ONNX model\n            device (torch.device): PyTorch device\n            inputs (torch.Tensor or a container of torch.Tensor): User input\n\n        Returns:\n            Returns a tuple (user_outputs, run_info):\n            user_outputs: The model output (either torch.Tensor or a container of torch.Tensor)\n            run_info: A RunStateInfo which contains extra information about the execution of the graph\n        \"\"\"\n\n        raise NotImplementedError\n\n    @abstractmethod\n    def forward(self):\n        \"\"\"Executes the forward method for ORTModule\n\n        This is an abstract method and must be overridden by a concrete implementation.\n        This is the only method that the user should call on a concrete instance of the ExecutionManager.\n        All other methods are internal.\"\"\"\n        pass\n\n    def _build_graph(self):\n        if self._use_static_shape:\n            self._graph_builder.build(self._input_info.shape)\n        else:\n            self._graph_builder.build()\n\n        self._onnx_models.optimized_model = onnx.load_model_from_string(self._graph_builder.get_model())\n        self._graph_info = self._graph_builder.get_graph_info()\n\n    def _get_session_config(self):\n        \"\"\"Creates and returns the session configuration to be used for the ExecutionAgent\"\"\"\n        providers = None\n        provider_options = None\n        if self._device.type == 'cuda':\n            # Configure the InferenceSessions to use the specific GPU on which the model is placed.\n            providers = ([\"ROCMExecutionProvider\"] if self.is_rocm_pytorch else [\"CUDAExecutionProvider\"])\n            providers.append(\"CPUExecutionProvider\")\n            if self._use_external_gpu_allocator:\n                provider_options = [{\"device_id\": str(self._device.index),\n                                     \"gpu_external_alloc\": str(self._torch_alloc),\n                                     \"gpu_external_free\": str(self._torch_free)}, {}]\n            else:\n                provider_options = [{\"device_id\": str(self._device.index)}, {}]\n        elif self._device.type == 'cpu':\n            providers = [\"CPUExecutionProvider\"]\n            provider_options = [{}]\n\n        session_options = onnxruntime.SessionOptions()\n        session_options.enable_mem_pattern = False\n        session_options.enable_mem_reuse = False\n        session_options.use_deterministic_compute = False\n        # default to PRIORITY_BASED execution order\n        session_options.execution_order = onnxruntime.ExecutionOrder.PRIORITY_BASED\n        # 0:Verbose, 1:Info, 2:Warning, 3:Error, 4:Fatal. Default is 2.\n        session_options.log_severity_level = int(self._debug_options.logging.log_level)\n\n        return session_options, providers, provider_options\n\n    def _export_model(self, *inputs, **kwargs):\n        # 1. Set the self._device from the user module\n        # 2. Verify input schema matches the schema used on the previous model export\n        # 3. Export the user model under self._export_training_flag mode\n        # Return True if the model needed to be exported, False if no export was required.\n\n        # Note: Model is only exported when:\n        #       1. Model has never been exported before.\n        #       2. Model input schema has changed (changes in inputs requiring gradient, shapes, boolean input values, etc.)\n        # Model is not re-exported when the model parameters change. 
This can happen when the model is a stateful model,\n # or the user explicitly changed model parameters after the onnx export.\n\n schema = _io._extract_schema({'args': copy.copy(inputs), 'kwargs': copy.copy(kwargs)})\n if self._onnx_models.exported_model and schema == self._input_info.schema:\n # All required models have already been exported previously\n return False\n\n self._set_device_from_module(inputs, kwargs)\n self._onnx_models.exported_model = self._get_exported_model(*inputs, **kwargs)\n _cpp_ext._load_aten_op_executor_cpp_extension_if_needed(self._onnx_models.exported_model)\n if self._debug_options.save_onnx_models.save:\n self._onnx_models.save_exported_model(self._debug_options.save_onnx_models.path,\n self._debug_options.save_onnx_models.name_prefix,\n self._export_mode)\n\n if self._run_symbolic_shape_infer:\n self._onnx_models.exported_model = SymbolicShapeInference.infer_shapes(self._onnx_models.exported_model,\n auto_merge=True, guess_output_rank=True)\n\n return True\n\n def _get_exported_model(self, *inputs, **kwargs):\n '''Exports PyTorch `self._flattened_module` to ONNX for inferencing or training, using `*inputs` as input\n\n TODO: How to support dynamic axes? Dimensions are determined by samples\n '''\n\n # Setup dynamic axes for onnx model\n self._input_info = _io.parse_inputs_for_onnx_export(self._module_parameters,\n None,\n inputs,\n kwargs)\n output_names, output_dynamic_axes, self._module_output_schema = \\\n _io.parse_outputs_for_onnx_export_and_extract_schema(self._original_module, inputs, kwargs)\n self._input_info.dynamic_axes.update(output_dynamic_axes)\n\n # FlattenedModule needs _InputInfo to expand user input from *args to *args + **kwargs\n self._flattened_module._input_info = self._input_info\n\n # Export torch.nn.Module to ONNX\n f = io.BytesIO()\n\n # Deepcopy inputs, since input values may change after model run.\n # NOTE: Inputs may contain tensors that have attributes preventing their deepcopy (example grad_fn).\n # Therefore, deepcopy only the data component of the input tensors for export.\n sample_inputs_copy, sample_kwargs_copy = _io.deepcopy_model_input(*inputs, **kwargs)\n # NOTE: Flattening the input will change the 'input schema', resulting in a re-export\n sample_inputs_as_tuple = tuple(self._input_info.flatten(sample_inputs_copy, sample_kwargs_copy, self._device))\n # Ops behaving differently under train/eval mode need to exported with the\n # correct training flag to reflect the expected behavior.\n # For example, the Dropout node in a model is dropped under eval mode.\n assert self._export_mode is not None, \"Please use a concrete instance of ExecutionManager\"\n\n try:\n with torch.set_grad_enabled(self._enable_custom_autograd_function), \\\n _logger.suppress_os_stream_output(log_level=self._debug_options.logging.log_level):\n torch.onnx.export(self._flattened_module,\n sample_inputs_as_tuple,\n f,\n input_names=self._input_info.names,\n output_names=output_names,\n opset_version=ONNX_OPSET_VERSION,\n do_constant_folding=False,\n training=self._export_mode,\n dynamic_axes=self._input_info.dynamic_axes,\n verbose=self._debug_options.logging.log_level < LogLevel.WARNING,\n export_params=False,\n keep_initializers_as_inputs=True)\n except RuntimeError as e:\n raise RuntimeError('There was an error while exporting the PyTorch model to ONNX: {}'.format(e))\n exported_model = onnx.load_model_from_string(f.getvalue())\n\n exported_model = _post_process_after_export(exported_model, self._enable_custom_autograd_function)\n\n return 
exported_model\n\n def _set_device_from_module(self, inputs, kwargs):\n \"\"\"Get the device from the module and save it to self._device\"\"\"\n\n device = _utils.get_device_from_module(self._original_module) or \\\n _utils.get_device_from_inputs(inputs, kwargs)\n if not self._device or self._device != device:\n self._device = device\n if not self._device:\n raise RuntimeError('A device must be specified in the model or inputs!')\n\n def _get_graph_transformer_config(self):\n graph_transformer_config = C.TrainingGraphTransformerConfiguration()\n graph_transformer_config.propagate_cast_ops_config = C.PropagateCastOpsConfiguration()\n graph_transformer_config.propagate_cast_ops_config.level = self._propagate_cast_ops_level\n graph_transformer_config.propagate_cast_ops_config.allow = self._propagate_cast_ops_allow\n graph_transformer_config.propagate_cast_ops_config.strategy = self._propagate_cast_ops_strategy\n graph_transformer_config.allow_layer_norm_mod_precision = self._allow_layer_norm_mod_precision\n return graph_transformer_config\n\n def _initialize_graph_builder(self, training):\n \"\"\"Creates a new OrtModuleGraphBuilder, initializes it and saves it to self._graph_builder\"\"\"\n\n # All initializer names along with user inputs are a part of the onnx graph inputs\n # since the onnx model was exported with the flag keep_initializers_as_inputs=True\n onnx_initializer_names = {p.name for p in self._onnx_models.exported_model.graph.input}\n\n # TODO: PyTorch exporter bug: changes the initializer order in ONNX model\n initializer_names = [name for name, _ in self._flattened_module.named_parameters()\n if name in onnx_initializer_names]\n initializer_names_to_train = [name for name, param in self._flattened_module.named_parameters()\n if param.requires_grad and name in onnx_initializer_names]\n\n # Build and optimize the full graph\n grad_builder_config = C.OrtModuleGraphBuilderConfiguration()\n grad_builder_config.initializer_names = initializer_names\n grad_builder_config.initializer_names_to_train = initializer_names_to_train\n grad_builder_config.input_names_require_grad = self._input_info.require_grad_names\n grad_builder_config.build_gradient_graph = training\n grad_builder_config.graph_transformer_config = self._get_graph_transformer_config()\n grad_builder_config.enable_caching = self._enable_grad_acc_optimization\n grad_builder_config.loglevel = _logger.ortmodule_loglevel_to_onnxruntime_c_loglevel(self._debug_options.logging.log_level)\n self._graph_builder = C.OrtModuleGraphBuilder()\n\n # It is assumed here that the order and names of the inputs and outputs are not modified by the backend in any way\n # and are kept as they appear in the exported onnx model.\n self._graph_builder.initialize(self._onnx_models.exported_model.SerializeToString(), grad_builder_config)\n\n # TODO: Explore ways to make self._graph_info.initializer_names and self._graph_info.initializer_names_to_train\n # a set (unordered_set in the backend) that does not require a copy on each reference.\n self._graph_initializer_names = set(initializer_names)\n self._graph_initializer_names_to_train = set(initializer_names_to_train)\n\n # Initializers can be cached and used since they are expected not to be re-instantiated\n # between forward calls.\n self._graph_initializers = [param for name, param in self._flattened_module.named_parameters() \n if name in self._graph_initializer_names]\n" ]
[ [ "numpy.amax", "numpy.expand_dims", "numpy.abs", "numpy.amin", "numpy.asarray", "numpy.round", "numpy.zeros_like", "numpy.sum" ], [ "torch.ones", "torch.enable_grad", "torch.cuda.current_device", "torch.manual_seed", "torch.randn", "torch.cuda.current_stream", "torch.tensor", "torch.tanh", "torch.nn.Linear", "torch.FloatTensor", "torch.no_grad", "torch.cuda.stream", "torch.device", "torch.cuda._sleep", "torch.cuda.Stream" ], [ "torch.onnx.export", "torch.set_grad_enabled", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
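The GraphExecutionManager code in the record above exports the wrapped torch.nn.Module to an in-memory ONNX model (`io.BytesIO` plus `torch.onnx.export`) before building the training graph. A minimal sketch of just that export step, using only the public `torch.onnx` and `onnx` APIs; `TinyNet`, the tensor shapes, and the axis names are illustrative and not part of ORTModule:

```python
# Minimal sketch of the in-memory ONNX export pattern used above: serialize a
# torch.nn.Module to a BytesIO buffer and reload it as an onnx.ModelProto.
import io

import onnx
import torch


class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.fc(x)


model = TinyNet()
sample_input = torch.randn(3, 4)

f = io.BytesIO()
torch.onnx.export(
    model,
    (sample_input,),
    f,
    input_names=["x"],
    output_names=["y"],
    do_constant_folding=False,          # mirrors the setting in the code above
    dynamic_axes={"x": {0: "batch"}},   # leave the batch dimension symbolic
)

exported_model = onnx.load_model_from_string(f.getvalue())
print(type(exported_model).__name__)  # ModelProto
```

Unlike the snippet above, this sketch keeps the default `export_params=True`; ORTModule passes `export_params=False` with `keep_initializers_as_inputs=True` because it feeds the parameters in as graph inputs at run time.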
aolabNeuro/brain-python-interface
[ "11590717e1a1a2d5cc89a0495f02170b1b5f3c08", "11590717e1a1a2d5cc89a0495f02170b1b5f3c08" ]
[ "features/generator_features.py", "riglib/optitrack_client/optitrack.py" ]
[ "'''\nFeatures which have task-like functionality w.r.t. task...\n'''\n\nimport time\nimport tempfile\nimport random\nimport traceback\nimport numpy as np\nimport fnmatch\nimport os\nimport subprocess\nfrom riglib.experiment import traits\n\n\nclass Autostart(traits.HasTraits):\n '''\n Automatically begins the trial from the wait state, \n with a random interval drawn from `rand_start`. Doesn't really\n work if there are multiple trials in between wait states.\n '''\n rand_start = traits.Tuple((0., 0.), desc=\"Start interval\")\n exclude_parent_traits = ['wait_time']\n\n def _start_wait(self):\n '''\n At the start of the 'wait' state, determine how long to wait before starting the trial\n by drawing a sample from the rand_start interval\n '''\n s, e = self.rand_start\n self.wait_time = random.random()*(e-s) + s\n super(Autostart, self)._start_wait()\n \n def _test_start_trial(self, ts):\n '''\n Test if the required random wait time has passed\n '''\n return ts > self.wait_time and not self.pause\n\nclass AdaptiveGenerator(object):\n '''\n Deprecated--this class appears to be unused\n '''\n def __init__(self, *args, **kwargs):\n super(AdaptiveGenerator, self).__init__(*args, **kwargs)\n assert hasattr(self.gen, \"correct\"), \"Must use adaptive generator!\"\n\n def _start_reward(self):\n self.gen.correct()\n super(AdaptiveGenerator, self)._start_reward()\n \n def _start_incorrect(self):\n self.gen.incorrect()\n super(AdaptiveGenerator, self)._start_incorrect()\n\n\nclass IgnoreCorrectness(object):\n '''Deprecated--this class appears to be unused and not compatible with Sequences\n Allows any response to be correct, not just the one defined. Overrides for trialtypes'''\n def __init__(self, *args, **kwargs):\n super(IgnoreCorrectness, self).__init__(*args, **kwargs)\n if hasattr(self, \"trial_types\"):\n for ttype in self.trial_types:\n del self.status[ttype][\"%s_correct\"%ttype]\n del self.status[ttype][\"%s_incorrect\"%ttype]\n self.status[ttype][\"correct\"] = \"reward\"\n self.status[ttype][\"incorrect\"] = \"penalty\"\n\n def _test_correct(self, ts):\n return self.event is not None\n\n def _test_incorrect(self, ts):\n return False\n\n\nclass MultiHoldTime(traits.HasTraits):\n '''\n Deprecated--Use RandomDelay instead. \n Allows the hold time parameter to be multiple values per target in a given sequence chain. For instance,\n center targets and peripheral targets can have different hold times.\n '''\n\n hold_time = traits.List([.2,], desc=\"Length of hold required at targets before next target appears. 
\\\n        Can be a single number or a list of numbers to apply to each target in the sequence (center, out, etc.)\")\n\n    def _test_hold_complete(self, time_in_state):\n        '''\n        Test whether the target is held long enough to declare the\n        trial a success\n\n        Possible options\n        - Target held for the minimum required time (implemented here)\n        - Sensorized object moved by a certain amount\n        - Sensorized object moved to the required location\n        - Manually triggered by experimenter\n        '''\n        if len(self.hold_time) == 1:\n            hold_time = self.hold_time[0]\n        else:\n            hold_time = self.hold_time[self.target_index]\n        return time_in_state > hold_time\n\nclass RandomDelay(traits.HasTraits):\n    '''\n    Replaces 'delay_time' with 'rand_delay', an interval on which the delay period is selected uniformly.\n    '''\n\n    rand_delay = traits.Tuple((0., 0.), desc=\"Delay interval\")\n    exclude_parent_traits = ['delay_time']\n\n    def _start_wait(self):\n        '''\n        At the start of the 'wait' state, draw a sample from the rand_delay interval for this trial.\n        '''\n        s, e = self.rand_delay\n        self.delay_time = random.random()*(e-s) + s\n        super()._start_wait()\n\nclass TransparentDelayTarget(traits.HasTraits):\n    '''\n    Feature to make the delay period show a semi-transparent target rather than the full target. Used\n    for training the go cue. Gradually increase the alpha from 0 to 0.75 once a long enough delay\n    period has been established.\n    '''\n\n    delay_target_alpha = traits.Float(0.25, desc=\"Transparency of the next target during delay periods\")\n\n    def _start_delay(self):\n        super()._start_delay()\n\n        # Set the alpha of the next target\n        next_idx = (self.target_index + 1)\n        if next_idx < self.chain_length:\n            target = self.targets[next_idx % 2]\n            self._old_target_color = np.copy(target.sphere.color)\n            new_target_color = list(target.sphere.color)\n            new_target_color[3] = self.delay_target_alpha\n            target.sphere.color = tuple(new_target_color)\n\n    def _start_target(self):\n        super()._start_target()\n\n        # Reset the transparency of the current target\n        if self.target_index > 0:\n            target = self.targets[self.target_index % 2]\n            target.sphere.color = self._old_target_color\n\n", "'''\nBase code for 'optitrack' feature, compatible with Optitrack motiontracker\n'''\n\nimport os\nimport time\nimport numpy as np\nfrom riglib.source import DataSourceSystem\n\nclass System(DataSourceSystem):\n    '''\n    Optitrack DataSourceSystem collects motion tracking data via UDP packets using natnet depacketizer\n    '''\n    update_freq = 240 # This may not always be the case, but lower frequencies will still work, just waste space in the circular buffer\n\n    def __init__(self, client, feature=\"rigid body\", n_features=1):\n        '''\n        Don't start the client in this DataSourceSystem object since then it won't be available for \n        commands elsewhere, i.e. 
start/stop recording\n '''\n self.client = client\n self.feature = feature # rigid body, skeleton, marker\n self.n_features = n_features\n self.rigid_bodies = []\n self.skeletons = []\n self.markers = []\n self.timing = []\n \n def start(self):\n '''\n Just set the callback function\n '''\n self.client.set_callback(\n lambda rb, s, m, t: self._update(rb, s, m, t))\n\n def stop(self):\n pass\n \n def get(self):\n '''\n Main logic -- parse the motion tracking data into a defined datatype\n '''\n\n # Run the client to collect a frame of data\n self.client.run_once(timeout=1)\n \n # Extract coordinates from feature\n coords = np.empty((self.n_features, 3))\n coords[:] = np.nan\n if self.feature == \"rigid body\":\n for i in range(np.min((self.n_features, len(self.rigid_bodies)))):\n if self.rigid_bodies[i].tracking_valid:\n coords[i] = self.rigid_bodies[i].position\n elif self.feature == \"marker\":\n for i in range(np.min((self.n_features, len(self.markers)))):\n coords[i] = self.markers[i].position\n elif self.feature == \"skeleton\":\n raise NotImplementedError()\n else:\n raise AttributeError(\"Feature type unknown!\")\n\n # For HDFWriter we need a dim 0\n coords = np.expand_dims(coords, axis=0)\n return coords\n \n def _update(self, rigid_bodies, skeletons, markers, timing):\n '''\n Callback for natnet client\n '''\n self.rigid_bodies = rigid_bodies\n self.skeletons = skeletons\n self.markers = markers\n self.timing = timing\n\n\n#################\n# Simulated data\n#################\nclass RigidBody():\n\n position = None\n tracking_valid = True\n def __init__(self, position):\n self.position = position\n\nclass SimulatedClient():\n\n def __init__(self, n=1, radius=(0.2,0.04,0.1), speed=(0.5,1,2)):\n self.stime = time.time()\n self.n = n\n self.radius = radius\n self.speed = speed\n\n def set_callback(self, callback):\n self.callback = callback\n\n def run_once(self, timeout=None):\n '''\n Fake some motion data\n '''\n time.sleep(1./240)\n ts = (time.time() - self.stime)\n coords = np.multiply(self.radius, np.cos(np.divide(ts, self.speed) * 2 * np.pi))\n data = [RigidBody(coords)]\n self.callback(data, [], [], [])\n\n def start_recording(self):\n print(\"Start recording\")\n\n def stop_recording(self):\n print(\"Stop recording\")\n\n def set_take(self, take_name):\n print(\"Setting take_name: \" + take_name)\n\n def set_session(self, session_name):\n print(\"Setting session_name: \" + session_name)\n\n########################\n# Playback from csv file\n########################\n\nclass PlaybackClient(SimulatedClient):\n\n def __init__(self, filename):\n import pandas as pd\n self.stime = time.time()\n csv = pd.read_csv(filename, header=[1,4,5])\n self.motiondata = csv['Rigid Body']['Position']\n self.time = csv['Type'].iloc[:,0]\n\n def run_once(self, timeout=None):\n '''\n Read one line of motion data from the csv file\n '''\n read_freq = 240 # doesn't really matter if we read too fast... 
\n        time.sleep(1./read_freq)\n        ts = (time.time() - self.stime)\n        coords = np.empty((3,))\n        coords[:] = np.nan\n        now = (i for i,t in enumerate(self.time) if t > ts) # ...because we check the timestamps here\n        try:\n            row = next(now)\n            coords[0] = self.motiondata.iloc[row].X\n            coords[1] = self.motiondata.iloc[row].Y\n            coords[2] = self.motiondata.iloc[row].Z\n        except StopIteration:\n            pass # no sample later than ts; leave coords as NaN\n        data = [RigidBody(coords)]\n        self.callback(data, [], [], [])\n\n# System definition function\ndef make(cls, client, feature, num_features=1, **kwargs):\n    \"\"\"\n    This ridiculous function dynamically creates a class with a new init function\n    \"\"\"\n    def init(self):\n        super(self.__class__, self).__init__(client, feature, num_features, **kwargs)\n\n    dtype = np.dtype((float, (num_features, 3)))  # np.float is removed in modern numpy\n    return type(cls.__name__, (cls,), dict(dtype=dtype, __init__=init))" ]
[ [ "numpy.copy" ], [ "numpy.expand_dims", "pandas.read_csv", "numpy.empty", "numpy.dtype", "numpy.divide" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
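The `SimulatedClient` in the optitrack file above fakes a rigid body by evaluating per-axis cosines at the elapsed time and handing the result to a callback. A standalone sketch of just that signal generation, with the callback plumbing stripped out; the radius and speed values are the defaults from the code above, everything else is illustrative:

```python
# Standalone sketch of SimulatedClient's fake motion signal: three
# coordinates oscillating with different amplitudes and periods.
import time

import numpy as np

radius = (0.2, 0.04, 0.1)   # per-axis amplitude, as in the code above
speed = (0.5, 1, 2)         # per-axis period scaling

start = time.time()
for _ in range(3):
    time.sleep(1.0 / 240)   # pretend we sample at 240 Hz
    ts = time.time() - start
    coords = np.multiply(radius, np.cos(np.divide(ts, speed) * 2 * np.pi))
    print(coords)           # shape (3,): x, y, z of the fake rigid body
```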
chrisqqq123/FA-Dist-EfficientNet
[ "cb788b0f212d568d9bf04a51516d79fed5383585" ]
[ "test.py" ]
[ "#!/usr/bin/env python3\n\"\"\"Script to test a pytorch model on Cifar100's validation set.\"\"\"\n\nimport argparse\nimport logging\nimport pprint\nimport sys\nimport time\n\nimport torch\nfrom torch import nn\n\nfrom models import model_factory\nimport opts\nimport utils\nimport mul_cifar100\n\n\ndef parse_args(argv):\n \"\"\"Parse arguments @argv and return the flags needed for training.\"\"\"\n parser = argparse.ArgumentParser(description=__doc__, allow_abbrev=False)\n\n group = parser.add_argument_group('General Options')\n opts.add_general_flags(group)\n\n group = parser.add_argument_group('Dataset Options')\n opts.add_dataset_flags(group)\n\n group = parser.add_argument_group('Model Options')\n opts.add_model_flags(group)\n\n args = parser.parse_args(argv)\n\n if args.model_state_file is None:\n parser.error(\"You should set --model-state-file to reload a model \"\n \"state.\")\n\n return args\n\n\ndef test_for_one_epoch(model, loss, test_loader, epoch_number):\n model.eval()\n loss.eval()\n\n data_time_meter = utils.AverageMeter()\n batch_time_meter = utils.AverageMeter()\n loss_meter = utils.AverageMeter(recent=100)\n top1_meter = utils.AverageMeter(recent=100)\n top5_meter = utils.AverageMeter(recent=100)\n\n timestamp = time.time()\n for i, (images, labels) in enumerate(test_loader):\n batch_size = images.size(0)\n\n if utils.is_model_cuda(model):\n images = images.cuda()\n labels = labels.cuda()\n\n # Record data time\n data_time_meter.update(time.time() - timestamp)\n\n # Forward pass without computing gradients.\n with torch.no_grad():\n outputs = model(images)\n loss_output = loss(outputs, labels)\n\n # Sometimes loss function returns a modified version of the output,\n # which must be used to compute the model accuracy.\n if isinstance(loss_output, tuple):\n loss_value, outputs = loss_output\n else:\n loss_value = loss_output\n\n # Record loss and model accuracy.\n loss_meter.update(loss_value.item(), batch_size)\n top1, top5 = utils.topk_accuracy(outputs, labels, recalls=(1, 5))\n top1_meter.update(top1, batch_size)\n top5_meter.update(top5, batch_size)\n\n # Record batch time\n batch_time_meter.update(time.time() - timestamp)\n timestamp = time.time()\n if i % 10 == 0:\n logging.info(\n 'Epoch: [{epoch}][{batch}/{epoch_size}]\\t'\n 'Time {batch_time.value:.2f} ({batch_time.average:.2f}) '\n 'Data {data_time.value:.2f} ({data_time.average:.2f}) '\n 'Loss {loss.value:.3f} {{{loss.average:.3f}, {loss.average_recent:.3f}}} '\n 'Top-1 {top1.value:.2f} {{{top1.average:.2f}, {top1.average_recent:.2f}}} '\n 'Top-5 {top5.value:.2f} {{{top5.average:.2f}, {top5.average_recent:.2f}}} '.format(\n epoch=epoch_number, batch=i + 1, epoch_size=len(test_loader),\n batch_time=batch_time_meter, data_time=data_time_meter,\n loss=loss_meter, top1=top1_meter, top5=top5_meter))\n # Log the overall test stats\n logging.info(\n 'Epoch: [{epoch}] -- TESTING SUMMARY\\t'\n 'Time {batch_time.sum:.2f} '\n 'Data {data_time.sum:.2f} '\n 'Loss {loss.average:.3f} '\n 'Top-1 {top1.average:.2f} '\n 'Top-5 {top5.average:.2f} '.format(\n epoch=epoch_number, batch_time=batch_time_meter, data_time=data_time_meter,\n loss=loss_meter, top1=top1_meter, top5=top5_meter))\n\n\ndef main(argv):\n \"\"\"Run the test script with command line arguments @argv.\"\"\"\n args = parse_args(argv)\n utils.general_setup(args.save, args.gpus)\n\n logging.info(\"Arguments parsed.\\n{}\".format(pprint.pformat(vars(args))))\n\n # Create the validation data loaders.\n # val_loader = imagenet.get_val_loader(args.imagenet, 
args.batch_size,\n # args.num_workers)\n val_loader = mul_cifar100.mul_CIFAR100DataLoader(root=args.data_dir, \n image_size=32, train=False, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\n\n # Create model and the loss.\n model, loss = model_factory.create_model(\n args.model, args.model_state_file, args.gpus, coslinear=args.coslinear, scale=args.s)\n logging.info(\"Model:\\n{}\".format(model))\n # for n,p in model.named_parameters():\n # print(n)\n # Test for one epoch.\n test_for_one_epoch(model, loss, val_loader, epoch_number=1)\n print('\\n')\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n" ]
[ [ "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
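`test_for_one_epoch` in the record above relies on `utils.topk_accuracy`, whose source is not included here. The following is a common PyTorch recipe for top-1/top-5 accuracy, shown as a hedged sketch; the repo's actual implementation (and its `recalls` keyword) may differ in details:

```python
# Sketch of a standard top-k accuracy computation in PyTorch. This is a
# common recipe, not necessarily the repo's exact utils.topk_accuracy.
import torch


def topk_accuracy(outputs, labels, ks=(1, 5)):
    maxk = max(ks)
    # indices of the top-maxk logits per sample, shape (batch, maxk)
    _, pred = outputs.topk(maxk, dim=1, largest=True, sorted=True)
    correct = pred.eq(labels.view(-1, 1))  # broadcast compare against labels
    # a sample counts for top-k if the label appears in its first k guesses
    return [correct[:, :k].any(dim=1).float().mean().item() * 100 for k in ks]


outputs = torch.randn(8, 100)           # fake logits for 100 CIFAR-100 classes
labels = torch.randint(0, 100, (8,))
top1, top5 = topk_accuracy(outputs, labels)
print(top1, top5)
```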
epmcj/nextflix
[ "de15f0a63fe8906a0417da675b9a1c408f71bc79" ]
[ "VideoUtils/codec.py" ]
[ "import numpy as np\nimport structures as st\n\n#return a data object containing an entire frame\ndef decomposeFrame(frame,frameNum):\n\tchannelList = []\n\tif len(frame.shape)==3:\n\t\tfor i in range(frame.shape[2]):\n\t\t\tchannelList.append(decomposeMatrix(frame[:,:,i]))\n\telse:\n\t\tchannelList.append(decomposeMatrix(frame))\n\treturn(st.Data(channelList, frameNum))\n\n#apply svd decomposition to a single channel of an image.\ndef decomposeMatrix(mat):\n\tP, D, Q = np.linalg.svd(np.matrix(mat,dtype=float), full_matrices=False)\n\tceList = []\n\tfor i in range(len(D)):\n\t\tceList.append(st.ChannelElement(P[:,i],D[i],Q[i,:]))\n\treturn(st.Channel(ceList))\n\n#recompose the frame after the transformation into transferable data\ndef composeFrame(data):\n\tif(data.isEmpty()):\n\t\treturn False,None\n\telse:\n\t\t#get the dimensions\n\t\theight, width = data.dim()\n\t\t#create blank image\n\t\tframe = np.zeros((height,width,len(data.channel)), np.uint8)\n\t\n\t\t#recompose each channel\n\t\tfor i in range(len(data.channel)):\n\t\t\tframe[:,:,i] = np.uint8(composeMatrix(data.channel[i]))\n\t\t\n\t\treturn True,frame\n\n#recompose a simple 1-channel image (double)\ndef composeMatrix(channel):\n\t#get the dimensions\n\theight, width = channel.dim()\n\t\n\t#the matrices for svd\n\tP = np.zeros((height,len(channel)))\n\tD = np.zeros(len(channel))\n\tQ = np.zeros((len(channel),width))\n\t\n\t#fill in the matrices\n\tfor i in range(len(channel)):\n\t\tP[:,i] = channel.list[i].P_column.flatten()\n\t\tD[i] = channel.list[i].D_value\n\t\tQ[i,:] = channel.list[i].Q_line.flatten()\n\t\n\t#invert the svd: P @ diag(D) @ Q\n\tm = np.matmul(np.matmul(P, np.diag(D)), Q)\n\t\n\treturn(m)\n\n" ]
[ [ "numpy.matrix", "numpy.diag" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
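The codec above ships each channel as its full SVD, so a receiver can reconstruct progressively by truncating the rank. A sketch of that rank-k reconstruction in plain numpy; the image size and rank here are arbitrary, and plain ndarrays are used since `np.matrix` (as in `decomposeMatrix`) is deprecated in recent numpy:

```python
# Sketch of rank-k SVD reconstruction, the mathematical core of the codec
# above: keep the top-k singular triples and multiply them back together.
import numpy as np

rng = np.random.default_rng(0)
channel = rng.uniform(0, 255, size=(64, 48))   # fake 1-channel image

P, D, Q = np.linalg.svd(channel, full_matrices=False)

k = 8                                          # keep only the top-k components
approx = (P[:, :k] * D[:k]) @ Q[:k, :]         # same as P @ diag(D) @ Q, truncated

err = np.linalg.norm(channel - approx) / np.linalg.norm(channel)
print(f"relative error at rank {k}: {err:.3f}")
```

Sending `ChannelElement`s in decreasing-`D` order gives exactly this behavior: every extra element received lowers the reconstruction error.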
LLNL/LBANN
[ "8bcc5d461e52de70e329d73081ca7eee3e5c580a", "8bcc5d461e52de70e329d73081ca7eee3e5c580a" ]
[ "python/lbann/contrib/modules/radial_profile.py", "ci_test/unit_tests/test_unit_layer_softmax.py" ]
[ "import numpy as np\nimport lbann\nimport lbann.modules\n\nclass RadialProfile(lbann.modules.Module):\n \"\"\"Compute average pixel value w.r.t. distance from image center.\n\n We compute the distance between each image pixel and the image\n center. These distances are binned (with a bin size of 1), and the\n average pixel value in each bin is computed.\n\n A separate profile is computed for each image channel. The image\n can have any spatial dimension, but the first dimension is\n interpreted as the channel dimension (e.g. CHW format).\n\n \"\"\"\n\n def __init__(self):\n pass\n\n def forward(self, image, dims, max_r):\n \"\"\"Compute radial profile.\n\n Args:\n image (lbann.Layer): Image\n dims (tuple of int): Image dimensions (dim 0 corresponds\n to channel)\n max_r (int): Maximum radial distance. Pixels outside this\n distance are ignored.\n\n Returns:\n Layer: num_channels x max_r radial profile\n\n \"\"\"\n\n # Bin spatial positions\n r, r_counts = self._find_radial_bins(dims[1:], max_r)\n\n # Reciprocal of bin counts\n # Note: If a count is 0, its reciprocal is 0.\n r_counts_recip = [0 if c==0 else 1/c for c in r_counts]\n\n # Get scatter indices and scaling factors\n # Note: Independent binning for each channel (dim 0)\n tile_dims = [dims[0]] + [1]*r.ndim\n inds_vals = np.tile(r, tile_dims)\n inds_vals += np.arange(0, dims[0]*max_r, max_r).reshape(tile_dims)\n inds_vals[:,r>=max_r] = -1\n inds_vals = inds_vals.flatten()\n scales_vals = r_counts_recip * dims[0]\n\n # Construct LBANN layer graph\n image = lbann.Reshape(image, dims=[np.prod(dims)])\n inds = lbann.WeightsLayer(\n weights=lbann.Weights(\n lbann.ValueInitializer(values=inds_vals),\n optimizer=lbann.NoOptimizer(),\n ),\n dims=[len(inds_vals)],\n )\n r_sums = lbann.Scatter(image, inds, dims=[dims[0]*max_r])\n scales = lbann.WeightsLayer(\n weights=lbann.Weights(\n lbann.ValueInitializer(values=scales_vals),\n optimizer=lbann.NoOptimizer(),\n ),\n dims=[len(scales_vals)],\n )\n r_means = lbann.Multiply(scales, r_sums)\n return lbann.Reshape(r_means, dims=[dims[0], max_r])\n\n def _find_radial_bins(self, dims, max_r):\n \"\"\"Bin tensor positions based on distance from center.\n\n Args:\n dims (tuple of int): Tensor dimensions\n max_r (int): Maximum radial distance. Positions outside\n this distance are ignored.\n\n Returns:\n numpy.ndarray of int: Bin for each tensor position. Some\n bins may be greater than max_r. 
Its dimensions match\n dims.\n numpy.ndarray of int: Number of positions in each bin.\n It is 1D and with a length of max_r.\n\n \"\"\"\n\n # Find bin for each position\n r2 = np.zeros([])\n for i, d in enumerate(dims):\n x = np.arange(d) - (d-1)/2\n r2 = np.expand_dims(r2, -1) + x**2\n r = np.sqrt(r2).astype(int)\n\n # Count number of positions in each bin\n # Note: Pad/truncate to max_r\n r_counts = np.bincount(r.flatten(), minlength=max_r)\n r_counts = r_counts[:max_r]\n\n return r, r_counts\n\n# Test by computing radial profile for user-provided image\nif __name__ == \"__main__\":\n\n # Imports\n import argparse\n import matplotlib.image\n\n # Command-line options\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'image', action='store', type=str,\n help='image file', metavar='FILE',\n )\n args = parser.parse_args()\n\n # Load image\n image = matplotlib.image.imread(args.image)\n if image.ndim == 2:\n image = np.expand_dims(image, 2)\n assert image.ndim == 3, f'failed to load 2D image from {args.image}'\n if image.shape[-1] == 1:\n image = np.tile(image, (1,1,3))\n elif image.shape[-1] == 4:\n image = image[:,:,:3]\n assert image.shape[-1] == 3, f'failed to load RGB image from {args.image}'\n image = np.transpose(image, (2,0,1))\n\n # Dummy input\n reader = lbann.reader_pb2.DataReader()\n def add_data_reader(role):\n _reader = reader.reader.add()\n _reader.name = 'synthetic'\n _reader.role = role\n _reader.num_samples = 1\n _reader.num_labels = 1\n _reader.synth_dimensions = '1'\n _reader.percent_of_data_to_use = 1.0\n add_data_reader('train')\n add_data_reader('test')\n input_ = lbann.Input()\n\n # Radial profile\n x = lbann.WeightsLayer(\n weights=lbann.Weights(\n lbann.ValueInitializer(values=image.flatten()),\n ),\n dims=image.shape,\n )\n max_r = image.shape[-1] // 2\n rprof = RadialProfile()(x, image.shape, max_r)\n rprof_slice = lbann.Slice(rprof, slice_points=[0,1,2,3])\n red = lbann.Identity(rprof_slice, name='red')\n green = lbann.Identity(rprof_slice, name='green')\n blue = lbann.Identity(rprof_slice, name='blue')\n\n # Construct model\n callbacks = [\n lbann.CallbackDumpOutputs(layers=['red', 'green', 'blue']),\n ]\n model = lbann.Model(\n epochs=0,\n layers=lbann.traverse_layer_graph([input_, rprof]),\n callbacks=callbacks,\n )\n\n # Run LBANN\n lbann.run(\n trainer=lbann.Trainer(mini_batch_size=1),\n model=model,\n data_reader=reader,\n optimizer=lbann.NoOptimizer(),\n job_name='lbann_radial_profile_test',\n )\n", "import functools\nimport operator\nimport os\nimport os.path\nimport sys\nimport numpy as np\n\n# Bamboo utilities\ncurrent_file = os.path.realpath(__file__)\ncurrent_dir = os.path.dirname(current_file)\nsys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))\nimport tools\n\n# ==============================================\n# Objects for Python data reader\n# ==============================================\n# Note: The Python data reader imports this file as a module and calls\n# the functions below to ingest data.\n\n# Data\nnp.random.seed(201910142)\n_num_samples = 19\n_sample_size = 7\n_samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32)\n\n# Sample access functions\ndef get_sample(index):\n return _samples[index,:]\ndef num_samples():\n return _num_samples\ndef sample_dims():\n return (_sample_size,)\n\n# ==============================================\n# NumPy softmax\n# ==============================================\n\ndef numpy_softmax(x):\n \"\"\"NumPy implementation of softmax.\n\n The 
computation is performed with 64-bit floats. There is also an\n implementation of softmax in SciPy 1.2.0 (scipy.special.softmax).\n\n \"\"\"\n if x.dtype is not np.float64:\n x = x.astype(np.float64)\n y = np.exp(x - np.max(x))\n return y / np.sum(y)\n\n# ==============================================\n# Setup LBANN experiment\n# ==============================================\n\ndef setup_experiment(lbann, weekly):\n \"\"\"Construct LBANN experiment.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n mini_batch_size = num_samples() // 2\n trainer = lbann.Trainer(mini_batch_size)\n model = construct_model(lbann)\n data_reader = construct_data_reader(lbann)\n optimizer = lbann.NoOptimizer()\n return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes\n\ndef construct_model(lbann):\n \"\"\"Construct LBANN model.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Input data\n # Note: Sum with a weights layer so that gradient checking will\n # verify that error signals are correct.\n x_weights = lbann.Weights(optimizer=lbann.SGD(),\n initializer=lbann.ConstantInitializer(value=0.0),\n name='input_weights')\n x = lbann.Sum(lbann.Reshape(lbann.Input(data_field='samples'),\n dims=_sample_size),\n lbann.WeightsLayer(weights=x_weights,\n dims=_sample_size))\n x_lbann = x\n\n # Objects for LBANN model\n obj = []\n metrics = []\n callbacks = []\n\n # ------------------------------------------\n # Data-parallel layout\n # ------------------------------------------\n\n # LBANN implementation\n x = x_lbann\n y = lbann.Softmax(x, data_layout='data_parallel')\n z = lbann.L2Norm2(y)\n obj.append(z)\n metrics.append(lbann.Metric(z, name='data-parallel layout'))\n\n # NumPy implementation\n vals = []\n for i in range(num_samples()):\n x = get_sample(i).astype(np.float64)\n y = numpy_softmax(x)\n z = tools.numpy_l2norm2(y)\n vals.append(z)\n val = np.mean(vals)\n tol = 8 * val * np.finfo(np.float32).eps\n callbacks.append(lbann.CallbackCheckMetric(\n metric=metrics[-1].name,\n lower_bound=val-tol,\n upper_bound=val+tol,\n error_on_failure=True,\n execution_modes='test'))\n\n # ------------------------------------------\n # Model-parallel layout\n # ------------------------------------------\n\n # LBANN implementation\n x = x_lbann\n y = lbann.Softmax(x, data_layout='model_parallel')\n z = lbann.L2Norm2(y)\n obj.append(z)\n metrics.append(lbann.Metric(z, name='model-parallel layout'))\n\n # NumPy implementation\n vals = []\n for i in range(num_samples()):\n x = get_sample(i).astype(np.float64)\n y = numpy_softmax(x)\n z = tools.numpy_l2norm2(y)\n vals.append(z)\n val = np.mean(vals)\n tol = 8 * val * np.finfo(np.float32).eps\n callbacks.append(lbann.CallbackCheckMetric(\n metric=metrics[-1].name,\n lower_bound=val-tol,\n upper_bound=val+tol,\n error_on_failure=True,\n execution_modes='test'))\n\n # ------------------------------------------\n # Gradient checking\n # ------------------------------------------\n\n callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))\n\n # ------------------------------------------\n # Construct model\n # ------------------------------------------\n\n num_epochs = 0\n return lbann.Model(num_epochs,\n layers=lbann.traverse_layer_graph(x_lbann),\n objective_function=obj,\n metrics=metrics,\n callbacks=callbacks)\n\ndef construct_data_reader(lbann):\n \"\"\"Construct Protobuf message for Python data reader.\n\n The Python data reader will import the current Python file to\n access 
the sample access functions.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Note: The training data reader should be removed when\n # https://github.com/LLNL/lbann/issues/1098 is resolved.\n message = lbann.reader_pb2.DataReader()\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'train'\n )\n ])\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'test'\n )\n ])\n return message\n\n# ==============================================\n# Setup PyTest\n# ==============================================\n\n# Create test functions that can interact with PyTest\nfor _test_func in tools.create_tests(setup_experiment, __file__):\n globals()[_test_func.__name__] = _test_func\n" ]
[ [ "numpy.expand_dims", "numpy.sqrt", "numpy.arange", "numpy.tile", "numpy.prod", "numpy.transpose", "numpy.zeros" ], [ "numpy.random.seed", "numpy.finfo", "numpy.max", "numpy.random.normal", "numpy.mean", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
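`RadialProfile` above bins pixels by integer distance from the image center and averages within each bin, building the binning as an LBANN scatter graph. The same binning can be prototyped in pure numpy with `np.bincount`; a hedged sketch, independent of LBANN, with illustrative sizes:

```python
# Pure-numpy sketch of the radial-profile binning that RadialProfile above
# expresses as an LBANN Scatter: bin pixels by integer distance from the
# image center, then average per bin via weighted bincount.
import numpy as np

h, w, max_r = 32, 32, 16
img = np.random.rand(h, w)

ys = np.arange(h) - (h - 1) / 2
xs = np.arange(w) - (w - 1) / 2
r = np.sqrt(ys[:, None] ** 2 + xs[None, :] ** 2).astype(int)  # bin per pixel

inside = r < max_r                          # ignore pixels beyond max_r
sums = np.bincount(r[inside], weights=img[inside], minlength=max_r)[:max_r]
counts = np.bincount(r[inside], minlength=max_r)[:max_r]

# mean pixel value per radial bin; empty bins stay 0, like the reciprocal
# trick (1/count with 0 for empty bins) in the code above
profile = np.divide(sums, counts, out=np.zeros(max_r), where=counts > 0)
print(profile.shape)  # (16,)
```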
MoritzTaylor/baselines-tf2
[ "f51e40707b3c3021ae6309788d0cc0f29832dbea" ]
[ "baselines/a2c/utils.py" ]
[ "import os\nimport numpy as np\nimport tensorflow as tf\nfrom collections import deque\n\ndef ortho_init(scale=1.0):\n def _ortho_init(shape, dtype, partition_info=None):\n #lasagne ortho init for tf\n shape = tuple(shape)\n if len(shape) == 2:\n flat_shape = shape\n elif len(shape) == 4: # assumes NHWC\n flat_shape = (np.prod(shape[:-1]), shape[-1])\n else:\n raise NotImplementedError\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v # pick the one with the correct shape\n q = q.reshape(shape)\n return (scale * q[:shape[0], :shape[1]]).astype(np.float32)\n return _ortho_init\n\ndef conv(scope, *, nf, rf, stride, activation, pad='valid', init_scale=1.0, data_format='channels_last'):\n with tf.name_scope(scope):\n layer = tf.keras.layers.Conv2D(filters=nf, kernel_size=rf, strides=stride, padding=pad,\n data_format=data_format, kernel_initializer=ortho_init(init_scale))\n return layer\n\ndef fc(input_shape, scope, nh, *, init_scale=1.0, init_bias=0.0):\n with tf.name_scope(scope):\n layer = tf.keras.layers.Dense(units=nh, kernel_initializer=ortho_init(init_scale),\n bias_initializer=tf.keras.initializers.Constant(init_bias))\n # layer = tf.keras.layers.Dense(units=nh, kernel_initializer=tf.keras.initializers.Constant(init_scale),\n # bias_initializer=tf.keras.initializers.Constant(init_bias))\n layer.build(input_shape)\n return layer\n\ndef discount_with_dones(rewards, dones, gamma):\n discounted = []\n r = 0\n for reward, done in zip(rewards[::-1], dones[::-1]):\n r = reward + gamma*r*(1.-done) # fixed off by one bug\n discounted.append(r)\n return discounted[::-1]\n\n\nclass InverseLinearTimeDecay(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, initial_learning_rate, nupdates, name=\"InverseLinearTimeDecay\"):\n super(InverseLinearTimeDecay, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.nupdates = nupdates\n self.name = name\n\n def __call__(self, step):\n with tf.name_scope(self.name):\n initial_learning_rate = tf.convert_to_tensor(self.initial_learning_rate, name=\"initial_learning_rate\")\n dtype = initial_learning_rate.dtype\n step_t = tf.cast(step, dtype)\n nupdates_t = tf.convert_to_tensor(self.nupdates, dtype=dtype)\n tf.assert_less(step_t, nupdates_t)\n return initial_learning_rate * (1. - step_t / nupdates_t)\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"nupdates\": self.nupdates,\n \"name\": self.name\n }\n\n\nclass LinearTimeDecay(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, initial_learning_rate, name=\"LinearTimeDecay\"):\n super(LinearTimeDecay, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.name = name\n\n def __call__(self, step):\n with tf.name_scope(self.name):\n initial_learning_rate = tf.convert_to_tensor(self.initial_learning_rate, name=\"initial_learning_rate\")\n dtype = initial_learning_rate.dtype\n step_t = tf.cast(step, dtype) # TODO: step_t = step/n_total_steps ?\n return initial_learning_rate * step_t\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"name\": self.name\n }\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.keras.initializers.Constant", "numpy.linalg.svd", "tensorflow.cast", "tensorflow.assert_less", "numpy.random.normal", "tensorflow.name_scope", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
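`ortho_init` in the record above draws a Gaussian matrix and keeps the orthonormal factor from its SVD. A short numpy sketch of the same construction that also verifies the columns are orthonormal; the shape is illustrative:

```python
# Numpy sketch of the orthogonal initialization in ortho_init above: SVD a
# Gaussian matrix and keep whichever factor matches the requested shape.
import numpy as np

shape = (64, 32)
a = np.random.normal(0.0, 1.0, shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == shape else v      # pick the factor with the right shape

# Columns are orthonormal: q.T @ q should be (close to) the identity.
print(np.allclose(q.T @ q, np.eye(shape[1])))  # True
```

Orthogonal weights preserve the norm of activations/gradients at initialization, which is why this init is popular for the policy/value heads in A2C-style code like the file above.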
ywang-37/EnhancedSiamShipTracking
[ "0b25cf02b6088268a6c374cb20a7f0355bc65b2e", "0b25cf02b6088268a6c374cb20a7f0355bc65b2e" ]
[ "utils/pysot/datasets/vot.py", "datasets/siam_rpn_dataset.py" ]
[ "import os\nimport json\nimport numpy as np\n\nfrom glob import glob\nfrom tqdm import tqdm\n\nfrom .dataset import Dataset\nfrom .video import Video\n\n\nclass VOTVideo(Video):\n \"\"\"\n Args:\n name: video name\n root: dataset root\n video_dir: video directory\n init_rect: init rectangle\n img_names: image names\n gt_rect: groundtruth rectangle\n camera_motion: camera motion tag\n illum_change: illum change tag\n motion_change: motion change tag\n size_change: size change\n occlusion: occlusion\n \"\"\"\n def __init__(self, name, root, video_dir, init_rect, img_names, gt_rect,\n camera_motion, illum_change, motion_change, size_change, occlusion, width, height):\n super(VOTVideo, self).__init__(name, root, video_dir, init_rect, img_names, gt_rect, None)\n self.tags= {'all': [1] * len(gt_rect)}\n self.tags['camera_motion'] = camera_motion\n self.tags['illum_change'] = illum_change\n self.tags['motion_change'] = motion_change\n self.tags['size_change'] = size_change\n self.tags['occlusion'] = occlusion\n\n self.width = width\n self.height = height\n\n # empty tag\n all_tag = [v for k, v in self.tags.items() if len(v) > 0 ]\n self.tags['empty'] = np.all(1 - np.array(all_tag), axis=1).astype(np.int32).tolist()\n\n self.tag_names = list(self.tags.keys())\n\n def select_tag(self, tag, start=0, end=0):\n if tag == 'empty':\n return self.tags[tag]\n return self.tags[tag][start:end]\n\n def load_tracker(self, path, tracker_names=None, store=True):\n \"\"\"\n Args:\n path(str): path to result\n tracker_name(list): name of tracker\n \"\"\"\n if not tracker_names:\n tracker_names = [x.split('/')[-1] for x in glob(path)\n if os.path.isdir(x)]\n if isinstance(tracker_names, str):\n tracker_names = [tracker_names]\n for name in tracker_names:\n traj_files = glob(os.path.join(path, name, 'baseline', self.name, '*0*.txt'))\n if len(traj_files) == 15:\n traj_files = traj_files\n else:\n traj_files = traj_files[0:1]\n pred_traj = []\n for traj_file in traj_files:\n with open(traj_file, 'r') as f:\n traj = [list(map(float, x.strip().split(',')))\n for x in f.readlines()]\n pred_traj.append(traj)\n if store:\n self.pred_trajs[name] = pred_traj\n else:\n return pred_traj\n\n\nclass VOTDataset(Dataset):\n \"\"\"\n Args:\n name: dataset name, should be 'VOT2018', 'VOT2016'\n dataset_root: dataset root\n load_img: wether to load all imgs\n \"\"\"\n def __init__(self, name, dataset_root):\n super(VOTDataset, self).__init__(name, dataset_root)\n try:\n with open(os.path.join(dataset_root, name+'.json'), 'r') as f:\n meta_data = json.load(f)\n except:\n download_str = '# download json file for eval toolkit\\n'+\\\n 'cd $SiamMask/data\\n'+\\\n 'wget http://www.robots.ox.ac.uk/~qwang/VOT2016.json\\n'+\\\n 'wget http://www.robots.ox.ac.uk/~qwang/VOT2018.json'\n print(download_str)\n exit()\n\n # load videos\n pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100)\n self.videos = {}\n for video in pbar:\n pbar.set_postfix_str(video)\n self.videos[video] = VOTVideo(video,\n dataset_root,\n meta_data[video]['video_dir'],\n meta_data[video]['init_rect'],\n meta_data[video]['img_names'],\n meta_data[video]['gt_rect'],\n meta_data[video]['camera_motion'],\n meta_data[video]['illum_change'],\n meta_data[video]['motion_change'],\n meta_data[video]['size_change'],\n meta_data[video]['occlusion'],\n meta_data[video]['width'],\n meta_data[video]['height'])\n\n self.tags = ['all', 'camera_motion', 'illum_change', 'motion_change',\n 'size_change', 'occlusion', 'empty']\n", "from __future__ import division\nfrom 
torch.utils.data import Dataset\nimport numpy as np\nimport json\nimport random\nimport logging\nfrom os.path import join\nfrom utils.bbox_helper import *\nfrom utils.anchors import Anchors\nimport math\nimport sys\npyv = sys.version[0]\nimport cv2\nif pyv[0] == '3':\n cv2.ocl.setUseOpenCL(False)\n\nlogger = logging.getLogger('global')\n\n\nsample_random = random.Random()\nsample_random.seed(123456)\n\n\nclass SubDataSet(object):\n def __init__(self, cfg):\n for string in ['root', 'anno']:\n if string not in cfg:\n raise Exception('SubDataSet need \"{}\"'.format(string))\n\n with open(cfg['anno']) as fin:\n logger.info(\"loading \" + cfg['anno'])\n self.labels = self.filter_zero(json.load(fin), cfg)\n\n def isint(x):\n try:\n int(x)\n return True\n except:\n return False\n\n # add frames args into labels\n to_del = []\n for video in self.labels:\n for track in self.labels[video]:\n frames = self.labels[video][track]\n frames = list(map(int, filter(lambda x: isint(x), frames.keys())))\n frames.sort()\n self.labels[video][track]['frames'] = frames\n if len(frames) <= 0:\n logger.info(\"warning {}/{} has no frames.\".format(video, track))\n to_del.append((video, track))\n\n # delete tracks with no frames\n for video, track in to_del:\n del self.labels[video][track]\n\n # delete videos with no valid track\n to_del = []\n for video in self.labels:\n if len(self.labels[video]) <= 0:\n logger.info(\"warning {} has no tracks\".format(video))\n to_del.append(video)\n\n for video in to_del:\n del self.labels[video]\n\n self.videos = list(self.labels.keys())\n\n logger.info(cfg['anno'] + \" loaded.\")\n\n # default args\n self.root = \"/\"\n self.start = 0\n self.num = len(self.labels)\n self.num_use = self.num\n self.frame_range = 100\n self.mark = \"vid\"\n self.path_format = \"{}.{}.{}.jpg\"\n\n self.pick = []\n\n # input args\n self.__dict__.update(cfg)\n\n self.num_use = int(self.num_use)\n\n # shuffle\n self.shuffle()\n\n def filter_zero(self, anno, cfg):\n name = cfg.get('mark', '')\n\n out = {}\n tot = 0\n new = 0\n zero = 0\n\n for video, tracks in anno.items():\n new_tracks = {}\n for trk, frames in tracks.items():\n new_frames = {}\n for frm, bbox in frames.items():\n tot += 1\n if len(bbox) == 4:\n x1, y1, x2, y2 = bbox\n w, h = x2 - x1, y2 -y1\n else:\n w, h= bbox\n if w == 0 or h == 0:\n logger.info('Error, {name} {video} {trk} {bbox}'.format(**locals()))\n zero += 1\n continue\n new += 1\n new_frames[frm] = bbox\n\n if len(new_frames) > 0:\n new_tracks[trk] = new_frames\n\n if len(new_tracks) > 0:\n out[video] = new_tracks\n\n return out\n\n def log(self):\n logger.info('SubDataSet {name} start-index {start} select [{select}/{num}] path {format}'.format(\n name=self.mark, start=self.start, select=self.num_use, num=self.num, format=self.path_format\n ))\n\n def shuffle(self):\n lists = list(range(self.start, self.start + self.num))\n\n m = 0\n pick = []\n while m < self.num_use:\n sample_random.shuffle(lists)\n pick += lists\n m += self.num\n\n self.pick = pick[:self.num_use]\n return self.pick\n\n def get_image_anno(self, video, track, frame):\n frame = \"{:06d}\".format(frame)\n image_path = join(self.root, video, self.path_format.format(frame, track, 'x'))\n image_anno = self.labels[video][track][frame]\n\n return image_path, image_anno\n\n def get_positive_pair(self, index):\n video_name = self.videos[index]\n video = self.labels[video_name]\n track = random.choice(list(video.keys()))\n track_info = video[track]\n\n frames = track_info['frames']\n\n if 'hard' not in track_info:\n 
template_frame = random.randint(0, len(frames)-1)\n\n left = max(template_frame - self.frame_range, 0)\n right = min(template_frame + self.frame_range, len(frames)-1) + 1\n search_range = frames[left:right]\n template_frame = frames[template_frame]\n search_frame = random.choice(search_range)\n else:\n search_frame = random.choice(track_info['hard'])\n left = max(search_frame - self.frame_range, 0)\n right = min(search_frame + self.frame_range, len(frames)-1) + 1 # python [left:right+1) = [left:right]\n template_range = frames[left:right]\n template_frame = random.choice(template_range)\n search_frame = frames[search_frame]\n\n return self.get_image_anno(video_name, track, template_frame), \\\n self.get_image_anno(video_name, track, search_frame)\n\n def get_random_target(self, index=-1):\n if index == -1:\n index = random.randint(0, self.num-1)\n video_name = self.videos[index]\n video = self.labels[video_name]\n track = random.choice(list(video.keys()))\n track_info = video[track]\n\n frames = track_info['frames']\n frame = random.choice(frames)\n\n return self.get_image_anno(video_name, track, frame)\n\n\ndef crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):\n bbox = [float(x) for x in bbox]\n a = (out_sz-1) / (bbox[2]-bbox[0])\n b = (out_sz-1) / (bbox[3]-bbox[1])\n c = -a * bbox[0]\n d = -b * bbox[1]\n mapping = np.array([[a, 0, c],\n [0, b, d]]).astype(np.float)\n crop = cv2.warpAffine(image, mapping, (out_sz, out_sz), borderMode=cv2.BORDER_CONSTANT, borderValue=padding)\n return crop\n\n\nclass Augmentation:\n def __init__(self, cfg):\n # default args\n self.shift = 0\n self.scale = 0\n self.blur = 0 #False\n self.resize = False\n self.rgbVar = np.array([[-0.55919361, 0.98062831, - 0.41940627],\n [1.72091413, 0.19879334, - 1.82968581],\n [4.64467907, 4.73710203, 4.88324118]], dtype=np.float32)\n self.flip = 0\n\n self.eig_vec = np.array([\n [0.4009, 0.7192, -0.5675],\n [-0.8140, -0.0045, -0.5808],\n [0.4203, -0.6948, -0.5836],\n ], dtype=np.float32)\n\n self.eig_val = np.array([[0.2175, 0.0188, 0.0045]], np.float32)\n\n self.__dict__.update(cfg)\n\n @staticmethod\n def random():\n return random.random() * 2 - 1.0\n\n def blur_image(self, image):\n def rand_kernel():\n size = np.random.randn(1)\n size = int(np.round(size)) * 2 + 1\n if size < 0: return None\n if random.random() < 0.5: return None\n size = min(size, 45)\n kernel = np.zeros((size, size))\n c = int(size/2)\n wx = random.random()\n kernel[:, c] += 1. / size * wx\n kernel[c, :] += 1. 
/ size * (1-wx)\n return kernel\n\n kernel = rand_kernel()\n\n if kernel is not None:\n image = cv2.filter2D(image, -1, kernel)\n return image\n\n def __call__(self, image, bbox, size, gray=False):\n if gray:\n grayed = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = np.zeros((grayed.shape[0], grayed.shape[1], 3), np.uint8)\n image[:, :, 0] = image[:, :, 1] = image[:, :, 2] = grayed\n\n shape = image.shape\n\n crop_bbox = center2corner((shape[0]//2, shape[1]//2, size-1, size-1))\n\n param = {}\n if self.shift:\n param['shift'] = (Augmentation.random() * self.shift, Augmentation.random() * self.shift)\n\n if self.scale:\n param['scale'] = ((1.0 + Augmentation.random() * self.scale), (1.0 + Augmentation.random() * self.scale))\n\n crop_bbox, _ = aug_apply(Corner(*crop_bbox), param, shape)\n\n x1 = crop_bbox.x1\n y1 = crop_bbox.y1\n\n bbox = BBox(bbox.x1 - x1, bbox.y1 - y1,\n bbox.x2 - x1, bbox.y2 - y1)\n\n if self.scale:\n scale_x, scale_y = param['scale']\n bbox = Corner(bbox.x1 / scale_x, bbox.y1 / scale_y, bbox.x2 / scale_x, bbox.y2 / scale_y)\n\n image = crop_hwc(image, crop_bbox, size)\n\n offset = np.dot(self.rgbVar, np.random.randn(3, 1))\n offset = offset[::-1] # bgr 2 rgb\n offset = offset.reshape(3)\n image = image - offset\n\n if self.blur > random.random():\n image = self.blur_image(image)\n\n if self.resize:\n imageSize = image.shape[:2]\n ratio = max(math.pow(random.random(), 0.5), 0.2) # 25 ~ 255\n rand_size = (int(round(ratio*imageSize[0])), int(round(ratio*imageSize[1])))\n image = cv2.resize(image, rand_size)\n image = cv2.resize(image, tuple(imageSize))\n\n if self.flip and self.flip > Augmentation.random():\n image = cv2.flip(image, 1)\n width = image.shape[1]\n bbox = Corner(width - 1 - bbox.x2, bbox.y1, width - 1 - bbox.x1, bbox.y2)\n\n return image, bbox\n\n\nclass AnchorTargetLayer:\n def __init__(self, cfg):\n self.thr_high = 0.6\n self.thr_low = 0.3\n self.negative = 16\n self.rpn_batch = 64\n self.positive = 16\n\n self.__dict__.update(cfg)\n\n def __call__(self, anchor, target, size, neg=False, need_iou=False):\n anchor_num = anchor.anchors.shape[0]\n\n cls = np.zeros((anchor_num, size, size), dtype=np.int64)\n cls[...] 
= -1 # -1 ignore 0 negative 1 positive\n delta = np.zeros((4, anchor_num, size, size), dtype=np.float32)\n delta_weight = np.zeros((anchor_num, size, size), dtype=np.float32)\n\n def select(position, keep_num=16):\n num = position[0].shape[0]\n if num <= keep_num:\n return position, num\n slt = np.arange(num)\n np.random.shuffle(slt)\n slt = slt[:keep_num]\n return tuple(p[slt] for p in position), keep_num\n\n if neg:\n l = size // 2 - 3\n r = size // 2 + 3 + 1\n\n cls[:, l:r, l:r] = 0\n\n neg, neg_num = select(np.where(cls == 0), self.negative)\n cls[:] = -1\n cls[neg] = 0\n\n if not need_iou:\n return cls, delta, delta_weight\n else:\n overlap = np.zeros((anchor_num, size, size), dtype=np.float32)\n return cls, delta, delta_weight, overlap\n\n tcx, tcy, tw, th = corner2center(target)\n\n anchor_box = anchor.all_anchors[0]\n anchor_center = anchor.all_anchors[1]\n x1, y1, x2, y2 = anchor_box[0], anchor_box[1], anchor_box[2], anchor_box[3]\n cx, cy, w, h = anchor_center[0], anchor_center[1], anchor_center[2], anchor_center[3]\n\n # delta\n delta[0] = (tcx - cx) / w\n delta[1] = (tcy - cy) / h\n delta[2] = np.log(tw / w)\n delta[3] = np.log(th / h)\n\n # IoU\n overlap = IoU([x1, y1, x2, y2], target)\n\n pos = np.where(overlap > self.thr_high)\n neg = np.where(overlap < self.thr_low)\n\n pos, pos_num = select(pos, self.positive)\n neg, neg_num = select(neg, self.rpn_batch - pos_num)\n\n cls[pos] = 1\n delta_weight[pos] = 1. / (pos_num + 1e-6)\n\n cls[neg] = 0\n\n if not need_iou:\n return cls, delta, delta_weight\n else:\n return cls, delta, delta_weight, overlap\n\n\nclass DataSets(Dataset):\n def __init__(self, cfg, anchor_cfg, num_epoch=1):\n super(DataSets, self).__init__()\n global logger\n logger = logging.getLogger('global')\n\n # anchors\n self.anchors = Anchors(anchor_cfg)\n\n # size\n self.template_size = 127\n self.origin_size = 127\n self.search_size = 255\n self.size = 17\n self.base_size = 0\n self.crop_size = 0\n\n if 'template_size' in cfg:\n self.template_size = cfg['template_size']\n if 'origin_size' in cfg:\n self.origin_size = cfg['origin_size']\n if 'search_size' in cfg:\n self.search_size = cfg['search_size']\n if 'base_size' in cfg:\n self.base_size = cfg['base_size']\n if 'size' in cfg:\n self.size = cfg['size']\n\n if (self.search_size - self.template_size) / self.anchors.stride + 1 + self.base_size != self.size:\n raise Exception(\"size not match!\") # TODO: calculate size online\n if 'crop_size' in cfg:\n self.crop_size = cfg['crop_size']\n self.template_small = False\n if 'template_small' in cfg and cfg['template_small']:\n self.template_small = True\n\n self.anchors.generate_all_anchors(im_c=self.search_size//2, size=self.size)\n\n if 'anchor_target' not in cfg:\n cfg['anchor_target'] = {}\n self.anchor_target = AnchorTargetLayer(cfg['anchor_target'])\n\n # data sets\n if 'datasets' not in cfg:\n raise(Exception('DataSet need \"{}\"'.format('datasets')))\n\n self.all_data = []\n start = 0\n self.num = 0\n for name in cfg['datasets']:\n dataset = cfg['datasets'][name]\n dataset['mark'] = name\n dataset['start'] = start\n\n dataset = SubDataSet(dataset)\n dataset.log()\n self.all_data.append(dataset)\n\n start += dataset.num # real video number\n self.num += dataset.num_use # the number used for subset shuffle\n\n # data augmentation\n aug_cfg = cfg['augmentation']\n self.template_aug = Augmentation(aug_cfg['template'])\n self.search_aug = Augmentation(aug_cfg['search'])\n self.gray = aug_cfg['gray']\n self.neg = aug_cfg['neg']\n self.inner_neg = 0 if 'inner_neg' 
not in aug_cfg else aug_cfg['inner_neg']\n\n self.pick = None # list to save id for each img\n if 'num' in cfg: # number used in training for all dataset\n self.num = int(cfg['num'])\n self.num *= num_epoch\n self.shuffle()\n\n self.infos = {\n 'template': self.template_size,\n 'search': self.search_size,\n 'template_small': self.template_small,\n 'gray': self.gray,\n 'neg': self.neg,\n 'inner_neg': self.inner_neg,\n 'crop_size': self.crop_size,\n 'anchor_target': self.anchor_target.__dict__,\n 'num': self.num // num_epoch\n }\n logger.info('dataset informations: \\n{}'.format(json.dumps(self.infos, indent=4)))\n\n def imread(self, path):\n img = cv2.imread(path)\n\n if self.origin_size == self.template_size:\n return img, 1.0\n\n def map_size(exe, size):\n return int(round(((exe + 1) / (self.origin_size + 1) * (size+1) - 1)))\n\n nsize = map_size(self.template_size, img.shape[1])\n\n img = cv2.resize(img, (nsize, nsize))\n\n return img, nsize / img.shape[1]\n\n def shuffle(self):\n pick = []\n m = 0\n while m < self.num:\n p = []\n for subset in self.all_data:\n sub_p = subset.shuffle()\n p += sub_p\n\n sample_random.shuffle(p)\n\n pick += p\n m = len(pick)\n self.pick = pick\n logger.info(\"shuffle done!\")\n logger.info(\"dataset length {}\".format(self.num))\n\n def __len__(self):\n return self.num\n\n def find_dataset(self, index):\n for dataset in self.all_data:\n if dataset.start + dataset.num > index:\n return dataset, index - dataset.start\n\n def __getitem__(self, index, debug=False):\n index = self.pick[index]\n dataset, index = self.find_dataset(index)\n\n gray = self.gray and self.gray > random.random()\n neg = self.neg and self.neg > random.random()\n\n if neg:\n template = dataset.get_random_target(index)\n if self.inner_neg and self.inner_neg > random.random():\n search = dataset.get_random_target()\n else:\n search = random.choice(self.all_data).get_random_target()\n else:\n template, search = dataset.get_positive_pair(index)\n\n def center_crop(img, size):\n shape = img.shape[1]\n if shape == size: return img\n c = shape // 2\n l = c - size // 2\n r = c + size // 2 + 1\n return img[l:r, l:r]\n\n template_image, scale_z = self.imread(template[0])\n\n if self.template_small:\n template_image = center_crop(template_image, self.template_size)\n\n search_image, scale_x = self.imread(search[0])\n if self.crop_size > 0:\n search_image = center_crop(search_image, self.crop_size)\n\n def toBBox(image, shape):\n imh, imw = image.shape[:2]\n if len(shape) == 4:\n w, h = shape[2]-shape[0], shape[3]-shape[1]\n else:\n w, h = shape\n context_amount = 0.5\n exemplar_size = self.template_size # 127\n wc_z = w + context_amount * (w+h)\n hc_z = h + context_amount * (w+h)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = exemplar_size / s_z\n w = w*scale_z\n h = h*scale_z\n cx, cy = imw//2, imh//2\n bbox = center2corner(Center(cx, cy, w, h))\n return bbox\n\n template_box = toBBox(template_image, template[1])\n search_box = toBBox(search_image, search[1])\n\n template, _ = self.template_aug(template_image, template_box, self.template_size, gray=gray)\n search, bbox = self.search_aug(search_image, search_box, self.search_size, gray=gray)\n\n def draw(image, box, name):\n image = image.copy()\n x1, y1, x2, y2 = map(lambda x: int(round(x)), box)\n cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0))\n cv2.imwrite(name, image)\n\n if debug:\n draw(template_image, template_box, \"debug/{:06d}_ot.jpg\".format(index))\n draw(search_image, search_box, \"debug/{:06d}_os.jpg\".format(index))\n draw(template, 
_, \"debug/{:06d}_t.jpg\".format(index))\n draw(search, bbox, \"debug/{:06d}_s.jpg\".format(index))\n\n cls, delta, delta_weight = self.anchor_target(self.anchors, bbox, self.size, neg)\n\n template, search = map(lambda x: np.transpose(x, (2, 0, 1)).astype(np.float32), [template, search])\n\n return template, search, cls, delta, delta_weight, np.array(bbox, np.float32)\n\n" ]
[ [ "numpy.array" ], [ "numpy.log", "numpy.sqrt", "numpy.arange", "numpy.random.shuffle", "numpy.round", "numpy.random.randn", "numpy.transpose", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
awesome-archive/ssl-suite
[ "03990c5e86432c3f475971aeaf1ff7f7821ef40c" ]
[ "vat.py" ]
[ "import torch\nfrom homura.modules import cross_entropy_with_softlabels\nfrom torch.distributions import Categorical\nfrom torch.nn import functional as F\n\nfrom backends.loss import _kl, _l2_normalize\nfrom backends.utils import SSLTrainerBase, disable_bn_stats, get_task\n\n\nclass VATTrainer(SSLTrainerBase):\n def labeled(self,\n input: torch.Tensor,\n target: torch.Tensor) -> (torch.Tensor, torch.Tensor):\n output = self.model(input)\n target = self.to_onehot(target, self.smoothing)\n s_loss = self.loss_f[0](output, target)\n return output, s_loss\n\n def unlabeled(self,\n input: torch.Tensor) -> (None, torch.Tensor, torch.Tensor):\n with disable_bn_stats(self.model):\n u_loss = self.vat_loss(input)\n e_loss = Categorical(logits=self.model(input)).entropy().mean()\n return None, u_loss, e_loss\n\n def vat_loss(self,\n input: torch.Tensor) -> torch.Tensor:\n with torch.no_grad():\n pred = self.model(input)\n d = _l2_normalize(input.clone().normal_())\n d.requires_grad_(True)\n pred_hat = self.model(input + self.xi * d)\n adv_loss = _kl(pred, pred_hat)\n d_grad, = torch.autograd.grad([adv_loss], [d])\n d = _l2_normalize(d_grad)\n self.model.zero_grad()\n pred_hat = self.model(input + self.eps * d)\n return _kl(pred, pred_hat)\n\n\nif __name__ == \"__main__\":\n import hydra\n\n hydra.main('config/vat.yaml')(\n get_task(VATTrainer, [cross_entropy_with_softlabels, F.cross_entropy])\n )()\n" ]
[ [ "torch.no_grad", "torch.autograd.grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Olalaye/MegEngine
[ "695d24f24517536e6544b07936d189dbc031bbce", "695d24f24517536e6544b07936d189dbc031bbce", "695d24f24517536e6544b07936d189dbc031bbce" ]
[ "imperative/python/megengine/data/collator.py", "imperative/python/megengine/data/transform/vision/functional.py", "imperative/python/megengine/quantization/observer.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) 2016- Facebook, Inc (Adam Paszke)\n# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)\n# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\n# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\n# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\n# Copyright (c) 2011-2013 NYU (Clement Farabet)\n# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\n# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)\n# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n# ---------------------------------------------------------------------\n# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# This file has been modified by Megvii (\"Megvii Modifications\").\n# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.\n# ----------------------------------------------------------------------\nimport collections.abc\nimport re\n\nimport numpy as np\n\nnp_str_obj_array_pattern = re.compile(r\"[aO]\")\ndefault_collate_err_msg_format = (\n \"default_collator: inputs must contain numpy arrays, numbers, \"\n \"Unicode strings, bytes, dicts or lists; found {}\"\n)\n\n\nclass Collator:\n r\"\"\"Used for merging a list of samples to form a mini-batch of Tensor(s). Used when using batched loading from a dataset.\n Modified from https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py\n \"\"\"\n\n def apply(self, inputs):\n elem = inputs[0]\n elem_type = type(elem)\n if (\n elem_type.__module__ == \"numpy\"\n and elem_type.__name__ != \"str_\"\n and elem_type.__name__ != \"string_\"\n ):\n elem = inputs[0]\n if elem_type.__name__ == \"ndarray\":\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(default_collate_err_msg_format.format(elem.dtype))\n\n return np.ascontiguousarray(np.stack(inputs))\n elif elem.shape == (): # scalars\n return np.array(inputs)\n elif isinstance(elem, float):\n return np.array(inputs, dtype=np.float64)\n elif isinstance(elem, int):\n return np.array(inputs)\n elif isinstance(elem, (str, bytes)):\n return inputs\n elif isinstance(elem, collections.abc.Mapping):\n return {key: self.apply([d[key] for d in inputs]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, \"_fields\"): # namedtuple\n return elem_type(*(self.apply(samples) for samples in zip(*inputs)))\n elif isinstance(elem, collections.abc.Sequence):\n transposed = zip(*inputs)\n return [self.apply(samples) for samples in transposed]\n\n raise TypeError(default_collate_err_msg_format.format(elem_type))\n", "# -*- coding: utf-8 -*-\n# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2021 Megvii Inc. 
All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport collections.abc\nimport functools\nimport random\n\nimport cv2\nimport numpy as np\n\n\ndef wrap_keepdims(func):\n    r\"\"\"Wrapper to keep the dimension of input images unchanged.\"\"\"\n\n    @functools.wraps(func)\n    def wrapper(image, *args, **kwargs):\n        if len(image.shape) != 3:\n            raise ValueError(\n                \"image must have 3 dims, but got {} dims\".format(len(image.shape))\n            )\n        ret = func(image, *args, **kwargs)\n        if len(ret.shape) == 2:\n            ret = ret[:, :, np.newaxis]\n        return ret\n\n    return wrapper\n\n\n@wrap_keepdims\ndef to_gray(image):\n    r\"\"\"Change BGR format image's color space to gray.\n\n    Args:\n        image: input BGR format image, with `(H, W, C)` shape.\n\n    Returns:\n        gray format image, with `(H, W, C)` shape.\n    \"\"\"\n    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\n@wrap_keepdims\ndef to_bgr(image):\n    r\"\"\"Change gray format image's color space to BGR.\n\n    Args:\n        image: input Gray format image, with `(H, W, C)` shape.\n\n    Returns:\n        BGR format image, with `(H, W, C)` shape.\n    \"\"\"\n    return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)\n\n\n@wrap_keepdims\ndef pad(input, size, value):\n    r\"\"\"Pad input data with *value* and given *size*.\n\n    Args:\n        input: input data, with `(H, W, C)` shape.\n        size: padding size of input data, it could be an integer or a sequence.\n            If it is an integer, the input data will be padded in four directions.\n            If it is a sequence containing two integers, the bottom and right sides\n            of input data will be padded.\n            If it is a sequence containing four integers, the top, bottom, left and right\n            sides of input data will be padded with the given sizes.\n        value: padding value of data, could be a sequence of int or float.\n            If it is a float value, the dtype of the image will be cast to float32 as well.\n\n    Returns:\n        padded image.\n    \"\"\"\n    if isinstance(size, int):\n        size = (size, size, size, size)\n    elif isinstance(size, collections.abc.Sequence) and len(size) == 2:\n        size = (0, size[0], 0, size[1])\n    if np.array(value).dtype == float:\n        input = input.astype(np.float32)\n    return cv2.copyMakeBorder(input, *size, cv2.BORDER_CONSTANT, value=value)\n\n\n@wrap_keepdims\ndef flip(image, flipCode):\n    r\"\"\"According to the flipCode (the type of flip), flip the input image.\n\n    Args:\n        image: input image, with `(H, W, C)` shape.\n        flipCode: code that indicates the type of flip.\n\n            * 1 : Flip horizontally\n            * 0 : Flip vertically\n            * -1: Flip horizontally and vertically\n\n    Returns:\n        flipped image, with `(H, W, C)` shape.\n    \"\"\"\n    return cv2.flip(image, flipCode=flipCode)\n\n\n@wrap_keepdims\ndef resize(input, size, interpolation=cv2.INTER_LINEAR):\n    r\"\"\"Resize the input data to given size.\n\n    Args:\n        input: input data, could be image or masks, with `(H, W, C)` shape.\n        size: target size of input data, with (height, width) shape.\n        interpolation: interpolation method.\n\n    Returns:\n        resized data, with `(H, W, C)` shape.\n    \"\"\"\n    if len(size) != 2:\n        raise ValueError(\"resize needs (h, w), but got {}\".format(size))\n\n    if isinstance(interpolation, collections.abc.Sequence):\n        interpolation = random.choice(interpolation)\n    return cv2.resize(input, size[::-1], interpolation=interpolation)\n", "# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2021 Megvii Inc. 
All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport math\nfrom abc import abstractmethod\nfrom copy import deepcopy\nfrom typing import Union\n\nimport numpy as np\n\nfrom .. import functional as F\nfrom ..core.tensor.dtype import QuantDtypeMeta, _builtin_quant_dtypes\nfrom ..distributed import WORLD, get_rank, is_distributed\nfrom ..functional.distributed import all_reduce_max, all_reduce_min\nfrom ..logger import get_logger\nfrom ..module import Module\nfrom ..tensor import Tensor\nfrom .utils import QParams, QParamsModuleMixin, QuantMode, create_qparams\n\nlogger = get_logger(__name__)\n\n\nclass Observer(Module, QParamsModuleMixin):\n    r\"\"\"A base class for Observer Module. Used to record input tensor's statistics for\n    quantization.\n\n    Args:\n        dtype: a string indicating which dtype to collect scale and zero_point of.\n    \"\"\"\n\n    def __init__(self, dtype: Union[str, QuantDtypeMeta], **kwargs):\n        super().__init__()\n        if isinstance(dtype, str):\n            if dtype not in _builtin_quant_dtypes:\n                raise ValueError(\n                    \"unknown dtype: {}, only support {}\".format(\n                        dtype, _builtin_quant_dtypes.keys()\n                    )\n                )\n            dtype = _builtin_quant_dtypes[dtype]\n        if \"narrow_range\" in kwargs:\n            del kwargs[\"narrow_range\"]\n            logger.warning(\n                \"FakeQuantize currently has no narrow_range param \"\n                \"so it is ignored here\",\n                exc_info=DeprecationWarning,\n            )\n        self.dtype = dtype\n        self.qmin = dtype.qmin\n        self.qmax = dtype.qmax\n        self.enabled = True\n\n    def enable(self):\n        self.enabled = True\n\n    def disable(self):\n        self.enabled = False\n\n    def train(self, mode: bool = True, recursive: bool = True) -> None:\n        super().train(mode, recursive)\n        if mode:\n            self.enable()\n        else:\n            self.disable()\n\n    @abstractmethod\n    def forward(self, x):\n        pass\n\n\nclass MinMaxObserver(Observer):\n    r\"\"\"An Observer Module that records input tensor's running min and max values to calculate scale.\n\n    Args:\n        mode: set quantization mode.\n        eps: an initial maximum value to avoid the division by zero problem.\n        dtype: a string indicating which dtype to collect scale and zero_point of.\n    \"\"\"\n\n    def __init__(\n        self,\n        mode: QuantMode = QuantMode.SYMMERTIC,\n        eps: float = 0.00001,\n        dtype: Union[str, QuantDtypeMeta] = \"qint8\",\n        **kwargs\n    ):\n        super().__init__(dtype, **kwargs)\n        self.mode = mode\n        self.min_val = Tensor(np.finfo(np.float32).max, dtype=np.float32)\n        self.max_val = Tensor(np.finfo(np.float32).min, dtype=np.float32)\n        self.scale_limit = eps\n\n    def _calculate_qparams(self, inp_min_val, inp_max_val):\n        min_val = F.minimum(0.0, inp_min_val)\n        max_val = F.maximum(0.0, inp_max_val)\n        if self.mode == QuantMode.SYMMERTIC:\n            symmetric_max_vals = F.maximum(-min_val, max_val)\n            # use maximum to avoid the scale being too small at the beginning\n            scale = F.maximum(\n                symmetric_max_vals / ((self.qmax - self.qmin) / 2), self.scale_limit\n            )\n            zero_point = None\n        else:\n            # use maximum to avoid the scale being too small at the beginning\n            scale = F.maximum(\n                (max_val - min_val) / (self.qmax - self.qmin), self.scale_limit\n            )\n            # calculate zero_point\n            zero_point = self.qmin - F.round((min_val / scale))\n\n        return create_qparams(self.mode, self.dtype, scale=scale, zero_point=zero_point)\n\n    def get_qparams(self):\n        return self._calculate_qparams(self.min_val, self.max_val)\n\n    def forward(self, x_orig):\n        if self.enabled:\n            # stop gradient\n            x = x_orig.detach()\n            # find max and 
min\n            self.min_val[...] = F.minimum(self.min_val, x.min())\n            self.max_val[...] = F.maximum(self.max_val, x.max())\n        return x_orig\n\n\nclass SyncMinMaxObserver(MinMaxObserver):\n    r\"\"\"A distributed version of :class:`~.MinMaxObserver`.\n\n    Args:\n        mode: set quantization mode.\n        eps: an initial maximum value to avoid the division by zero problem.\n        dtype: a string indicating which dtype to collect scale and zero_point of.\n    \"\"\"\n\n    def forward(self, x_orig):\n        if self.enabled:  # check the flag; the bare method ``self.enable`` is always truthy\n            x = x_orig.detach()\n            if is_distributed():\n                min_x = all_reduce_min(x.min(), WORLD)\n                max_x = all_reduce_max(x.max(), WORLD)\n            else:\n                min_x = x.min()\n                max_x = x.max()\n            self.min_val[...] = F.minimum(self.min_val, min_x)\n            self.max_val[...] = F.maximum(self.max_val, max_x)\n        return x_orig\n\n\nclass ExponentialMovingAverageObserver(MinMaxObserver):\n    r\"\"\"A :class:`~.MinMaxObserver` with momentum support for min/max updating.\n\n    Args:\n        momentum: momentum ratio for min/max updating.\n        mode: set quantization mode.\n        eps: an initial maximum value to avoid the division by zero problem.\n        dtype: a string indicating which dtype to collect scale and zero_point of.\n    \"\"\"\n\n    def __init__(\n        self,\n        momentum: float = 0.9,\n        mode: QuantMode = QuantMode.SYMMERTIC,\n        eps: float = 0.00001,\n        dtype: Union[str, QuantDtypeMeta] = \"qint8\",\n        **kwargs\n    ):\n        super().__init__(mode, eps, dtype, **kwargs)\n        self.momentum = Tensor(momentum, dtype=\"float32\")\n        # used to avoid if-clauses in the first forward which is not supported\n        # in trace mode.\n        self.runtime_momentum = Tensor(0.0)\n\n    def set_momentum(self, momentum):\n        self.momentum = Tensor(momentum, dtype=\"float32\")\n\n    def forward(self, x_orig):\n        if self.enabled:\n            # stop gradient\n            x = x_orig.detach()\n            # Exponential Moving Average\n            self.min_val[...] = (\n                self.min_val * self.runtime_momentum\n                + (1 - self.runtime_momentum) * x.min()\n            )\n            self.max_val[...] = (\n                self.max_val * self.runtime_momentum\n                + (1 - self.runtime_momentum) * x.max()\n            )\n            self.runtime_momentum[...] = self.momentum\n\n        return x_orig\n\n\nclass SyncExponentialMovingAverageObserver(ExponentialMovingAverageObserver):\n    r\"\"\"A distributed version of :class:`~.ExponentialMovingAverageObserver`.\n\n    Args:\n        momentum: momentum ratio for min/max updating.\n        mode: set quantization mode.\n        eps: an initial maximum value to avoid the division by zero problem.\n        dtype: a string indicating which dtype to collect scale and zero_point of.\n    \"\"\"\n\n    def forward(self, x_orig):\n        if self.enabled:\n            x = x_orig.detach()\n            if is_distributed():  # call the function; the bare function object is always truthy\n                min_x = all_reduce_min(x.min(), WORLD)\n                max_x = all_reduce_max(x.max(), WORLD)\n            else:\n                min_x = x.min()\n                max_x = x.max()\n            self.min_val[...] = (\n                self.min_val * self.runtime_momentum\n                + (1 - self.runtime_momentum) * min_x\n            )\n            self.max_val[...] = (\n                self.max_val * self.runtime_momentum\n                + (1 - self.runtime_momentum) * max_x\n            )\n            self.runtime_momentum[...] = self.momentum\n        return x_orig\n\n\nclass HistogramObserver(MinMaxObserver):\n    r\"\"\"A :class:`~.MinMaxObserver` using running histogram of tensor values\n    for min/max updating. 
Usually used for calibration quantization.\n\n Args:\n bins: number of bins to use for the histogram.\n upsample_rate: which ratio to interpolate histograms in.\n mode: set quantization mode.\n eps: a initial maximum value to avoid division by zero problem.\n dtype: a string indicating which dtype to collect scale and zero_point of.\n \"\"\"\n\n def __init__(\n self,\n bins: int = 2048,\n upsample_rate: int = 128,\n mode: QuantMode = QuantMode.SYMMERTIC,\n eps: float = 0.00001,\n dtype: Union[str, QuantDtypeMeta] = \"qint8\",\n **kwargs\n ):\n super().__init__(mode, eps, dtype, **kwargs)\n self.bins = bins\n self.upsample_rate = upsample_rate\n self.dst_nbins = (\n _builtin_quant_dtypes[dtype].qmax - _builtin_quant_dtypes[dtype].qmin + 1\n )\n self.histogram = Tensor([-1] + [0.0] * (bins - 1), dtype=\"float32\")\n\n def _non_linear_param_search(self):\n r\"\"\"Non-linear parameter search.\n An approximation for L2 error minimization for selecting min/max.\n By selecting new min/max, we filter out outliers in input distribution.\n \"\"\"\n\n np_min_val = self.min_val.numpy()\n np_max_val = self.max_val.numpy()\n np_histogram = self.histogram.numpy()\n assert len(np_histogram) == self.bins, \"bins mistmatch\"\n bin_width = (np_max_val - np_min_val) / self.bins\n\n def _get_norm(delta_begin, delta_end, density, norm_type):\n r\"\"\"Compute the norm of the values uniformaly distributed between\n delta_begin and delta_end.\n norm = density * (integral_{begin, end} x^2)\n = density * (end^3 - begin^3) / 3\n \"\"\"\n assert norm_type == \"L2\", \"Only L2 norms are currently supported\"\n norm = 0.0\n if norm_type == \"L2\":\n norm = (\n delta_end * delta_end * delta_end\n - delta_begin * delta_begin * delta_begin\n ) / 3\n return density * norm\n\n def _compute_quantization_error(next_start_bin, next_end_bin, norm_type):\n r\"\"\"Compute the quantization error if we use start_bin to end_bin as the\n min and max to do the quantization.\n \"\"\"\n\n norm = 0.0\n dst_bin_width = (\n bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins\n )\n if dst_bin_width == 0.0:\n return 0.0\n for src_bin in range(self.bins):\n # distances from the beginning of first dst_bin to the beginning and\n # end of src_bin\n src_bin_begin = (src_bin - next_start_bin) * bin_width\n src_bin_end = src_bin_begin + bin_width\n\n # which dst_bins the beginning and end of src_bin belong to?\n dst_bin_of_begin = min(\n self.dst_nbins - 1,\n max(0.0, math.floor(src_bin_begin / dst_bin_width)),\n )\n dst_bin_of_end = min(\n self.dst_nbins - 1,\n max(0.0, math.floor(src_bin_end / dst_bin_width)),\n )\n dst_bin_of_begin_center = (\n dst_bin_of_begin * dst_bin_width + dst_bin_width / 2\n )\n\n density = np_histogram[src_bin] / bin_width\n if dst_bin_of_begin == dst_bin_of_end:\n # if src_bin is entirely within 1 dst_bin\n delta_begin = src_bin_begin - dst_bin_of_begin_center\n delta_end = src_bin_end - dst_bin_of_begin_center\n norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)\n else:\n delta_begin = src_bin_begin - dst_bin_of_begin_center\n delta_end = dst_bin_width / 2\n norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)\n\n norm = norm + (dst_bin_of_end - dst_bin_of_begin - 1) * _get_norm(\n -dst_bin_width / 2, dst_bin_width / 2, density, norm_type\n )\n\n dst_bin_of_end_center = (\n dst_bin_of_end * dst_bin_width + dst_bin_width / 2\n )\n\n delta_begin = -dst_bin_width / 2\n delta_end = src_bin_end - dst_bin_of_end_center\n norm = norm + _get_norm(delta_begin, delta_end, density, 
norm_type)\n return norm\n\n # cumulative sum\n total = sum(np_histogram)\n cSum = np.cumsum(np_histogram, axis=0)\n\n stepsize = 1e-5 # granularity\n alpha = 0.0 # lower bound\n beta = 1.0 # upper bound\n start_bin = 0\n end_bin = self.bins - 1\n norm_min = float(\"inf\")\n\n while alpha < beta:\n # Find the next step\n next_alpha = alpha + stepsize\n next_beta = beta - stepsize\n\n # find the left and right bins between the quantile bounds\n l = start_bin\n r = end_bin\n while l < end_bin and cSum[l] < next_alpha * total:\n l = l + 1\n while r > start_bin and cSum[r] > next_beta * total:\n r = r - 1\n\n # decide the next move\n next_start_bin = start_bin\n next_end_bin = end_bin\n if (l - start_bin) > (end_bin - r):\n # move the start bin\n next_start_bin = l\n alpha = next_alpha\n else:\n # move the end bin\n next_end_bin = r\n beta = next_beta\n\n if next_start_bin == start_bin and next_end_bin == end_bin:\n continue\n\n # calculate the quantization error using next_start_bin and next_end_bin\n norm = _compute_quantization_error(next_start_bin, next_end_bin, \"L2\")\n\n if norm > norm_min:\n break\n norm_min = norm\n start_bin = next_start_bin\n end_bin = next_end_bin\n\n new_min = self.min_val + Tensor(bin_width * start_bin, dtype=np.float32)\n new_max = self.min_val + Tensor(bin_width * (end_bin + 1), dtype=np.float32)\n return new_min, new_max\n\n def get_qparams(self):\n new_min, new_max = self._non_linear_param_search()\n return self._calculate_qparams(new_min, new_max)\n\n def _combine_histograms(\n self, orig_hist, new_hist, upsample_rate, downsample_rate, start_idx, Nbins\n ):\n # First up-sample the histogram with new data by a factor of L\n # This creates an approximate probability density thats piecwise constant\n upsampled_histogram = new_hist.repeat(upsample_rate)\n # Now insert the upsampled histogram into the output\n # histogram, which is initialized with zeros.\n # The offset at which the histogram is introduced is determined\n # by the start index as the output histogram can cover a wider range\n histogram_with_output_range = np.zeros((Nbins * downsample_rate))\n histogram_with_output_range[\n start_idx : Nbins * upsample_rate + start_idx\n ] = upsampled_histogram\n # Compute integral histogram, double precision is needed to ensure\n # that there are no overflows\n integral_histogram = np.cumsum(histogram_with_output_range, 0)[\n downsample_rate - 1 :: downsample_rate\n ]\n # Finally perform interpolation\n shifted_integral_histogram = np.zeros((Nbins))\n shifted_integral_histogram[1:Nbins] = integral_histogram[0:-1]\n interpolated_histogram = (\n integral_histogram - shifted_integral_histogram\n ) / upsample_rate\n orig_hist = orig_hist + interpolated_histogram\n return orig_hist\n\n def _adjust_min_max(self, combined_min, combined_max, upsample_rate):\n # We ensure that:\n # (combined_max - combined_min)/(downsample_rate*Nbins) = (max - min)/(upsample_rate*Nbins)\n # This allows us to have a common grid of resolution s, where we can align\n # the input histogram\n # start_idx maps min_val to the histogram bin index.\n np_min_val = self.min_val.numpy()\n np_max_val = self.max_val.numpy()\n\n hist_bin_width = (np_max_val - np_min_val) / (self.bins * upsample_rate)\n downsample_rate = int(\n np.ceil((combined_max - combined_min) / (self.bins * hist_bin_width))\n )\n e = downsample_rate * (self.bins * hist_bin_width) - (\n combined_max - combined_min\n )\n combined_max = combined_max + e / 2\n combined_min = combined_min - e / 2\n start_idx = int(np.round((np_min_val - 
combined_min) / hist_bin_width))\n\n return combined_min, combined_max, downsample_rate, start_idx\n\n def sideeffect_forward(self, x_orig):\n x = x_orig.numpy()\n min_val = self.min_val.numpy()\n max_val = self.max_val.numpy()\n histogram = self.histogram.numpy()\n new_min = x.min()\n new_max = x.max()\n if histogram[0] == -1:\n new_histogram, _ = np.histogram(x, self.bins, (new_min, new_max))\n else:\n new_min = min(new_min, min_val)\n new_max = max(new_max, max_val)\n # combine the existing histogram and new histogram into 1 histogram\n # We do this by first upsampling the histogram to a dense grid\n # and then downsampling the histogram efficiently\n (new_min, new_max, downsample_rate, start_idx) = self._adjust_min_max(\n new_min, new_max, self.upsample_rate\n )\n\n new_histogram, _ = np.histogram(x, self.bins, (new_min, new_max))\n new_histogram = new_histogram.astype(np.float64)\n if new_min == min_val and new_max == max_val:\n new_histogram += histogram\n else:\n new_histogram = self._combine_histograms(\n new_histogram,\n histogram,\n self.upsample_rate,\n downsample_rate,\n start_idx,\n self.bins,\n )\n\n self.histogram = Tensor(new_histogram, dtype=\"float32\")\n self.min_val = Tensor(new_min, dtype=\"float32\")\n self.max_val = Tensor(new_max, dtype=\"float32\")\n\n def forward(self, x_orig):\n self.sideeffect_forward(x_orig)\n return x_orig\n\n\nclass PassiveObserver(Observer):\n r\"\"\"An Observer that supports setting :attr:`scale` directly.\"\"\"\n\n def __init__(self, dtype: Union[str, QuantDtypeMeta], **kwargs):\n super().__init__(dtype, **kwargs)\n self.qparams = None\n self.orig_scale = None\n\n @property\n def scale(self):\n return self.qparams.scale\n\n @scale.setter\n def scale(self, value: np.ndarray):\n assert np.all(value > 0)\n self.qparams.scale[...] = Tensor(value)\n\n def get_qparams(self):\n return self.qparams\n\n def set_qparams(self, qparams: QParams):\n r\"\"\"set the ``qparams``.\n\n Args:\n qparams: used to set initial scale.\n \"\"\"\n self.qparams = deepcopy(qparams)\n if qparams.scale is None:\n raise AssertionError(\"Can not get an initialized scale\")\n if qparams.dtype_meta is None:\n qparams.dtype_meta = self.dtype\n else:\n assert (\n qparams.dtype_meta is self.dtype\n ), \"input qparams' dtype is not equal to self.dtype.\\nqparams.dtype_meta={}\\nself.dtype={}\".format(\n qparams.dtype_meta, self.dtype\n )\n self.orig_scale = qparams.scale.numpy()\n\n def forward(self, x):\n r\"\"\"Just return input because :attr:`qparams` is set by :func:`~.apply_easy_quant`.\"\"\"\n return x\n" ]
[ [ "numpy.array", "numpy.stack" ], [ "numpy.array" ], [ "numpy.histogram", "numpy.cumsum", "numpy.finfo", "numpy.all", "numpy.ceil", "numpy.round", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wustone1995/speech2face
[ "0eadbc8caf59c58cf5320a0a131a5e6fc9e728b8" ]
[ "preprocess/video_generator.py" ]
[ "import os\r\nimport pickle\r\nimport shutil\r\nimport imageio\r\nimport pandas as pd\r\nimport subprocess\r\nfrom PIL import Image\r\nimport face_recognition\r\nimport numpy as np\r\nimport skimage\r\nimport scipy\r\nfrom keras.engine import Model\r\nfrom keras.layers import Input\r\nfrom keras_vggface.vggface import VGGFace\r\nfrom keras_vggface import utils\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\r\nFNULL = open(os.devnull, 'w')\r\n\r\nclass VideoExtract():\r\n\r\n def __init__(self, fps, duration, face_extraction_model, verbose):\r\n\r\n self.destination_dir = \"speech2face/preprocess/data/speaker_video_embeddings/\"\r\n self.videos = \"speech2face/preprocess/data/videos/\"\r\n self.frames_dir = \"speech2face/preprocess/data/frames/\"\r\n self.frame_cropped = \"speech2face/preprocess/data/cropped_frames/\"\r\n self.model_dir = \"speech2face/preprocess/data/pretrained_model/\"\r\n self.fps = fps\r\n self.duration = duration\r\n self.face_extraction_model = face_extraction_model\r\n self.vgg = VGGFace(model='vgg16')\r\n self.out = self.vgg.get_layer('fc7').output\r\n self.vgg_model = Model(self.vgg.input, self.out)\r\n self.verbose = verbose\r\n\r\n if not os.path.isdir(self.destination_dir):\r\n os.mkdir(self.destination_dir)\r\n\r\n if not os.path.isdir(self.frames_dir):\r\n os.mkdir(self.frames_dir)\r\n\r\n def extract_video(self, id, x, y):\r\n embeddings = np.zeros((4096))\r\n if not os.path.isfile(self.videos + id + \".mp4\"):\r\n if self.verbose:\r\n print(\"--------Video {} not found-----------\".format(self.videos + id + \".mp4\"))\r\n return 1\r\n\r\n if (not os.path.isfile(self.destination_dir + id + \".pkl\")):\r\n \r\n if self.verbose:\r\n print(\"Resampling video\", id)\r\n resample = \"ffmpeg -nostats -loglevel 0 -y -i {1}{2}.mp4 -r {0} -t {3} '{4}{2}.mp4'\".format(self.fps, self.videos, id, self.duration, self.destination_dir)\r\n res2 = subprocess.Popen(resample, stdout = FNULL, shell=True).communicate()\r\n\r\n if not os.path.isfile(self.destination_dir + id + \".mp4\"):\r\n if self.verbose:\r\n print(\"--------Fault in video {}--------\".format(id))\r\n return 1\r\n\r\n extract_frames = \"ffmpeg -nostats -loglevel 0 -i '{0}{1}.mp4' {2}/%02d.jpg\".format(self.destination_dir, id, self.frames_dir)\r\n rs = subprocess.Popen(extract_frames, stdout = FNULL, shell = True).communicate()\r\n\r\n for j in range(1, 7):\r\n\r\n if not os.path.isfile(self.frames_dir + \"%02d\" % j + \".jpg\"):\r\n if self.verbose:\r\n print(\"------MISSING FRAME DETECTED FOR {} FRAME NO {}----\".format(id, j))\r\n continue\r\n\r\n if self.verbose:\r\n print(\"reading frame - {0}\".format(j))\r\n frame = Image.open(self.frames_dir + \"%02d\" % j + \".jpg\")\r\n face_boxes = face_recognition.face_locations(np.array(frame), model= self.face_extraction_model)\r\n\r\n if(len(face_boxes) > 1):\r\n if self.verbose:\r\n print(\"-----2 faces detected in {0} frame {1}-----\".format(id, j))\r\n return 1\r\n\r\n elif len(face_boxes) == 0:\r\n if self.verbose:\r\n print(\"-----No face detected in {} frame {}-----\".format(id, j))\r\n return 1\r\n \r\n top, right, bottom, left = np.squeeze(face_boxes)\r\n frame_cropped = frame.crop(box = (left, top, right, bottom))\r\n\r\n frame_resized = np.array(Image.fromarray(np.array(frame_cropped)).resize((224,224)))\r\n Image.fromarray(frame_resized).save(self.frame_cropped + id + '.jpg')\r\n frame_resized = np.expand_dims(np.array(frame_resized, dtype=np.float64), 0)\r\n frame_resized = utils.preprocess_input(frame_resized, version=1)\r\n embeddings = 
self.vgg_model.predict(frame_resized)\r\n break\r\n \r\n pickle.dump(embeddings, open(self.destination_dir + id + \".pkl\", \"wb\"))\r\n \r\n delete_frames = \"rm {0}*\".format(self.frames_dir)\r\n delete_video = \"rm '{0}'\".format(self.destination_dir + id + \".mp4\")\r\n rs = subprocess.Popen(delete_frames, stdout = subprocess.PIPE, shell = True).communicate()\r\n rs = subprocess.Popen(delete_video, stdout = subprocess.PIPE, shell = True).communicate()\r\n\r\n return 0\r\n" ]
[ [ "numpy.squeeze", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
silvershine157/transformers
[ "fd01104435914dd65c34026dcec8be008c40ee60" ]
[ "src/transformers/trainer.py" ]
[ "# coding=utf-8\n# Copyright 2020-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThe Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.\n\"\"\"\n\nimport collections\nimport gc\nimport inspect\nimport math\nimport os\nimport re\nimport shutil\nimport time\nimport warnings\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\n\n# Integrations must be imported before ML frameworks:\nfrom .integrations import ( # isort: split\n default_hp_search_backend,\n get_reporting_integration_callbacks,\n hp_params,\n is_fairscale_available,\n is_optuna_available,\n is_ray_tune_available,\n run_hp_search_optuna,\n run_hp_search_ray,\n init_deepspeed,\n)\n\nimport numpy as np\nimport torch\nfrom packaging import version\nfrom torch import nn\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import RandomSampler, SequentialSampler\n\nfrom .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator\nfrom .file_utils import (\n WEIGHTS_NAME,\n is_apex_available,\n is_datasets_available,\n is_in_notebook,\n is_sagemaker_distributed_available,\n is_torch_tpu_available,\n)\nfrom .modeling_utils import PreTrainedModel, unwrap_model\nfrom .optimization import Adafactor, AdamW, get_scheduler\nfrom .tokenization_utils_base import PreTrainedTokenizerBase\nfrom .trainer_callback import (\n CallbackHandler,\n DefaultFlowCallback,\n PrinterCallback,\n ProgressCallback,\n TrainerCallback,\n TrainerControl,\n TrainerState,\n)\nfrom .trainer_pt_utils import (\n DistributedLengthGroupedSampler,\n DistributedTensorGatherer,\n LabelSmoother,\n LengthGroupedSampler,\n SequentialDistributedSampler,\n distributed_broadcast_scalars,\n distributed_concat,\n nested_concat,\n nested_detach,\n nested_numpify,\n nested_xla_mesh_reduce,\n reissue_pt_warnings,\n)\nfrom .trainer_utils import (\n PREFIX_CHECKPOINT_DIR,\n BestRun,\n EvalPrediction,\n HPSearchBackend,\n PredictionOutput,\n ShardedDDPOption,\n TrainerMemoryTracker,\n TrainOutput,\n default_compute_objective,\n default_hp_space,\n get_last_checkpoint,\n set_seed,\n speed_metrics,\n)\nfrom .training_args import ParallelMode, TrainingArguments\nfrom .utils import logging\nfrom .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES\n\n\n_is_native_amp_available = False\n\nDEFAULT_CALLBACKS = [DefaultFlowCallback]\nDEFAULT_PROGRESS_CALLBACK = ProgressCallback\n\nif is_in_notebook():\n from .utils.notebook import NotebookProgressCallback\n\n DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback\n\nif is_apex_available():\n from apex import amp\n\nif version.parse(torch.__version__) >= version.parse(\"1.6\"):\n _is_native_amp_available = True\n from torch.cuda.amp import autocast\n\nif is_datasets_available():\n import datasets\n\nif 
is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n import torch_xla.distributed.parallel_loader as pl\n\nif is_fairscale_available():\n import fairscale\n from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\n from fairscale.optim import OSS\n from fairscale.optim.grad_scaler import ShardedGradScaler\n\n if version.parse(fairscale.__version__) >= version.parse(\"0.3\"):\n from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP\n else:\n FullyShardedDDP = None\n\nif is_sagemaker_distributed_available():\n import smdistributed.dataparallel.torch.distributed as dist\n from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP\nelse:\n import torch.distributed as dist\n\nif TYPE_CHECKING:\n import optuna\n\nlogger = logging.get_logger(__name__)\n\n\nclass Trainer:\n \"\"\"\n Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.\n\n Args:\n model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):\n The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.\n\n .. note::\n\n :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`\n provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as\n they work the same way as the 🤗 Transformers models.\n args (:class:`~transformers.TrainingArguments`, `optional`):\n The arguments to tweak for training. Will default to a basic instance of\n :class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in\n the current directory if not provided.\n data_collator (:obj:`DataCollator`, `optional`):\n The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.\n Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of\n :func:`~transformers.DataCollatorWithPadding` otherwise.\n train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed.\n eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed.\n tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):\n The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the\n maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an\n interrupted training or reuse the fine-tuned model.\n model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):\n A function that instantiates the model to be used. 
If provided, each call to\n :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.\n\n The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be\n able to choose different architectures according to hyper parameters (such as layer count, sizes of inner\n layers, dropout probabilities etc).\n compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):\n The function that will be used to compute metrics at evaluation. Must take a\n :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.\n callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):\n A list of callbacks to customize the training loop. Will add those to the list of default callbacks\n detailed in :doc:`here <callback>`.\n\n If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.\n optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple\n containing the optimizer and the scheduler to use. Will default to an instance of\n :class:`~transformers.AdamW` on your model and a scheduler given by\n :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.\n\n Important attributes:\n\n - **model** -- Always points to the core model. If using a transformers model, it will be a\n :class:`~transformers.PreTrainedModel` subclass.\n - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the\n original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,\n the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the\n inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.\n - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from\n data parallelism, this means some of the model layers are split on different GPUs).\n - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set\n to :obj:`False` if model parallel or deepspeed is used, or if the default\n ``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .\n - **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. 
when ``evaluate`` is called\n while in ``train``)\n\n \"\"\"\n\n from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state\n\n def __init__(\n self,\n model: Union[PreTrainedModel, torch.nn.Module] = None,\n args: TrainingArguments = None,\n data_collator: Optional[DataCollator] = None,\n train_dataset: Optional[Dataset] = None,\n eval_dataset: Optional[Dataset] = None,\n tokenizer: Optional[\"PreTrainedTokenizerBase\"] = None,\n model_init: Callable[[], PreTrainedModel] = None,\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,\n callbacks: Optional[List[TrainerCallback]] = None,\n optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),\n ):\n if args is None:\n output_dir = \"tmp_trainer\"\n logger.info(f\"No `TrainingArguments` passed, using `output_dir={output_dir}`.\")\n args = TrainingArguments(output_dir=output_dir)\n self.args = args\n # Seed must be set before instantiating the model when using model\n set_seed(self.args.seed)\n self.hp_name = None\n self.deepspeed = None\n self.is_in_train = False\n\n # memory metrics - must set up as early as possible\n self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)\n self._memory_tracker.start()\n\n # force device and distributed setup init explicitly\n args._setup_devices\n\n if model is None:\n if model_init is not None:\n self.model_init = model_init\n model = self.call_model_init()\n else:\n raise RuntimeError(\"`Trainer` requires either a `model` or `model_init` argument\")\n else:\n if model_init is not None:\n warnings.warn(\n \"`Trainer` requires either a `model` or `model_init` argument, but not both. \"\n \"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.\",\n FutureWarning,\n )\n self.model_init = model_init\n\n if hasattr(model, \"is_parallelizable\") and model.is_parallelizable and model.model_parallel:\n self.is_model_parallel = True\n else:\n self.is_model_parallel = False\n\n # Setup Sharded DDP training\n self.sharded_ddp = None\n if len(args.sharded_ddp) > 0:\n if args.deepspeed:\n raise ValueError(\n \"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags.\"\n )\n\n if args.local_rank == -1:\n raise ValueError(\"Using sharded DDP only works in distributed training.\")\n elif not is_fairscale_available():\n raise ImportError(\"Sharded DDP training requires fairscale: `pip install fairscale`.\")\n elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:\n raise ImportError(\n \"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found \"\n f\"{fairscale.__version__}. 
Upgrade your fairscale library: `pip install --upgrade fairscale`.\"\n )\n elif ShardedDDPOption.SIMPLE in args.sharded_ddp:\n self.sharded_ddp = ShardedDDPOption.SIMPLE\n elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:\n self.sharded_ddp = ShardedDDPOption.ZERO_DP_2\n elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:\n self.sharded_ddp = ShardedDDPOption.ZERO_DP_3\n\n # one place to sort out whether to place the model on device or not\n self.place_model_on_device = args.place_model_on_device\n if (\n self.is_model_parallel\n or (args.deepspeed and args.do_train)\n or (args.fp16_full_eval and not args.do_train)\n or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])\n ):\n self.place_model_on_device = False\n\n default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)\n self.data_collator = data_collator if data_collator is not None else default_collator\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.tokenizer = tokenizer\n\n # postpone switching model to cuda when:\n # 1. MP - since we are trying to fit a much bigger than 1 gpu model\n # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,\n # and we only use deepspeed for training at the moment\n if self.place_model_on_device:\n model = model.to(args.device)\n\n # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs\n if self.is_model_parallel:\n self.args._n_gpu = 1\n\n # later use `self.model is self.model_wrapped` to check if it's wrapped or not\n self.model_wrapped = model\n self.model = model\n\n self.compute_metrics = compute_metrics\n self.optimizer, self.lr_scheduler = optimizers\n if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):\n raise RuntimeError(\n \"Passing a `model_init` is incompatible with providing the `optimizers` argument.\"\n \"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.\"\n )\n default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)\n callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks\n self.callback_handler = CallbackHandler(\n callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler\n )\n self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)\n\n # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.\n self._loggers_initialized = False\n\n # Create output directory if needed\n if self.is_world_process_zero():\n os.makedirs(self.args.output_dir, exist_ok=True)\n if not callable(self.data_collator) and callable(getattr(self.data_collator, \"collate_batch\", None)):\n raise ValueError(\"The `data_collator` should be a simple callable (function, class with `__call__`).\")\n\n if args.max_steps > 0:\n logger.info(\"max_steps is given, it will override any value given in num_train_epochs\")\n\n # Enforce rules on using datasets with no __len__\n if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:\n raise ValueError(\"train_dataset does not implement __len__, max_steps has to be specified\")\n if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n\n self._signature_columns = None\n if is_datasets_available():\n if isinstance(train_dataset, datasets.Dataset):\n 
self._remove_unused_columns(self.train_dataset, description=\"training\")\n if isinstance(eval_dataset, datasets.Dataset):\n self._remove_unused_columns(self.eval_dataset, description=\"evaluation\")\n\n # Mixed precision setup\n self.use_apex = False\n self.use_amp = False\n self.fp16_backend = None\n\n if args.fp16:\n if args.fp16_backend == \"auto\":\n self.fp16_backend = \"amp\" if _is_native_amp_available else \"apex\"\n else:\n self.fp16_backend = args.fp16_backend\n logger.info(f\"Using {self.fp16_backend} fp16 backend\")\n\n if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16\n if self.fp16_backend == \"amp\":\n self.use_amp = True\n self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()\n else:\n if not is_apex_available():\n raise ImportError(\n \"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex.\"\n )\n self.use_apex = True\n\n # Label smoothing\n if self.args.label_smoothing_factor != 0:\n self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)\n else:\n self.label_smoother = None\n\n self.state = TrainerState()\n self.control = TrainerControl()\n # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the\n # state at each call to self.log.\n self._total_flos = None\n self.hp_search_backend = None\n self.use_tune_checkpoints = False\n default_label_names = (\n [\"start_positions\", \"end_positions\"]\n if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()\n else [\"labels\"]\n )\n self.label_names = default_label_names if self.args.label_names is None else self.args.label_names\n self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)\n\n # very last\n self._memory_tracker.stop_and_update_metrics()\n\n def add_callback(self, callback):\n \"\"\"\n Add a callback to the current list of :class:`~transformer.TrainerCallback`.\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will instantiate a member of that class.\n \"\"\"\n self.callback_handler.add_callback(callback)\n\n def pop_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.\n\n If the callback is not found, returns :obj:`None` (and no error is raised).\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will pop the first member of that class found in the list of callbacks.\n\n Returns:\n :class:`~transformer.TrainerCallback`: The callback removed, if found.\n \"\"\"\n return self.callback_handler.pop_callback(callback)\n\n def remove_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of :class:`~transformer.TrainerCallback`.\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will remove the first member of that class found in the list of callbacks.\n \"\"\"\n self.callback_handler.remove_callback(callback)\n\n def _remove_unused_columns(self, dataset: \"datasets.Dataset\", description: Optional[str] = None):\n if not 
self.args.remove_unused_columns:\n return\n if self._signature_columns is None:\n # Inspect model forward signature to keep only the arguments it accepts.\n signature = inspect.signature(self.model.forward)\n self._signature_columns = list(signature.parameters.keys())\n # Labels may be named label or label_ids, the default data collator handles that.\n self._signature_columns += [\"label\", \"label_ids\"]\n columns = [k for k in self._signature_columns if k in dataset.column_names]\n ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))\n if len(ignored_columns) > 0:\n dset_description = \"\" if description is None else f\"in the {description} set \"\n logger.info(\n f\"The following columns {dset_description} don't have a corresponding argument in \"\n f\"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}.\"\n )\n\n dataset.set_format(type=dataset.format[\"type\"], columns=columns, format_kwargs=dataset.format[\"format_kwargs\"])\n\n def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:\n if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(\n self.train_dataset, collections.abc.Sized\n ):\n return None\n\n # Gather the number of processes and this process index.\n if self.args.parallel_mode == ParallelMode.TPU:\n num_processes = xm.xrt_world_size()\n process_index = xm.get_ordinal()\n elif (\n self.args.parallel_mode == ParallelMode.DISTRIBUTED\n or self.args.parallel_mode == ParallelMode.SAGEMAKER_DISTRIBUTED\n ):\n num_processes = dist.get_world_size()\n process_index = dist.get_rank()\n else:\n num_processes = 1\n process_index = 0\n\n # Build the sampler.\n if self.args.group_by_length:\n model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None\n if num_processes <= 1:\n return LengthGroupedSampler(\n self.train_dataset, self.args.train_batch_size, model_input_name=model_input_name\n )\n else:\n return DistributedLengthGroupedSampler(\n self.train_dataset,\n self.args.train_batch_size,\n num_replicas=num_processes,\n rank=process_index,\n model_input_name=model_input_name,\n )\n\n else:\n if num_processes <= 1:\n return RandomSampler(self.train_dataset)\n else:\n return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)\n\n def get_train_dataloader(self) -> DataLoader:\n \"\"\"\n Returns the training :class:`~torch.utils.data.DataLoader`.\n\n Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted\n to distributed training if necessary) otherwise.\n\n Subclass and override this method if you want to inject some custom behavior.\n \"\"\"\n if self.train_dataset is None:\n raise ValueError(\"Trainer: training requires a train_dataset.\")\n train_sampler = self._get_train_sampler()\n\n return DataLoader(\n self.train_dataset,\n batch_size=self.args.train_batch_size,\n sampler=train_sampler,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:\n if is_torch_tpu_available():\n return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())\n elif self.args.local_rank != -1:\n return SequentialDistributedSampler(eval_dataset)\n else:\n return SequentialSampler(eval_dataset)\n\n 
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:\n \"\"\"\n Returns the evaluation :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not\n accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.\n \"\"\"\n if eval_dataset is None and self.eval_dataset is None:\n raise ValueError(\"Trainer: evaluation requires an eval_dataset.\")\n elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):\n self._remove_unused_columns(eval_dataset, description=\"evaluation\")\n eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset\n eval_sampler = self._get_eval_sampler(eval_dataset)\n\n return DataLoader(\n eval_dataset,\n sampler=eval_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:\n \"\"\"\n Returns the test :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.\n \"\"\"\n if not isinstance(test_dataset, collections.abc.Sized):\n raise ValueError(\"test_dataset must implement __len__\")\n elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):\n self._remove_unused_columns(test_dataset, description=\"test\")\n test_sampler = self._get_eval_sampler(test_dataset)\n\n # We use the same batch_size as for eval.\n return DataLoader(\n test_dataset,\n sampler=test_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def create_optimizer_and_scheduler(self, num_training_steps: int):\n \"\"\"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the\n        Trainer's init through :obj:`optimizers`, or subclass and override this method.\n        \"\"\"\n        if self.optimizer is None:\n            no_decay = [\"bias\", \"LayerNorm.weight\"]\n            optimizer_grouped_parameters = [\n                {\n                    \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n                    \"weight_decay\": self.args.weight_decay,\n                },\n                {\n                    \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n                    \"weight_decay\": 0.0,\n                },\n            ]\n            if self.args.adafactor:\n                optimizer_cls = Adafactor\n                optimizer_kwargs = {\"scale_parameter\": False, \"relative_step\": False}\n            else:\n                optimizer_cls = AdamW\n                optimizer_kwargs = {\n                    \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n                    \"eps\": self.args.adam_epsilon,\n                }\n            optimizer_kwargs[\"lr\"] = self.args.learning_rate\n            if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n                self.optimizer = OSS(\n                    params=optimizer_grouped_parameters,\n                    optim=optimizer_cls,\n                    **optimizer_kwargs,\n                )\n            else:\n                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n\n        if self.lr_scheduler is None:\n            warmup_steps = (\n                self.args.warmup_steps\n                if self.args.warmup_steps > 0\n                else math.ceil(num_training_steps * self.args.warmup_ratio)\n            )\n\n            self.lr_scheduler = get_scheduler(\n                self.args.lr_scheduler_type,\n                self.optimizer,\n                num_warmup_steps=warmup_steps,\n                num_training_steps=num_training_steps,\n            )\n\n    def num_examples(self, dataloader: DataLoader) -> int:\n        \"\"\"\n        Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.\n\n        Will raise an exception if the underlying dataset does not implement method :obj:`__len__`\n        \"\"\"\n        return len(dataloader.dataset)\n\n    def _hp_search_setup(self, trial: Union[\"optuna.Trial\", Dict[str, Any]]):\n        \"\"\" HP search setup code \"\"\"\n        self._trial = trial\n\n        if self.hp_search_backend is None or trial is None:\n            return\n\n        params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial\n        for key, value in params.items():\n            if not hasattr(self.args, key):\n                raise AttributeError(\n                    f\"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`.\"\n                )\n            old_attr = getattr(self.args, key, None)\n            # Casting value to the proper type\n            if old_attr is not None:\n                value = type(old_attr)(value)\n            setattr(self.args, key, value)\n        if self.hp_search_backend == HPSearchBackend.OPTUNA:\n            logger.info(f\"Trial: {trial.params}\")\n\n    def _report_to_hp_search(\n        self, trial: Union[\"optuna.Trial\", Dict[str, Any]], epoch: int, metrics: Dict[str, float]\n    ):\n        if self.hp_search_backend is None or trial is None:\n            return\n        self.objective = self.compute_objective(metrics.copy())\n        if self.hp_search_backend == HPSearchBackend.OPTUNA:\n            import optuna\n\n            trial.report(self.objective, epoch)\n            if trial.should_prune():\n                raise optuna.TrialPruned()\n        elif self.hp_search_backend == HPSearchBackend.RAY:\n            from ray import tune\n\n            if self.control.should_save:\n                self._tune_save_checkpoint()\n            tune.report(objective=self.objective, **metrics)\n\n    def _tune_save_checkpoint(self):\n        from ray import tune\n\n        if not self.use_tune_checkpoints:\n            return\n        with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:\n            output_dir = os.path.join(checkpoint_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\")\n            
self.save_model(output_dir)\n if self.is_world_process_zero():\n self.state.save_to_json(os.path.join(output_dir, \"trainer_state.json\"))\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n\n def call_model_init(self, trial=None):\n model_init_argcount = len(inspect.signature(self.model_init).parameters)\n if model_init_argcount == 0:\n model = self.model_init()\n elif model_init_argcount == 1:\n model = self.model_init(trial)\n else:\n raise RuntimeError(\"model_init should have 0 or 1 argument.\")\n\n if model is None:\n raise RuntimeError(\"model_init should not return None.\")\n\n return model\n\n def _wrap_model(self, model, training=True):\n # already initialized its own DDP and AMP\n if self.deepspeed:\n return self.deepspeed\n\n # Mixed precision training with apex (torch < 1.6)\n if self.use_apex and training:\n model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)\n\n # Multi-gpu training (should be after apex fp16 initialization)\n if self.args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Note: in torch.distributed mode, there's no point in wrapping the model\n # inside a DistributedDataParallel as we'll be under `no_grad` anyways.\n if not training:\n return model\n\n # Distributed training (should be after apex fp16 initialization)\n if self.sharded_ddp is not None:\n # Sharded DDP!\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n model = ShardedDDP(model, self.optimizer)\n else:\n mixed_precision = self.args.fp16\n cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp\n zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3\n # XXX: Breaking the self.model convention but I see no way around it for now.\n self.model = model = FullyShardedDDP(\n model, mixed_precision=mixed_precision, reshard_after_forward=zero_3, cpu_offload=cpu_offload\n ).to(self.args.device)\n\n elif is_sagemaker_distributed_available():\n model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)\n elif self.args.local_rank != -1:\n if self.args.ddp_find_unused_parameters is not None:\n find_unused_parameters = self.args.ddp_find_unused_parameters\n elif isinstance(model, PreTrainedModel):\n # find_unused_parameters breaks checkpointing as per\n # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021\n find_unused_parameters = not getattr(model.config, \"gradient_checkpointing\", False)\n else:\n find_unused_parameters = True\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.args.local_rank],\n output_device=self.args.local_rank,\n find_unused_parameters=find_unused_parameters,\n )\n\n return model\n\n def train(\n self,\n resume_from_checkpoint: Optional[Union[str, bool]] = None,\n trial: Union[\"optuna.Trial\", Dict[str, Any]] = None,\n **kwargs,\n ):\n \"\"\"\n Main training entry point.\n\n Args:\n resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):\n If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of\n :class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in\n `args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. 
If present,\n training will resume from the model/optimizer/scheduler states loaded here.\n trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):\n The trial run or the hyperparameter dictionary for hyperparameter search.\n kwargs:\n Additional keyword arguments used to hide deprecated arguments\n \"\"\"\n\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n self.is_in_train = True\n\n if \"model_path\" in kwargs:\n resume_from_checkpoint = kwargs.pop(\"model_path\")\n warnings.warn(\n \"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` \"\n \"instead.\",\n FutureWarning,\n )\n if len(kwargs) > 0:\n raise TypeError(f\"train() received unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.\")\n # This might change the seed so needs to run first.\n self._hp_search_setup(trial)\n\n # Model re-init\n model_reloaded = False\n if self.model_init is not None:\n # Seed must be set before instantiating the model when using model_init.\n set_seed(self.args.seed)\n self.model = self.call_model_init(trial)\n model_reloaded = True\n # Reinitializes optimizer and scheduler\n self.optimizer, self.lr_scheduler = None, None\n\n # Load potential model checkpoint\n if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:\n resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)\n if resume_from_checkpoint is None:\n raise ValueError(f\"No valid checkpoint found in output directory ({self.args.output_dir})\")\n\n if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):\n logger.info(f\"Loading model from {resume_from_checkpoint}.\")\n if isinstance(self.model, PreTrainedModel):\n self.model = self.model.from_pretrained(resume_from_checkpoint)\n model_reloaded = True\n else:\n state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))\n self.model.load_state_dict(state_dict)\n\n # If model was re-initialized, put it on the right device and update self.model_wrapped\n if model_reloaded:\n if self.place_model_on_device:\n self.model = self.model.to(self.args.device)\n self.model_wrapped = self.model\n\n # Keeping track whether we can call len() on the dataset or not\n train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)\n\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n if train_dataset_is_sized:\n num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n if self.args.max_steps > 0:\n max_steps = self.args.max_steps\n num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(\n self.args.max_steps % num_update_steps_per_epoch > 0\n )\n else:\n max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(self.args.num_train_epochs)\n else:\n # see __init__.
max_steps is set when the dataset has no __len__\n max_steps = self.args.max_steps\n num_train_epochs = 1\n num_update_steps_per_epoch = max_steps\n\n delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE\n if self.args.deepspeed:\n model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)\n self.model = model.module\n self.model_wrapped = model # will get further wrapped in DDP\n self.deepspeed = model # DeepSpeedEngine object\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n elif not delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n model = self._wrap_model(self.model_wrapped)\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n if delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n # important: at this point:\n # self.model is the Transformers Model\n # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.\n\n # Train!\n if is_torch_tpu_available():\n world_size = xm.xrt_world_size()\n elif self.args.local_rank != -1:\n world_size = dist.get_world_size()\n else:\n world_size = 1\n\n total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size\n num_examples = (\n self.num_examples(train_dataloader)\n if train_dataset_is_sized\n else total_train_batch_size * self.args.max_steps\n )\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {self.args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(\n os.path.join(resume_from_checkpoint, \"trainer_state.json\")\n ):\n self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, \"trainer_state.json\"))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not self.args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {self.state.global_step}\")\n if not self.args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch.\"\n )\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None\n self.state.trial_params = hp_params(trial) if trial is not None else None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(self.args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n self._total_flos = self.state.total_flos\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not self.args.ignore_data_skip:\n for epoch in range(epochs_trained):\n # We just need to begin an iteration to create the randomization of the sampler.\n for _ in train_dataloader:\n break\n\n for epoch in range(epochs_trained, num_train_epochs):\n if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):\n train_dataloader.sampler.set_epoch(epoch)\n\n if is_torch_tpu_available():\n parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(\n self.args.device\n )\n epoch_iterator = parallel_loader\n else:\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if self.args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = (\n len(epoch_iterator)\n if train_dataset_is_sized\n else 
self.args.max_steps * self.args.gradient_accumulation_steps\n )\n self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)\n\n for step, inputs in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)\n\n if (\n ((step + 1) % self.args.gradient_accumulation_steps != 0)\n and self.args.local_rank != -1\n and self.args._no_sync_in_gradient_accumulation\n ):\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n tr_loss += self.training_step(model, inputs)\n else:\n tr_loss += self.training_step(model, inputs)\n self._total_flos += float(self.floating_point_ops(inputs))\n\n # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps\n if self.deepspeed:\n self.deepspeed.step()\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= self.args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:\n # deepspeed does its own clipping\n\n if self.use_amp:\n # AMP: gradients need unscaling\n self.scaler.unscale_(self.optimizer)\n\n if hasattr(self.optimizer, \"clip_grad_norm\"):\n # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping\n self.optimizer.clip_grad_norm(self.args.max_grad_norm)\n elif hasattr(model, \"clip_grad_norm_\"):\n # Some models (like FullyShardedDDP) have a specific way to do gradient clipping\n model.clip_grad_norm_(self.args.max_grad_norm)\n else:\n # Revert to normal clipping otherwise, handling Apex or full precision\n torch.nn.utils.clip_grad_norm_(\n amp.master_params(self.optimizer) if self.use_apex else model.parameters(),\n self.args.max_grad_norm,\n )\n\n # Optimizer step\n if self.deepspeed:\n pass # called outside the loop\n elif is_torch_tpu_available():\n xm.optimizer_step(self.optimizer)\n elif self.use_amp:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n if not self.deepspeed:\n self.lr_scheduler.step()\n\n model.zero_grad()\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n\n self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)\n\n if self.args.tpu_metrics_debug or self.args.debug:\n if is_torch_tpu_available():\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n else:\n logger.warning(\n \"You enabled PyTorch/XLA debug metrics but you don't have a TPU \"\n \"configured. 
Check your training configuration if this is unexpected.\"\n )\n if self.control.should_training_stop:\n break\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\"\\n\\nTraining completed. Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n logger.info(\n f\"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).\"\n )\n if isinstance(self.model, PreTrainedModel):\n self.model = self.model.from_pretrained(self.state.best_model_checkpoint)\n if self.place_model_on_device:\n self.model = self.model.to(self.args.device)\n else:\n state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))\n self.model.load_state_dict(state_dict)\n\n if self.deepspeed:\n self.deepspeed.load_checkpoint(\n self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False\n )\n\n metrics = speed_metrics(\"train\", start_time, self.state.max_steps)\n if self._total_flos is not None:\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n self.log(metrics)\n\n self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n\n if self.deepspeed:\n # free up any memory that might be useful for eval\n self.deepspeed = None\n self.optimizer = None\n self.lr_scheduler = None\n self.model_wrapped = self.model\n gc.collect() # force memory release\n # to restore normal behavior outside of train replay the place_model_on_device logic w/o deepspeed\n self.place_model_on_device = self.args.place_model_on_device\n if self.is_model_parallel:\n self.place_model_on_device = False\n\n self.is_in_train = False\n\n self._memory_tracker.stop_and_update_metrics(metrics)\n\n return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)\n\n def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):\n if self.control.should_log:\n logs: Dict[str, float] = {}\n tr_loss_scalar = tr_loss.item()\n # reset tr_loss to zero\n tr_loss -= tr_loss\n\n logs[\"loss\"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)\n logs[\"learning_rate\"] = self._get_learning_rate()\n\n self._total_loss_scalar += tr_loss_scalar\n self._globalstep_last_logged = self.state.global_step\n\n self.log(logs)\n\n metrics = None\n if self.control.should_evaluate:\n metrics = self.evaluate()\n self._report_to_hp_search(trial, epoch, metrics)\n\n if self.control.should_save:\n self._save_checkpoint(model, trial, metrics=metrics)\n self.control = self.callback_handler.on_save(self.args, self.state, self.control)\n\n def _save_checkpoint(self, model, trial, metrics=None):\n # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we\n # want to save except FullyShardedDDP.\n # assert unwrap_model(model) is self.model, \"internal model should be a reference to self.model\"\n\n # Save model checkpoint\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n\n if self.hp_search_backend is not None and trial is not None:\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n run_id = trial.number\n else:\n from ray import tune\n\n run_id = tune.get_trial_id()\n run_name = self.hp_name(trial) if self.hp_name is not None else f\"run-{run_id}\"\n 
run_dir = os.path.join(self.args.output_dir, run_name)\n else:\n run_dir = self.args.output_dir\n self.store_flos()\n\n output_dir = os.path.join(run_dir, checkpoint_folder)\n self.save_model(output_dir)\n if self.deepspeed:\n self.deepspeed.save_checkpoint(output_dir)\n\n # Save optimizer and scheduler\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n self.optimizer.consolidate_state_dict()\n\n if is_torch_tpu_available():\n xm.rendezvous(\"saving_optimizer_states\")\n xm.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n with warnings.catch_warnings(record=True) as caught_warnings:\n xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n reissue_pt_warnings(caught_warnings)\n elif self.is_world_process_zero() and not self.deepspeed:\n # deepspeed.save_checkpoint above saves model/optim/sched\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n with warnings.catch_warnings(record=True) as caught_warnings:\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n reissue_pt_warnings(caught_warnings)\n\n # Determine the new best metric / best model checkpoint\n if metrics is not None and self.args.metric_for_best_model is not None:\n metric_to_check = self.args.metric_for_best_model\n if not metric_to_check.startswith(\"eval_\"):\n metric_to_check = f\"eval_{metric_to_check}\"\n metric_value = metrics[metric_to_check]\n\n operator = np.greater if self.args.greater_is_better else np.less\n if (\n self.state.best_metric is None\n or self.state.best_model_checkpoint is None\n or operator(metric_value, self.state.best_metric)\n ):\n self.state.best_metric = metric_value\n self.state.best_model_checkpoint = output_dir\n\n # Save the Trainer state\n if self.is_world_process_zero():\n self.state.save_to_json(os.path.join(output_dir, \"trainer_state.json\"))\n\n # Maybe delete some older checkpoints.\n if self.is_world_process_zero():\n self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)\n\n def _load_optimizer_and_scheduler(self, checkpoint):\n \"\"\"If optimizer and scheduler states exist, load them.\"\"\"\n if checkpoint is None:\n return\n\n if os.path.isfile(os.path.join(checkpoint, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(checkpoint, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n if is_torch_tpu_available():\n # On TPU we have to take some extra precautions to properly load the states on the right device.\n optimizer_state = torch.load(os.path.join(checkpoint, \"optimizer.pt\"), map_location=\"cpu\")\n with warnings.catch_warnings(record=True) as caught_warnings:\n lr_scheduler_state = torch.load(os.path.join(checkpoint, \"scheduler.pt\"), map_location=\"cpu\")\n reissue_pt_warnings(caught_warnings)\n\n xm.send_cpu_data_to_device(optimizer_state, self.args.device)\n xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)\n\n self.optimizer.load_state_dict(optimizer_state)\n self.lr_scheduler.load_state_dict(lr_scheduler_state)\n else:\n self.optimizer.load_state_dict(\n torch.load(os.path.join(checkpoint, \"optimizer.pt\"), map_location=self.args.device)\n )\n with warnings.catch_warnings(record=True) as caught_warnings:\n self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, \"scheduler.pt\")))\n reissue_pt_warnings(caught_warnings)\n\n if self.deepspeed:\n # Not sure how to check if there is a saved deepspeed checkpoint, but since it just return None if it fails to find a deepspeed checkpoint 
this is sort of a check-and-load function\n self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)\n\n def hyperparameter_search(\n self,\n hp_space: Optional[Callable[[\"optuna.Trial\"], Dict[str, float]]] = None,\n compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,\n n_trials: int = 20,\n direction: str = \"minimize\",\n backend: Optional[Union[\"str\", HPSearchBackend]] = None,\n hp_name: Optional[Callable[[\"optuna.Trial\"], str]] = None,\n **kwargs,\n ) -> BestRun:\n \"\"\"\n Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by\n :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is\n provided, the sum of all metrics otherwise.\n\n .. warning::\n\n To use this method, you need to have provided a ``model_init`` when initializing your\n :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible\n with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the\n method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.\n\n Args:\n hp_space (:obj:`Callable[[\"optuna.Trial\"], Dict[str, float]]`, `optional`):\n A function that defines the hyperparameter search space. Will default to\n :func:`~transformers.trainer_utils.default_hp_space_optuna` or\n :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.\n compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):\n A function computing the objective to minimize or maximize from the metrics returned by the\n :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.\n n_trials (:obj:`int`, `optional`, defaults to 20):\n The number of trial runs to test.\n direction (:obj:`str`, `optional`, defaults to :obj:`\"minimize\"`):\n Whether to maximize or minimize the objective. Can be :obj:`\"minimize\"` or :obj:`\"maximize\"`, you should\n pick :obj:`\"minimize\"` when optimizing the validation loss, :obj:`\"maximize\"` when optimizing one or\n several metrics.\n backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):\n The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which\n one is installed. If both are installed, will default to optuna.\n kwargs:\n Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For\n more information see:\n\n - the documentation of `optuna.create_study\n <https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__\n - the documentation of `tune.run\n <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__\n\n Returns:\n :class:`transformers.trainer_utils.BestRun`: All the information about the best run.\n \"\"\"\n if backend is None:\n backend = default_hp_search_backend()\n if backend is None:\n raise RuntimeError(\n \"At least one of optuna or ray should be installed. \"\n \"To install optuna run `pip install optuna`.\"\n \"To install ray run `pip install ray[tune]`.\"\n )\n backend = HPSearchBackend(backend)\n if backend == HPSearchBackend.OPTUNA and not is_optuna_available():\n raise RuntimeError(\"You picked the optuna backend, but it is not installed.
Use `pip install optuna`.\")\n if backend == HPSearchBackend.RAY and not is_ray_tune_available():\n raise RuntimeError(\n \"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`.\"\n )\n self.hp_search_backend = backend\n if self.model_init is None:\n raise RuntimeError(\n \"To use hyperparameter search, you need to pass your model through a model_init function.\"\n )\n\n self.hp_space = default_hp_space[backend] if hp_space is None else hp_space\n self.hp_name = hp_name\n self.compute_objective = default_compute_objective if compute_objective is None else compute_objective\n\n run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray\n best_run = run_hp_search(self, n_trials, direction, **kwargs)\n\n self.hp_search_backend = None\n return best_run\n\n def log(self, logs: Dict[str, float]) -> None:\n \"\"\"\n Log :obj:`logs` on the various objects watching training.\n\n Subclass and override this method to inject custom behavior.\n\n Args:\n logs (:obj:`Dict[str, float]`):\n The values to log.\n \"\"\"\n if self.state.epoch is not None:\n logs[\"epoch\"] = round(self.state.epoch, 2)\n\n output = {**logs, **{\"step\": self.state.global_step}}\n self.state.log_history.append(output)\n self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)\n\n def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:\n \"\"\"\n Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and\n handling potential state.\n \"\"\"\n for k, v in inputs.items():\n if isinstance(v, torch.Tensor):\n inputs[k] = v.to(self.args.device)\n\n if self.args.past_index >= 0 and self._past is not None:\n inputs[\"mems\"] = self._past\n\n return inputs\n\n def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:\n \"\"\"\n Perform a training step on a batch of inputs.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to train.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. Check your model's documentation for all accepted arguments.\n\n Return:\n :obj:`torch.Tensor`: The tensor with training loss on this batch.\n \"\"\"\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if self.use_amp:\n with autocast():\n loss = self.compute_loss(model, inputs)\n else:\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:\n # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`\n loss = loss / self.args.gradient_accumulation_steps\n\n if self.use_amp:\n self.scaler.scale(loss).backward()\n elif self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n elif self.deepspeed:\n # loss gets scaled under gradient_accumulation_steps in deepspeed\n loss = self.deepspeed.backward(loss)\n else:\n loss.backward()\n\n return loss.detach()\n\n def compute_loss(self, model, inputs, return_outputs=False):\n \"\"\"\n How the loss is computed by Trainer. 
By default, all models return the loss in the first element.\n\n Subclass and override for custom behavior.\n \"\"\"\n if self.label_smoother is not None and \"labels\" in inputs:\n labels = inputs.pop(\"labels\")\n else:\n labels = None\n outputs = model(**inputs)\n # Save past state if it exists\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index]\n\n if labels is not None:\n loss = self.label_smoother(outputs, labels)\n else:\n # We don't use .loss here since the model may return tuples instead of ModelOutput.\n loss = outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]\n\n return (loss, outputs) if return_outputs else loss\n\n def is_local_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several\n machines) main process.\n \"\"\"\n if is_torch_tpu_available():\n return xm.is_master_ordinal(local=True)\n else:\n return self.args.local_rank in [-1, 0]\n\n def is_world_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the global main process (when training in a distributed fashion on several\n machines, this is only going to be :obj:`True` for one process).\n \"\"\"\n if is_torch_tpu_available():\n return xm.is_master_ordinal(local=False)\n else:\n return self.args.local_rank == -1 or dist.get_rank() == 0\n\n def save_model(self, output_dir: Optional[str] = None):\n \"\"\"\n Will save the model, so you can reload it using :obj:`from_pretrained()`.\n\n Will only save from the main process.\n \"\"\"\n if is_torch_tpu_available():\n self._save_tpu(output_dir)\n else:\n if self.is_world_process_zero():\n self._save(output_dir)\n if self.args.local_rank != -1:\n dist.barrier()\n\n def _save_tpu(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if xm.is_master_ordinal():\n os.makedirs(output_dir, exist_ok=True)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n xm.rendezvous(\"saving_checkpoint\")\n if not isinstance(self.model, PreTrainedModel):\n if isinstance(unwrap_model(self.model), PreTrainedModel):\n unwrap_model(self.model).save_pretrained(\n output_dir,\n save_config=self.is_world_process_zero(),\n state_dict=self.model.state_dict(),\n save_function=xm.save,\n )\n else:\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n state_dict = self.model.state_dict()\n xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)\n if self.tokenizer is not None and self.is_world_process_zero():\n self.tokenizer.save_pretrained(output_dir)\n\n def _save(self, output_dir: Optional[str] = None):\n # If we are executing this function, we are the process zero, so we don't check for that.\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel):\n if 
isinstance(unwrap_model(self.model), PreTrainedModel):\n unwrap_model(self.model).save_pretrained(output_dir, state_dict=self.model.state_dict())\n else:\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n state_dict = self.model.state_dict()\n torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir)\n if self.tokenizer is not None:\n self.tokenizer.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n def store_flos(self):\n # Storing the number of floating-point operations that went into the model\n if self._total_flos is not None:\n if self.args.local_rank != -1:\n self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()\n else:\n self.state.total_flos = self._total_flos\n\n def _sorted_checkpoints(\n self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False\n ) -> List[str]:\n ordering_and_checkpoint_path = []\n\n glob_checkpoints = [str(x) for x in Path(output_dir).glob(f\"{checkpoint_prefix}-*\")]\n\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match(f\".*{checkpoint_prefix}-([0-9]+)\", path)\n if regex_match and regex_match.groups():\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n # Make sure we don't delete the best model.\n if self.state.best_model_checkpoint is not None:\n best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))\n checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (\n checkpoints_sorted[-1],\n checkpoints_sorted[best_model_index],\n )\n return checkpoints_sorted\n\n def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:\n if self.args.save_total_limit is None or self.args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)\n if len(checkpoints_sorted) <= self.args.save_total_limit:\n return\n\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\n shutil.rmtree(checkpoint)\n\n def evaluate(\n self,\n eval_dataset: Optional[Dataset] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> Dict[str, float]:\n \"\"\"\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are task-dependent\n (pass it to the init :obj:`compute_metrics` argument).\n\n You can also subclass and override this method to inject custom behavior.\n\n Args:\n eval_dataset (:obj:`Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,\n columns not accepted by the ``model.forward()`` method are automatically removed. 
It must implement the\n :obj:`__len__` method.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default)\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The\n dictionary also contains the epoch number which comes from the training state.\n \"\"\"\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n start_time = time.time()\n\n output = self.prediction_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if self.compute_metrics is None else None,\n ignore_keys=ignore_keys,\n metric_key_prefix=metric_key_prefix,\n )\n\n n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)\n output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))\n self.log(output.metrics)\n\n if self.args.tpu_metrics_debug or self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)\n\n self._memory_tracker.stop_and_update_metrics(output.metrics)\n\n return output.metrics\n\n def predict(\n self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = \"eval\"\n ) -> PredictionOutput:\n \"\"\"\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method\n will also return metrics, like in :obj:`evaluate()`.\n\n Args:\n test_dataset (:obj:`Dataset`):\n Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default)\n\n .. note::\n\n If your predictions or labels have different sequence length (for instance because you're doing dynamic\n padding in a token classification task) the predictions will be padded (on the right) to allow for\n concatenation into one array. 
The padding index is -100.\n\n Returns: `NamedTuple` A namedtuple with the following keys:\n\n - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.\n - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).\n - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset\n contained labels).\n \"\"\"\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):\n raise ValueError(\"test_dataset must implement __len__\")\n\n test_dataloader = self.get_test_dataloader(test_dataset)\n start_time = time.time()\n\n output = self.prediction_loop(\n test_dataloader, description=\"Prediction\", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix\n )\n output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))\n\n self._memory_tracker.stop_and_update_metrics(output.metrics)\n\n return output\n\n def prediction_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> PredictionOutput:\n \"\"\"\n Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.\n\n Works both with or without labels.\n \"\"\"\n if not isinstance(dataloader.dataset, collections.abc.Sized):\n raise ValueError(\"dataset must implement __len__\")\n prediction_loss_only = (\n prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only\n )\n\n if self.args.deepspeed and not self.args.do_train:\n # no harm, but flagging to the user that deepspeed config is ignored for eval\n # flagging only for when --do_train wasn't passed as only then it's redundant\n logger.info(\"Detected the deepspeed argument but it will not be used for evaluation\")\n\n model = self._wrap_model(self.model, training=False)\n\n # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while\n # ``train`` is running, half it first and then put on device\n if not self.is_in_train and self.args.fp16_full_eval:\n model = model.half().to(self.args.device)\n\n batch_size = dataloader.batch_size\n num_examples = self.num_examples(dataloader)\n logger.info(\"***** Running %s *****\", description)\n logger.info(\" Num examples = %d\", num_examples)\n logger.info(\" Batch size = %d\", batch_size)\n losses_host: torch.Tensor = None\n preds_host: Union[torch.Tensor, List[torch.Tensor]] = None\n labels_host: Union[torch.Tensor, List[torch.Tensor]] = None\n\n world_size = 1\n if is_torch_tpu_available():\n world_size = xm.xrt_world_size()\n elif self.args.local_rank != -1:\n world_size = dist.get_world_size()\n world_size = max(1, world_size)\n\n eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)\n if not prediction_loss_only:\n preds_gatherer = DistributedTensorGatherer(world_size, num_examples)\n labels_gatherer = DistributedTensorGatherer(world_size, num_examples)\n\n model.eval()\n\n if is_torch_tpu_available():\n dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)\n\n if self.args.past_index >= 0:\n self._past = None\n\n self.callback_handler.eval_dataloader = dataloader\n\n for step, inputs in enumerate(dataloader):\n loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, 
ignore_keys=ignore_keys)\n if loss is not None:\n losses = loss.repeat(batch_size)\n losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)\n if logits is not None:\n preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)\n if labels is not None:\n labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)\n self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)\n\n # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.\n if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n # Set back to None to begin a new accumulation\n losses_host, preds_host, labels_host = None, None, None\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of the evaluation loop\n delattr(self, \"_past\")\n\n # Gather all remaining tensors and put them back on the CPU\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n eval_loss = eval_losses_gatherer.finalize()\n preds = preds_gatherer.finalize() if not prediction_loss_only else None\n label_ids = labels_gatherer.finalize() if not prediction_loss_only else None\n\n if self.compute_metrics is not None and preds is not None and label_ids is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))\n else:\n metrics = {}\n\n if eval_loss is not None:\n metrics[f\"{metric_key_prefix}_loss\"] = eval_loss.mean().item()\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n\n return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)\n\n def _gather_and_numpify(self, tensors, name):\n \"\"\"\n Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before\n concatenating them to `gathered`\n \"\"\"\n if tensors is None:\n return\n if is_torch_tpu_available():\n tensors = nested_xla_mesh_reduce(tensors, name)\n elif self.args.local_rank != -1:\n tensors = distributed_concat(tensors)\n\n return nested_numpify(tensors)\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on :obj:`model` using obj:`inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to evaluate.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. 
Check your model's documentation for all accepted arguments.\n prediction_loss_only (:obj:`bool`):\n Whether or not to return the loss only.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n\n Return:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n has_labels = all(inputs.get(k) is not None for k in self.label_names)\n inputs = self._prepare_inputs(inputs)\n if ignore_keys is None:\n if hasattr(self.model, \"config\"):\n ignore_keys = getattr(self.model.config, \"keys_to_ignore_at_inference\", [])\n else:\n ignore_keys = []\n\n # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.\n if has_labels:\n labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))\n if len(labels) == 1:\n labels = labels[0]\n else:\n labels = None\n\n with torch.no_grad():\n if has_labels:\n loss, outputs = self.compute_loss(model, inputs, return_outputs=True)\n loss = loss.mean().detach()\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + [\"loss\"])\n else:\n logits = outputs[1:]\n else:\n loss = None\n if self.use_amp:\n with autocast():\n outputs = model(**inputs)\n else:\n outputs = model(**inputs)\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)\n else:\n logits = outputs\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index - 1]\n\n if prediction_loss_only:\n return (loss, None, None)\n\n logits = nested_detach(logits)\n if len(logits) == 1:\n logits = logits[0]\n\n return (loss, logits, labels)\n\n def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):\n \"\"\"\n For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of\n floating point operations for every backward + forward pass. If using another model, either implement such a\n method in the model or subclass and override this method.\n\n Args:\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n Returns:\n :obj:`int`: The number of floating-point operations.\n \"\"\"\n if hasattr(self.model, \"floating_point_ops\"):\n return self.model.floating_point_ops(inputs)\n else:\n return 0\n" ]
[ [ "torch.utils.data.distributed.DistributedSampler", "torch.cat", "torch.utils.data.sampler.SequentialSampler", "torch.tensor", "torch.cuda.amp.autocast", "torch.distributed.barrier", "torch.nn.DataParallel", "torch.cuda.amp.GradScaler", "torch.no_grad", "torch.distributed.get_rank", "torch.utils.data.dataloader.DataLoader", "torch.utils.data.sampler.RandomSampler", "torch.distributed.get_local_rank", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MendelXu/ANN
[ "f4eabeb27dbba5c9bdcf83d03776bffa34995666", "f4eabeb27dbba5c9bdcf83d03776bffa34995666" ]
[ "methods/gan/image_translator.py", "efficiency_statics/block/apnb.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Donny You ([email protected])\n# Class Definition for GAN.\n\n\nimport time\nimport torch\n\nfrom datasets.gan.data_loader import DataLoader\nfrom methods.tools.runner_helper import RunnerHelper\nfrom methods.tools.trainer import Trainer\nfrom models.gan.model_manager import ModelManager\nfrom utils.tools.average_meter import AverageMeter\nfrom utils.tools.logger import Logger as Log\n\n\nclass ImageTranslator(object):\n \"\"\"\n The class for Pose Estimation. Include train, val, val & predict.\n \"\"\"\n def __init__(self, configer):\n self.configer = configer\n self.batch_time = AverageMeter()\n self.data_time = AverageMeter()\n self.train_losses = AverageMeter()\n self.val_losses = AverageMeter()\n self.model_manager = ModelManager(configer)\n self.seg_data_loader = DataLoader(configer)\n\n self.gan_net = None\n self.train_loader = None\n self.val_loader = None\n self.optimizer = None\n self.scheduler = None\n self.runner_state = dict()\n\n self._init_model()\n\n def _init_model(self):\n self.gan_net = self.model_manager.gan_model()\n self.gan_net = RunnerHelper.load_net(self, self.gan_net)\n\n self.optimizer, self.scheduler = Trainer.init(self._get_parameters(), self.configer.get('solver'))\n\n self.train_loader = self.seg_data_loader.get_trainloader()\n self.val_loader = self.seg_data_loader.get_valloader()\n\n def _get_parameters(self):\n\n return self.gan_net.parameters()\n\n def train(self):\n \"\"\"\n Train function of every epoch during train phase.\n \"\"\"\n self.gan_net.train()\n start_time = time.time()\n # Adjust the learning rate after every epoch.\n for i, data_dict in enumerate(self.train_loader):\n Trainer.update(self, solver_dict=self.configer.get('solver'))\n inputs = data_dict['imgA']\n self.data_time.update(time.time() - start_time)\n\n # Forward pass.\n out_dict = self.gan_net(data_dict)\n # outputs = self.module_utilizer.gather(outputs)\n loss = out_dict['loss'].mean()\n self.train_losses.update(loss.item(), inputs.size(0))\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the vars of the train phase.\n self.batch_time.update(time.time() - start_time)\n start_time = time.time()\n self.runner_state['iters'] += 1\n\n # Print the log info & reset the states.\n if self.runner_state['iters'] % self.configer.get('solver', 'display_iter') == 0:\n Log.info('Train Epoch: {0}\\tTrain Iteration: {1}\\t'\n 'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\\t'\n 'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\\n'\n 'Learning rate = {3}\\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\\n'.format(\n self.runner_state['epoch'], self.runner_state['iters'],\n self.configer.get('solver', 'display_iter'),\n RunnerHelper.get_lr(self.optimizer), batch_time=self.batch_time,\n data_time=self.data_time, loss=self.train_losses))\n self.batch_time.reset()\n self.data_time.reset()\n self.train_losses.reset()\n\n if self.configer.get('solver', 'lr')['metric'] == 'iters' \\\n and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):\n break\n\n # Check to val the current model.\n if self.runner_state['iters'] % self.configer.get('solver', 'test_interval') == 0:\n self.val()\n\n self.runner_state['epoch'] += 1\n\n def val(self, data_loader=None):\n \"\"\"\n Validation function during the train phase.\n \"\"\"\n self.gan_net.eval()\n start_time = time.time()\n\n data_loader = self.val_loader if data_loader is None else data_loader\n for j, data_dict in 
enumerate(data_loader):\n inputs = data_dict['imgA']\n\n with torch.no_grad():\n # Forward pass.\n out_dict = self.gan_net(data_dict)\n # Compute the loss of the val batch.\n\n self.val_losses.update(out_dict['loss'].mean().item(), inputs.size(0))\n # Update the vars of the val phase.\n self.batch_time.update(time.time() - start_time)\n start_time = time.time()\n\n RunnerHelper.save_net(self, self.gan_net,\n val_loss=self.val_losses.avg)\n\n # Print the log info & reset the states.\n Log.info(\n 'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\\t'\n 'Loss {loss.avg:.8f}\\n'.format(\n batch_time=self.batch_time, loss=self.val_losses))\n self.batch_time.reset()\n self.val_losses.reset()\n self.gan_net.train()\n\n\nif __name__ == \"__main__\":\n # Test class for pose estimator.\n pass\n", "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom model.tools.module_helper import ModuleHelper\n\n\nclass PSPModule(nn.Module):\n # (1, 2, 3, 6)\n def __init__(self, sizes=(1, 3, 6, 8), dimension=2):\n super(PSPModule, self).__init__()\n self.stages = nn.ModuleList([self._make_stage(size, dimension) for size in sizes])\n\n def _make_stage(self, size, dimension=2):\n if dimension == 1:\n prior = nn.AdaptiveAvgPool1d(output_size=size)\n elif dimension == 2:\n prior = nn.AdaptiveAvgPool2d(output_size=(size, size))\n elif dimension == 3:\n prior = nn.AdaptiveAvgPool3d(output_size=(size, size, size))\n return prior\n\n def forward(self, feats):\n n, c, _, _ = feats.size()\n priors = [stage(feats).view(n, c, -1) for stage in self.stages]\n center = torch.cat(priors, -1)\n return center\n\n\nclass _SelfAttentionBlock(nn.Module):\n '''\n The basic implementation for self-attention block/non-local block\n Input:\n N X C X H X W\n Parameters:\n in_channels : the dimension of the input feature map\n key_channels : the dimension after the key/query transform\n value_channels : the dimension after the value transform\n scale : choose the scale to downsample the input feature maps (save memory cost)\n Return:\n N X C X H X W\n position-aware context features.(w/o concate or add with the input)\n '''\n\n def __init__(self, in_channels, key_channels, value_channels, out_channels=None, scale=1, norm_type=None,psp_size=(1,3,6,8)):\n super(_SelfAttentionBlock, self).__init__()\n self.scale = scale\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.key_channels = key_channels\n self.value_channels = value_channels\n if out_channels == None:\n self.out_channels = in_channels\n self.pool = nn.MaxPool2d(kernel_size=(scale, scale))\n self.f_key = nn.Sequential(\n nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,\n kernel_size=1, stride=1, padding=0),\n ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),\n )\n self.f_query = self.f_key\n self.f_value = nn.Conv2d(in_channels=self.in_channels, out_channels=self.value_channels,\n kernel_size=1, stride=1, padding=0)\n self.W = nn.Conv2d(in_channels=self.value_channels, out_channels=self.out_channels,\n kernel_size=1, stride=1, padding=0)\n\n self.psp = PSPModule(psp_size)\n nn.init.constant_(self.W.weight, 0)\n nn.init.constant_(self.W.bias, 0)\n\n def forward(self, x):\n batch_size, h, w = x.size(0), x.size(2), x.size(3)\n if self.scale > 1:\n x = self.pool(x)\n\n value = self.psp(self.f_value(x))\n\n query = self.f_query(x).view(batch_size, self.key_channels, -1)\n query = query.permute(0, 2, 1)\n key = self.f_key(x)\n # value=self.psp(value)#.view(batch_size, self.value_channels, -1)\n 
value = value.permute(0, 2, 1)\n key = self.psp(key) # .view(batch_size, self.key_channels, -1)\n sim_map = torch.matmul(query, key)\n sim_map = (self.key_channels ** -.5) * sim_map\n sim_map = F.softmax(sim_map, dim=-1)\n\n context = torch.matmul(sim_map, value)\n context = context.permute(0, 2, 1).contiguous()\n context = context.view(batch_size, self.value_channels, *x.size()[2:])\n context = self.W(context)\n return context\n\n\nclass SelfAttentionBlock2D(_SelfAttentionBlock):\n def __init__(self, in_channels, key_channels, value_channels, out_channels=None, scale=1, norm_type=None,psp_size=(1,3,6,8)):\n super(SelfAttentionBlock2D, self).__init__(in_channels,\n key_channels,\n value_channels,\n out_channels,\n scale,\n norm_type,\n psp_size=psp_size)\n\n\nclass APNB(nn.Module):\n\n def __init__(self, in_channels, out_channels, key_channels, value_channels, dropout,norm_type=None,psp_size=(1,3,6,8)):\n super(APNB, self).__init__()\n self.stages = []\n self.norm_type = norm_type\n self.psp_size=psp_size\n self.model = SelfAttentionBlock2D(in_channels,\n key_channels,\n value_channels,\n out_channels,\n norm_type=self.norm_type,\n psp_size=self.psp_size)\n\n def forward(self, feats):\n output = self.model(feats)\n return output\n\n" ]
[ [ "torch.no_grad" ], [ "torch.nn.functional.softmax", "torch.cat", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool3d", "torch.nn.MaxPool2d", "torch.matmul", "torch.nn.AdaptiveAvgPool2d", "torch.nn.AdaptiveAvgPool1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JiahuaZhao/HPC-Python-CFD
[ "4fe4db053566603232bf16bdd06f8207cdadde0a" ]
[ "numpy_mpi_numexpr/cfd_mpi_ne.py" ]
[ "\n\n#!/usr/bin/env python\n#\n# CFD Calculation with MPI4PY\n# ===============\n#\n# Simulation of inviscid flow in a 2D box using the Jacobi algorithm.\n#\n# Python version - uses numpy and loops\n#\n# Alejandro Dinkelberg\n#\nimport os\nimport sys\n#import mkl\nimport time\nimport mpi4py.MPI as MPI\n\n# Import numpy\nimport numpy as np\nimport numexpr as ne\nfrom copy import deepcopy\nos.environ['NUMEXPR_MAX_THREADS'] = '128'\nne.set_num_threads(2)\n#mkl.set_num_threads(128)\n#ne.set_vml_num_threads(128)\n#ne.set_vml_accuracy_mode('fast')\n\n##################################################################################################################################################################\n# boundary and haloSWAP\n\ndef boundarypsi(psi, m, n, b, h, w, comm):\n # initialize the std values MPI\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n istart = m*rank + 1\n istop = istart + m - 1\n \n # BCs on bottom edge\n for i in range(b+1, b+w):\n if i >= istart and i <= istop:\n psi[i-istart+1][0] = i-b\n\n for i in range(b+w, m*size+1):\n if i >= istart and i <= istop:\n psi[i-istart+1][0] = w\n \n # BCS on RHS\n if rank == size-1:\n for j in range(1, h+1):\n psi[m+1][j] = w\n for j in range(h+1, h+w):\n psi[m+1][j]= w-j+h\n\n\ndef boundaryzet(zet, psi, m, n, comm):\n # initialize the std values MPI\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n istart = m*rank + 1\n istop = istart + m - 1\n\n # set top/bottom BCs:\n zet[1:m+1, 0] = 2 * (psi[1:m+1, 1] - psi[1:m+1, 0])\n zet[1:m+1, n+1] = 2 * (psi[1:m+1, n] - psi[1:m+1, n+1])\n\n # Set left BCs\n if 0 == rank:\n zet[0, 1:n+1] = 2 * (psi[1, 1:n+1] - psi[0, 1:n+1])\n\n # Set right BCs\n if size-1 == rank:\n zet[m+1, 1:n+1] = 2 * (psi[m, 1:n+1] - psi[m+1, 1:n+1])\n\n return zet\n\ndef haloSWAP(x, lm, n, comm):\n tag = 1\n status = MPI.Status()\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n # no need to halo swap if serial:\n if size > 1:\n # send right boundaries and receive left ones\n if rank == 0:\n comm.Send(x[lm][1:n+1], rank+1, tag)\n elif rank == size-1:\n comm.Recv(x[0][1:n+1], rank-1, tag, status)\n else:\n comm.Sendrecv(x[lm][1:n+1], rank+1, tag, x[0][1:n+1], rank-1, tag, status)\n # send left boundary and receive right\n if rank == 0:\n comm.Recv(x[lm+1][1:n+1], rank+1, tag, status)\n elif rank == size-1:\n comm.Send(x[1][1:n+1], rank-1, tag)\n else:\n comm.Sendrecv(x[1][1:n+1], rank-1, tag, x[lm+1][1:n+1], rank+1, tag, status)\n\n\n##################################################################################################################################################################\n# util.py\n\ndef write_data(lm, n, scale, psi, velfile, colfile, comm):\n # mpi essentials\n m = lm\n rank = comm.Get_rank()\n size = comm.Get_size()\n # calculate velocities and hue2rgd\n vel = np.zeros((m,n, 2))\n rgb = np.zeros((m,n,3), dtype='i')\n print(psi)\n for i in range(0, m-1):\n for j in range(0, n-1):\n vel[i][j][0] = (psi[i+1][j+2]-psi[i+1][j])/2.0\n vel[i][j][1] = -(psi[i+2][j+1]-psi[i][j+1])/2.0\n\n v1 = vel[i][j][0]\n v2 = vel[i][j][1]\n\n hue = (v1*v1 + v2*v2)**0.4 # modvsq**0.4\n rgb[i][j] = hue2rgb(hue)\n\n if 0 == rank:\n\n # Open the specified files\n velout = open(velfile, \"w\")\n #velout.write(\"{0} {1}\\n\".format(m/scale, n/scale))\n colout = open(colfile, \"w\")\n #colout.write(\"{0} {1}\\n\".format(m, n))\n for irank in range(0, size):\n if 0 == rank:\n comm.Recv(rgb[0][0][0:3*m*n], source=irank, tag=1, status=MPI.Status())\n comm.Recv(vel[0][0][0:2*m*n], source=irank, tag=1, 
status=MPI.Status())\n\n for irank in range(0, m):\n ix = irank*m+i+1\n for j in range(0, n):\n \n iy = j+1\n colout.write(f'{ix} {iy} {rgb[i][j][0]:d} {rgb[i][j][1]:d} {rgb[i][j][2]:d}\\n')\n \n #print(((ix-1)%scale, int((scale-1)/2), (iy-1)%scale, int((scale-1)/2)))\n scale_int = int((scale-1)/2)\n if ((ix-1)%scale == scale_int) and (iy-1)%scale == scale_int:\n velout.write(f'{ix} {iy} {vel[i][j][0]} {vel[i][j][1]}\\n')\n\n velout.close()\n colout.close()\n else:\n comm.Send(rgb[0][0][0:3*m*n], dest=0, tag=1)\n comm.Send(vel[0][0][0:2*m*n], dest=0, tag=1) \n\ndef writeplotfile(m, n, scale):\n \"\"\"\n Writing the plt-file to make the gnuplot\n \"\"\"\n print('scalefactor', scale)\n with open('cfd.plt', 'w') as f:\n f.write('set size square\\nset key off'\n '\\nunset xtics\\nunset ytics\\n'\n )\n f.write(f'set xrange[{1-scale}:{m+scale}]\\nset yrange[{1-scale}:{n+scale}]\\n')\n f.write(f\"plot \\\"colourmap.dat\\\" w rgbimage, \\\"velocity.dat\\\" u 1:2:({scale}*0.75*$3/sqrt($3**2+$4**2)):({scale}*0.75*$4/sqrt($3**2+$4**2)) with vectors lc rgb \\\"#7F7F7F\\\"\")\n\n print(\"\\nWritten gnuplot script 'cfd.plt'\\n\");\n\ndef hue2rgb(hue):\n rgbmax = 255\n\n r = int(rgbmax*colfunc(hue-1.0))\n g = int(rgbmax*colfunc(hue-0.5))\n b = int(rgbmax*colfunc(hue))\n \n return int(r), int(g), int(b)\n\n\ndef colfunc(x):\n\n x1=0.2\n x2=0.5\n absx=abs(x)\n\n if absx > x2:\n return 0.0\n elif absx < x1:\n return 1.0\n else:\n return 1.0-((absx-x1)/(x2-x1))**2\n############################################################################################################################################\n\n# jacobi.py \n\ndef jacobistep(psi, m, n):\n \"\"\"\n Generates one step of the jacobi function for the whole grid\n \"\"\"\n #return 0.25 * (psi[0:m, 1:n+1]+psi[2:m+2, 1:n+1]+psi[1:m+1, 0:n] + psi[1:m+1, 2:n+2])\n return ne.evaluate(\"0.25 * (a + b + c + d)\", {'a':psi[0:m, 1:n+1],'b':psi[2:m+2, 1:n+1],'c':psi[1:m+1, 0:n],'d':psi[1:m+1, 2:n+2]})\n\n\ndef jacobistepvort(zet, psi, m, n, re):\n #print(np.sum(zet), np.sum(psi))\n #psinew = 0.25 * (psi[0:m, 1:n+1]+psi[2:m+2, 1:n+1]+psi[1:m+1, 0:n] + psi[1:m+1, 2:n+2] - zet[1:m+1, 1:n+1])\n psinew = ne.evaluate(\"0.25 * (a + b + c + d - e)\", {'a':psi[0:m, 1:n+1],'b':psi[2:m+2, 1:n+1],'c':psi[1:m+1, 0:n],'d':psi[1:m+1, 2:n+2],'e':zet[1:m+1, 1:n+1]})\n\n #zetnew = - re/16.0 * ((psi[1:m+1, 2:n+2]-psi[1:m+1, 0:n])*(zet[2:m+2, 1:n+1]-zet[0:m, 1:n+1]) - (psi[2:m+2, 1:n+1]-psi[0:m, 1:n+1])*(zet[1:m+1, 2:n+2]-zet[1:m+1, 0:n])) + (0.25*(zet[0:m, 1:n+1]+zet[2:m+2, 1:n+1]+zet[1:m+1, 0:n]+zet[1:m+1, 2:n+2]))\n zetnew = ne.evaluate(\"- re / 16.0 * ((d - c) * (f - g) - (b - a) * (h - i)) + (0.25 * (f + g + h + i))\", {'re':re,'a':psi[0:m, 1:n+1],'b':psi[2:m+2, 1:n+1],'c':psi[1:m+1, 0:n],'d':psi[1:m+1, 2:n+2],'f':zet[2:m+2, 1:n+1],'g':zet[0:m, 1:n+1],'h':zet[1:m+1, 2:n+2],'i':zet[1:m+1, 0:n]})\n return psinew, zetnew\n\n\ndef deltasq(psi_os_zet_temp, oldarr, m, n):\n dsq = np.sum(np.power(psi_os_zet_temp - oldarr[1: m+1, 1:n+1], 2))\n return float(dsq)\n\n##################################################################MAIN#################################################\n# cfd_numpy.py MPI4PY MAIN-file\ndef main(argv):\n # Test we have the correct number of arguments\n if len(argv) < 2:\n sys.stdout.write(\"Usage: cfd.py <scalefactor> <iterations> [reynolds]\\n\")\n sys.exit(1)\n\n # Get the systen parameters from the arguments\n scalefactor = int(argv[0])\n niter = int(argv[1])\n\n # print interval\n printfreq = 1000\n # Set the minimum size parameters\n bbase = 10\n 
hbase = 15\n wbase = 5\n mbase = 32\n nbase = 32\n\n # Set the parameters for boundary conditions\n b = bbase * scalefactor\n h = hbase * scalefactor\n w = wbase * scalefactor\n # Set the dimensions of the array\n m = mbase * scalefactor\n n = nbase * scalefactor\n\n # checkreynolds\n checkerr = 0\n # //tolerance for convergence. <=0 means do not check\n tolerance = 0\n\n #parallelisation parameters\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n # check command line and add reynolds\n if len(argv) == 3:\n re = float(argv[2])\n irrotational = 0\n if 0 == rank:\n print(f\"Reynolds number = {re}\")\n else:\n re = -1\n irrotational = 1\n if 0 == rank:\n print(\"Irrotational flow\\n\")\n\n # irrotational?\n if not irrotational:\n zet = np.zeros((m + 2, n + 2))\n if rank == 0:\n sys.stdout.write(\"\\n2D CFD Simulation\\n\")\n sys.stdout.write(\"=================\\n\")\n sys.stdout.write(\"Scale factor = {0}\\n\".format(scalefactor))\n sys.stdout.write(\"Iterations = {0}\\n\".format(niter))\n \n # //calculate local size\n lm = int(m/size)\n \n # bnorm\n bnorm = np.array([0.0])\n\n # consistency check\n if size*lm != m:\n if 0 == rank:\n print(f'Error: {m} dies not divide into {size} processes')\n comm.MPI_Finalize()\n if 0 == rank:\n print(f'Running CFD on {m}x{n} grid using {size} processes')\n # Write the simulation details\n sys.stdout.write(\"\\nGrid size = {0} x {1}\\n\".format(m, n))\n\n # didn't need it\n #print('before', scalefactor, niter, re, irrotational)\n #broadcast runtime params to other processors\n #comm.bcast(scalefactor, root=0) # MPI_Bcast(&scalefactor,1,MPI_INT,0,comm);\n #comm.bcast(niter, root=0) # MPI_Bcast(&numiter,1,MPI_INT,0,comm);\n #comm.bcast(re, root=0) # MPI_Bcast(&re,1,MPI_DOUBLE,0,comm);\n #comm.bcast(irrotational, root=0) # MPI_Bcast(&irrotational,1,MPI_INT,0,comm);\n #print('after bcast', scalefactor, niter, re, irrotational)\n\n # reynolds number\n re = re / scalefactor\n\n # //do we stop because of tolerance?\n if tolerance > 0:\n checkerr = 1\n\n\n # Define the psi array of dimension [m+2][n+2] and set it to zero\n psi = np.zeros((lm + 2, n + 2))\n\n # Set the psi boundary conditions \n boundarypsi(psi, lm, n, b, h, w, comm)\n\n # compute normalisation factor for error\n localbnorm = 0\n # better than double for-loop:\n localbnorm += np.sum(psi * psi) # this is not working, just keep for the moment the iterative version\n\n # boundary swap of psi\n haloSWAP(psi, lm, n, comm)\n\n if not irrotational:\n # update zeta BCs that depends on psi\n boundaryzet(zet, psi, lm, n, comm)\n\n # update normalisation\n localbnorm += np.sum(zet * zet)\n\n # boundary swap of psi\n haloSWAP(zet, lm, n, comm)\n\n comm.Allreduce(sendbuf=localbnorm, recvbuf=bnorm, op=MPI.SUM)\n\n bnorm = np.sqrt(bnorm)\n\n # Call the Jacobi iterative loop (and calculate timings)\n if 0 == rank:\n sys.stdout.write(\"\\nStarting main Jacobi loop ...\\n\\n\")\n \n #barrier for accurate timing - not needed for correctness\n comm.Barrier() \n \n tstart = MPI.Wtime()\n\n # -------------------\n for iter in range(1, niter + 1):\n # //calculate psi for next iteration\n if irrotational:\n psitmp = jacobistep(psi, lm, n)\n else:\n psitmp, zettmp = jacobistepvort(zet, psi, lm, n, re)\n\n # //calculate current error if required\n if checkerr or iter == niter:\n localerror = deltasq(psitmp, psi, lm, n)\n\n if not irrotational:\n localerror += deltasq(zettmp, zet, lm, n)\n\n # only rank 0 has the \"error\" variable!\n error = comm.reduce(localerror, op=MPI.SUM)\n if 0 == rank:\n 
error = np.sqrt(error) / bnorm\n\n # //copy back but not all!!\n psi[1:lm+1, 1:n+1] = psitmp\n\n if not irrotational:\n # //copy back but not all!!\n zet[1:lm+1, 1:n+1] = zettmp\n\n # do a boundary swap\n haloSWAP(psi, lm, n, comm)\n\n if not irrotational:\n haloSWAP(zet, lm, n, comm)\n # update zeta BCs that depend on psi\n boundaryzet(zet, psi, lm, n, comm)\n\n # //quit early if we have reached required tolerance\n if 0 == rank and checkerr and error < tolerance:\n print(f\"Converged on iteration {iter}\")\n break\n\n # //print loop information\n if (iter % printfreq == 0) and 0 == rank:\n if not checkerr:\n print(f\"Completed iteration {iter}\")\n else:\n print(f\"Completed iteration {iter}, error = {error}\\n\")\n\n if iter > niter:\n iter = niter\n # -------------------\n\n #barrier for accurate timing - not needed for correctness\n comm.Barrier()\n\n tend = MPI.Wtime()\n \n ttot = tend - tstart\n titer = ttot / niter\n # print out some stats\n if 0 == rank:\n print(\"\\n... finished\\n\")\n print(f\"After {iter} iterations, the error is {error}\\n\")\n print(f\"Time for {iter} iterations was {ttot} seconds\\n\")\n print(f\"Each iteration took {titer} seconds\\n\")\n\n # Write the output files for subsequent visualisation\n #write_data(m, n, scalefactor, psi, \"velocity.dat\", \"colourmap.dat\", comm)\n\n # generate gnuplot file\n # Finish nicely\n if 0 == rank:\n # writeplotfile(m, n, scalefactor)\n sys.exit(0)\n\n MPI.Finalize()\n\n\n\n##############################################################\n# Function to create tidy way to have main method\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n##############################################################\n" ]
[ [ "numpy.sqrt", "numpy.power", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rmaiko/pyvsim
[ "18d51d8fc3678ffcb08fd0939dc72c1a8834327d" ]
[ "examples/demo2.py" ]
[ "#!/usr/bin/python\n\"\"\"\nPyVSim part2.1\nCopyright 2013 Ricardo Entz\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nif __name__ == '__main__':\n import sys\n sys.path.append(\"../\")\n import numpy as np\n from pyvsim import *\n \"\"\"\n This demo shows a simple render of a famous image, but with \n physically correct angles\n \"\"\"\n vol = Primitives.Volume()\n vol.points = np.array([[0 ,0,0],\n [1 ,0,0],\n [0.5 ,0.866,0],\n [1e-6,0,0],\n [0 ,0,0.1],\n [1 ,0,0.1],\n [0.5 ,0.866,0.1],\n [1e-6,0,0.1]])\n vol.surfaceProperty = vol.TRANSPARENT\n sellmeierCoeffs = np.array([[1.03961212, 0.00600069867],\n [0.23179234, 0.02001791440],\n [70.01046945, 103.560653000]])\n vol.material = Library.Glass(sellmeierCoeffs)\n vol.material.name = \"The dark side of the moon glass\"\n \n r = Primitives.RayBundle()\n n = 200\n v = Utils.normalize(np.array([0.5,0.17,0]))\n p = np.array([-0.5,0.1,0.05])\n v = np.tile(v,(n,1))\n w = np.linspace(380e-9, 780e-9, n) #all the visible spectrum\n r.append(v, p, w)\n \n a = Primitives.Assembly()\n a.append(vol)\n a.append(r)\n \n r.maximumRayTrace = 2\n r.trace()\n \n System.save(obj = a, filename = \"./test.dat\", mode = \"json\")\n\n dec = System.load(filename = \"./test.dat\")\n \n System.plot(dec,displayAxes=False)" ]
[ [ "numpy.array", "numpy.tile", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CardiacModelling/model-reduction-manifold-boundaries
[ "88ccb24d0ec9d0742a4a93e820fec7fee1a65b61" ]
[ "Parameter_inference_real_data/figures/plot-complex-ap-supplement-compare.py" ]
[ "import myokit\nimport myokit.pacing as pacing\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as pl\nimport myokit.lib.markov as markov\nimport pints\nimport argparse\nimport os\nimport sys\n\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, inset_axes\nfrom mpl_toolkits.axes_grid1.inset_locator import mark_inset\n\n# Load project modules\nsys.path.append(os.path.abspath(os.path.join('../', 'python')))\nimport cells\nimport data\n\n# Check input arguments\nparser = argparse.ArgumentParser(\n description='Plot model and experimental data')\nparser.add_argument('--cell', type=int, default=2, metavar='N',\n help='repeat number : 1, 2, 3, 4, 5, 6')\nparser.add_argument('--model', type=str, default='wang', metavar='N',\n help='which model to use')\nparser.add_argument('--repeats', type=int, default=25, metavar='N',\n help='number of CMA-ES runs from different initial guesses')\nparser.add_argument('--protocol', type=int, default=1, metavar='N',\n help='which protocol is used to fit the data: 1 for staircase #1, 2 for sine wave')\nparser.add_argument(\"--show\", action='store_true',\n help=\"whether to show figures instead of saving them\",\n default=False)\nparser.add_argument('--params', type=int, default=1, metavar='N',\n help='which params to use')\nparser.add_argument('--figsize', type=float, nargs='+', default=[9, 7], \\\n help='Figure size in x and y, e.g. --figsize2 2.5 3.5')\nparser.add_argument(\"--grid\", action='store_true',\n help=\"whether to add grid to figures or not\",\n default=False)\nargs = parser.parse_args()\n\ncell = args.cell\n\n#\n# Simple IKr test script\n#\n\n# Get model\np = myokit.load_protocol('../model-and-protocols/pr6-ap-steps.mmt')\n\ncurrent = 'ikr.IKr'\n\nek = cells.ek(cell)\n\nprint('Reversal potential ' + str(ek) + ' mV')\n\nif args.protocol == 1:\n protocol_str = 'staircase1'\nelse:\n protocol_str = 'sine-wave'\n\n# Run simulation\ndt = 0.1\n\nfig, (a0, a1) = pl.subplots(2, 1, gridspec_kw={'height_ratios': [1, 3]}, figsize=args.figsize, dpi=100 if args.show else 200, constrained_layout=True)\na0.set_xlim([0, 8000])\na0.set_ylim([-140, 80])\na0.set_ylabel( '(mV)' )\nif args.grid:\n a0.grid(True)\n[label.set_visible(False) for label in a0.get_xticklabels()]\na1.set_xlim([0, 8000])\na1.set_ylim([-12.7, 3.3])\na1.set_xlabel( 'Time (ms)' )\na1.set_ylabel( 'Current (nA)' )\nif args.grid:\n a1.grid(True)\n\naxins = zoomed_inset_axes(a1, 5, loc='lower left') # zoom-factor: 5\nx1, x2, y1, y2 = 3170, 4370, -0.3, 2 # specify the limits\naxins.set_xlim(x1, x2) # apply the x-limits\naxins.set_ylim(y1, y2) # apply the y-limits\npl.yticks(visible=False)\npl.xticks(visible=False)\nmark_inset(a1, axins, loc1=2, loc2=1, fc=\"none\", ec=\"0.5\")\n\naxins2 = inset_axes(a1, 1.75, 3.2, loc='lower right') # zoom-factor: 5\nx1, x2, y1, y2 = 6590, 6640, 0, 2.3 # specify the limits\naxins2.set_xlim(x1, x2) # apply the x-limits\naxins2.set_ylim(y1, y2) # apply the y-limits\npl.yticks(visible=False)\npl.xticks(visible=False)\nmark_inset(a1, axins2, loc1=2, loc2=1, fc=\"none\", ec=\"0.5\")\n\ne = myokit.DataLog.load_csv('../data/SFU-data/AP/complex-AP-WT-cell-' + str(cell) + '.csv').npview()\n\n# Apply capacitance filtering for experiment and simulated data\nsignals = [e.time(), e['current']]\nvoltage = 'voltage' in e\nif voltage:\n signals.append(e['voltage'])\nsignals = data.capacitance(p, dt, *signals)\n\ne = myokit.DataLog()\ne.set_time_key('time')\ne['time'] = signals[0]\ne['current'] = signals[1] / 1000 # Correct units\nif voltage:\n 
e['voltage'] = signals[2]\n\n# Filtered experimental data\ne = e.npview()\n\n# colors = ['orange', 'red']\n\nmodels = ['wang', 'wang-r1', 'wang-r2', 'wang-r3', 'wang-r4', 'wang-r5', 'wang-r6', 'wang-r7', 'wang-r8',]\nnmodels = len(models)\n\n# Create colormap for plotting\ncmap = matplotlib.cm.get_cmap('winter')\nnorm = matplotlib.colors.Normalize(0, nmodels)\n\nfor n, model in enumerate(models):\n\n m = myokit.load_model('../model-and-protocols/' + model + '-ikr-markov.mmt')\n if model == 'mazhari':\n model_str = 'Mazhari'\n states = ['ikr.c2', 'ikr.c3', 'ikr.o', 'ikr.i']\n elif model == 'mazhari-reduced':\n model_str = 'Maz-red'\n states = ['ikr.c1', 'ikr.c3', 'ikr.o', 'ikr.i']\n elif model == 'wang':\n model_str = 'Wang'\n states = ['ikr.c2', 'ikr.c3', 'ikr.o', 'ikr.i']\n elif model == 'wang-r1':\n model_str = 'Wang-r1'\n states = ['ikr.c2', 'ikr.c3', 'ikr.o', 'ikr.i']\n elif model == 'wang-r2':\n model_str = 'Wang-r2'\n states = ['ikr.c3', 'ikr.o', 'ikr.i']\n elif model == 'wang-r3':\n model_str = 'Wang-r3'\n states = ['ikr.c3', 'ikr.o', 'ikr.i']\n elif model == 'wang-r4':\n model_str = 'Wang-r4'\n states = ['ikr.c3', 'ikr.o', 'ikr.i']\n elif model == 'wang-r5':\n model_str = 'Wang-r5'\n states = ['ikr.o', 'ikr.i']\n elif model == 'wang-r6':\n model_str = 'Wang-r6'\n states = ['ikr.o', 'ikr.i']\n elif model == 'wang-r7':\n model_str = 'Wang-r7'\n states = ['ikr.o', 'ikr.i']\n elif model == 'wang-r8':\n model_str = 'Wang-r8'\n states = ['ikr.o']\n else:\n pass\n n_params = int(m.get('misc.n_params').value())\n m = markov.convert_markov_models_to_full_ode_form(m)\n\n # Set steady state potential\n LJP = m.get('misc.LJP').value()\n ss_V = -80 - LJP\n\n x_found = np.loadtxt('../cmaesfits/' + model_str + '-model-fit-' + protocol_str + '-iid-noise-parameters-' + str(args.params) + '.txt', unpack=True)\n\n parameters = []\n for i in range(n_params):\n parameters.append('ikr.p'+str(i+1))\n\n d = ['engine.time', 'membrane.V', 'ikr.IKr']\n\n # Run simulation\n m.get('nernst.EK').set_rhs(ek)\n\n print('Updating model to steady-state for ' + str(ss_V) + ' mV')\n m.get('membrane.V').set_label('membrane_potential')\n\n mm = markov.LinearModel.from_component(m.get('ikr'))\n\n x = mm.steady_state(ss_V, x_found)\n for i in range(len(states)):\n m.get(states[i]).set_state_value(x[i])\n\n log = data.load_ap_protocol().npview()\n t, v = log['time'], log['voltage']\n\n m.get('membrane.V').set_rhs('engine.pace - misc.LJP')\n\n s = myokit.Simulation(m, p)\n s.set_fixed_form_protocol(t, v)\n s.set_tolerance(1e-8, 1e-8)\n s.set_max_step_size(0.1)\n\n # Update model parameters\n for i in range(n_params):\n s.set_constant('ikr.p'+str(i+1), x_found[i])\n\n d = s.run(p.characteristic_time(), log_interval=dt, log=d)\n\n signals2 = [d.time(), d['ikr.IKr'], d['membrane.V']]\n d = myokit.DataLog()\n d.set_time_key('time')\n d['time'] = signals2[0]\n d['current'] = signals2[1]\n d['voltage'] = signals2[2]\n\n # Filtered simulated data\n d = d.npview()\n e = e.regularize(0.1)\n d = d.regularize(0.1)\n if n == 0:\n a0.plot(d.time(), d['voltage'], color='grey')\n if n == 0:\n a1.plot(e.time(), e['current'], color='silver', label='Experiment')\n a1.plot(d.time(), d['current'], label=model_str, color=cmap(norm(n)))\n\n if n == 0:\n axins.plot(e.time(), e['current'], color='silver', label='Expt.')\n axins2.plot(e.time(), e['current'], color='silver')\n axins2.axhline(np.max(e['current'][65900:66400]), color='silver', linestyle='--')\n axins.plot(d.time(), d['current'], color=cmap(norm(n)), label=model_str)\n 
axins2.plot(d.time(), d['current'], color=cmap(norm(n)))\n axins2.axhline(np.max(d['current'][65900:66400]), color=cmap(norm(n)), linestyle='--')\n\naxins.legend(loc='lower right', fontsize=8, ncol=3)\n\nif args.show == True:\n pl.show()\nelse:\n filename = 'Complex-AP-all-models-compare-fit-' + protocol_str + '-iid-noise'\n pl.savefig('All_figures/' + filename + '.png')\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.colors.Normalize", "matplotlib.pyplot.savefig", "numpy.max", "matplotlib.cm.get_cmap", "matplotlib.pyplot.yticks", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xxxhycl2010/pytorch-lightning
[ "7e18b118449133a5184b9014082ff1fb9818cf9b" ]
[ "tests/loggers/test_tensorboard.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom argparse import Namespace\nfrom unittest import mock\n\nimport pytest\nimport torch\nimport yaml\nfrom omegaconf import OmegaConf\nfrom packaging.version import Version\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom tests.helpers import BoringModel\nfrom tests.helpers.runif import RunIf\n\n\n@RunIf(min_torch=\"1.5.0\")\ndef test_tensorboard_hparams_reload(tmpdir):\n\n class CustomModel(BoringModel):\n\n def __init__(self, b1=0.5, b2=0.999):\n super().__init__()\n self.save_hyperparameters()\n\n trainer = Trainer(max_steps=1, default_root_dir=tmpdir)\n model = CustomModel()\n assert trainer.log_dir == trainer.logger.log_dir\n trainer.fit(model)\n\n assert trainer.log_dir == trainer.logger.log_dir\n folder_path = trainer.log_dir\n\n # make sure yaml is there\n with open(os.path.join(folder_path, \"hparams.yaml\")) as file:\n # The FullLoader parameter handles the conversion from YAML\n # scalar values to Python the dictionary format\n yaml_params = yaml.safe_load(file)\n assert yaml_params[\"b1\"] == 0.5\n assert yaml_params[\"b2\"] == 0.999\n assert len(yaml_params.keys()) == 2\n\n # verify artifacts\n assert len(os.listdir(os.path.join(folder_path, \"checkpoints\"))) == 1\n\n # verify tb logs\n event_acc = EventAccumulator(folder_path)\n event_acc.Reload()\n\n data_pt_1_5 = b'\\x12\\x1b\"\\x04\\n\\x02b1\"\\x04\\n\\x02b2*\\r\\n\\x0b\\x12\\thp_metric'\n data_pt_1_6 = b'\\x12\\x1f\"\\x06\\n\\x02b1 \\x03\"\\x06\\n\\x02b2 \\x03*\\r\\n\\x0b\\x12\\thp_metric'\n hparams_data = data_pt_1_6 if Version(torch.__version__) >= Version(\"1.6.0\") else data_pt_1_5\n\n assert event_acc.summary_metadata['_hparams_/experiment'].plugin_data.plugin_name == 'hparams'\n assert event_acc.summary_metadata['_hparams_/experiment'].plugin_data.content == hparams_data\n\n\ndef test_tensorboard_automatic_versioning(tmpdir):\n \"\"\"Verify that automatic versioning works\"\"\"\n\n root_dir = tmpdir / \"tb_versioning\"\n root_dir.mkdir()\n (root_dir / \"version_0\").mkdir()\n (root_dir / \"version_1\").mkdir()\n\n logger = TensorBoardLogger(save_dir=tmpdir, name=\"tb_versioning\")\n assert logger.version == 2\n\n\ndef test_tensorboard_manual_versioning(tmpdir):\n \"\"\"Verify that manual versioning works\"\"\"\n\n root_dir = tmpdir / \"tb_versioning\"\n root_dir.mkdir()\n (root_dir / \"version_0\").mkdir()\n (root_dir / \"version_1\").mkdir()\n (root_dir / \"version_2\").mkdir()\n\n logger = TensorBoardLogger(save_dir=tmpdir, name=\"tb_versioning\", version=1)\n\n assert logger.version == 1\n\n\ndef test_tensorboard_named_version(tmpdir):\n \"\"\"Verify that manual versioning works for string versions, e.g. 
'2020-02-05-162402' \"\"\"\n\n name = \"tb_versioning\"\n (tmpdir / name).mkdir()\n expected_version = \"2020-02-05-162402\"\n\n logger = TensorBoardLogger(save_dir=tmpdir, name=name, version=expected_version)\n logger.log_hyperparams({\"a\": 1, \"b\": 2, 123: 3, 3.5: 4, 5j: 5}) # Force data to be written\n\n assert logger.version == expected_version\n assert os.listdir(tmpdir / name) == [expected_version]\n assert os.listdir(tmpdir / name / expected_version)\n\n\[email protected](\"name\", [\"\", None])\ndef test_tensorboard_no_name(tmpdir, name):\n \"\"\"Verify that None or empty name works\"\"\"\n logger = TensorBoardLogger(save_dir=tmpdir, name=name)\n logger.log_hyperparams({\"a\": 1, \"b\": 2, 123: 3, 3.5: 4, 5j: 5}) # Force data to be written\n assert logger.root_dir == tmpdir\n assert os.listdir(tmpdir / \"version_0\")\n\n\ndef test_tensorboard_log_sub_dir(tmpdir):\n\n class TestLogger(TensorBoardLogger):\n # for reproducibility\n @property\n def version(self):\n return \"version\"\n\n @property\n def name(self):\n return \"name\"\n\n trainer_args = dict(\n default_root_dir=tmpdir,\n max_steps=1,\n )\n\n # no sub_dir specified\n save_dir = tmpdir / \"logs\"\n logger = TestLogger(save_dir)\n trainer = Trainer(**trainer_args, logger=logger)\n assert trainer.logger.log_dir == os.path.join(save_dir, \"name\", \"version\")\n\n # sub_dir specified\n logger = TestLogger(save_dir, sub_dir=\"sub_dir\")\n trainer = Trainer(**trainer_args, logger=logger)\n assert trainer.logger.log_dir == os.path.join(save_dir, \"name\", \"version\", \"sub_dir\")\n\n # test home dir (`~`) handling\n save_dir = \"~/tmp\"\n explicit_save_dir = os.path.expanduser(save_dir)\n logger = TestLogger(save_dir, sub_dir=\"sub_dir\")\n trainer = Trainer(**trainer_args, logger=logger)\n assert trainer.logger.log_dir == os.path.join(explicit_save_dir, \"name\", \"version\", \"sub_dir\")\n\n # test env var (`$`) handling\n test_env_dir = \"some_directory\"\n os.environ[\"test_env_dir\"] = test_env_dir\n save_dir = \"$test_env_dir/tmp\"\n explicit_save_dir = f\"{test_env_dir}/tmp\"\n logger = TestLogger(save_dir, sub_dir=\"sub_dir\")\n trainer = Trainer(**trainer_args, logger=logger)\n assert trainer.logger.log_dir == os.path.join(explicit_save_dir, \"name\", \"version\", \"sub_dir\")\n\n\[email protected](\"step_idx\", [10, None])\ndef test_tensorboard_log_metrics(tmpdir, step_idx):\n logger = TensorBoardLogger(tmpdir)\n metrics = {\n \"float\": 0.3,\n \"int\": 1,\n \"FloatTensor\": torch.tensor(0.1),\n \"IntTensor\": torch.tensor(1),\n }\n logger.log_metrics(metrics, step_idx)\n\n\ndef test_tensorboard_log_hyperparams(tmpdir):\n logger = TensorBoardLogger(tmpdir)\n hparams = {\n \"float\": 0.3,\n \"int\": 1,\n \"string\": \"abc\",\n \"bool\": True,\n \"dict\": {\n \"a\": {\n \"b\": \"c\"\n }\n },\n \"list\": [1, 2, 3],\n \"namespace\": Namespace(foo=Namespace(bar=\"buzz\")),\n \"layer\": torch.nn.BatchNorm1d,\n }\n logger.log_hyperparams(hparams)\n\n\ndef test_tensorboard_log_hparams_and_metrics(tmpdir):\n logger = TensorBoardLogger(tmpdir, default_hp_metric=False)\n hparams = {\n \"float\": 0.3,\n \"int\": 1,\n \"string\": \"abc\",\n \"bool\": True,\n \"dict\": {\n \"a\": {\n \"b\": \"c\"\n }\n },\n \"list\": [1, 2, 3],\n \"namespace\": Namespace(foo=Namespace(bar=\"buzz\")),\n \"layer\": torch.nn.BatchNorm1d,\n }\n metrics = {\"abc\": torch.tensor([0.54])}\n logger.log_hyperparams(hparams, metrics)\n\n\ndef test_tensorboard_log_omegaconf_hparams_and_metrics(tmpdir):\n logger = TensorBoardLogger(tmpdir, 
default_hp_metric=False)\n hparams = {\n \"float\": 0.3,\n \"int\": 1,\n \"string\": \"abc\",\n \"bool\": True,\n \"dict\": {\n \"a\": {\n \"b\": \"c\"\n }\n },\n \"list\": [1, 2, 3],\n # \"namespace\": Namespace(foo=Namespace(bar=\"buzz\")),\n # \"layer\": torch.nn.BatchNorm1d,\n }\n hparams = OmegaConf.create(hparams)\n\n metrics = {\"abc\": torch.tensor([0.54])}\n logger.log_hyperparams(hparams, metrics)\n\n\[email protected](\"example_input_array\", [None, torch.rand(2, 32)])\ndef test_tensorboard_log_graph(tmpdir, example_input_array):\n \"\"\" test that log graph works with both model.example_input_array and\n if array is passed externaly\n \"\"\"\n model = BoringModel()\n if example_input_array is not None:\n model.example_input_array = None\n\n logger = TensorBoardLogger(tmpdir, log_graph=True)\n logger.log_graph(model, example_input_array)\n\n\ndef test_tensorboard_log_graph_warning_no_example_input_array(tmpdir):\n \"\"\" test that log graph throws warning if model.example_input_array is None \"\"\"\n model = BoringModel()\n model.example_input_array = None\n logger = TensorBoardLogger(tmpdir, log_graph=True)\n with pytest.warns(\n UserWarning,\n match='Could not log computational graph since the `model.example_input_array`'\n ' attribute is not set or `input_array` was not given'\n ):\n logger.log_graph(model)\n\n\[email protected]('pytorch_lightning.loggers.TensorBoardLogger.log_metrics')\ndef test_tensorboard_with_accummulated_gradients(mock_log_metrics, tmpdir):\n \"\"\"Tests to ensure that tensorboard log properly when accumulated_gradients > 1\"\"\"\n\n class TestModel(BoringModel):\n\n def __init__(self):\n super().__init__()\n self.indexes = []\n\n def training_step(self, *args):\n self.log('foo', 1, on_step=True, on_epoch=True)\n if not self.trainer.train_loop.should_accumulate():\n if self.trainer.logger_connector.should_update_logs:\n self.indexes.append(self.trainer.global_step)\n return super().training_step(*args)\n\n model = TestModel()\n model.training_epoch_end = None\n logger_0 = TensorBoardLogger(tmpdir, default_hp_metric=False)\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=12,\n limit_val_batches=0,\n max_epochs=3,\n accumulate_grad_batches=2,\n logger=[logger_0],\n log_every_n_steps=3,\n )\n trainer.fit(model)\n\n calls = [m[2] for m in mock_log_metrics.mock_calls]\n count_epochs = [c[\"step\"] for c in calls if \"foo_epoch\" in c[\"metrics\"]]\n assert count_epochs == [5, 11, 17]\n\n count_steps = [c[\"step\"] for c in calls if \"foo_step\" in c[\"metrics\"]]\n assert count_steps == model.indexes\n\n\[email protected]('pytorch_lightning.loggers.tensorboard.SummaryWriter')\ndef test_tensorboard_finalize(summary_writer, tmpdir):\n \"\"\" Test that the SummaryWriter closes in finalize. 
\"\"\"\n logger = TensorBoardLogger(save_dir=tmpdir)\n logger.finalize(\"any\")\n summary_writer().flush.assert_called()\n summary_writer().close.assert_called()\n\n\ndef test_tensorboard_save_hparams_to_yaml_once(tmpdir):\n model = BoringModel()\n logger = TensorBoardLogger(save_dir=tmpdir, default_hp_metric=False)\n trainer = Trainer(max_steps=1, default_root_dir=tmpdir, logger=logger)\n assert trainer.log_dir == trainer.logger.log_dir\n trainer.fit(model)\n\n hparams_file = \"hparams.yaml\"\n assert os.path.isfile(os.path.join(trainer.log_dir, hparams_file))\n assert not os.path.isfile(os.path.join(tmpdir, hparams_file))\n\n\[email protected]('pytorch_lightning.loggers.tensorboard.log')\ndef test_tensorboard_with_symlink(log, tmpdir):\n \"\"\"\n Tests a specific failure case when tensorboard logger is used with empty name, symbolic link ``save_dir``, and\n relative paths.\n \"\"\"\n os.chdir(tmpdir) # need to use relative paths\n source = os.path.join('.', 'lightning_logs')\n dest = os.path.join('.', 'sym_lightning_logs')\n\n os.makedirs(source, exist_ok=True)\n os.symlink(source, dest)\n\n logger = TensorBoardLogger(save_dir=dest, name='')\n _ = logger.version\n\n log.warning.assert_not_called()\n" ]
[ [ "torch.rand", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jokerakos/ekpa-papadimitriou
[ "fe008b1fc963de4acddd5391a3bb4962bb706c97" ]
[ "model/models.py" ]
[ "import psycopg2\nimport pandas as pd \n\ndef connect_to_db():\n db_connection = psycopg2.connect(\n host=\"***.***.***.**\",\n database=\"********\",\n user=\"*********\",\n password=\"********\")\n db_connection.set_session(autocommit=True)\n cursor = db_connection.cursor()\n cursor.execute('SELECT version()')\n db_version = cursor.fetchone()\n print(db_version)\n return db_connection,cursor\n\n\nconn,db=connect_to_db()\ndef create_table():\n try:\n table_creation=\"\"\"\n CREATE TABLE newscrawler (\n id serial PRIMARY KEY,\n title VARCHAR ( 500 ) ,\n text VARCHAR ( 2000 ) ,\n time TIMESTAMP ,\n newsource VARCHAR ( 500 ) ,\n image VARCHAR ( 500 ) ,\n country VARCHAR ( 500 ) ,\n countrycode VARCHAR ( 500 ) ,\n newslet VARCHAR ( 500 ) ,\n created_at TIMESTAMP NOT NULL DEFAULT NOW(),\n updated_at TIMESTAMP NOT NULL DEFAULT NOW()\n );\n \"\"\"\n\n db.execute(table_creation)\n db.close()\n return True \n except Exception as e :\n print(\"error:\",e)\n return False\n \ndef insert_to_db(new_source,data=None):\n if data is None:\n data=[]\n try:\n record_to_insert=[]\n if len(data)>0:\n for d in data:\n checkrecord=record_exists(d['title'])\n print(\"checkrecord:\",checkrecord)\n if not checkrecord:\n title=str(d['title']).replace(\"'\",\"''\") if 'title' in d else None\n text=d['text'] if 'text' in d else None\n time=d['time'] if 'time' in d else None\n newsource=new_source\n image=d['image'] if 'image' in d else None\n country=d['country'] if 'country' in d else None\n countrycode=d['countrycode'] if 'countrycode' in d else None\n newslet=d['newslet'] if 'newslet' in d else None\n db_data=(title,text,time,newsource,image,country,countrycode,newslet)\n record_to_insert.append(db_data)\n\n db_insert_query = \"\"\" INSERT INTO newscrawler (title, text, time,newsource,image,country,countrycode,newslet) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\"\"\"\n for record in record_to_insert :\n db.execute(db_insert_query, record)\n conn.commit()\n return True\n except Exception as e :\n print(\"error:\",e)\n return False\n\n\ndef record_exists(title):\n title=str(title).replace(\"'\",\"''\")\n query=\"\"\"SELECT id FROM newscrawler WHERE title = '{title}'\"\"\".format(title=title)\n db.execute(query)\n return db.fetchone() is not None\n\n\n\nif __name__ == '__main__':\n # print(create_table())\n df = pd.read_csv(\"news.csv\") \n data=df.to_dict(orient='records')\n print(insert_to_db('news247',data))\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
katarinaslama/transformers-1
[ "a5a8eeb772b185b0746f3ce9be6ae43181d2ca71" ]
[ "tests/test_modeling_gpt2.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\n\nfrom transformers import is_torch_available\nfrom transformers.testing_utils import require_torch, slow, torch_device\n\nfrom .test_configuration_common import ConfigTester\nfrom .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask\n\n\nif is_torch_available():\n import torch\n\n from transformers import (\n GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,\n GPT2Config,\n GPT2DoubleHeadsModel,\n GPT2ForSequenceClassification,\n GPT2LMHeadModel,\n GPT2Model,\n GPT2Tokenizer,\n )\n\n\nclass GPT2ModelTester:\n def __init__(\n self,\n parent,\n batch_size=14,\n seq_length=7,\n is_training=True,\n use_token_type_ids=True,\n use_input_mask=True,\n use_labels=True,\n use_mc_token_ids=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n type_sequence_label_size=2,\n initializer_range=0.02,\n num_labels=3,\n num_choices=4,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_token_type_ids = use_token_type_ids\n self.use_input_mask = use_input_mask\n self.use_labels = use_labels\n self.use_mc_token_ids = use_mc_token_ids\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.num_choices = num_choices\n self.scope = None\n self.bos_token_id = vocab_size - 1\n self.eos_token_id = vocab_size - 1\n self.pad_token_id = vocab_size - 1\n\n def prepare_config_and_inputs(self, gradient_checkpointing=False):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n mc_token_ids = None\n if self.use_mc_token_ids:\n mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], 
self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = GPT2Config(\n vocab_size=self.vocab_size,\n n_embd=self.hidden_size,\n n_layer=self.num_hidden_layers,\n n_head=self.num_attention_heads,\n # intermediate_size=self.intermediate_size,\n # hidden_act=self.hidden_act,\n # hidden_dropout_prob=self.hidden_dropout_prob,\n # attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n n_positions=self.max_position_embeddings,\n n_ctx=self.max_position_embeddings,\n # type_vocab_size=self.type_vocab_size,\n # initializer_range=self.initializer_range,\n bos_token_id=self.bos_token_id,\n eos_token_id=self.eos_token_id,\n pad_token_id=self.pad_token_id,\n return_dict=True,\n gradient_checkpointing=gradient_checkpointing,\n )\n\n head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)\n\n return (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n )\n\n def prepare_config_and_inputs_for_decoder(self):\n (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = self.prepare_config_and_inputs()\n\n encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])\n encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)\n\n return (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n\n def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = GPT2Model(config=config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)\n result = model(input_ids, token_type_ids=token_type_ids)\n result = model(input_ids)\n\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n self.parent.assertEqual(len(result.past_key_values), config.n_layer)\n\n def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = GPT2Model(config=config)\n model.to(torch_device)\n model.eval()\n\n # first forward pass\n outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)\n outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)\n outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)\n\n self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))\n self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)\n\n output, past = outputs.to_tuple()\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)\n\n # append to next input_ids and token_type_ids\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)\n\n output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, token_type_ids=next_token_types, past=past)[\"last_hidden_state\"]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -1, 
random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_gpt2_model_attention_mask_past(\n self, config, input_ids, input_mask, head_mask, token_type_ids, *args\n ):\n model = GPT2Model(config=config)\n model.to(torch_device)\n model.eval()\n\n # create attention mask\n attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)\n half_seq_length = self.seq_length // 2\n attn_mask[:, half_seq_length:] = 0\n\n # first forward pass\n output, past = model(input_ids, attention_mask=attn_mask).to_tuple()\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # change a random masked slice from input_ids\n random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1\n random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)\n input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens\n\n # append to next input_ids and attn_mask\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n attn_mask = torch.cat(\n [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],\n dim=1,\n )\n\n # get two different outputs\n output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, past=past, attention_mask=attn_mask)[\"last_hidden_state\"]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_gpt2_model_past_large_inputs(\n self, config, input_ids, input_mask, head_mask, token_type_ids, *args\n ):\n model = GPT2Model(config=config)\n model.to(torch_device)\n model.eval()\n\n # first forward pass\n outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)\n\n output, past = outputs.to_tuple()\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)\n next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size)\n\n # append to next input_ids and token_type_ids\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)\n\n output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, token_type_ids=next_token_types, past=past)[\"last_hidden_state\"]\n self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_lm_head_model(self, config, input_ids, input_mask, 
head_mask, token_type_ids, *args):\n model = GPT2LMHeadModel(config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)\n self.parent.assertEqual(result.loss.shape, ())\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = GPT2LMHeadModel(config)\n model.to(torch_device)\n\n result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)\n self.parent.assertEqual(result.loss.shape, ())\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n result.loss.backward()\n\n def create_and_check_double_lm_head_model(\n self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args\n ):\n model = GPT2DoubleHeadsModel(config)\n model.to(torch_device)\n model.eval()\n\n multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()\n multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()\n multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()\n\n inputs = {\n \"input_ids\": multiple_choice_inputs_ids,\n \"mc_token_ids\": mc_token_ids,\n \"attention_mask\": multiple_choice_input_mask,\n \"token_type_ids\": multiple_choice_token_type_ids,\n \"labels\": multiple_choice_inputs_ids,\n }\n\n result = model(**inputs)\n self.parent.assertEqual(result.loss.shape, ())\n self.parent.assertEqual(\n result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)\n )\n self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))\n\n def create_and_check_gpt2_for_sequence_classification(\n self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args\n ):\n config.num_labels = self.num_labels\n model = GPT2ForSequenceClassification(config)\n model.to(torch_device)\n model.eval()\n print(config.num_labels, sequence_labels.size())\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n\n (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n\n inputs_dict = {\n \"input_ids\": input_ids,\n \"token_type_ids\": token_type_ids,\n \"head_mask\": head_mask,\n }\n\n return config, inputs_dict\n\n\n@require_torch\nclass GPT2ModelTest(ModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel, GPT2ForSequenceClassification)\n if is_torch_available()\n else ()\n )\n all_generative_model_classes = (GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else ()\n test_missing_keys = False\n\n def setUp(self):\n self.model_tester = GPT2ModelTester(self)\n self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_gpt2_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_gpt2_model(*config_and_inputs)\n\n def 
test_gpt2_model_past(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs)\n\n def test_gpt2_model_att_mask_past(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs)\n\n def test_gpt2_model_past_large_inputs(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_gpt2_model_past_large_inputs(*config_and_inputs)\n\n def test_gpt2_lm_head_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_lm_head_model(*config_and_inputs)\n\n def test_gpt2_double_lm_head_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)\n\n def test_gpt2_sequence_classification_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs)\n\n def test_gpt2_gradient_checkpointing(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs(gradient_checkpointing=True)\n self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs)\n\n @slow\n def test_batch_generation(self):\n model = GPT2LMHeadModel.from_pretrained(\"gpt2\")\n model.to(torch_device)\n tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n\n tokenizer.padding_side = \"left\"\n\n # Define PAD Token = EOS Token = 50256\n tokenizer.pad_token = tokenizer.eos_token\n model.config.pad_token_id = model.config.eos_token_id\n\n # use different length sentences to test batching\n sentences = [\n \"Hello, my dog is a little\",\n \"Today, I\",\n ]\n\n inputs = tokenizer(sentences, return_tensors=\"pt\", padding=True)\n\n torch.manual_seed(0)\n outputs = model.generate(\n input_ids=inputs[\"input_ids\"].to(torch_device),\n attention_mask=inputs[\"attention_mask\"].to(torch_device),\n )\n\n inputs_non_padded = tokenizer(sentences[0], return_tensors=\"pt\").input_ids.to(torch_device)\n output_non_padded = model.generate(input_ids=inputs_non_padded)\n\n num_paddings = inputs_non_padded.shape[-1] - inputs[\"attention_mask\"][-1].long().sum().cpu().item()\n inputs_padded = tokenizer(sentences[1], return_tensors=\"pt\").input_ids.to(torch_device)\n output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)\n\n batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)\n non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)\n padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)\n\n expected_output_sentence = [\n \"Hello, my dog is a little bit of a mess. I'm not sure if he's going\",\n \"Today, I'm going to be doing a lot of research on this. 
I\",\n ]\n self.assertListEqual(expected_output_sentence, batch_out_sentence)\n self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = GPT2Model.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_torch\nclass GPT2ModelLanguageGenerationTest(unittest.TestCase):\n @slow\n def test_lm_generate_gpt2(self):\n for checkpointing in [True, False]:\n model = GPT2LMHeadModel.from_pretrained(\"gpt2\", gradient_checkpointing=checkpointing)\n model.to(torch_device)\n input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) # The dog\n expected_output_ids = [\n 464,\n 3290,\n 373,\n 1043,\n 287,\n 257,\n 2214,\n 1474,\n 262,\n 16246,\n 286,\n 2688,\n 290,\n 2688,\n 27262,\n 13,\n 198,\n 198,\n 464,\n 3290,\n ] # The dog was found in a field near the intersection of West and West Streets.\\n\\nThe dog\n output_ids = model.generate(input_ids, do_sample=False)\n self.assertListEqual(output_ids[0].tolist(), expected_output_ids)\n\n @slow\n def test_lm_generate_distilgpt2(self):\n model = GPT2LMHeadModel.from_pretrained(\"distilgpt2\")\n model.to(torch_device)\n input_ids = torch.tensor([[464, 1893]], dtype=torch.long, device=torch_device) # The president\n expected_output_ids = [\n 464,\n 1893,\n 286,\n 262,\n 1578,\n 1829,\n 11,\n 290,\n 262,\n 1893,\n 286,\n 262,\n 1578,\n 7526,\n 11,\n 423,\n 587,\n 287,\n 262,\n 2635,\n ] # The president of the United States, and the president of the United Kingdom, have been in the White\n\n output_ids = model.generate(input_ids, do_sample=False)\n self.assertListEqual(output_ids[0].tolist(), expected_output_ids)\n" ]
[ [ "torch.ones", "torch.cat", "torch.manual_seed", "torch.tensor", "torch.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Suerte412/SemSeg
[ "46515f36291bb7e068ceb1b455fe1fc4a26842ef" ]
[ "src/semseg/data/potsdam.py" ]
[ "from os.path import join\n\nimport numpy as np\nimport os \n\nfrom .isprs import IsprsDataset\nfrom .generators import FileGenerator, TRAIN, VALIDATION, TEST\nfrom .utils import (\n save_img, load_img, get_img_size, compute_ndvi, _makedirs,\n save_numpy_array)\n\nPOTSDAM = 'potsdam'\nPROCESSED_POTSDAM = 'processed_potsdam'\n\n# dataset dependent parameters\nclass PotsdamDataset(IsprsDataset):\n sharah_train_ratio = 17 / 24\n\n def __init__(self, include_depth=True, include_ndvi=True):\n self.include_ir = True\n\n self.include_depth = include_depth\n self.include_ndvi = include_ndvi\n\n # DEBUG\n # For 3 active channels\n self.include_ir = False\n\n self.include_depth = False\n self.include_ndvi = False\n\n self.red_ind = 0\n self.green_ind = 1\n self.blue_ind = 2\n self.rgb_inds = [self.red_ind, self.green_ind, self.blue_ind]\n\n self.ir_ind = 3\n self.depth_ind = 4\n self.ndvi_ind = 5\n\n self.active_input_inds = list(self.rgb_inds)\n\n # Add extra channels to batch_x in addition to rgb\n if self.include_ir:\n self.active_input_inds.append(self.ir_ind) \n if self.include_depth:\n self.active_input_inds.append(self.depth_ind)\n if self.include_ndvi:\n self.active_input_inds.append(self.ndvi_ind)\n\n\n super().__init__()\n\n def get_output_file_name(self, file_ind):\n return 'top_potsdam_{}_{}_label.tif'.format(file_ind[0], file_ind[1])\n\n def augment_channels(self, batch_x):\n red = batch_x[:, :, :, [self.red_ind]]\n ir = batch_x[:, :, :, [self.ir_ind]]\n ndvi = compute_ndvi(red, ir)\n return np.concatenate([batch_x, ndvi], axis=3)\n\n\nclass PotsdamFileGenerator(FileGenerator):\n \"\"\"\n A data generator for the Potsdam dataset that creates batches from\n files on disk.\n \"\"\"\n# def __init__(self, active_input_inds, train_ratio, cross_validation):\n def __init__(self, train_ratio):\n self.dataset = PotsdamDataset()\n\n # The first 24 indices correspond to the training set,\n # and the rest to the validation set used\n # in https://arxiv.org/abs/1606.02585\n self.file_inds = [\n (2, 10), (3, 10), (3, 11), (3, 12), (4, 11), (4, 12), (5, 10),\n (5, 12), (6, 10), (6, 11), (6, 12), (6, 8), (6, 9), (7, 11),\n (7, 12), (7, 7), (7, 9), (2, 11), (2, 12), (4, 10), (5, 11),\n (6, 7), (7, 10), (7, 8)\n ]\n\n self.test_file_inds = [\n (2, 13), (2, 14), (3, 13), (3, 14), (4, 13), (4, 14), (4, 15),\n (5, 13), (5, 14), (5, 15), (6, 13), (6, 14), (6, 15), (7, 13)\n ]\n\n # super().__init__(active_input_inds, train_ratio, cross_validation)\n super().__init__(self.dataset.active_input_inds, train_ratio, cross_validation=None)\n\nclass PotsdamImageFileGenerator(PotsdamFileGenerator):\n \"\"\"\n A data generator for the Potsdam dataset that creates batches from\n the original TIFF and JPG files.\n \"\"\"\n # def __init__(self, datasets_path, active_input_inds,\n # train_ratio=0.8, cross_validation=None):\n # self.dataset_path = join(datasets_path, POTSDAM)\n # super().__init__(active_input_inds, train_ratio, cross_validation)\n\n def __init__(self, datasets_path,\n train_ratio=0.8):\n self.dataset_path = join(datasets_path, POTSDAM)\n super().__init__(train_ratio)\n\n @staticmethod\n def preprocess(datasets_path):\n # Fix the depth image that is missing a column if it hasn't been\n # fixed already.\n data_path = join(datasets_path, POTSDAM)\n file_path = join(\n data_path,\n '1_DSM_normalisation/dsm_potsdam_03_13_normalized_lastools.jpg')\n\n im = load_img(file_path)\n if im.shape[1] == 5999:\n im_fix = np.zeros((6000, 6000), dtype=np.uint8)\n im_fix[:, 0:-1] = im[:, :, 0]\n save_img(im_fix, 
file_path)\n\n def get_file_size(self, file_ind):\n ind0, ind1 = file_ind\n\n rgbir_file_path = join(\n self.dataset_path,\n '4_Ortho_RGBIR' + os.sep + 'top_potsdam_{}_{}_RGBIR.tif'.format(ind0, ind1))\n nb_rows, nb_cols = get_img_size(rgbir_file_path)\n return nb_rows, nb_cols\n\n def get_img(self, file_ind, window, has_y=True):\n ind0, ind1 = file_ind\n\n rgbir_file_path = join(\n self.dataset_path,\n '4_Ortho_RGBIR' + os.sep + 'top_potsdam_{}_{}_RGBIR.tif'.format(ind0, ind1))\n depth_file_path = join(\n self.dataset_path,\n '1_DSM_normalisation' + os.sep + 'dsm_potsdam_{:0>2}_{:0>2}_normalized_lastools.jpg'.format(ind0, ind1)) # noqa\n batch_y_file_path = join(\n self.dataset_path,\n '5_Labels_for_participants' + os.sep + 'top_potsdam_{}_{}_label.tif'.format(ind0, ind1)) # noqa\n batch_y_no_boundary_file_path = join(\n self.dataset_path,\n '5_Labels_for_participants_no_Boundary' + os.sep + 'top_potsdam_{}_{}_label_noBoundary.tif'.format(ind0, ind1)) # noqa\n\n rgbir = load_img(rgbir_file_path, window)\n depth = load_img(depth_file_path, window)\n channels = [rgbir, depth]\n #DEBUG\n # print(\"rgbir.shape: {}\".format(rgbir.shape))\n # print(\"depth.shape: {}\".format(depth.shape))\n\n if has_y:\n batch_y = load_img(batch_y_file_path, window)\n # print(\"batch_y.shape: {}\".format(batch_y.shape))\n batch_y_no_boundary = load_img(\n batch_y_no_boundary_file_path, window)\n # print(\"batch_y_no_boundary.shape: {}\".format(batch_y_no_boundary.shape))\n channels.extend([batch_y, batch_y_no_boundary])\n\n img = np.concatenate(channels, axis=2)\n\n\n return img\n\n def parse_batch(self, batch, has_y=True):\n\n # DEBUG\n # print(\"Active input inds: {}\".format(self.dataset.active_input_inds))\n # print(\"Batch shape with everything: {}\".format(batch.shape))\n\n # DEBUG\n # batch_x = batch[:, :, :, 0:5]\n # Number of channels extracted from images\n nb_channels = len(self.dataset.active_input_inds) - 1\n # print(\"Number of channels without ndvi: {}\".format(nb_channels))\n batch_x = batch[:, :, :, self.dataset.active_input_inds[0]:nb_channels]\n # print(\"Batch_x shape: {}\".format(batch_x.shape))\n batch_y = None\n batch_y_mask = None\n if has_y:\n # batch_y = self.dataset.rgb_to_one_hot_batch(batch[:, :, :, 5:8])\n # batch_y_mask = self.dataset.rgb_to_mask_batch(batch[:, :, :, 8:])\n batch_y = self.dataset.rgb_to_one_hot_batch(batch[:, :, :, nb_channels:nb_channels+3])\n # print(\"Batch_y shape: {0} from {1}\".format(batch_y.shape, batch[:, :, :, nb_channels:nb_channels+3].shape))\n\n # print(\"Batch_y_mask in rgb shape: {0}\".format(batch[:, :, :, nb_channels+3:].shape))\n batch_y_mask = self.dataset.rgb_to_mask_batch(batch[:, :, :, nb_channels+3:])\n # print(\"Batch_y_mask shape: {0} and {1}\".format(batch_y_mask.shape, batch[:, :, :, nb_channels+3:].shape))\n\n # batch_x = batch[:, :, :, self.active_input_inds[0]:self.active_input_inds[-1]]\n # batch_y = None\n # batch_y_mask = None\n # if has_y:\n # batch_y = self.dataset.rgb_to_one_hot_batch(batch[:, :, :, self.active_input_inds[-1]:self.active_input_inds[-1] + 3])\n # batch_y_mask = self.dataset.rgb_to_mask_batch(batch[:, :, :, self.active_input_inds[-1] + 3:])\n # DEBUG\n # print(\"Batch_x shape after parsing from image: {}\".format(batch_x.shape))\n # if has_y:\n # print(\"Batch_y shape after parsing from image: {}\".format(batch_y.shape))\n # print(\"Batch_y_mask shape after parsing from image: {}\".format(batch_y_mask.shape))\n \n return batch_x, batch_y, batch_y_mask\n\n\nclass PotsdamNumpyFileGenerator(PotsdamFileGenerator):\n 
\"\"\"\n A data generator for the Potsdam dataset that creates batches from\n numpy array files. This is about 20x faster than reading the raw files.\n \"\"\"\n # def __init__(self, datasets_path, active_input_inds,\n # train_ratio=0.8, cross_validation=None):\n # self.raw_dataset_path = join(datasets_path, POTSDAM)\n # self.dataset_path = join(datasets_path, PROCESSED_POTSDAM)\n # super().__init__(active_input_inds, train_ratio, cross_validation)\n\n def __init__(self, datasets_path,\n train_ratio=0.8):\n self.raw_dataset_path = join(datasets_path, POTSDAM)\n self.dataset_path = join(datasets_path, PROCESSED_POTSDAM)\n # super().__init__(active_input_inds, train_ratio, cross_validation) \n if (train_ratio == None):\n train_ratio = 0.8 \n print(\"Train_ratio is: {0}\".format(train_ratio)) \n super().__init__(train_ratio) \n\n @staticmethod\n def preprocess(datasets_path):\n proc_data_path = join(datasets_path, PROCESSED_POTSDAM)\n _makedirs(proc_data_path)\n\n # generator = PotsdamImageFileGenerator(\n # datasets_path, include_ir=True, include_depth=True,\n # include_ndvi=False)\n generator = PotsdamImageFileGenerator(\n datasets_path\n )\n dataset = generator.dataset\n\n def _preprocess(split):\n gen = generator.make_split_generator(\n split, batch_size=1, shuffle=False, augment=False,\n normalize=False, eval_mode=True)\n\n # for batch_x, batch_y, batch_y_mask, file_inds in gen:\n\n for batch_tuple in gen: \n batch_x = batch_tuple[0]\n batch_y = batch_tuple[1]\n batch_y_mask = batch_tuple[3]\n file_inds = batch_tuple[4]\n\n file_ind = file_inds[0]\n\n batch_x = np.squeeze(batch_x, axis=0)\n channels = [batch_x]\n\n\n if batch_y is not None:\n batch_y = np.squeeze(batch_y, axis=0)\n batch_y = dataset.one_hot_to_label_batch(batch_y)\n\n # DEBUG\n # print(\"Batch_y shape after squeezing and one_hot_batching is: {}\".format(batch_y.shape))\n\n batch_y_mask = np.squeeze(batch_y_mask, axis=0)\n channels.extend([batch_y, batch_y_mask])\n channels = np.concatenate(channels, axis=2)\n\n # Indexes in name of produced .npy files\n ind0, ind1 = file_ind\n # ind0 = file_ind[0]\n # ind1 = file_ind[1]\n file_name = '{}_{}'.format(ind0, ind1)\n\n print(\"We are ready to save {0} to .npy and the batch shape is: {1}\".format(file_name, channels.shape))\n\n # Creating numpy arrays from input images\n # DEBUG\n\n save_numpy_array(\n join(proc_data_path, file_name), channels)\n\n # Free memory\n channels = None\n batch_x = None\n batch_y = None\n batch_y_mask = None\n\n _preprocess(TRAIN)\n _preprocess(VALIDATION)\n _preprocess(TEST)\n\n print (\"Files have been preprocessed.\")\n\n def get_file_path(self, file_ind):\n ind0, ind1 = file_ind\n return join(self.dataset_path, '{}_{}.npy'.format(ind0, ind1))\n\n def get_file_size(self, file_ind):\n file_path = self.get_file_path(file_ind)\n im = np.load(file_path, mmap_mode='r')\n nb_rows, nb_cols = im.shape[0:2]\n return nb_rows, nb_cols\n\n def get_img(self, file_ind, window, has_y=True):\n file_path = self.get_file_path(file_ind)\n im = np.load(file_path, mmap_mode='r')\n ((row_begin, row_end), (col_begin, col_end)) = window\n img = im[row_begin:row_end, col_begin:col_end, :]\n\n return img\n\n def parse_batch(self, batch, has_y=True):\n # batch_x = batch[:, :, :, 0:5]\n # print(\"Batch given to parse batch has shape: {}\".format(batch.shape))\n nb_channels = self.dataset.active_input_inds[-1]\n # # DEBUG_BEGIN\n # nb_channels = len(self.active_input_inds)\n # print(\"Number of active channels: {0}\".format(nb_channels))\n # # DEBUG_END\n batch_x = batch[:, :, 
:, self.dataset.active_input_inds[0]:nb_channels+1]\n batch_y = None\n batch_y_mask = None\n if has_y:\n\n # # DEBUG_BEGIN\n # test_batch_y_label = batch[:, :, :, nb_channels+1:nb_channels+2]\n # print(\"test_batch_y_label type: {0}\".format(type(test_batch_y_label)))\n # print(\"test_batch_y_label shape: {0}\".format(test_batch_y_label.shape))\n # print(\"batch with x, y and y_mask shape: {0}\".format(batch.shape))\n # # print(test_batch_y_label)\n # # DEBUG_END\n\n # batch_y = self.dataset.label_to_one_hot_batch(batch[:, :, :, 5:6])\n batch_y = self.dataset.label_to_one_hot_batch(batch[:, :, :, -2:-1])\n # batch_y = self.dataset.label_to_one_hot_batch(batch[:, :, :, nb_channels+1:nb_channels+2])\n # batch_y_mask = batch[:, :, :, 6:7]\n batch_y_mask = batch[:, :, :, -1:]\n # batch_y_mask = batch[:, :, :, nb_channels+2:nb_channels+3]\n\n # # DEBUG_BEGIN\n # print(\"batch_y type: {0}\".format(type(batch_y)))\n # print(\"batch_y shape: {0}\".format(batch_y.shape))\n # # print(batch_y)\n # # DEBUG_END\n\n # DEBUG\n # print(\"Batch_y shape after parsing numpy array: {}\".format(batch_y.shape))\n # print(\"Batch_y_mask shape after parsing numpy array: {}\".format(batch_y_mask.shape))\n\n # DEBUG\n # print(\"Batch_x shape after parsing from numpy array: {}\".format(batch_x.shape))\n\n\n return batch_x, batch_y, batch_y_mask\n" ]
[ [ "numpy.concatenate", "numpy.load", "numpy.squeeze", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wdd0225/RetinaNet-and-SSD-in-PyTorch-Detectron
[ "4140e197e78dfd59c8f09dcd33e97f6040a0f39e" ]
[ "lib/datasets/coco_train.py" ]
[ "from lib.core import ssd_config as cfg\nimport os\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport cv2\nimport numpy as np\n\n# def get_label_map(label_file):\n# label_map = {}\n# labels = open(label_file, 'r')\n# for line in labels:\n# ids = line.split(',')\n# label_map[int(ids[0])] = int(ids[1])\n# return label_map\ndef get_label_map(labels):\n label_map = {}\n for ids in labels:\n label_map[int(ids[0])] = int(ids[1])\n return label_map\n\nclass COCOAnnotationTransform(object):\n \"\"\"Transforms a COCO annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n \"\"\"\n def __init__(self):\n self.label_map = get_label_map(cfg.COCO_LABEL_MAP)\n\n def __call__(self, target, width, height):\n \"\"\"\n Args:\n target (dict): COCO target json annotation as a python dict\n height (int): height\n width (int): width\n Returns:\n a list containing lists of bounding boxes [bbox coords, class idx]\n \"\"\"\n scale = np.array([width, height, width, height])\n res = []\n for obj in target:\n if 'bbox' in obj:\n bbox = obj['bbox']\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n label_idx = self.label_map[obj['category_id']] - 1\n final_box = list(np.array(bbox)/scale)\n final_box.append(label_idx)\n res += [final_box] # [xmin, ymin, xmax, ymax, label_idx]\n else:\n print(\"no bbox problem!\")\n\n return res # [[xmin, ymin, xmax, ymax, label_idx], ... ]\n\n\nclass COCODetection(data.Dataset):\n \"\"\"`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.\n Args:\n root (string): Root directory where images are downloaded to.\n set_name (string): Name of the specific set of COCO images.\n transform (callable, optional): A function/transform that augments the\n raw images`\n target_transform (callable, optional): A function/transform that takes\n in the target (bbox) and transforms it.\n \"\"\"\n\n def __init__(self, root, image_set='train2017', transform=None,\n target_transform=COCOAnnotationTransform(), dataset_name='MS COCO'):\n sys.path.append(osp.join(root, cfg.COCO_API))\n from pycocotools.coco import COCO\n # self.root = osp.join(root, IMAGES, image_set)\n # print('XXXXXXXX: ',self.root)\n # self.coco = COCO(osp.join(root, ANNOTATIONS,\n # 'image_info_test-dev2015.json'))\n # self.root = osp.join(root, IMAGES, image_set)\n # self.coco = COCO(osp.join(root, ANNOTATIONS,\n # INSTANCES_SET.format(image_set)))\n self.root = osp.join(cfg.COCO_ROOT, cfg.IMAGES, image_set)\n self.coco = COCO(osp.join(cfg.COCO_ROOT, cfg.ANNOTATIONS,\n cfg.INSTANCES_SET.format(image_set)))\n \n self.ids = list(self.coco.imgToAnns.keys())\n self.transform = transform\n self.target_transform = target_transform\n self.name = dataset_name\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n im, gt, h, w = self.pull_item(index)\n return im, gt\n\n def __len__(self):\n return len(self.ids)\n\n def pull_item(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target, height, width).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n img_id = self.ids[index]\n target = self.coco.imgToAnns[img_id]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n\n target = self.coco.loadAnns(ann_ids)\n path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])\n # print('XXXXXXXX: ',path)\n 
assert osp.exists(path), 'Image path does not exist: {}'.format(path)\n img = cv2.imread(osp.join(self.root, path))\n height, width, _ = img.shape\n if self.target_transform is not None:\n target = self.target_transform(target, width, height)\n if self.transform is not None:\n target = np.array(target)\n img, boxes, labels = self.transform(img, target[:, :4],\n target[:, 4])\n # to rgb\n img = img[:, :, (2, 1, 0)]\n\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n return torch.from_numpy(img).permute(2, 0, 1), target, height, width\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n cv2 img\n '''\n img_id = self.ids[index]\n path = self.coco.loadImgs(img_id)[0]['file_name']\n return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)\n\n def pull_anno(self, index):\n '''Returns the original annotation of image at index\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to get annotation of\n Return:\n list: [img_id, [(label, bbox coords),...]]\n eg: ('001718', [('dog', (96, 13, 438, 332))])\n '''\n img_id = self.ids[index]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n return img_id, self.coco.loadAnns(ann_ids)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n" ]
[ [ "numpy.array", "numpy.expand_dims", "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TCatshoek/lstar
[ "042b0ae3a0627db7a412c828f3752a9c30928ec1", "042b0ae3a0627db7a412c828f3752a9c30928ec1" ]
[ "experiments/tsp/tsp.py", "experiments/compare_fuzzers/plot.py" ]
[ "import tempfile\n\nimport numpy as np\nfrom graphviz import Digraph\n\nfrom equivalencecheckers.bruteforce import BFEquivalenceChecker\n#from experiments.tsp.tsplearner import TSPLearner\nfrom experiments.tsp.tsplearner import TSPLearner\nfrom learners.mealylearner import MealyLearner\nfrom suls.mealymachine import MealyState\nfrom suls.sul import SUL\nfrom teachers.teacher import Teacher\nfrom itertools import permutations\nimport random\n\nclass TSPProblem:\n def __init__(self, width=100, height=100):\n self.cities = None\n self.distances = None\n self.width = width\n self.height = height\n\n def make_random(self, n_cities):\n # Do all the math in numpy because fast\n self.cities = np.random.rand(n_cities, 2) * np.array([self.width, self.height])\n # Unreadable, but FAST\n self.distances = np.sqrt(np.sum(np.square(self.cities.reshape(len(self.cities), -1, 2) - self.cities.reshape(-1, len(self.cities), 2)), axis=2))\n return self\n\n def get_dist(self, frm, to):\n return self.distances[frm, to]\n\n def get_path_dist(self, path):\n assert len(path) > 1, f\"cannot get path lenght of paths with just one state: {path}\"\n return sum([self.get_dist(a, b) for [a, b] in [path[x: x + 2] for x in range(len(path) - 1)]])\n\n def bruteforce_shortestpath(self):\n shortest_len = 999999999999\n shortest_path = None\n\n actions = list(range(1, len(self.cities)))\n for p in permutations(actions):\n dist = self.get_path_dist([0] + list(p) + [0])\n print(dist)\n if dist < shortest_len:\n shortest_len = dist\n shortest_path = [0] + list(p) + [0]\n\n return (shortest_len, shortest_path)\n\nclass TSPSul(SUL):\n def __init__(self, problem, initial_state):\n self.problem = problem\n self.initial_state = initial_state\n self.state = initial_state\n self.mem = {}\n\n def calc_expected_future_len(self, inputs, n):\n if tuple(inputs) in self.mem:\n return self.mem[tuple(inputs)]\n\n # if len(inputs) == len(self.problem.cities):\n # return 0\n\n alphabet = set(self.get_alphabet())\n not_visited = alphabet.difference(set(inputs))\n #not_visited.remove(str(self.initial_state))\n not_visited = list(not_visited)\n acc_dist = 0\n\n for i in range(n):\n random.shuffle(not_visited)\n remaining_path = [int(self.initial_state) if len(inputs) < 1 else int(inputs[-1])] + [int(x) for x in not_visited] + [int(self.initial_state)]\n acc_dist += self.problem.get_path_dist(remaining_path)\n\n self.mem[tuple(inputs)] = acc_dist / n\n return acc_dist / n\n\n def process_input(self, inputs):\n if len(inputs) < 1:\n return None\n\n output = 0\n\n # We impose the restriction of not being able to visit a city twice,\n # # Except for returning to the initial city as the last action\n # visited = set()\n # visited.add(str(self.initial_state))\n #\n # for idx, input in enumerate(inputs):\n # # Last action can only be returning to the initial city:\n # if idx == len(self.problem.cities) - 1:\n # if int(input) == self.initial_state:\n # output += self.problem.get_dist(self.state, int(input))\n # self.state = int(input)\n # return 0\n # else:\n # return 'invalid_input'\n #\n # else:\n # if input not in visited:\n # output += self.problem.get_dist(self.state, int(input))\n # self.state = int(input)\n # visited.add(input)\n # else:\n # return 'invalid_input'\n\n return self.calc_expected_future_len(inputs, 1000)\n\n def reset(self):\n self.state = self.initial_state\n\n def get_alphabet(self):\n return [str(x) for x in list(range(len(self.problem.cities)))]\n\n\ndef filter_errs(hyp):\n for state in hyp.get_states():\n todelete = []\n\n for 
action, (nextstate, output) in state.edges.items():\n if output == 'invalid_input':\n todelete.append(action)\n\n for action in todelete:\n del state.edges[action]\n\ndef cleanup(hyp):\n for state in hyp.get_states():\n for action, (nextstate, output) in state.edges.items():\n state.edges[action] = (nextstate, f'{output:.2f}')\n\n\ndef draw(hyp, filename):\n g = Digraph('G', filename=filename)\n g.attr(rankdir='LR')\n\n # Collect nodes and edges\n to_visit = [hyp.initial_state]\n visited = []\n\n # Hacky way to draw start arrow pointing to first node\n g.attr('node', shape='none')\n g.node('startz', label='', _attributes={'height': '0', 'width': '0'})\n\n # Draw initial state\n g.attr('node', shape='circle')\n g.node(hyp.initial_state.name, label='0')\n\n g.edge('startz', hyp.initial_state.name)\n\n laststeps = []\n lastname = None\n\n while len(to_visit) > 0:\n cur_state = to_visit.pop()\n visited.append(cur_state)\n\n g.attr('node', shape='circle')\n for action, (other_state, output) in cur_state.edges.items():\n # Draw other states, but only once\n if other_state not in visited and other_state not in to_visit:\n to_visit.append(other_state)\n if action == '0':\n laststeps.append(float(output))\n lastname = other_state.name\n else:\n g.node(other_state.name, label=output)\n\n # Draw edges too\n if action == '0':\n g.edge(cur_state.name, other_state.name, label=f'{action}/{output}')\n else:\n g.edge(cur_state.name, other_state.name, label=f'{action}')\n\n g.node(lastname, label=str(min(laststeps)))\n\n g.view()\n\nif __name__ == \"__main__\":\n np.random.seed(1337)\n tspprob = TSPProblem().make_random(4)\n tsp = TSPSul(tspprob, 0)\n tsp.calc_expected_future_len([], 1000)\n eqc = BFEquivalenceChecker(tsp, max_depth=6)\n\n teacher = Teacher(tsp, eqc)\n\n learner = TSPLearner(teacher, tsp=tsp)\n #learner = MealyLearner(teacher)\n\n hyp = learner.run(show_intermediate=True)\n #filter_errs(hyp)\n cleanup(hyp)\n #raw(hyp, tempfile.mktemp('.gv'))\n hyp.render_graph(tempfile.mktemp('.gv'))\n\n # tspprob = TSPProblem().make_random(5)\n # tsp = TSPSul(tspprob, 0)\n\n", "from afl.utils import AFLUtils\nfrom libfuzzer.utils import CorpusUtils\nfrom suls.rerssoconnector import RERSSOConnector\nfrom pathlib import Path\nimport re\nimport matplotlib.pyplot as plt\n\ndef check_reached_afl(problem, problemset, rers_basepath, afl_basepath):\n rers_path = f\"{rers_basepath}/{problemset}/{problem}/{problem}.so\"\n afl_dir = f'{afl_basepath}/{problemset}/{problem}'\n bin_path = f'{afl_basepath}/{problemset}/{problem}/{problem}'\n\n sul = RERSSOConnector(rers_path)\n\n aflutils = AFLUtils(afl_dir,\n bin_path,\n [str(x) for x in sul.get_alphabet()],\n sul)\n\n reached = aflutils.gather_reached_errors(return_time_date=True)\n\n # Filter reached so only the earliest of each error counts\n time_error_reached = {}\n for (error, time_cur_reached) in reached:\n if error in time_error_reached:\n if time_error_reached[error] > time_cur_reached:\n time_error_reached[error] = time_cur_reached\n else:\n time_error_reached[error] = time_cur_reached\n\n # Sort by time reached\n sorted_time_reached = sorted(time_error_reached.items(), key=lambda x: x[1])\n\n # Accumulate which errors were found by which time\n acc_err_reached = {}\n acc_errs = set()\n for err, time in sorted_time_reached:\n acc_errs.add(err)\n acc_err_reached[time] = acc_errs.copy()\n\n sorted_acc_reached = sorted(acc_err_reached.items(), key=lambda x: x[0])\n sorted_acc_reached_count = [(time, len(errs)) for (time, errs) in sorted_acc_reached]\n times, 
counts = list(zip(*sorted_acc_reached_count))\n\n # Get some time info from the AFL directory\n start_time = aflutils.get_start_date_time()\n last_time = aflutils.get_last_date_time()\n\n # Calculate some time stuff for plotting\n # min_time = min(list(times))\n min_time = start_time\n rel_start_time = start_time - min_time\n rel_times = [time - min_time for time in times]\n rel_last_time = last_time - min_time\n\n all_times = [rel_start_time] + rel_times + [rel_last_time]\n all_counts = [0] + list(counts) + [max(counts)]\n\n return all_counts, all_times\n\n\ndef check_reached_libfuzzer(problem, problemset, rers_basepath, fuzzer_basepath):\n rers_path = f\"{rers_basepath}/{problemset}/{problem}/{problem}.so\"\n fuzzer_dir = Path(f'{fuzzer_basepath}/{problemset}/{problem}')\n\n assert fuzzer_dir.exists(), fuzzer_dir\n\n sul = RERSSOConnector(rers_path)\n\n cutils = CorpusUtils(\n corpus_path=fuzzer_dir.joinpath('corpus_errors'),\n fuzzer_path=fuzzer_dir.joinpath(f'{problem}_fuzz'),\n sul=sul\n )\n\n return cutils.get_plot_data()\n\nproblem = \"Problem11\"\nproblemset = \"TrainingSeqReachRers2019\"\nlibfuzzer_basepath = \"/home/tom/afl/thesis_benchmark_2/libFuzzer\"\nafl_basepath = \"afl\"\nrers_basepath = \"../../rers\"\n\nlibfuzzer_reached = check_reached_libfuzzer(problem, problemset, rers_basepath, libfuzzer_basepath)\nafl_reached = check_reached_afl(problem, problemset, rers_basepath, afl_basepath)\n\nif max(libfuzzer_reached[1]) > max(afl_reached[1]):\n afl_reached[0].append(afl_reached[0][-1])\n afl_reached[1].append(libfuzzer_reached[1][-1])\nelif max(libfuzzer_reached[1]) < max(afl_reached[1]):\n libfuzzer_reached[0].append(libfuzzer_reached[0][-1])\n libfuzzer_reached[1].append(afl_reached[1][-1])\n\nprint(libfuzzer_reached)\nprint(afl_reached)\n\nplt.step(libfuzzer_reached[1], libfuzzer_reached[0], label=\"libFuzzer\")\nplt.step(afl_reached[1], afl_reached[0], label=\"AFL\")\nplt.legend()\nplt.xlabel(\"time(s)\")\nplt.ylabel(\"Errors reached\")\nplt.title(f\"Fuzzer comparison - {problem}\")\nplt.show()\n\n# problem = \"Problem13\"\n# problemset = \"TrainingSeqReachRers2019\"\n# libfuzzer_basepath = \"/home/tom/afl/thesis_benchmark_2/libFuzzer\"\n# #afl_basepath = \"afl\"\n# rers_basepath = \"../../rers\"\n#\n# libfuzzer_reached = check_reached_libfuzzer(problem, problemset, rers_basepath, libfuzzer_basepath)\n# #afl_reached = check_reached_afl(problem, problemset, rers_basepath, afl_basepath)\n#\n# print(libfuzzer_reached)\n# #print(afl_reached)\n#\n# plt.step(libfuzzer_reached[1], libfuzzer_reached[0], label=\"libFuzzer\")\n# #plt.plot(afl_reached[1], afl_reached[0], label=\"AFL\")\n# plt.legend()\n# plt.title(f\"Fuzzer comparison - {problem}\")\n# plt.show()\n\n" ]
[ [ "numpy.array", "numpy.random.rand", "numpy.random.seed" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.step", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
acse-hz6818/Armageddon
[ "de62affe0b3f08cd74090d5d5e9e3c0905c9c8ed" ]
[ "cher2.py" ]
[ "# pylint: disable=invalid-name\r\n\"\"\"\r\nExtension 3:\r\n\r\nInversion to calculate the chern explosion\r\nTry different values of Y and r in order to find the ones that give a result closer to the data \r\n-------------------------\r\nreturns:\r\na graphical output\r\ndf with errors and parameters choosen\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pylab as plt\r\nimport scipy.interpolate as si\r\nimport armageddon\r\n#init class\r\nearth = armageddon.Planet()\r\n#read the csv of the values\r\nprotoCEAtable = pd.read_csv(r'C:\\Users\\gc2016\\OneDrive - Imperial College London\\ACSE\\ACSE-4.2\\acse-4-armageddon-hygiea\\data\\ChelyabinskEnergyAltitude.csv')\r\n#initialise inital values\r\nrc = range(10, 30, 2)\r\nv0c = 19200\r\nthetac = 18.3\r\nrhoc = 3300\r\n#put the csv values into arrays\r\nCEAheight = np.array(protoCEAtable['Height (km)'])\r\nCEAE = np.array(protoCEAtable['Energy Per Unit Length (kt Km^-1)'])\r\n\r\n\r\n#error for Cheryabinsk\r\ndef rY_finder(r_min, r_max, Y_min, Y_max, nr, nY):\r\n \"\"\"\r\n iterate to find the r and y from a range using the numerical solver\r\n \"\"\"\r\n #convert the points to a continious function\r\n lp = si.interp1d(CEAheight, CEAE)\r\n #array for candidates\r\n rlist = np.linspace(r_min, r_max, nr)\r\n Ylist = np.linspace(Y_min, Y_max, nY)\r\n #containers for results\r\n maperror = []\r\n mapr = []\r\n mapY = []\r\n energies = []\r\n #loop nessesary: you have to loop over all combinations\r\n for i in range(nY):\r\n for j in range(nr):\r\n mapr.append(rlist[j])\r\n mapY.append(Ylist[i])\r\n #call numerical solver\r\n df = earth.solve_atmospheric_entry(rlist[j], v0c, 3300, Ylist[i], 18.3,\r\n 1e5, dt=0.02, radians=False)\r\n df2 = earth.calculate_energy(df)\r\n #use only the curve for the error\r\n df_filtered = df2[(df2['altitude'] > 30) & (df2['altitude'] < 33) ]\r\n energies.append(df2.dedz)\r\n #rms error\r\n maperror.append(np.sqrt(np.sum((df_filtered.dedz)-lp(df_filtered.altitude))**2))\r\n errordf = pd.DataFrame({'Error': maperror, 'Radious': mapr, 'Strenght': mapY})\r\n return errordf, energies\r\n\r\n\r\ndef plot_model(list_e):\r\n \"\"\"\r\n function to plot\r\n \"\"\"\r\n plt.figure(figsize=(10,6))\r\n for i in list_e:\r\n plt.plot(i, np.linspace(100, 0, len(i)))\r\n\r\n plt.plot(CEAE, CEAheight, 'k', label='raw data')\r\n\r\n plt.xlabel('r gridpoints')\r\n plt.ylabel('Y gridpoints')\r\n plt.title('Squared Errors')\r\n plt.show()\r\n\r\n# error, energies_list = (rY_finder(10, 12, 9e6, 1e7, 3, 3))\r\n# print(\"error = \", error)\r\n# plot_model(energies_list)\r\n\r\n\r\n#print(CEAE)\r\n#for initial conditions\r\ndf = earth.solve_atmospheric_entry(radius=10, velocity=21000, density=3000, strength=1e5, angle=45,\r\n init_altitude=100e3, dt=0.01, radians=False)\r\ndf2 = earth.calculate_energy(df)\r\nprint(df2)\r\nplt.plot(df2.dedz, df2.altitude)\r\nplt.show()\r\n# print (\"max energy\", df2.dedz.max())\r\n# print (\"min energy\", df2.dedz.min())\r\n\r\n##################### Plot the initial values ########################################\r\n# fig = plt.figure(figsize=(12, 10))\r\n# CEA = fig.add_subplot(111)\r\n# CEA.margins(0.1)\r\n\r\n# lp = si.interp1d(CEAheight, CEAE)\r\n\r\n# CEA.plot(CEAE, CEAheight, 'k', label='raw data')\r\n# CEA.plot(lp(CEAheight), CEAheight, 'b*', label='approximation')\r\n\r\n# CEA.set_xlabel('$dedz, kT/km$', fontsize=16)\r\n# CEA.set_ylabel('$Height, z/m$', fontsize=16)\r\n# CEA.grid(True)\r\n\r\n# CEA.set_title('dE/dz-z Graph for Chelyabinsk and the interpolation to 
continuous', fontsize=16)\r\n# CEA.legend(loc='upper left', fontsize=18)\r\n" ]
[ [ "pandas.read_csv", "matplotlib.pylab.show", "numpy.linspace", "pandas.DataFrame", "matplotlib.pylab.xlabel", "matplotlib.pylab.title", "scipy.interpolate.interp1d", "matplotlib.pylab.figure", "matplotlib.pylab.ylabel", "matplotlib.pylab.plot", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
sivaramakrishna7/tensor2tensor
[ "eb0118d3f459913133e3d68a96944480a928bff1", "eb0118d3f459913133e3d68a96944480a928bff1" ]
[ "tensor2tensor/utils/learning_rate.py", "tensor2tensor/layers/modalities_test.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Optimization.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\n\nimport tensorflow as tf\n\n\ndef learning_rate_factor(name, step_num, hparams):\n if name == \"constant\":\n return hparams.learning_rate_constant\n elif name == \"linear_warmup\":\n return tf.minimum(1.0, step_num / hparams.learning_rate_warmup_steps)\n elif name == \"rsqrt_decay\":\n return tf.rsqrt(tf.maximum(step_num, hparams.learning_rate_warmup_steps))\n elif name == \"rsqrt_hidden_size\":\n return hparams.hidden_size ** -0.5\n elif name == \"legacy\":\n return legacy_learning_rate_schedule(hparams)\n else:\n raise ValueError(\"unknown learning rate factor %s\" % name)\n\n\ndef learning_rate_schedule(hparams):\n \"\"\"Learning rate schedule based on hparams.\"\"\"\n step_num = tf.to_float(tf.train.get_or_create_global_step())\n schedule_string = hparams.learning_rate_schedule\n names = schedule_string.split(\"*\")\n names = [name.strip() for name in names if name.strip()]\n ret = 1.0\n for name in names:\n ret *= learning_rate_factor(name, step_num, hparams)\n return ret\n\n\ndef legacy_learning_rate_schedule(hparams):\n \"\"\"Backwards-compatible learning-rate schedule.\"\"\"\n step_num = tf.to_float(tf.train.get_or_create_global_step())\n warmup_steps = tf.to_float(hparams.learning_rate_warmup_steps)\n if hparams.learning_rate_decay_scheme == \"noam\":\n ret = 5000.0 * hparams.hidden_size**-0.5 * tf.minimum(\n (step_num + 1) * warmup_steps**-1.5, (step_num + 1)**-0.5)\n else:\n warmup_steps = hparams.learning_rate_warmup_steps\n warmup = _learning_rate_warmup(warmup_steps)\n decay = _learning_rate_decay(hparams, warmup_steps)\n ret = tf.where(step_num < warmup_steps, warmup, decay)\n optimizer_correction = 0.002 if \"Adam\" in hparams.optimizer else 1.0\n return ret * optimizer_correction * hparams.learning_rate\n\n\ndef _legacy_sqrt_decay(step):\n \"\"\"Decay like 1 / sqrt(step), multiplied by 500 to normalize.\"\"\"\n return 500.0 / tf.sqrt(tf.maximum(step, 1.0))\n\n\ndef _piecewise_learning_rate(step, boundaries, values):\n \"\"\"Scale learning rate according to the given schedule.\n\n Multipliers are not cumulative.\n\n Args:\n step: global step\n boundaries: List of steps to transition on.\n values: Multiplier to apply at each boundary transition.\n\n Returns:\n Scaled value for the learning rate.\n \"\"\"\n values = [1.0] + values\n boundaries = [float(x) for x in boundaries]\n return tf.train.piecewise_constant(\n step, boundaries, values, name=\"piecewise_lr\")\n\n\ndef _learning_rate_decay(hparams, warmup_steps=0):\n \"\"\"Learning rate decay multiplier.\"\"\"\n scheme = hparams.learning_rate_decay_scheme\n warmup_steps = tf.to_float(warmup_steps)\n global_step = tf.to_float(tf.train.get_or_create_global_step())\n\n if not scheme or scheme == \"none\":\n return 
tf.constant(1.)\n\n tf.logging.info(\"Applying learning rate decay: %s.\", scheme)\n\n if scheme == \"exp\":\n decay_steps = hparams.learning_rate_decay_steps\n p = (global_step - warmup_steps) / decay_steps\n if hparams.learning_rate_decay_staircase:\n p = tf.floor(p)\n return tf.pow(hparams.learning_rate_decay_rate, p)\n\n if scheme == \"piecewise\":\n return _piecewise_learning_rate(global_step,\n hparams.learning_rate_boundaries,\n hparams.learning_rate_multiples)\n\n if scheme == \"cosine\":\n cycle_steps = hparams.learning_rate_cosine_cycle_steps\n cycle_position = global_step % (2 * cycle_steps)\n cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position)\n return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps))\n\n if scheme == \"cyclelinear10x\":\n # Cycle the rate linearly by 10x every warmup_steps, up and down.\n cycle_steps = warmup_steps\n cycle_position = global_step % (2 * cycle_steps)\n cycle_position = tf.to_float( # Normalize to the interval [-1, 1].\n cycle_position - cycle_steps) / float(cycle_steps)\n cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0.\n return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3).\n\n if scheme == \"sqrt\":\n return _legacy_sqrt_decay(global_step - warmup_steps)\n\n raise ValueError(\"Unrecognized learning rate decay scheme: %s\" %\n hparams.learning_rate_decay_scheme)\n\n\ndef _learning_rate_warmup(warmup_steps, warmup_schedule=\"exp\"):\n \"\"\"Learning rate warmup multiplier.\"\"\"\n if not warmup_steps:\n return tf.constant(1.)\n\n tf.logging.info(\"Applying %s learning rate warmup for %d steps\",\n warmup_schedule, warmup_steps)\n\n warmup_steps = tf.to_float(warmup_steps)\n global_step = tf.to_float(tf.train.get_or_create_global_step())\n\n if warmup_schedule == \"exp\":\n return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step)\n else:\n assert warmup_schedule == \"linear\"\n start = tf.constant(0.35)\n return ((tf.constant(1.) 
- start) / warmup_steps) * global_step + start\n", "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Modalities.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\n\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.utils import expert_utils\n\nimport tensorflow as tf\n\n\nclass ModalityTest(tf.test.TestCase):\n\n def testSymbolModalityInputs(self):\n batch_size = 10\n num_datashards = 5\n length = 5\n vocab_size = 5000\n hidden_size = 9\n model_hparams = common_hparams.basic_params1()\n model_hparams.hidden_size = hidden_size\n model_hparams.mode = tf.estimator.ModeKeys.TRAIN\n x = -1 + np.random.random_integers(\n vocab_size, size=(batch_size, length, 1, 1))\n m = modalities.SymbolModality(model_hparams, vocab_size)\n data_parallelism = expert_utils.Parallelism(\n [\"/device:CPU:0\"] * num_datashards)\n with self.test_session() as session:\n xs = tf.split(x, num_datashards)\n sharded_output = m.bottom_sharded(xs, data_parallelism)\n output = tf.concat(sharded_output, 0)\n session.run(tf.global_variables_initializer())\n res = session.run(output)\n self.assertEqual(res.shape, (batch_size, length, 1, hidden_size))\n\n def testSymbolModalityTargets(self):\n batch_size = 10\n num_datashards = 5\n length = 6\n height = 7\n hidden_size = 9\n vocab_size = 11\n model_hparams = common_hparams.basic_params1()\n model_hparams.hidden_size = hidden_size\n model_hparams.mode = tf.estimator.ModeKeys.TRAIN\n body_output = -1 + np.random.random_integers(\n 100, size=(batch_size, length, height, hidden_size))\n targets = -1 + np.random.random_integers(\n vocab_size, size=(batch_size, length, height, 1))\n m = modalities.SymbolModality(model_hparams, vocab_size)\n data_parallelism = expert_utils.Parallelism(\n [\"/device:CPU:0\"] * num_datashards)\n with self.test_session() as session:\n sharded_body_output = tf.split(tf.to_float(body_output), num_datashards)\n sharded_targets = tf.split(targets, num_datashards)\n sharded_logits = m.top_sharded(sharded_body_output, sharded_targets,\n data_parallelism)\n train_loss = m.loss_sharded(sharded_logits, sharded_targets,\n data_parallelism)\n logits = tf.concat(sharded_logits, 0)\n session.run(tf.global_variables_initializer())\n res1, res2 = session.run((logits, train_loss))\n self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size))\n self.assertEqual(res2.shape, ())\n\n def testSymbolModalityTargetsFactored(self):\n batch_size = 10\n num_datashards = 5\n length = 6\n height = 7\n hidden_size = 9\n vocab_size = 11\n model_hparams = common_hparams.basic_params1()\n model_hparams.factored_logits = True\n model_hparams.hidden_size = hidden_size\n model_hparams.mode = tf.estimator.ModeKeys.TRAIN\n body_output = -1 + np.random.random_integers(\n 100, size=(batch_size, length, height, hidden_size))\n 
targets = -1 + np.random.random_integers(\n vocab_size, size=(batch_size, length, height, 1))\n m = modalities.SymbolModality(model_hparams, vocab_size)\n data_parallelism = expert_utils.Parallelism(\n [\"/device:CPU:0\"] * num_datashards)\n with self.test_session() as session:\n sharded_body_output = tf.split(tf.to_float(body_output), num_datashards)\n sharded_targets = tf.split(targets, num_datashards)\n sharded_logits = m.top_sharded(sharded_body_output, sharded_targets,\n data_parallelism)\n train_loss = m.loss_sharded(sharded_logits, sharded_targets,\n data_parallelism)\n logits = tf.concat(sharded_logits, 0)\n session.run(tf.global_variables_initializer())\n res1, res2 = session.run((logits, train_loss))\n self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size))\n self.assertEqual(res2.shape, ())\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.constant", "tensorflow.cos", "tensorflow.pow", "tensorflow.maximum", "tensorflow.minimum", "tensorflow.floor", "tensorflow.train.get_or_create_global_step", "tensorflow.train.piecewise_constant", "tensorflow.logging.info", "tensorflow.to_float", "tensorflow.where", "tensorflow.log", "tensorflow.abs" ], [ "tensorflow.concat", "tensorflow.test.main", "tensorflow.global_variables_initializer", "numpy.random.random_integers", "tensorflow.to_float", "tensorflow.split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
SudeepDasari/video_prediction-1
[ "ef0953b514aa1b7a1f5e96fd30aebef01334fb2d", "ef0953b514aa1b7a1f5e96fd30aebef01334fb2d" ]
[ "consol_real_ensemble.py", "runscript_weak.py" ]
[ "import numpy as np\nimport argparse\nimport imageio\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--log_dirs', nargs='+')\n parser.add_argument('--max_num', type=int, default=32)\n\n args = parser.parse_args()\n\n dirs = args.log_dirs\n big_numpy = None\n for area in dirs:\n file_name = '{}/prediction_eval_psnr_max/outputs/gen_image.npy'.format(area)\n loaded = np.load(file_name)[:args.max_num]\n print(loaded.shape)\n if big_numpy is None:\n big_numpy = loaded\n else:\n big_numpy = np.concatenate([big_numpy, loaded], axis=2)\n \n for i in range(args.max_num):\n imageio.mimsave('consolidated/{}.gif'.format(i), big_numpy[i])\n", "# strong scaling\n\nimport numpy as np\nimport os\n\nngpus = np.array([1,2,4,8])\nbsizes = 16*ngpus\nfor i, g in enumerate(ngpus):\n indexlist = [str(i_gpu) for i_gpu in range(g)]\n gpustr = ','.join(indexlist)\n bsize = bsizes[i]\n cmd = 'CUDA_VISIBLE_DEVICES={} python scripts/train.py --input_dir /mnt/pushing_data/cartgripper_updown_sact/train --dataset cartgripper --model savp --model_hparams_dict hparams/bair/ours_deterministic_l1/model_hparams.json --model_hparams tv_weight=0.001,transformation=flow,last_frames=2,generate_scratch_image=false,batch_size={} --summary_freq 10 --timing_file timeb{}_{}.txt'.format(gpustr, bsize, bsize, gpustr)\n\n print(cmd)\n os.system(cmd)" ]
[ [ "numpy.concatenate", "numpy.load" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ichalkiad/VW_challenge
[ "c1ff50070d0f7367ccfbf473c69e90fd2be5e85e" ]
[ "src/jetson/Sensors/sensors_simple.py" ]
[ "import paho.mqtt.client as mqtt\nimport ev3dev.ev3 as ev3\nimport ctypes\nimport numpy as np\nimport sys\nimport cv2\nfrom Sensors.mpu6050.mpu6050 import MPU6050\nimport smbus\nfrom Sensors.odometry import Odometry\nimport sys, serial\nfrom serial.tools import list_ports\n\nclass Sensor(object):\n def __init__(self, *args, **kwargs):\n pass\n\n def read(self):\n raise ValueError('This function must be implemented by ')\n\nclass IR_teensy(Sensor):\n def __init__(self):\n self.ports = list(list_ports.comports()) # get all the connected serial devices\n self.serial_port = serial.Serial('/dev/'+self.ports[0].name) # connect to the first\n\n def debug(self):\n '''\n Use if cannot connect to the port\n This function will print all found serial devices and prints the name and index of the port \n '''\n for i, item in enumerate(self.ports):\n print(i + ' : ' + item.name)\n\n def read(self):\n '''\n Reads the current value from the teensy\n Returns:\n Distance in cm\n '''\n measurement = self.serial_port.readline() # read the measurement\n measurement = measurement.decode('utf-8').split('\\r') # change it to utf and split it on funny characters\n\n return measurement[0] # only return the actual measurment\n\nclass IMU2(Sensor):\n def __init__(self, bus='/dev/i2c-1', address=0x68):\n self.bus = smbus.SMBus(1)\n self.address = address\n self.mpu = MPU6050(self.bus,self.address, 'IMU')\n\n def read(self):\n '''\n Reads the current values from the IMU using the mpu library\n Returns:\n tuple containing: pitch, roll, gyro x,y,z, accel x,y,z these values are scaled and NOT raw\n '''\n return self.mpu.read_all()\n\nclass IMU(Sensor):\n def __init__(self, path_to_shared_lib_mpu='/home/nvidia/jetson-robot/IOInterface/jetson/Sensors/mpu/libmpu.so', bus_filename='/dev/i2c-1', bus_adresses=[0x68, 0x69]):\n bus_filename = bus_filename.encode('ascii')\n self.libmpu = ctypes.cdll.LoadLibrary(path_to_shared_lib_mpu)\n\n self.file_descriptors = [self.libmpu.initIMU(bus_filename, bus_adress) for bus_adress in bus_adresses]\n self.data_c_arrays = [(ctypes.c_int16*7)() for _ in range(len(bus_adresses))]\n self.name = 'imu'\n self.data_sources = [\"temperature\", \"acceleration\", \"gyro\"]\n\n def read(self):\n data_dict = {}\n for idx, (file_descriptor, data_c_array) in enumerate(zip(self.file_descriptors, self.data_c_arrays)):\n self.libmpu.readIMU(file_descriptor, data_c_array)\n data_np_array = np.array(data_c_array)\n data_dict['temperature_{}'.format(idx)] = data_np_array[0] / 340.0 + 36.53\n data_dict['acceleration_{}'.format(idx)] = np.array([int(data_np_array[1]),\n int(data_np_array[2]),\n int(data_np_array[3]),\n ])\n data_dict['gyro_{}'.format(idx)] = np.array([int(data_np_array[4]),\n int(data_np_array[5]),\n int(data_np_array[6]),\n ])\n return data_dict\n\n def read_sensor_nr(self, sensor_nr):\n # TODO: Ask Max, if the magic values for temperature conversion are correct.\n data_dict = {}\n self.libmpu.readIMU(self.file_descriptors[sensor_nr], self.data_c_arrays[sensor_nr])\n data_np_array = np.array(self.data_c_arrays[sensor_nr])\n data_dict['temperature'] = data_np_array[0] / 340.0 + 36.53\n data_dict['acceleration'] = np.array([int(data_np_array[1]), int(data_np_array[2]), int(data_np_array[3])])\n data_dict['gyro'] = np.array([int(data_np_array[4]), int(data_np_array[5]), int(data_np_array[6])])\n return data_dict\n\n def get_data_sources(self):\n return self.data_sources\n\n\nclass OnBoardCamera(Sensor):\n def __init__(self):\n self.name = 'onBoardCamera'\n self.cap = 
cv2.VideoCapture(\"nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)160, height=(int)120, format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)I420 ! videoconvert ! video/x-raw, format=(string)BGR ! appsink\")\n #self.cap = cv2.VideoCapture(\"nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)(160), height=(int)(120),format=(string)I420, framerate=(fraction)2/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink\")\n\n def read(self):\n if self.cap.isOpened():\n ret_val, frame = self.cap.read();\n frame = cv2.flip(frame,0)\n frame = cv2.flip(frame,1)\n else:\n raise ValueError('Camera not opened. Sorry this message is not really helpful, blame openCV :-) ')\n return {'onBoardCamera':frame}\n\n def clean_buf(self):\n for i in range(5):\n self.cap.grab()\n\n\n\n#Create camera sensor object\ncamera = OnBoardCamera()\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
drgmk/sdf
[ "a44e66a82f876dda079686b32c767370276c38a1" ]
[ "sdf/analytics.py" ]
[ "'''Analytic routines for debris disks.'''\n\nimport numpy as np\n\nfrom . import photometry\nfrom . import filter\nfrom . import utils\n\nclass BB_Disk(object):\n '''A blackbody disk class.\n \n Takes multiple temperatures, the purpose being for use to show\n disk properties in parameter spaces such as fractional luminosity\n vs. temperature.\n \n Parameters\n ----------\n lstar : float\n Stellar luminosity in Solar units.\n tstar : float\n Stellar effective temperature in Kelvin.\n distance : float\n Stellar distance in parsec.\n wavelengths : 1-D array, optional\n Vector of wavelengths.\n temperatures : 1-D array, optional\n Vector of temperatures.\n\n .. todo:: distance not actually needed for calibration limited, fix.\n\n .. todo:: don't use a for loop over temperatures,\n fix utils.bnu_wav_micron instead.\n '''\n\n def __init__(self,wavelengths=None,temperatures=None,\n lstar=None,tstar=None,distance=None):\n '''Initialise, default T=100K, Omega=1.0'''\n \n if wavelengths is None:\n self.wavelengths = 10**np.linspace(-1,4,1000)\n else:\n self.wavelengths = wavelengths\n \n if temperatures is None:\n self.temperatures = 10**np.linspace(1,3,1000)\n else:\n self.temperatures = temperatures\n\n self.lstar = lstar\n self.tstar = tstar\n self.distance = distance\n\n\n def blackbody_radii(self):\n '''Return the blackbody radii.'''\n \n return (278.3/self.temperatures)**2 * self.lstar**0.5\n\n\n def radiance(self):\n '''Return radiance, in W / m^2 / sr.'''\n \n return 5.67e-8 * self.temperatures**4 / np.pi\n\n\n def f_limits(self,lim_waves,flux_limits=None,r_limits=None,\n stellar_flux=None,fwhm=None,lstar_1pc=None):\n '''Return fractional luminosity limits.\n \n This routine implements Wyatt (2008) equations 8 and 11.\n \n Parameters\n ----------\n lim_waves : numpy.ndarray\n Array of wavelengths at which limits apply.\n flux_limits : numpy.ndarray, optional\n Array of flux limits.\n r_limits : numpy.ndarray, optional\n Array of calibration limits (F_disk/F_star).\n stellar_flux : numpy.ndarray, optional\n Array of stellar fluxes at lim_waves.\n fwhm : numpy.ndarray, optional\n Array of spatial resolutions at lim_waves, affects flux\n limited observations if disk is resolved.\n lstar_1pc : float\n L_star at 1pc, used for flux limits when distance unknown.\n\n One of flux_limits or r_limits must be given. If both, they must\n have the same length, and correspond to the wavelengths given.\n Likewise for stellar_flux and fwhm.\n '''\n\n if flux_limits is not None and r_limits is not None:\n if len(flux_limits) != len(r_limits):\n raise RuntimeError(\n 'flux_limits must be same length as r_limits')\n\n # sensitivity limit\n if flux_limits is not None:\n \n slims = np.zeros((len(self.temperatures),len(flux_limits)))\n\n for i,temp in enumerate(self.temperatures):\n\n if self.distance is not None:\n slims[i,:] = 3.4e9 * flux_limits * self.distance**2 / \\\n self.blackbody_radii()[i]**2 / \\\n utils.bnu_wav_micron(lim_waves,temp)\n else:\n # distance independent calculation, 2487305. is\n # pc^2/Lsun, haven't tracked down the 4 yet\n ldisk_1pc = 4 * 5.6704e-8 * flux_limits * 2487305. 
* \\\n temp**4 / utils.bnu_wav_micron(lim_waves,temp)\n slims[i,:] = ldisk_1pc / lstar_1pc\n\n # apply correction for resolved disks\n if self.distance is not None and fwhm is not None:\n fwhm_fact = 2 * self.blackbody_radii()[i] / self.distance / fwhm\n resolved = fwhm_fact > 1.0\n slims[i,resolved] *= fwhm_fact[resolved]\n\n \n # calibration limit, use actual stellar flux if given\n if r_limits is not None:\n \n if stellar_flux is not None:\n if len(stellar_flux) != len(r_limits):\n raise RuntimeError(\n 'Stellar flux ({}) must have same '\n 'length as r_limits ({})'.format(\n len(stellar_flux),\n len(r_limits)\n )\n )\n fstar = stellar_flux\n else:\n fstar = 1.77 * utils.bnu_wav_micron(lim_waves,self.tstar) * \\\n self.lstar / self.tstar**4 / self.distance**2\n\n clims = np.zeros((len(self.temperatures),len(r_limits)))\n for i,temp in enumerate(self.temperatures):\n clims[i,:] = 6e9/1.77 * r_limits * fstar / \\\n utils.bnu_wav_micron(lim_waves,temp) * \\\n (self.distance/self.blackbody_radii()[i])**2\n\n if flux_limits is not None and r_limits is not None:\n return np.minimum(slims,clims)\n elif flux_limits is not None:\n return slims\n elif r_limits is not None:\n return clims\n else:\n raise RuntimeError('Need to pass flux_limits or r_limits')\n\n\n def f_limits_from_result(self,r,min_wavelength=8.0, sn=3,\n x={}, x_det={},\n skip_filters=[],keep_filters=None):\n '''Derive fractional luminosity limits from an sdf result object.\n \n Also derive fractional luminosities and signal to noise of excess\n detections. Return low and high limits, expect to plot these\n with pyplot.fill_between and something like:\n \n ax.fill_between(temps, det_lo[:,i], det_hi[:,i],\n where=(det_lo[:,i]<det_hi[:,i]), alpha=0.25)\n\n Account for long wavelength grain inefficiency with X factor, \n used per filter, e.g. {'WAV850':4}.\n\n Rather than worry about flux vs. 
calibration limited, just do \n the calculation assuming flux limited by calculating the flux\n limit for each observed filter (whether it was an upper limit\n or not).\n \n Parameters\n ----------\n r : sdf.result.Result\n Result object with photometry.\n min_wavelength : float, optional\n Exclude filters with a mean wavelength shorter than this.\n sn : float, optional\n S/N at which detection significant, used only for detections.\n x : dict\n X factor to increase limits by: {filter,X}\n x_det : dict\n X factor to increase upper detection limit by: {filter,X}\n skip_filters : list, optional\n List of filters to skip.\n keep_filters : list, optional\n List of filters to keep, applied after skip_filters.\n '''\n\n waves = np.array([])\n filters = np.array([])\n f_lim = np.array([])\n f_det = np.array([])\n e_det = np.array([])\n f_star = np.array([])\n\n # get stellar luminosity at 1pc if no distance\n lstar = None\n if self.distance is None:\n lstar = 0.0\n if hasattr(r,'star'):\n for s in r.star:\n lstar += s['lstar_1pc']\n \n if lstar == 0.0:\n raise utils.SdfError('dont have lstar_1pc or distance')\n \n for p in r.obs:\n if not isinstance(p,photometry.Photometry):\n continue\n \n ok = np.invert(p.ignore)\n # loop to grab correct stellar photometry\n for i,filt in enumerate(p.filters[ok]):\n \n new_wave = p.mean_wavelength()[ok][i]\n if (filter.iscolour(filt) or \n new_wave < min_wavelength or\n filt in skip_filters):\n continue\n \n if keep_filters is not None:\n if filt not in keep_filters:\n continue\n \n waves = np.append(waves,new_wave)\n filters = np.append(filters,filt)\n filt_i = np.where(filt == np.array(r.all_filters))[0]\n f_star = np.append(f_star,r.all_star_phot[filt_i])\n \n fac = 1\n if filt in x.keys():\n fac = x[filt]\n\n if p.upperlim[ok][i]:\n f_lim = np.append(f_lim,p.fnujy[ok][i]*fac)\n f_det = np.append(f_det, 0)\n e_det = np.append(e_det, 0)\n else:\n # 1sigma uncertainty, observed and star in quadrature\n unc = np.sqrt(\n p.e_fnujy[ok][i]**2 + \\\n 0.25*(r.all_star_phot_1sig_lo[filt_i] + r.all_star_phot_1sig_hi[filt_i])**2\n )\n f_lim = np.append(f_lim,3*unc*fac)\n f_det = np.append(f_det, p.fnujy[ok][i] - f_star[-1])\n e_det = np.append(e_det, unc)\n\n lims = self.f_limits(waves,flux_limits=f_lim,\n stellar_flux=f_star,lstar_1pc=lstar)\n dets = self.f_limits(waves,flux_limits=f_det,\n stellar_flux=f_star,lstar_1pc=lstar)\n\n ok = e_det > 0\n sn_dets = np.zeros(lims.shape[1])\n sn_dets[ok] = f_det[ok] / e_det[ok]\n\n # now compute limit ranges for detections, first get ranges\n det_lo = np.zeros(lims.shape)\n det_hi = lims.copy()\n both_hi = lims.copy()\n for i in range(lims.shape[1]):\n if sn_dets[i]>sn:\n fac = 1\n if filters[i] in x_det.keys():\n fac = x_det[filters[i]]\n det_lo[:,i] = dets[:,i]*(1-sn/sn_dets[i])\n det_hi[:,i] = dets[:,i]*(fac+sn/sn_dets[i])\n both_hi[:,i] = np.max([[det_hi[:,i]],[lims[:,i]]], axis=0)\n\n # now adjust high limit based on other limits\n for i in range(lims.shape[1]):\n other = np.arange(lims.shape[1]) != i\n det_hi[:,i] = np.min( np.hstack((both_hi[:,other],det_hi[:,i].reshape((-1,1)))), axis=1 )\n \n return lims, det_lo, det_hi, sn_dets, filters\n\n\n def f_limits_togrid(self, lims, f=None):\n '''Return boolean grid in f - r/T space indicating detectability.\n \n Sum multiple of these to get the grid that shows how many of the\n systems it was possible to detect a disk for.\n \n Parameters\n ----------\n lims : array\n Array of f limits (i.e. 
n_temperatures x n_lim).\n f : array, optional\n Array of f to use in grid.\n '''\n \n if f is None:\n f = 10**np.linspace(-7,-1,100)\n \n fs, _ = np.meshgrid(f, self.temperatures)\n # compare each f against the best (lowest) limit per temperature;\n # reshape the per-temperature minima so they broadcast along the f axis\n return fs > np.min(lims, axis=1)[:, np.newaxis], f\n" ]
[ [ "numpy.minimum", "numpy.sqrt", "numpy.invert", "numpy.meshgrid", "numpy.linspace", "numpy.arange", "numpy.min", "numpy.max", "numpy.append", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VidyaKamath1089/models
[ "0e74158f72160a5d25b977de7f6f2cf4d1908dba" ]
[ "official/vision/beta/projects/volumetric_models/modeling/factory_test.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for factory.py.\"\"\"\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\n# pylint: disable=unused-import\nfrom official.vision.beta.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg\nfrom official.vision.beta.projects.volumetric_models.modeling import backbones\nfrom official.vision.beta.projects.volumetric_models.modeling import decoders\nfrom official.vision.beta.projects.volumetric_models.modeling import factory\n\n\nclass SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(((128, 128, 128), 5e-5), ((64, 64, 64), None))\n def test_unet3d_builder(self, input_size, weight_decay):\n num_classes = 3\n input_specs = tf.keras.layers.InputSpec(\n shape=[None, input_size[0], input_size[1], input_size[2], 3])\n model_config = exp_cfg.SemanticSegmentationModel3D(num_classes=num_classes)\n l2_regularizer = (\n tf.keras.regularizers.l2(weight_decay) if weight_decay else None)\n model = factory.build_segmentation_model_3d(\n input_specs=input_specs,\n model_config=model_config,\n l2_regularizer=l2_regularizer)\n self.assertIsInstance(\n model, tf.keras.Model,\n 'Output should be a tf.keras.Model instance but got %s' % type(model))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.keras.regularizers.l2", "tensorflow.keras.layers.InputSpec", "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
isabuster/snip
[ "8e7644edd1f4dcca0f833666cf54474bcacf2aea", "8e7644edd1f4dcca0f833666cf54474bcacf2aea" ]
[ "snip-tensorflow/main.py", "snip-tensorflow/train.py" ]
[ "import os\nimport sys\nimport argparse\nimport tensorflow as tf\nimport numpy as np\nimport scipy.stats as st\nimport matplotlib.pyplot as plt\n\nfrom dataset import Dataset\nfrom model import Model\nimport prune\nimport train\nimport test\n\n# from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n # Data options\n parser.add_argument('--datasource', type=str, default='mnist', help='dataset to use')\n parser.add_argument('--path_data', type=str, default='./data', help='location to dataset')\n parser.add_argument('--aug_kinds', nargs='+', type=str, default=[], help='augmentations to perform')\n # Model options\n parser.add_argument('--arch', type=str, default='lenet5', help='network architecture to use')\n parser.add_argument('--target_sparsity', type=float, default=0.9, help='level of sparsity to achieve')\n # Train options\n parser.add_argument('--batch_size', type=int, default=100, help='number of examples per mini-batch')\n parser.add_argument('--train_iterations', type=int, default=10000, help='number of training iterations')\n parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer of choice')\n parser.add_argument('--lr_decay_type', type=str, default='constant', help='learning rate decay type')\n parser.add_argument('--lr', type=float, default=1e-2, help='initial learning rate')\n parser.add_argument('--decay_boundaries', nargs='+', type=int, default=[], help='boundaries for piecewise_constant decay')\n parser.add_argument('--decay_values', nargs='+', type=float, default=[], help='values for piecewise_constant decay')\n # Initialization\n parser.add_argument('--initializer_w_bp', type=str, default='vs', help='initializer for w before pruning')\n parser.add_argument('--initializer_b_bp', type=str, default='zeros', help='initializer for b before pruning')\n parser.add_argument('--initializer_w_ap', type=str, default='vs', help='initializer for w after pruning')\n parser.add_argument('--initializer_b_ap', type=str, default='zeros', help='initializer for b after pruning')\n # Logging, saving, options\n parser.add_argument('--logdir', type=str, default='logs', help='location for summaries and checkpoints')\n parser.add_argument('--check_interval', type=int, default=100, help='check interval during training')\n parser.add_argument('--save_interval', type=int, default=1000, help='save interval during training')\n args = parser.parse_args()\n # Add more to args\n args.path_summary = os.path.join(args.logdir, 'summary')\n args.path_model = os.path.join(args.logdir, 'model')\n args.path_assess = os.path.join(args.logdir, 'assess')\n return args\n\n\ndef plot_distribution(sess, layers, pruned=False):\n for idx, var in enumerate(layers):\n if pruned == False:\n layer = np.array(sess.run(var)).flatten()\n else:\n layer = var.flatten()[var.flatten() != 0]\n ax = plt.axes()\n ax.set_axisbelow(True)\n plt.hist(layer, bins=30, label=\"Weights\", density=True, edgecolor='white')\n plt.grid(ls='--')\n left, right = plt.xlim()\n kde_xs = np.linspace(left, right)\n kde = st.gaussian_kde(layer)\n plt.plot(kde_xs, kde.pdf(kde_xs), label=\"PDF\")\n plt.legend(loc=\"upper left\")\n plt.ylabel('Density')\n plt.xlabel('Weights')\n if pruned == False:\n plt.title(\"Histogram of Weights for layer{} before Pruning\".format(idx+1))\n plt.savefig('layer{} before pruning.png'.format(idx+1))\n else:\n plt.title(\"Histogram of Weights for layer{} after Pruning\".format(idx+1))\n plt.savefig('layer{} 
after pruning.png'.format(idx+1))\n plt.close()\n\n\ndef main():\n args = parse_arguments()\n\n # Dataset\n dataset = Dataset(**vars(args))\n\n # Tensorflow 2.0 by default uses Eager-Execution, hence Placeholders are not getting executed\n tf.compat.v1.disable_eager_execution()\n\n # Reset the default graph and set a graph-level seed\n tf.compat.v1.reset_default_graph()\n tf.compat.v1.set_random_seed(9)\n\n # Model\n model = Model(num_classes=dataset.num_classes, **vars(args))\n model.construct_model()\n\n # Session\n sess = tf.compat.v1.InteractiveSession()\n saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables())\n tf.compat.v1.global_variables_initializer().run()\n tf.compat.v1.local_variables_initializer().run()\n # saver.restore(sess, \"/data1/liyilin/vgg/model0/itr-0\")\n\n # Calculate sparsity per layer using SNIP but not prune\n num_weights, kappa = prune.prune_snip(args, model, sess, dataset)\n sparsity_fraction = {k: 1 - kappa[k] / num_weights[k] for k in num_weights}\n print('sparsity per layer:')\n print(sparsity_fraction)\n\n rewinding_weights0 = sess.run(model.weights, {model.pruned: True})\n # Train and test the dense network\n rewinding_weights1, rewinding_weights2 = train.train(args, model, sess, dataset, lr=args.lr, rewinding_itr1=60000, rewinding_itr2=120000)\n print('|========= FINISH TRAINING DENSE NETWORK =========|')\n test.test(args, model, sess, dataset)\n\n # Prune each layer based on the magnitude of the weights according to sparsity per layer\n prune.prune_magnitude(args, model, sess, dataset, kappa)\n\n # Train and test with the sparse network\n train.train(args, model, sess, dataset, lr=1e-1)\n print('|========= FINISH TRAINING SPARSE NETWORK =========|')\n test.test(args, model, sess, dataset)\n\n # Rewind\n prune.rewind(args, model, sess, dataset, rewinding_weights2, rewinding_itr=120000)\n\n # Train and test with the sparse network\n train.train(args, model, sess, dataset, lr=1e-1)\n print('|========= FINISH TRAINING SPARSE NETWORK =========|')\n test.test(args, model, sess, dataset)\n\n # Rewind\n prune.rewind(args, model, sess, dataset, rewinding_weights1, rewinding_itr=60000)\n\n # Train and test with the sparse network\n train.train(args, model, sess, dataset, lr=1e-1)\n print('|========= FINISH TRAINING SPARSE NETWORK =========|')\n test.test(args, model, sess, dataset)\n\n # Rewind\n prune.rewind(args, model, sess, dataset, rewinding_weights0, rewinding_itr=0)\n\n # Train and test with the sparse network\n train.train(args, model, sess, dataset, lr=1e-1)\n print('|========= FINISH TRAINING SPARSE NETWORK =========|')\n test.test(args, model, sess, dataset)\n\n sess.close()\n sys.exit()\n\n\nif __name__ == \"__main__\":\n main()\n", "import os\nimport tensorflow as tf\nimport time\nimport numpy as np\n\nfrom augment import augment\n\n\ndef train(args, model, sess, dataset, lr, rewinding_itr1=10000, rewinding_itr2=10000):\n print('|========= START TRAINING =========|')\n if not os.path.isdir(args.path_summary): os.makedirs(args.path_summary)\n if not os.path.isdir(args.path_model): os.makedirs(args.path_model)\n saver = tf.compat.v1.train.Saver(max_to_keep=10)\n random_state = np.random.RandomState(9)\n writer = {}\n writer['train'] = tf.compat.v1.summary.FileWriter(args.path_summary + '/train', sess.graph)\n writer['val'] = tf.compat.v1.summary.FileWriter(args.path_summary + '/val')\n t_start = time.time()\n\n for itr in range(args.train_iterations):\n batch = dataset.get_next_batch('train', args.batch_size)\n batch = 
augment(batch, args.aug_kinds, random_state)\n feed_dict = {}\n feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})\n feed_dict.update({model.compress: False, model.new_compress: False, model.is_train: True, model.pruned: True})\n feed_dict.update({model.lr: lr})\n input_tensors = [model.outputs] # always execute the graph outputs\n if (itr+1) % args.check_interval == 0:\n input_tensors.extend([model.summ_op, model.sparsity])\n input_tensors.extend([model.train_op, model.w_final])\n result = sess.run(input_tensors, feed_dict)\n\n # Check on validation set.\n if (itr+1) % args.check_interval == 0:\n batch = dataset.get_next_batch('val', args.batch_size)\n batch = augment(batch, args.aug_kinds, random_state)\n feed_dict = {}\n feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})\n feed_dict.update({model.compress: False, model.new_compress: False, model.is_train: False, model.pruned: True})\n input_tensors = [model.outputs, model.summ_op, model.sparsity]\n result_val = sess.run(input_tensors, feed_dict)\n\n # Check summary and print results\n if (itr+1) % args.check_interval == 0:\n writer['train'].add_summary(result[1], itr)\n writer['val'].add_summary(result_val[1], itr)\n pstr = '(train/val) los:{:.3f}/{:.3f} acc:{:.3f}/{:.3f} spa:{:.3f}'.format(\n result[0]['los'], result_val[0]['los'],\n result[0]['acc'], result_val[0]['acc'],\n result[2],\n )\n print('itr{}: {} (t:{:.1f})'.format(itr+1, pstr, time.time() - t_start))\n t_start = time.time()\n\n # Save model\n if (itr+1) % args.save_interval == 0:\n saver.save(sess, args.path_model + '/itr-' + str(itr))\n\n # Save weights for rewinding\n if (itr+1) == rewinding_itr1:\n rewinding_weights1 = result[-1]\n if (itr+1) == rewinding_itr2:\n rewinding_weights2 = result[-1]\n\n return rewinding_weights1, rewinding_weights2\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.compat.v1.local_variables_initializer", "numpy.linspace", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.global_variables_initializer", "matplotlib.pyplot.axes", "matplotlib.pyplot.xlim", "scipy.stats.gaussian_kde", "tensorflow.compat.v1.InteractiveSession", "tensorflow.compat.v1.disable_eager_execution", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "tensorflow.compat.v1.set_random_seed", "tensorflow.compat.v1.reset_default_graph", "matplotlib.pyplot.close", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ], [ "tensorflow.compat.v1.summary.FileWriter", "tensorflow.compat.v1.train.Saver", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bergr7/KNN_from_scratch
[ "8c1e40f89b078a46b27c347d2faa3d3491a47958" ]
[ "knn.py" ]
[ "import numpy as np\nfrom sklearn.metrics import confusion_matrix\n\nclass Knn:\n \"\"\"Classifier implementing the k-nearest neighbors vote.\n\n Parameters\n __________\n :param n_neighbors : int\n Number of neighbors to use.\n\n :param metric : {'manhattan', 'euclidean', 'minkowski'}, default='minkowski'\n The distance metric to use for defining K-nearest neighbors. The default metric is minkowski, and with p=2 is\n equivalent to the standard Euclidean metric.\n\n :param p : int, default=2\n Power parameter for the Minkowski metric. When p=1, this is equivalent to using manhattan_distance (l1), and\n euclidean_distance (l2) for p=2.\n\n :param weights : {'uniform', 'distance'}, default='uniform'\n Weight function used in prediction. Possible values:\n\n - 'uniform' : uniform weights. All points in each neighborhood are weighted equally.\n - 'distance' : weight points by the inverse of their distance. In this case, closer neighbors of a query point\n will have a greater influence than neighbors which are further away.\n\n Methods\n __________\n :method fit :\n It fits the model using X as training data and y as target values.\n\n :method predict :\n Loop through all data points and predict the class labels for each of the new data point based on training data.\n \"\"\"\n\n def __init__(self, n_neighbors, metric='minkowski', p=2, weights='uniform'):\n\n if p < 0:\n raise ValueError(\"p should be larger than 0.\")\n\n if metric not in ['minkowski', 'manhattan', 'euclidean']:\n raise ValueError(\"Distance method not supported. Must be {'manhattan', 'euclidean', 'minkowski'}\")\n\n if weights not in ['uniform', 'distance']:\n raise ValueError(\n \"Weights can be only assigned uniformly or based on distance. Must be {'uniform', 'distance'}\")\n\n self.n_neighbors = n_neighbors\n self.metric = metric\n self.p = p\n self.weights = weights\n\n def fit(self, X, y):\n \"\"\"Fit the model using X as training data and y as target values\n Parameters\n __________\n :argument X: {array-like, sparse matrix}\n Training data. If array or matrix, shape = [n_samples, n_features]\n\n :argument y: {array-like, sparse matrix}\n Target values of shape = [n_samples] or [n_samples, n_outputs]\n\n :return: Training data and associated labels\n \"\"\"\n\n # check data shape\n if X.shape[0] == y.shape[0]:\n self.X = X\n self.y = y\n else:\n raise ValueError(\"Dimensional mismatch: Number of rows in X must be equal to the number of rows in y\")\n\n # check for missing values\n if np.isnan(X).any() or np.isnan(y).any():\n raise TypeError(\"There are missing values in the dataset. 
Consider removing samples with missing values\"\n \"or imputation methods.\")\n return X, y\n\n def _manhattan_distance(self, point):\n \"\"\"Calculate manhattan distance from one data point to all the samples in the training set.\n\n :param point: {array-like}\n New data point of shape [n_features]\n\n :return: numpy array with manhattan distances from the data point to all the samples in the training set.\n \"\"\"\n return np.sum(abs(self.X - point), axis=1)\n\n def _euclidean_distance(self, point):\n \"\"\"Calculate euclidean distance from one data point to all the samples in the training set.\n\n :param point: {array-like}\n New data point of shape [n_features]\n\n :return: numpy array with euclidean distances from the data point to all the samples in the training set.\n \"\"\"\n return np.sqrt(np.sum((self.X - point) ** 2, axis=1))\n\n def _minkowski_distance(self, point):\n \"\"\"Calculate minkowski distance from one data point to all the samples in the training set.\n\n :param point: {array-like}\n New data point of shape [n_features]\n\n :return: numpy array with minkowski distances from the data point to all the samples in the training set.\n \"\"\"\n return np.sum(abs(self.X - point) ** self.p, axis=1) ** (1 / self.p)\n\n def _uniform_weights(self, distances):\n \"\"\"Assign equal weights to all points.\n\n :param distances: {array-like}\n numpy array with distances from one data point to all the samples in the training set.\n\n :return: numpy array with weight-distance pairs for each sample in the training set.\n \"\"\"\n return np.array([(1, d) for _, d in enumerate(distances)])\n\n def _distance_weights(self, distances):\n \"\"\"Weight points by the inverse of their distance.\n\n :param distances: {array-like}\n numpy array with distances from one data point to all the samples in the training set.\n\n :return: numpy array with weight-distance pairs for each sample in the training set.\n \"\"\"\n return np.array([(1 / d, d) if d > 0 else (1, d) for _, d in enumerate(distances)])\n\n def _predict_point(self, point):\n \"\"\" Predict class label of a single data point.\n\n :argument point: {array-like}\n New data point of shape [n_features]\n :return: str\n Assigned class label based on training data.\n \"\"\"\n # calculate point distance from all other samples\n if self.metric == 'manhattan':\n distances = self._manhattan_distance(point)\n elif self.metric == 'euclidean':\n distances = self._euclidean_distance(point)\n elif self.metric == 'minkowski':\n distances = self._minkowski_distance(point)\n else:\n raise AttributeError(\"Distance method not supported. 
Must be {'manhattan', 'euclidean', 'minkowski'}\")\n\n # calculate point distance weights\n if self.weights == 'uniform':\n weights = self._uniform_weights(distances)\n else:\n weights = self._distance_weights(distances)\n\n # sort index of distances from nearest to farthest and keep only first \"n_neighbors\" ones\n sorted_distances_idxs = distances.argsort()[:self.n_neighbors]\n\n # Vote - count number of classes for Knn\n class_count = {}\n\n if self.weights == 'uniform':\n # assign uniform weights\n for idx in sorted_distances_idxs:\n vote_label = self.y[idx]\n class_count[vote_label] = class_count.get(vote_label, 0) + 1\n else:\n # assign weights based on distance\n for idx in sorted_distances_idxs:\n vote_label = self.y[idx]\n class_count[vote_label] = class_count.get(vote_label, 0) + weights[idx][0]\n\n # Descending sort the resulting class counts dictionary by class counts values\n sorted_class_count = sorted(class_count.items(),\n key=lambda item: (item[1], item[0]),\n reverse=True)\n\n # Return the predicted label\n return sorted_class_count[0][0]\n\n def predict(self, x):\n \"\"\"Loop through all data points and predict the class labels\n\n :argument x: {array-like}\n New data points to be assigned a label of shape [n_points, n_features]\n\n :return: list\n A list with class labels assigned to each new data point.\n \"\"\"\n # Loop through all samples and predict the class labels and store the results\n return [self._predict_point(point) for point in x]\n\n def display_results(self, y_test, y_pred):\n labels = np.unique(y_pred)\n confusion_mat = confusion_matrix(y_test, y_pred, labels=labels)\n accuracy = (y_pred == y_test).mean()\n\n print(\"Labels:\", labels)\n print(\"Confusion Matrix:\\n\", confusion_mat)\n print(\"Accuracy:\", accuracy)\n\n def __repr__(self):\n return \"<n_neighbors:\"+str(self.n_neighbors)+\", metric:\"+self.metric+\", p:\"+str(self.p)+\", weights:\"+self.weights+\">\"\n\n def __str__(self):\n return \"Knn(n_neighbors=\"+str(self.n_neighbors)+\", metric=\"+self.metric+\", p=\"+str(self.p)+\", weights=\"+self.weights+\")\"\n" ]
[ [ "numpy.isnan", "numpy.sum", "sklearn.metrics.confusion_matrix", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ChenYutongTHU/slt
[ "2fb617feccccedb008446d34dcf5b3527b004ce6" ]
[ "signjoey/initialization.py" ]
[ "# coding: utf-8\n\n\"\"\"\nImplements custom initialization\n\"\"\"\n\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.nn.init import _calculate_fan_in_and_fan_out\n\n\ndef orthogonal_rnn_init_(cell: nn.RNNBase, gain: float = 1.0):\n \"\"\"\n Orthogonal initialization of recurrent weights\n RNN parameters contain 3 or 4 matrices in one parameter, so we slice it.\n \"\"\"\n with torch.no_grad():\n for _, hh, _, _ in cell.all_weights:\n for i in range(0, hh.size(0), cell.hidden_size):\n nn.init.orthogonal_(hh.data[i : i + cell.hidden_size], gain=gain)\n\n\ndef lstm_forget_gate_init_(cell: nn.RNNBase, value: float = 1.0) -> None:\n \"\"\"\n Initialize LSTM forget gates with `value`.\n\n :param cell: LSTM cell\n :param value: initial value, default: 1\n \"\"\"\n with torch.no_grad():\n for _, _, ih_b, hh_b in cell.all_weights:\n l = len(ih_b)\n ih_b.data[l // 4 : l // 2].fill_(value)\n hh_b.data[l // 4 : l // 2].fill_(value)\n\n\ndef xavier_uniform_n_(w: Tensor, gain: float = 1.0, n: int = 4) -> None:\n \"\"\"\n Xavier initializer for parameters that combine multiple matrices in one\n parameter for efficiency. This is e.g. used for GRU and LSTM parameters,\n where e.g. all gates are computed at the same time by 1 big matrix.\n\n :param w: parameter\n :param gain: default 1\n :param n: default 4\n \"\"\"\n with torch.no_grad():\n fan_in, fan_out = _calculate_fan_in_and_fan_out(w)\n assert fan_out % n == 0, \"fan_out should be divisible by n\"\n fan_out //= n\n std = gain * math.sqrt(2.0 / (fan_in + fan_out))\n a = math.sqrt(3.0) * std\n nn.init.uniform_(w, -a, a)\n\n\n# pylint: disable=too-many-branches\ndef initialize_model(model: nn.Module, cfg: dict, txt_padding_idx: int) -> None:\n \"\"\"\n This initializes a model based on the provided config.\n\n All initializer configuration is part of the `model` section of the\n configuration file.\n For an example, see e.g. `https://github.com/joeynmt/joeynmt/\n blob/master/configs/iwslt_envi_xnmt.yaml#L47`\n\n The main initializer is set using the `initializer` key.\n Possible values are `xavier`, `uniform`, `normal` or `zeros`.\n (`xavier` is the default).\n\n When an initializer is set to `uniform`, then `init_weight` sets the\n range for the values (-init_weight, init_weight).\n\n When an initializer is set to `normal`, then `init_weight` sets the\n standard deviation for the weights (with mean 0).\n\n The word embedding initializer is set using `embed_initializer` and takes\n the same values. The default is `normal` with `embed_init_weight = 0.01`.\n\n Biases are initialized separately using `bias_initializer`.\n The default is `zeros`, but you can use the same initializers as\n the main initializer.\n\n Set `init_rnn_orthogonal` to True if you want RNN orthogonal initialization\n (for recurrent matrices). 
Default is False.\n\n `lstm_forget_gate` controls how the LSTM forget gate is initialized.\n Default is `1`.\n\n :param model: model to initialize\n :param cfg: the model configuration\n :param txt_padding_idx: index of spoken language text padding token\n \"\"\"\n\n # defaults: xavier, embeddings: normal 0.01, biases: zeros, no orthogonal\n gain = float(cfg.get(\"init_gain\", 1.0)) # for xavier\n init = cfg.get(\"initializer\", \"xavier\")\n init_weight = float(cfg.get(\"init_weight\", 0.01))\n\n embed_init = cfg.get(\"embed_initializer\", \"normal\")\n embed_init_weight = float(cfg.get(\"embed_init_weight\", 0.01))\n embed_gain = float(cfg.get(\"embed_init_gain\", 1.0)) # for xavier\n\n bias_init = cfg.get(\"bias_initializer\", \"zeros\")\n bias_init_weight = float(cfg.get(\"bias_init_weight\", 0.01))\n\n # pylint: disable=unnecessary-lambda, no-else-return\n def _parse_init(s, scale, _gain):\n scale = float(scale)\n assert scale > 0.0, \"incorrect init_weight\"\n if s.lower() == \"xavier\":\n return lambda p: nn.init.xavier_uniform_(p, gain=_gain)\n elif s.lower() == \"uniform\":\n return lambda p: nn.init.uniform_(p, a=-scale, b=scale)\n elif s.lower() == \"normal\":\n return lambda p: nn.init.normal_(p, mean=0.0, std=scale)\n elif s.lower() == \"zeros\":\n return lambda p: nn.init.zeros_(p)\n else:\n raise ValueError(\"unknown initializer\")\n\n init_fn_ = _parse_init(init, init_weight, gain)\n embed_init_fn_ = _parse_init(embed_init, embed_init_weight, embed_gain)\n bias_init_fn_ = _parse_init(bias_init, bias_init_weight, gain)\n\n with torch.no_grad():\n for name, p in model.named_parameters():\n\n if \"txt_embed\" in name:\n if \"lut\" in name:\n embed_init_fn_(p)\n\n elif \"bias\" in name:\n bias_init_fn_(p)\n\n elif len(p.size()) > 1:\n\n # RNNs combine multiple matrices is one, which messes up\n # xavier initialization\n if init == \"xavier\" and \"rnn\" in name:\n n = 1\n if \"encoder\" in name:\n n = 4 if isinstance(model.encoder.rnn, nn.LSTM) else 3\n elif \"decoder\" in name:\n n = 4 if isinstance(model.decoder.rnn, nn.LSTM) else 3\n xavier_uniform_n_(p.data, gain=gain, n=n)\n else:\n init_fn_(p)\n\n # zero out paddings\n if model.txt_embed is not None:\n model.txt_embed.lut.weight.data[txt_padding_idx].zero_()\n\n orthogonal = cfg.get(\"init_rnn_orthogonal\", False)\n lstm_forget_gate = cfg.get(\"lstm_forget_gate\", 1.0)\n\n # encoder rnn orthogonal initialization & LSTM forget gate\n if hasattr(model.encoder, \"rnn\"):\n\n if orthogonal:\n orthogonal_rnn_init_(model.encoder.rnn)\n\n if isinstance(model.encoder.rnn, nn.LSTM):\n lstm_forget_gate_init_(model.encoder.rnn, lstm_forget_gate)\n\n # decoder rnn orthogonal initialization & LSTM forget gate\n if hasattr(model.decoder, \"rnn\"):\n\n if orthogonal:\n orthogonal_rnn_init_(model.decoder.rnn)\n\n if isinstance(model.decoder.rnn, nn.LSTM):\n lstm_forget_gate_init_(model.decoder.rnn, lstm_forget_gate)\n" ]
[ [ "torch.nn.init.uniform_", "torch.no_grad", "torch.nn.init.orthogonal_", "torch.nn.init.normal_", "torch.nn.init.xavier_uniform_", "torch.nn.init._calculate_fan_in_and_fan_out", "torch.nn.init.zeros_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hcbh96/Random-Forest-FYP
[ "4af2c85a4f4d998f616751f9c366329bdc559b13", "4af2c85a4f4d998f616751f9c366329bdc559b13" ]
[ "decision_tree.py", "permutations_eda.py" ]
[ "\"\"\"\nIn this file I want to:\n create DT\n Train DT\n Test DT\n Analyse Accurancy\n Analyse Sensitivity\n Analyse Precision\n Check Feature Importance\n\"\"\"\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nfrom evaluate_model import evaluate_model, performance_assessor\nfrom confusion_matrix import plot_confusion_matrix\nfrom sklearn.metrics import confusion_matrix\nimport graphviz\n\n# Set random seed to ensure reproducible runs\nRSEED = 30\n\ndtfm=pd.read_excel('cleaned_data.xlsx', sheet_name='Sheet1')\n\n#Remove columns not to be used in modelling\ndtfm = dtfm.drop(columns=['ORDEM','DATA','AMOSTRA','REPLICATA','ANIMAL','PARTIDA','CLIV','CELLS_COUNT'])\n\n\nprint(\"Describe Output Vars: \\n {}\".format(dtfm[\"BLAST_D8\"].describe()))\n\"\"\"\nOne of the thigns i need to do is categorise the output data\n\nWhere:\n- 0 is bad quality 0 - 50%\n- 1 is good quality 50 - 100%\n\nI will use the following statistics to make the decsion:\n\nStatistics for each column after outlier removal\n CLIV BLAST_D8 CELLS_COUNT\ncount 313.000000 313.000000 180.000000\nmean 72.070374 21.475320 171.115891\nstd 8.942164 11.093061 42.876076\nmin 49.350649 0.000000 57.000000\n25% 65.079365 12.121212 144.875000\n50% 72.151899 20.312500 169.875000\n75% 79.487179 29.629630 195.437500\nmax 90.140845 53.623188 269.000000\n\n\nFor BLAST_D8:\n 0 < 21.475320\n 1 >= 21.475320\n\n\"\"\"\n# Update Labels in Blast_D8 and CLIV\n\ndtfm['BLAST_D8'] = dtfm['BLAST_D8'].where(dtfm['BLAST_D8'] >= 21.475320, other=0)\ndtfm['BLAST_D8'] = dtfm['BLAST_D8'].where(dtfm['BLAST_D8'] < 21.475320, other=1)\n\n\n# Make a copy for dtfm blast\nprint(\"Blast_D8 value counts:\\n {}\".format(dtfm['BLAST_D8'].value_counts()))\n\n\n# Extract the labels\nlabels = np.array(dtfm.pop('BLAST_D8'))\n\n# 30% examples in test data\ntrain, test, train_labels, test_labels = train_test_split(dtfm, labels, stratify = labels, test_size = 0.3, random_state = RSEED)\n\n#imputation of missing values\ntrain = train.fillna(train.mean())\ntest = test.fillna(test.mean())\n\n# Features for feature importances\nfeatures = list(train.columns)\n\nprint(\"Train Shape: {}\".format(train.shape))\nprint(\"Test Shape: {}\".format(test.shape))\n\n\n\"\"\"\nTrain decision tree on data with unlimited depth to check for overfitting\n\"\"\"\n\n# Make a decision tree and train\ntree = DecisionTreeClassifier(random_state=RSEED)\n\n# Train tree\ntree.fit(train, train_labels)\nprint('Decision tree has {} nodes with maximum depth {}.'.format(tree.tree_.node_count, tree.tree_.max_depth))\n\n\n\"\"\"\nAssess decision tree performance\n\nI would expect this to overfit but we want to make sure\n\"\"\"\n\n# Make probability predictions\ntrain_probs = tree.predict_proba(train)[:, 1]\nprobs = tree.predict_proba(test)[:, 1]\n\ntrain_predictions = tree.predict(train)\npredictions = tree.predict(test)\n\n# evaluate model\nevaluate_model(predictions, probs, train_predictions, train_probs, test_labels, train_labels, title='Tree ROC Curve')\n\n# print other metrics\nperformance_assessor(predictions, probs, train_predictions, train_probs, test_labels, train_labels, logger=True)\n\n# display example decision tree\nexport_graphviz(tree, out_file='tree.dot',\n filled=True, rounded=True,\n special_characters=True)\n\nprint('\\033[94m' + \"To view decision tree example run the following command in terminal:\\ndot -Tpng tree.dot -o tree.png\" + '\\033[0m')\n\n# Plot confusion matrix\ncm = 
confusion_matrix(test_labels, predictions)\nplot_confusion_matrix(cm, classes = ['Poor Health', 'Good Health'],\n title = 'Tree Confusion Matrix')\n\"\"\"\nConfusion Matrix:\n[[35 9]\n [10 35]]\nClassification Accuracy: 0.7865168539325843\nClassification Sensitivity: 0.7865168539325843\n\nFrom a single run with a confusion matrix we can see the above Accuracy and Sensitivity\n\nThese should form our base projection or possibly projections from Mayra?\n\nShould we instead maybe take two classes as this would allow the plotting of\nROC curves etc -\n\nMayra mentioned that\n\n**The idea with this project more than predict embryo production is to see if there is any variables from sperm analysis that can predict these production.\nThat's why we used so many bulls. Our research is based on these ideas, the bull effect, which sperm analysis can we do to predict embryo production. **\n\nConsider this when deciding whether to use binary or non binary classification\n\nLet's check out feature importance in the decision tree\n\"\"\"\nfi = pd.DataFrame({'feature': features,\n 'importance': tree.feature_importances_}).\\\n sort_values('importance', ascending = False)\n\nprint(\"Features of most importance in decision tree: \\n{}\".format(fi.head()))\n\n\"\"\"\nThis produces the following results\n\nFeatures of most importance in decision tree:\n feature importance\n17 ALH 0.151271\n3 SUB_3_LS 0.145387\n8 FRAG_CRO 0.079971\n18 BCF 0.077984\n20 LIN 0.065810\n\n\nI want to at some point check co-linearity between the above variables.\n\n\"\"\"\n\n", "\"\"\"\nThis file will be used to do some\nexploratory data analysis on the data\n\n\"\"\"\n\nif __name__ == '__main__':\n import pandas as pd\n import matplotlib.pyplot as plt\n import seaborn as sea\n from sklearn.model_selection import train_test_split\n import numpy as np\n from sklearn.tree import DecisionTreeClassifier, export_graphviz\n from evaluate_model import evaluate_model, performance_assessor\n from confusion_matrix import plot_confusion_matrix\n from sklearn import preprocessing\n from sklearn.metrics import confusion_matrix\n from test_threasholds import find_best_params\n import graphviz\n from tqdm import tqdm\n\n #run time options\n logger = False\n metric_plot = False\n variable_boxplot = False\n decision_tree = True\n predictors_100 = False\n save_fig=False\n\n # read in all dataframes and combine\n dtfms = []\n for i in range(11):\n print(\"Fetching excel_permutations_{}.xlsx\".format(i))\n dtfms.append(pd.read_excel('excel_permutations/permutations_{}.xlsx'.format(i), sheet_name='Sheet1', index_col=0))\n\n # loop and join dataframes\n dtfm = pd.DataFrame()\n for d in dtfms:\n dtfm = dtfm.append(d)\n\n # clear the variables\n dtfms = None\n\n # print dtfm head\n if logger:\n print('Head:\\n{}'.format(dtfm.head()))\n print(\"Mean Values:\\n{}\".format(dtfm.mean()))\n print(\"Standard Deviation Values:\\n{}\".format(dtfm.std()))\n\n # specify variable list\n var_list = ['AI','PI','ALTO','FRAG_CRO','MOT_PRE','MOT_POS','CONC_CAMARA','VF','AD','VAP','VSL','VCL','ALH','BCF','STR','LIN','MOTILE_PCT','PROGRESSIVE_PCT','RAPID_PCT','MEDIUM_PCT', 'SLOW_PCT','STATIC_PCT']\n\n # plot simple dtfm\n if metric_plot:\n dtfm[['AUC','Accuracy', 'Precision','Recall']].boxplot()\n plt.xticks(rotation=90)\n plt.xlabel('Measurement')\n plt.ylabel('Value')\n if save_fig:\n plt.savefig('permutations_metric_plot')\n else:\n plt.show()\n\n # boxplot of separate dtfms\n if variable_boxplot:\n dtfm_plot = pd.DataFrame()\n for v in tqdm(var_list):\n dtfm_v = pd.DataFrame()\n 
for i in range(7):\n is_v = dtfm['Var {}'.format(i)] == v\n is_v_dtfm = dtfm[is_v]\n dtfm_v = dtfm_v.append(is_v_dtfm)\n # add a 'Var' column marking the variable used in this loop\n v_array = [v for i in range(len(dtfm_v))]\n dtfm_v['Var'] = v_array\n\n dtfm_plot = dtfm_plot.append(dtfm_v)\n\n ax = sea.boxplot(y='AUC', x='Var', data=dtfm_plot)\n plt.xticks(rotation=90)\n if save_fig:\n plt.savefig('permutations_boxplot')\n else:\n plt.show()\n\n # best predictors (top 10% of permutations by AUC)\n if predictors_100:\n # keep the rows with the largest AUC\n dtfm_100 = dtfm.nlargest(17000, 'AUC')\n\n # tot tally\n tot_tally = {}\n for i in tqdm(range(7)):\n val_counts = dtfm_100['Var {}'.format(i)].value_counts()\n # add to tot tally of value counts\n for k in val_counts.keys():\n if tot_tally.get(k) is None:\n tot_tally[k] = 0\n\n # add this variable's count to the tally\n tot_tally[k] = tot_tally[k] + val_counts[k]\n\n # prep a dataframe\n dtfm_tot_tally = pd.DataFrame(\n tot_tally.values(),\n index=tot_tally.keys(),\n columns=['Tally'])\n # sort values\n dtfm_tot_tally = dtfm_tot_tally.sort_values(by=['Tally'])\n # plot bar\n dtfm_tot_tally.plot.bar(y='Tally')\n plt.xticks(rotation=90)\n plt.ylabel('Number of occurrences in top 10% of permutations')\n if save_fig:\n plt.savefig('permutations_100_countplot')\n else:\n plt.show()\n\n\n # decision tree with target variables on main dataset\n if decision_tree:\n # Set random seed to ensure reproducible runs\n RSEED = 30\n\n # copy dataframe\n dtfm_tree = dtfm.drop(columns=['Accuracy', 'Precision', 'Recall'])\n\n # Update labels in AUC: binarise at the mean, computing the\n # threshold once so the second `where` does not use the mean of\n # the already relabelled column\n auc_mean = dtfm['AUC'].mean()\n dtfm_tree['AUC'] = dtfm_tree['AUC'].where(dtfm_tree['AUC'] >= auc_mean, other=0)\n dtfm_tree['AUC'] = dtfm_tree['AUC'].where(dtfm_tree['AUC'] < auc_mean, other=1)\n\n # encode categorical vars\n le = preprocessing.LabelEncoder()\n le.fit(var_list)\n for i in range(7):\n dtfm_tree['Var {}'.format(i)] = le.transform(dtfm_tree['Var {}'.format(i)])\n\n # Extract the labels\n labels = np.array(dtfm_tree['AUC'])\n\n #find optimal params\n c_params = find_best_params(dtfm_tree, 'AUC', classifier=DecisionTreeClassifier,\n test_size=0.3, random_state=RSEED, logger=logger)\n\n # 30% examples in test data\n train, test, train_labels, test_labels = train_test_split(dtfm_tree, labels, stratify = labels, test_size = 0.3, random_state = RSEED)\n\n # Features for feature importances\n features = list(train.columns)\n\n if logger:\n print(\"Train Shape: {}\".format(train.shape))\n print(\"Test Shape: {}\".format(test.shape))\n\n # Make a decision tree and train\n tree = DecisionTreeClassifier(\n max_features=c_params['max_features'],\n max_leaf_nodes=c_params['max_leaf_nodes'],\n min_samples_split=c_params['min_samples_split'],\n random_state=RSEED)\n\n # Train tree\n tree.fit(train, train_labels)\n if logger:\n print('Decision tree has {} nodes with maximum depth {}.'.format(tree.tree_.node_count, tree.tree_.max_depth))\n\n # Make probability predictions\n train_probs = tree.predict_proba(train)[:, 1]\n probs = tree.predict_proba(test)[:, 1]\n\n train_predictions = tree.predict(train)\n predictions = tree.predict(test)\n\n # evaluate model\n evaluate_model(predictions, probs, train_predictions, train_probs, test_labels, train_labels, title='Tree ROC Curve')\n\n # print other metrics\n performance_assessor(predictions, probs, train_predictions, train_probs, test_labels, train_labels, logger=True)\n\n # Plot confusion matrix\n cm = confusion_matrix(test_labels, predictions)\n\n print(\"Confusion Matrix:\\n{}\".format(cm))\n # display example decision tree\n export_graphviz(tree, 
out_file='perm_tree.dot',\n filled=True, rounded=True,\n special_characters=True,\n feature_names=features)\n\n print('\\033[94m' + \"To view decision tree example run the following command in terminal:\\ndot -Tpng perm_tree.dot -o perm_tree.png\" + '\\033[0m')\n\n for i in var_list:\n print(\"{0} is encoded as {1}\".format(i, le.transform([i])))\n\n\n" ]
[ [ "sklearn.tree.export_graphviz", "pandas.read_excel", "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.tree.DecisionTreeClassifier" ], [ "numpy.array", "sklearn.tree.export_graphviz", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.savefig", "sklearn.tree.DecisionTreeClassifier", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "sklearn.preprocessing.LabelEncoder", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
fhoehle/fletcher
[ "09f1c9bc03c1603fa0facefb7a485a84f136a578", "09f1c9bc03c1603fa0facefb7a485a84f136a578" ]
[ "tests/test_text.py", "fletcher/base.py" ]
[ "import math\nimport string\nfrom typing import Optional, Sequence, Tuple\n\nimport hypothesis.strategies as st\nimport numpy as np\nimport pandas as pd\nimport pandas.testing as tm\nimport pyarrow as pa\nimport pytest\nfrom hypothesis import example, given, settings\n\nimport fletcher as fr\nfrom fletcher.testing import examples\n\ntry:\n # Only available in pandas 1.2+\n # When this class is defined, we can also use `.str` on fletcher columns.\n from pandas.core.strings.object_array import ObjectStringArrayMixin # noqa F401\n\n _str_accessors = [\"str\", \"fr_str\"]\nexcept ImportError:\n _str_accessors = [\"fr_str\"]\n\n\[email protected](params=_str_accessors, scope=\"module\")\ndef str_accessor(request):\n return request.param\n\n\[email protected]\ndef string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:\n ab_charset_st = st.sampled_from(\"ab\")\n ascii_charset_st = st.sampled_from(string.ascii_letters)\n charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))\n charset = draw(charset_st)\n\n fixed_pattern_st = st.sampled_from([\"a\", \"aab\", \"aabaa\"])\n generated_pattern_st = st.text(alphabet=charset, max_size=max_len)\n pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)\n pattern = draw(pattern_st)\n\n min_str_size = 0 if len(pattern) > 0 else 1\n\n raw_str_st = st.one_of(\n st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)\n )\n raw_seq_st = st.lists(raw_str_st, max_size=max_len)\n raw_seq = draw(raw_seq_st)\n\n for s in raw_seq:\n if s is None:\n continue\n\n \"\"\"\n There seems to be a bug in pandas for this edge case\n >>> pd.Series(['']).str.replace('', 'abc', n=1)\n 0\n dtype: object\n\n But\n >>> pd.Series(['']).str.replace('', 'abc')\n 0 abc\n dtype: object\n\n I believe the second result is the correct one and this is what the\n fletcher implementation returns.\n \"\"\"\n\n max_ind = len(s) - len(pattern)\n if max_ind < 0:\n continue\n repl_ind_st = st.integers(min_value=0, max_value=max_ind)\n repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))\n\n repl_ind_list = draw(repl_ind_list_st)\n for j in repl_ind_list:\n s[j : j + len(pattern)] = pattern\n\n seq = [\"\".join(s) if s is not None else None for s in raw_seq]\n offset = draw(st.integers(min_value=0, max_value=len(seq)))\n\n return (seq, pattern, offset)\n\n\nstring_patterns = pytest.mark.parametrize(\n \"data, pat\",\n [\n ([], \"\"),\n ([\"a\", \"b\"], \"\"),\n ([\"aa\", \"ab\", \"ba\"], \"a\"),\n ([\"aa\", \"ab\", \"ba\", \"bb\", None], \"a\"),\n ([\"aa\", \"ab\", \"ba\", \"bb\", None], \"A\"),\n ([\"aa\", \"ab\", \"bA\", \"bB\", None], \"a\"),\n ([\"aa\", \"AB\", \"ba\", \"BB\", None], \"A\"),\n ],\n)\n\n\ndef _fr_series_from_data(data, fletcher_variant, dtype=pa.string()):\n arrow_data = pa.array(data, type=dtype)\n if fletcher_variant == \"chunked\":\n fr_array = fr.FletcherChunkedArray(arrow_data)\n else:\n fr_array = fr.FletcherContinuousArray(arrow_data)\n return pd.Series(fr_array)\n\n\n@settings(deadline=None)\n@given(data=st.lists(st.one_of(st.text(), st.none())))\ndef test_text_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):\n if any(\"\\x00\" in x for x in data if x):\n # pytest.skip(\"pandas cannot handle \\\\x00 characters in tests\")\n # Skip is not working properly with hypothesis\n return\n ser_pd = pd.Series(data, dtype=str)\n ser_fr = _fr_series_from_data(data, fletcher_variant)\n ser_fr_other = _fr_series_from_data(data, fletcher_variant_2)\n\n result_pd = ser_pd.str.cat(ser_pd)\n 
result_fr = getattr(ser_fr, str_accessor).cat(ser_fr_other)\n result_fr = result_fr.astype(object)\n # Pandas returns np.nan for NA values in cat, keep this in line\n result_fr[result_fr.isna()] = np.nan\n tm.assert_series_equal(result_fr, result_pd)\n\n\ndef _check_series_equal(result_fr, result_pd):\n result_fr = result_fr.astype(result_pd.dtype)\n tm.assert_series_equal(result_fr, result_pd)\n\n\ndef _check_str_to_t(\n t, func, data, str_accessor, fletcher_variant, test_offset=0, *args, **kwargs\n):\n \"\"\"Check a .str. function that returns a series with type t.\"\"\"\n tail_len = len(data) - test_offset\n\n ser_pd = pd.Series(data, dtype=str).tail(tail_len)\n result_pd = getattr(ser_pd.str, func)(*args, **kwargs)\n\n ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)\n result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)\n\n _check_series_equal(result_fr, result_pd)\n\n\ndef _check_str_to_str(func, data, str_accessor, fletcher_variant, *args, **kwargs):\n _check_str_to_t(str, func, data, str_accessor, fletcher_variant, *args, **kwargs)\n\n\ndef _check_str_to_bool(func, data, str_accessor, fletcher_variant, *args, **kwargs):\n _check_str_to_t(bool, func, data, str_accessor, fletcher_variant, *args, **kwargs)\n\n\n@string_patterns\ndef test_text_endswith(data, pat, str_accessor, fletcher_variant):\n _check_str_to_bool(\"endswith\", data, str_accessor, fletcher_variant, pat=pat)\n\n\n@string_patterns\ndef test_text_startswith(data, pat, str_accessor, fletcher_variant):\n _check_str_to_bool(\"startswith\", data, str_accessor, fletcher_variant, pat=pat)\n\n\n@string_patterns\ndef test_contains_no_regex(data, pat, str_accessor, fletcher_variant):\n _check_str_to_bool(\n \"contains\", data, str_accessor, fletcher_variant, pat=pat, regex=False\n )\n\n\[email protected](\n \"data, pat, expected\",\n [\n ([], \"\", []),\n ([\"a\", \"b\"], \"\", [True, True]),\n ([\"aa\", \"Ab\", \"ba\", \"bb\", None], \"a\", [True, False, True, False, None]),\n ],\n)\ndef test_contains_no_regex_ascii(data, pat, expected, str_accessor, fletcher_variant):\n if str_accessor == \"str\":\n pytest.skip(\n \"return types not stable yet, might sometimes return null instead of bool\"\n )\n return\n fr_series = _fr_series_from_data(data, fletcher_variant)\n fr_expected = _fr_series_from_data(expected, fletcher_variant, pa.bool_())\n\n # Run over slices to check offset handling code\n for i in range(len(data)):\n ser = fr_series.tail(len(data) - i)\n expected = fr_expected.tail(len(data) - i)\n result = getattr(ser, str_accessor).contains(pat, regex=False)\n tm.assert_series_equal(result, expected)\n\n\n@settings(deadline=None)\n@given(data_tuple=string_patterns_st())\ndef test_contains_no_regex_case_sensitive(data_tuple, str_accessor, fletcher_variant):\n data, pat, test_offset = data_tuple\n _check_str_to_bool(\n \"contains\",\n data,\n str_accessor,\n fletcher_variant,\n test_offset=test_offset,\n pat=pat,\n case=True,\n regex=False,\n )\n\n\n@string_patterns\ndef test_contains_no_regex_ignore_case(data, pat, str_accessor, fletcher_variant):\n _check_str_to_bool(\n \"contains\",\n data,\n str_accessor,\n fletcher_variant,\n pat=pat,\n regex=False,\n case=False,\n )\n\n\nregex_patterns = pytest.mark.parametrize(\n \"data, pat\",\n [\n ([], \"\"),\n ([\"a\", \"b\"], \"\"),\n ([\"aa\", \"ab\", \"ba\"], \"a\"),\n ([\"aa\", \"ab\", \"ba\", None], \"a\"),\n ([\"aa\", \"ab\", \"ba\", None], \"a$\"),\n ([\"aa\", \"ab\", \"ba\", None], \"^a\"),\n ([\"Aa\", \"ab\", \"ba\", None], \"A\"),\n 
([\"aa\", \"AB\", \"ba\", None], \"A$\"),\n ([\"aa\", \"AB\", \"ba\", None], \"^A\"),\n ],\n)\n\n\n@regex_patterns\ndef test_contains_regex(data, pat, str_accessor, fletcher_variant):\n _check_str_to_bool(\n \"contains\", data, str_accessor, fletcher_variant, pat=pat, regex=True\n )\n\n\n@regex_patterns\ndef test_contains_regex_ignore_case(data, pat, str_accessor, fletcher_variant):\n _check_str_to_bool(\n \"contains\",\n data,\n str_accessor,\n fletcher_variant,\n pat=pat,\n regex=True,\n case=False,\n )\n\n\n@settings(deadline=None)\n@given(\n data_tuple=string_patterns_st(),\n n=st.integers(min_value=0, max_value=10),\n repl=st.sampled_from([\"len4\", \"\", \"z\"]),\n)\n@example(\n data_tuple=([\"aababaa\"], \"aabaa\", 0),\n repl=\"len4\",\n n=1,\n fletcher_variant=\"continuous\",\n)\n@example(data_tuple=([\"aaa\"], \"a\", 0), repl=\"len4\", n=1, fletcher_variant=\"continuous\")\ndef test_replace_no_regex_case_sensitive(\n data_tuple, repl, n, str_accessor, fletcher_variant\n):\n data, pat, test_offset = data_tuple\n _check_str_to_str(\n \"replace\",\n data,\n str_accessor,\n fletcher_variant,\n test_offset=test_offset,\n pat=pat,\n repl=repl,\n n=n,\n case=True,\n regex=False,\n )\n\n\n@settings(deadline=None)\n@given(data_tuple=string_patterns_st())\n@example(data_tuple=([\"a\"], \"\", 0), fletcher_variant=\"chunked\")\ndef test_count_no_regex(data_tuple, str_accessor, fletcher_variant):\n \"\"\"Check a .str. function that returns a series with type t.\"\"\"\n data, pat, test_offset = data_tuple\n\n tail_len = len(data) - test_offset\n\n ser_pd = pd.Series(data, dtype=str).tail(tail_len)\n result_pd = getattr(ser_pd.str, \"count\")(pat=pat)\n\n ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)\n kwargs = {}\n if str_accessor.startswith(\"fr_\"):\n kwargs[\"regex\"] = False\n result_fr = getattr(ser_fr, str_accessor).count(pat=pat, **kwargs)\n\n _check_series_equal(result_fr, result_pd)\n\n\ndef _optional_len(x: Optional[str]) -> int:\n if x is not None:\n return len(x)\n else:\n return 0\n\n\n@settings(deadline=None)\n@given(data=st.lists(st.one_of(st.text(), st.none())))\ndef test_text_zfill(data, str_accessor, fletcher_variant):\n if any(\"\\x00\" in x for x in data if x):\n # pytest.skip(\"pandas cannot handle \\\\x00 characters in tests\")\n # Skip is not working properly with hypothesis\n return\n ser_pd = pd.Series(data, dtype=str)\n max_str_len = ser_pd.map(_optional_len).max()\n if pd.isna(max_str_len):\n max_str_len = 0\n arrow_data = pa.array(data, type=pa.string())\n if fletcher_variant == \"chunked\":\n fr_array = fr.FletcherChunkedArray(arrow_data)\n else:\n fr_array = fr.FletcherContinuousArray(arrow_data)\n ser_fr = pd.Series(fr_array)\n\n result_pd = ser_pd.str.zfill(max_str_len + 1)\n result_fr = getattr(ser_fr, str_accessor).zfill(max_str_len + 1)\n result_fr = result_fr.astype(object)\n # Pandas returns np.nan for NA values in cat, keep this in line\n result_fr[result_fr.isna()] = np.nan\n tm.assert_series_equal(result_fr, result_pd)\n\n\n@settings(deadline=None, max_examples=3)\n@given(data=st.lists(st.one_of(st.text(), st.none())))\n@examples(\n example_list=[\n [\n \" 000000000000000000000000000000000000000000İࠀࠀࠀࠀ𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐤱000000000000𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀\"\n ],\n [\"\\x80 \"],\n [],\n ],\n example_kword=\"data\",\n)\ndef test_text_strip_offset(str_accessor, fletcher_variant, fletcher_slice_offset, data):\n _do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, 
data)\n\n\n@settings(deadline=None)\n@given(data=st.lists(st.one_of(st.text(), st.none())))\n@examples(\n example_list=[\n [],\n [\"\"],\n [None],\n [\" \"],\n [\"\\u2000\"],\n [\" a\"],\n [\"a \"],\n [\" a \"],\n # https://github.com/xhochy/fletcher/issues/174\n [\"\\xa0\"],\n [\"\\u2000a\\u2000\"],\n [\"\\u2000\\u200C\\u2000\"],\n [\"\\n\\u200C\\r\"],\n [\"\\u2000\\x80\\u2000\"],\n [\"\\t\\x80\\x0b\"],\n [\"\\u2000\\u10FFFF\\u2000\"],\n [\" \\u10FFFF \"],\n ]\n + [\n [c]\n for c in \" \\t\\r\\n\\x1f\\x1e\\x1d\\x1c\\x0c\\x0b\"\n \"\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2000\\u2009\\u200A\\u200B\\u2028\\u2029\\u202F\\u205F\"\n ]\n + [[chr(c)] for c in range(0x32)]\n + [[chr(c)] for c in range(0x80, 0x85)]\n + [[chr(c)] for c in range(0x200C, 0x2030)]\n + [[chr(c)] for c in range(0x2060, 0x2070)]\n + [[chr(c)] for c in range(0x10FFFE, 0x110000)],\n example_kword=\"data\",\n)\ndef test_text_strip(str_accessor, fletcher_variant, data):\n _do_test_text_strip(str_accessor, fletcher_variant, 1, data)\n\n\ndef _do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data):\n if any(\"\\x00\" in x for x in data if x):\n # pytest.skip(\"pandas cannot handle \\\\x00 characters in tests\")\n # Skip is not working properly with hypothesis\n return\n ser_pd = pd.Series(data, dtype=str)\n arrow_data = pa.array(\n [None for _ in range(fletcher_slice_offset)] + data, type=pa.string()\n )\n if fletcher_variant == \"chunked\":\n fr_array = fr.FletcherChunkedArray(arrow_data)\n else:\n fr_array = fr.FletcherContinuousArray(arrow_data)\n ser_fr = pd.Series(fr_array[fletcher_slice_offset:])\n\n result_pd = ser_pd.str.strip()\n result_fr = getattr(ser_fr, str_accessor).strip()\n result_fr = result_fr.astype(object)\n # Pandas returns np.nan for NA values in cat, keep this in line\n result_fr[result_fr.isna()] = np.nan\n result_pd[result_pd.isna()] = np.nan\n tm.assert_series_equal(result_fr, result_pd)\n\n\ndef test_fr_str_accessor(fletcher_array):\n data = [\"a\", \"b\"]\n ser_pd = pd.Series(data)\n\n # object series is returned\n s = ser_pd.fr_str.encode(\"utf8\")\n assert s.dtype == np.dtype(\"O\")\n\n # test fletcher functionality and fallback to pandas\n arrow_data = pa.array(data, type=pa.string())\n fr_array = fletcher_array(arrow_data)\n ser_fr = pd.Series(fr_array)\n # pandas strings only method\n s = ser_fr.fr_str.encode(\"utf8\")\n assert isinstance(s.values, fr.FletcherBaseArray)\n\n\ndef test_fr_str_accessor_fail(fletcher_variant):\n\n data = [1, 2]\n ser_pd = pd.Series(data)\n\n with pytest.raises(Exception):\n ser_pd.fr_str.startswith(\"a\")\n\n\[email protected](\"regex\", [\"([0-9]+)\", \"([0-9]+)\\\\+([a-z]+)*\"])\[email protected](\n \"data\", [[\"123+\"], [\"123+a\"], [\"123+a\", \"123+\"], [\"123+\", \"123+a\"]]\n)\ndef test_text_extractall(str_accessor, fletcher_variant, data, regex):\n\n if str_accessor == \"str\":\n pytest.skip(\"extractall is not yet dispatched to the ExtensionArray\")\n return\n\n ser_fr = _fr_series_from_data(data, fletcher_variant)\n result_fr = getattr(ser_fr, str_accessor).extractall(regex)\n assert isinstance(result_fr[0].dtype, fr.FletcherBaseDtype)\n\n ser_pd = pd.Series(data)\n result_pd = ser_pd.str.extractall(regex)\n\n tm.assert_frame_equal(result_pd, result_fr.astype(object))\n\n\[email protected](\"data\", [[\"123\"], [\"123+\"], [\"123+a+\", \"123+\"]])\[email protected](\"expand\", [True, False])\ndef test_text_split(str_accessor, fletcher_variant, data, expand):\n\n ser_fr = _fr_series_from_data(data, 
fletcher_variant)\n result_fr = getattr(ser_fr, str_accessor).split(\"+\", expand=expand)\n\n ser_pd = pd.Series(data)\n result_pd = ser_pd.str.split(\"+\", expand=expand)\n\n if expand:\n tm.assert_frame_equal(result_pd, result_fr.astype(object))\n else:\n tm.assert_series_equal(result_pd, result_fr.astype(object))\n\n\n@settings(deadline=None)\n@given(\n data=st.lists(st.one_of(st.text(), st.none())),\n slice_=st.tuples(st.integers(-20, 20), st.integers(-20, 20), st.integers(-20, 20)),\n)\ndef test_slice(data, slice_, str_accessor, fletcher_variant):\n if slice_[2] == 0:\n pytest.raises(ValueError)\n return\n if data == [None] or data == [\"\"]:\n return\n\n ser_fr = _fr_series_from_data(data, fletcher_variant)\n result_fr = getattr(ser_fr, str_accessor).slice(*slice_)\n result_fr = result_fr.astype(object)\n # Pandas returns np.nan for NA values in cat, keep this in line\n result_fr[result_fr.isna()] = np.nan\n\n ser_pd = pd.Series(data, dtype=object)\n result_pd = ser_pd.str.slice(*slice_)\n\n tm.assert_series_equal(result_fr, result_pd)\n\n\n@settings(deadline=None)\n@given(char=st.characters(blacklist_categories=(\"Cs\",)))\ndef test_utf8_size(char):\n char_bytes = char.encode(\"utf-8\")\n expected = len(char_bytes)\n computed = fr.algorithms.string.get_utf8_size(char_bytes[0])\n\n assert computed == expected\n", "import datetime\nimport operator\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\nfrom copy import copy as copycopy\nfrom distutils.version import LooseVersion\nfrom functools import partialmethod\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nfrom pandas.api.types import (\n is_array_like,\n is_bool_dtype,\n is_int64_dtype,\n is_integer,\n is_integer_dtype,\n)\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.dtypes.dtypes import ExtensionDtype, register_extension_dtype\n\nfrom fletcher._algorithms import (\n kurt_op,\n max_op,\n median_op,\n min_op,\n np_ufunc_op,\n prod_op,\n skew_op,\n std_op,\n sum_op,\n var_op,\n)\nfrom fletcher.algorithms.bool import all_op, all_true, any_op, or_na, or_vectorised\nfrom fletcher.algorithms.utils.chunking import _calculate_chunk_offsets\nfrom fletcher.string_mixin import StringSupportingExtensionArray\n\nPANDAS_GE_0_26_0 = LooseVersion(pd.__version__) >= \"0.26.0\"\nif PANDAS_GE_0_26_0:\n from pandas.core.indexers import check_array_indexer\n\nARROW_GE_0_18_0 = LooseVersion(pa.__version__) >= \"0.18.0\"\n\n_python_type_map = {\n pa.null().id: str,\n pa.bool_().id: bool,\n pa.int8().id: int,\n pa.uint8().id: int,\n pa.int16().id: int,\n pa.uint16().id: int,\n pa.int32().id: int,\n pa.uint32().id: int,\n pa.int64().id: int,\n pa.uint64().id: int,\n pa.float16().id: float,\n pa.float32().id: float,\n pa.float64().id: float,\n pa.date32().id: datetime.date,\n pa.date64().id: datetime.date,\n pa.timestamp(\"ms\").id: datetime.datetime,\n pa.binary().id: bytes,\n pa.string().id: str,\n # Use any list type here, only LIST is important\n pa.list_(pa.string()).id: list,\n # Use any large list type here, only LIST is important\n pa.large_list(pa.string()).id: list,\n # Use any dictionary type here, only dict is important\n pa.dictionary(pa.int32(), pa.int32()).id: dict,\n pa.duration(\"ns\").id: datetime.timedelta,\n}\n\n_string_type_map = {\"date64[ms]\": pa.date64(), \"string\": pa.string()}\n\n_examples = {\n pa.null(): pa.array([None, None], type=pa.null()),\n pa.bool_(): pa.array([None, True], 
type=pa.bool_()),\n pa.int8(): pa.array([None, -1], type=pa.int8()),\n pa.uint8(): pa.array([None, 1], type=pa.uint8()),\n pa.int16(): pa.array([None, -1], type=pa.int16()),\n pa.uint16(): pa.array([None, 1], type=pa.uint16()),\n pa.int32(): pa.array([None, -1], type=pa.int32()),\n pa.uint32(): pa.array([None, 1], type=pa.uint32()),\n pa.int64(): pa.array([None, -1], type=pa.int64()),\n pa.uint64(): pa.array([None, 1], type=pa.uint64()),\n pa.float16(): pa.array([None, np.float16(-0.1)], type=pa.float16()),\n pa.float32(): pa.array([None, -0.1], type=pa.float32()),\n pa.float64(): pa.array([None, -0.1], type=pa.float64()),\n pa.date32(): pa.array([None, datetime.date(2010, 9, 8)], type=pa.date32()),\n pa.date64(): pa.array([None, datetime.date(2010, 9, 8)], type=pa.date64()),\n pa.timestamp(\"s\"): pa.array(\n [None, datetime.datetime(2013, 12, 11, 10, 9, 8)], type=pa.timestamp(\"s\")\n ),\n pa.timestamp(\"ms\"): pa.array(\n [None, datetime.datetime(2013, 12, 11, 10, 9, 8, 1000)], type=pa.timestamp(\"ms\")\n ),\n pa.timestamp(\"us\"): pa.array(\n [None, datetime.datetime(2013, 12, 11, 10, 9, 8, 7)], type=pa.timestamp(\"us\")\n ),\n pa.timestamp(\"ns\"): pa.array(\n [None, datetime.datetime(2013, 12, 11, 10, 9, 8, 7)], type=pa.timestamp(\"ns\")\n ),\n pa.binary(): pa.array([None, b\"122\"], type=pa.binary()),\n pa.string(): pa.array([None, \"🤔\"], type=pa.string()),\n pa.duration(\"s\"): pa.array(\n [None, datetime.timedelta(seconds=9)], type=pa.duration(\"s\")\n ),\n pa.duration(\"ms\"): pa.array(\n [None, datetime.timedelta(milliseconds=8)], type=pa.duration(\"ms\")\n ),\n pa.duration(\"us\"): pa.array(\n [None, datetime.timedelta(microseconds=7)], type=pa.duration(\"us\")\n ),\n pa.duration(\"ns\"): pa.array(\n [None, datetime.timedelta(microseconds=7)], type=pa.duration(\"ns\")\n ),\n}\n\n\ndef _get_example(arrow_dtype: pa.DataType) -> pa.Array:\n if isinstance(arrow_dtype, pa.ListType):\n return pa.array(\n [None, _get_example(arrow_dtype.value_type).to_pylist()], type=arrow_dtype\n )\n return _examples[arrow_dtype]\n\n\ndef _is_numeric(arrow_dtype: pa.DataType) -> bool:\n return (\n pa.types.is_integer(arrow_dtype)\n or pa.types.is_floating(arrow_dtype)\n or pa.types.is_decimal(arrow_dtype)\n )\n\n\nclass FletcherBaseDtype(ExtensionDtype):\n \"\"\"Dtype base for a pandas ExtensionArray backed by an Apache Arrow structure.\"\"\"\n\n def __init__(self, arrow_dtype: pa.DataType):\n self.arrow_dtype = arrow_dtype\n\n def __hash__(self) -> int:\n \"\"\"Hash the Dtype.\"\"\"\n return hash(self.arrow_dtype)\n\n def __eq__(self, other) -> bool:\n \"\"\"Check whether 'other' is equal to self.\n\n By default, 'other' is considered equal if\n * it's a string matching 'self.name'.\n * it's an instance of this type.\n\n Parameters\n ----------\n other : Any\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(other, str):\n return other == self.name\n elif isinstance(other, type(self)):\n return self.arrow_dtype == other.arrow_dtype\n else:\n return False\n\n @property\n def type(self):\n \"\"\"Return the scalar type for the array, e.g. 
``int``.\n\n It's expected ``ExtensionArray[item]`` returns an instance\n of ``ExtensionDtype.type`` for scalar ``item``.\n \"\"\"\n return _python_type_map[self.arrow_dtype.id]\n\n @property\n def kind(self) -> str:\n \"\"\"Return a character code (one of 'biufcmMOSUV'), default 'O'.\n\n This should match the NumPy dtype used when the array is\n converted to an ndarray, which is probably 'O' for object if\n the extension type cannot be represented as a built-in NumPy\n type.\n\n See Also\n --------\n numpy.dtype.kind\n \"\"\"\n if pa.types.is_date(self.arrow_dtype):\n return \"O\"\n elif self._is_list:\n return \"O\"\n elif pa.types.is_string(self.arrow_dtype):\n return \"U\"\n else:\n return np.dtype(self.arrow_dtype.to_pandas_dtype()).kind\n\n @property\n def name(self) -> str:\n \"\"\"Return a string identifying the data type.\n\n Will be used for display in, e.g. ``Series.dtype``\n \"\"\"\n return str(self)\n\n @property\n def _is_boolean(self):\n return pa.types.is_boolean(self.arrow_dtype)\n\n @property\n def _is_numeric(self):\n return _is_numeric(self.arrow_dtype)\n\n @property\n def _is_list(self):\n return pa.types.is_list(self.arrow_dtype) or pa.types.is_large_list(\n self.arrow_dtype\n )\n\n def __from_arrow__(self, data):\n \"\"\"Construct a FletcherArray from an arrow array.\"\"\"\n return self.construct_array_type()(data)\n\n def example(self):\n \"\"\"Get a simple array with example content.\"\"\"\n return self.construct_array_type()(_get_example(self.arrow_dtype))\n\n\n@register_extension_dtype\nclass FletcherContinuousDtype(FletcherBaseDtype):\n \"\"\"Dtype for a pandas ExtensionArray backed by Apache Arrow's pyarrow.Array.\"\"\"\n\n def __str__(self) -> str:\n \"\"\"Convert to string.\"\"\"\n return f\"fletcher_continuous[{self.arrow_dtype}]\"\n\n def __repr__(self) -> str:\n \"\"\"Return the textual representation of this object.\"\"\"\n return \"FletcherContinuousDtype({})\".format(str(self.arrow_dtype))\n\n @classmethod\n def construct_from_string(cls, string: str):\n \"\"\"Attempt to construct this type from a string.\n\n Parameters\n ----------\n string\n\n Returns\n -------\n self : instance of 'cls'\n\n Raises\n ------\n TypeError\n If a class cannot be constructed from this 'string'.\n\n Examples\n --------\n If the extension dtype can be constructed without any arguments,\n the following may be an adequate implementation.\n >>> @classmethod\n ... def construct_from_string(cls, string)\n ... if string == cls.name:\n ... return cls()\n ... else:\n ... raise TypeError(\"Cannot construct a '{}' from \"\n ... 
\"'{}'\".format(cls, string))\n \"\"\"\n if not isinstance(string, str):\n raise TypeError(\n \"'construct_from_string' expects a string, got <class 'int'>\"\n )\n\n # Remove fletcher specific naming from the arrow type string.\n if string.startswith(\"fletcher_continuous[\"):\n string = string[len(\"fletcher_continuous[\") : -1]\n else:\n raise TypeError(\n f\"Cannot construct a 'FletcherContinuousDtype' from '{string}'\"\n )\n\n if string == \"list<item: string>\":\n return cls(pa.list_(pa.string()))\n\n try:\n type_for_alias = pa.type_for_alias(string)\n except (ValueError, KeyError):\n # pandas API expects a TypeError\n raise TypeError(string)\n\n return cls(type_for_alias)\n\n @classmethod\n def construct_array_type(cls, *args):\n \"\"\"\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n \"\"\"\n if len(args) > 0:\n raise NotImplementedError(\"construct_array_type does not support arguments\")\n return FletcherContinuousArray\n\n\n@register_extension_dtype\nclass FletcherChunkedDtype(FletcherBaseDtype):\n \"\"\"Dtype for a pandas ExtensionArray backed by Apache Arrow's pyarrow.ChunkedArray.\"\"\"\n\n def __str__(self) -> str:\n \"\"\"Convert to string.\"\"\"\n return f\"fletcher_chunked[{self.arrow_dtype}]\"\n\n def __repr__(self) -> str:\n \"\"\"Return the textual representation of this object.\"\"\"\n return \"FletcherChunkedDtype({})\".format(str(self.arrow_dtype))\n\n @classmethod\n def construct_from_string(cls, string: str) -> \"FletcherChunkedDtype\":\n \"\"\"Attempt to construct this type from a string.\n\n Parameters\n ----------\n string : str\n\n Returns\n -------\n self : instance of 'cls'\n\n Raises\n ------\n TypeError\n If a class cannot be constructed from this 'string'.\n\n Examples\n --------\n If the extension dtype can be constructed without any arguments,\n the following may be an adequate implementation.\n >>> @classmethod\n ... def construct_from_string(cls, string)\n ... if string == cls.name:\n ... return cls()\n ... else:\n ... raise TypeError(\"Cannot construct a '{}' from \"\n ... 
\"'{}'\".format(cls, string))\n \"\"\"\n if not isinstance(string, str):\n raise TypeError(\n \"'construct_from_string' expects a string, got <class 'int'>\"\n )\n\n # Remove fletcher specific naming from the arrow type string.\n if string.startswith(\"fletcher_chunked[\"):\n string = string[len(\"fletcher_chunked[\") : -1]\n else:\n raise TypeError(\n f\"Cannot construct a 'FletcherChunkedDtype' from '{string}'\"\n )\n\n if string == \"list<item: string>\":\n return cls(pa.list_(pa.string()))\n\n try:\n type_for_alias = pa.type_for_alias(string)\n except (ValueError, KeyError):\n # pandas API expects a TypeError\n raise TypeError(string)\n\n return cls(type_for_alias)\n\n @classmethod\n def construct_array_type(cls, *args) -> \"Type[FletcherChunkedArray]\":\n \"\"\"\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n \"\"\"\n if len(args) > 0:\n raise NotImplementedError(\"construct_array_type does not support arguments\")\n return FletcherChunkedArray\n\n\nclass FletcherBaseArray(StringSupportingExtensionArray):\n \"\"\"Pandas ExtensionArray implementation base backed by an Apache Arrow structure.\"\"\"\n\n _can_hold_na = True\n\n @property\n def dtype(self) -> ExtensionDtype:\n \"\"\"Return the ExtensionDtype of this array.\"\"\"\n return self._dtype\n\n def __array__(self, *args, **kwargs) -> np.ndarray:\n \"\"\"Correctly construct numpy arrays when passed to `np.asarray()`.\"\"\"\n return self.data.__array__(*args, **kwargs)\n\n def __arrow_array__(self, type=None):\n \"\"\"Convert myself to a pyarrow Array or ChunkedArray.\"\"\"\n return self.data\n\n @property\n def size(self) -> int:\n \"\"\"\n Return the number of elements in this array.\n\n Returns\n -------\n size : int\n \"\"\"\n return len(self.data)\n\n @property\n def shape(self) -> Tuple[int]:\n \"\"\"Return the shape of the data.\"\"\"\n # This may be patched by pandas to support pseudo-2D operations.\n return (self.size,)\n\n @property\n def ndim(self) -> int:\n \"\"\"Return the number of dimensions of the underlying data.\"\"\"\n return len(self.shape)\n\n def __len__(self) -> int:\n \"\"\"\n Length of this array.\n\n Returns\n -------\n length : int\n \"\"\"\n return self.shape[0]\n\n @property\n def base(self) -> Union[pa.Array, pa.ChunkedArray]:\n \"\"\"Return base object of the underlying data.\"\"\"\n return self.data\n\n def all(self, skipna: bool = False) -> Optional[bool]:\n \"\"\"Compute whether all boolean values are True.\"\"\"\n if pa.types.is_boolean(self.data.type):\n return all_op(self.data, skipna=skipna)\n else:\n raise TypeError(\"Can only execute all on boolean arrays\")\n\n def any(self, skipna: bool = False, **kwargs) -> Optional[bool]:\n \"\"\"Compute whether any boolean value is True.\"\"\"\n if pa.types.is_boolean(self.data.type):\n return any_op(self.data, skipna=skipna)\n else:\n raise TypeError(\"Can only execute all on boolean arrays\")\n\n def sum(self, skipna: bool = True):\n \"\"\"Return the sum of the values.\"\"\"\n return self._reduce(\"sum\", skipna=skipna)\n\n def _reduce(self, name: str, skipna: bool = True, **kwargs):\n \"\"\"\n Return a scalar result of performing the reduction operation.\n\n Parameters\n ----------\n name : str\n Name of the function, supported values are:\n { any, all, min, max, sum, mean, median, prod,\n std, var, sem, kurt, skew }.\n skipna : bool, default True\n If True, skip NaN values.\n **kwargs\n Additional keyword arguments passed to the reduction function.\n Currently, `ddof` is the only supported kwarg.\n\n Returns\n 
-------\n scalar\n\n Raises\n ------\n TypeError : subclass does not define reductions\n \"\"\"\n if name == \"any\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return any_op(self.data, skipna=skipna)\n elif name == \"all\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return all_op(self.data, skipna=skipna)\n elif name == \"sum\" and self.dtype._is_numeric:\n return sum_op(self.data, skipna=skipna)\n elif name == \"max\" and self.dtype._is_numeric:\n return max_op(self.data, skipna=skipna)\n elif name == \"min\" and self.dtype._is_numeric:\n return min_op(self.data, skipna=skipna)\n elif name == \"mean\" and self.dtype._is_numeric:\n return sum_op(self.data, skipna=skipna) / len(self.data)\n elif name == \"prod\" and self.dtype._is_numeric:\n return prod_op(self.data, skipna=skipna)\n elif name == \"std\" and self.dtype._is_numeric:\n return std_op(self.data, skipna=skipna)\n elif name == \"skew\" and self.dtype._is_numeric:\n return skew_op(self.data, skipna=skipna)\n elif name == \"kurt\" and self.dtype._is_numeric:\n return kurt_op(self.data, skipna=skipna)\n elif name == \"var\" and self.dtype._is_numeric:\n return var_op(self.data, skipna=skipna)\n elif name == \"median\" and self.dtype._is_numeric:\n return median_op(self.data, skipna=skipna)\n\n raise TypeError(\n \"cannot perform {name} with type {dtype}\".format(\n name=name, dtype=self.dtype\n )\n )\n\n def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):\n \"\"\"Apply a NumPy ufunc on the ExtensionArray.\"\"\"\n if method != \"__call__\":\n if (\n method == \"reduce\"\n and getattr(ufunc, \"__name__\") == \"logical_or\"\n and self.dtype.arrow_dtype.id == 1\n ):\n return any_op(self.data, skipna=False)\n else:\n raise NotImplementedError(\n f\"Only method == '__call__' is supported in ufuncs, not '{method}'\"\n )\n if len(inputs) == 1:\n if getattr(ufunc, \"__name__\") == \"isnan\":\n return self.isna()\n else:\n raise NotImplementedError(\n f\"ufunc with single input not supported: {ufunc}\"\n )\n if len(inputs) != 2:\n raise NotImplementedError(\"Only ufuncs with a second input are supported\")\n if len(kwargs) > 0:\n raise NotImplementedError(\"ufuncs with kwargs aren't supported\")\n if isinstance(inputs[0], FletcherBaseArray):\n left = inputs[0].data\n else:\n left = inputs[0]\n if isinstance(inputs[1], FletcherBaseArray):\n right = inputs[1].data\n else:\n right = inputs[1]\n return type(self)(np_ufunc_op(left, right, ufunc))\n\n def _np_ufunc_op(self, op: Callable, other):\n \"\"\"Apply a NumPy ufunc on the instance and any other object.\"\"\"\n if isinstance(other, (pd.Series, pd.DataFrame)):\n return NotImplemented\n if isinstance(other, FletcherBaseArray):\n other = other.data\n return type(self)(np_ufunc_op(self.data, other, op))\n\n def _np_compare_op(self, op: Callable, np_op: Callable, other):\n \"\"\"Apply a NumPy-based comparison on the instance and any other object.\"\"\"\n if isinstance(other, (pd.Series, pd.DataFrame)):\n return NotImplemented\n # TODO: Only numeric comparisons are fast currently\n if not self.dtype._is_numeric:\n if isinstance(other, FletcherBaseArray):\n other = other.data.to_pandas()\n return type(self)(op(self.data.to_pandas(), other))\n return self._np_ufunc_op(np_op, other)\n\n __eq__ = partialmethod( # type: ignore\n _np_compare_op, operator.eq, np.ndarray.__eq__\n )\n __ne__ = partialmethod( # type: ignore\n _np_compare_op, operator.ne, np.ndarray.__ne__\n )\n __le__ = partialmethod(_np_compare_op, operator.le, np.ndarray.__le__)\n __lt__ = 
partialmethod(_np_compare_op, operator.lt, np.ndarray.__lt__)\n __ge__ = partialmethod(_np_compare_op, operator.ge, np.ndarray.__ge__)\n __gt__ = partialmethod(_np_compare_op, operator.gt, np.ndarray.__gt__)\n\n __add__ = partialmethod(_np_ufunc_op, np.ndarray.__add__)\n __radd__ = partialmethod(_np_ufunc_op, np.ndarray.__radd__)\n __sub__ = partialmethod(_np_ufunc_op, np.ndarray.__sub__)\n __rsub__ = partialmethod(_np_ufunc_op, np.ndarray.__rsub__)\n __mul__ = partialmethod(_np_ufunc_op, np.ndarray.__mul__)\n __rmul__ = partialmethod(_np_ufunc_op, np.ndarray.__rmul__)\n __floordiv__ = partialmethod(_np_ufunc_op, np.ndarray.__floordiv__)\n __rfloordiv__ = partialmethod(_np_ufunc_op, np.ndarray.__rfloordiv__)\n __truediv__ = partialmethod(_np_ufunc_op, np.ndarray.__truediv__)\n __rtruediv__ = partialmethod(_np_ufunc_op, np.ndarray.__rtruediv__)\n __pow__ = partialmethod(_np_ufunc_op, np.ndarray.__pow__)\n __rpow__ = partialmethod(_np_ufunc_op, np.ndarray.__rpow__)\n __mod__ = partialmethod(_np_ufunc_op, np.ndarray.__mod__)\n __rmod__ = partialmethod(_np_ufunc_op, np.ndarray.__rmod__)\n\n def __or__(self, other):\n \"\"\"Compute vectorised or.\"\"\"\n if not pa.types.is_boolean(self.dtype.arrow_dtype):\n raise NotImplementedError(\"__or__ is only supported for boolean arrays yet\")\n\n if other is pd.NA or (pd.api.types.is_scalar(other) and pd.isna(other)):\n # All fields that are True stay True, all others get set to NA\n return type(self)(or_na(self.data))\n elif isinstance(other, bool):\n if other:\n # or with True yields all-True\n return type(self)(all_true(self.data))\n else:\n return self\n else:\n if isinstance(other, FletcherBaseArray):\n other = other.data\n return type(self)(or_vectorised(self.data, other))\n\n def __divmod__(self, other):\n \"\"\"Compute divmod via floordiv and mod.\"\"\"\n return (self.__floordiv__(other), self.__mod__(other))\n\n def unique(self):\n \"\"\"\n Compute the ExtensionArray of unique values.\n\n It relies on the Pyarrow.ChunkedArray.unique and if\n it fails, comes back to the naive implementation.\n\n Returns\n -------\n uniques : ExtensionArray\n \"\"\"\n try:\n return type(self)(self.data.unique())\n except NotImplementedError:\n return super().unique()\n\n def _pd_object_take(\n self,\n indices: Union[Sequence[int], np.ndarray],\n allow_fill: bool = False,\n fill_value: Optional[Any] = None,\n ) -> ExtensionArray:\n \"\"\"Run take using object dtype and pandas' built-in algorithm.\n\n This is slow and should be avoided in future but is kept here as not all\n special cases are yet supported.\n \"\"\"\n from pandas.core.algorithms import take\n\n data = self.astype(object)\n if allow_fill and fill_value is None:\n fill_value = self.dtype.na_value\n # fill value should always be translated from the scalar\n # type for the array, to the physical storage type for\n # the data, before passing to take.\n result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill)\n return self._from_sequence(result, dtype=self.data.type)\n\n def _take_array(\n self,\n array: pa.Array,\n indices: Union[Sequence[int], np.ndarray],\n allow_fill: bool = False,\n fill_value: Optional[Any] = None,\n ) -> ExtensionArray:\n \"\"\"\n Take elements from a pyarrow.Array.\n\n Parameters\n ----------\n indices : sequence of integers\n Indices to be taken.\n allow_fill : bool, default False\n How to handle negative values in `indices`.\n * False: negative values in `indices` indicate positional indices\n from the right (the default). 
This is similar to\n :func:`numpy.take`.\n * True: negative values in `indices` indicate\n missing values. These values are set to `fill_value`. Any other\n other negative values raise a ``ValueError``.\n fill_value : any, optional\n Fill value to use for NA-indices when `allow_fill` is True.\n This may be ``None``, in which case the default NA value for\n the type, ``self.dtype.na_value``, is used.\n For many ExtensionArrays, there will be two representations of\n `fill_value`: a user-facing \"boxed\" scalar, and a low-level\n physical NA value. `fill_value` should be the user-facing version,\n and the implementation should handle translating that to the\n physical version for processing the take if nescessary.\n\n Returns\n -------\n ExtensionArray\n\n Raises\n ------\n IndexError\n When the indices are out of bounds for the array.\n ValueError\n When `indices` contains negative values other than ``-1``\n and `allow_fill` is True.\n\n Notes\n -----\n ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,\n ``iloc``, when `indices` is a sequence of values. Additionally,\n it's called by :meth:`Series.reindex`, or any other method\n that causes realignemnt, with a `fill_value`.\n\n See Also\n --------\n numpy.take\n pandas.api.extensions.take\n \"\"\"\n if isinstance(indices, pa.Array) and pa.types.is_integer(indices):\n # TODO: handle allow_fill, fill_value\n if allow_fill or fill_value is not None:\n raise NotImplementedError(\n \"Cannot use allow_fill or fill_value with a pa.Array\"\n )\n indices_array = indices\n elif isinstance(indices, Iterable):\n # Why is np.ndarray inferred as Iterable[Any]?\n if len(indices) == 0: # type: ignore\n return type(self)(pa.array([], type=array.type))\n elif not is_array_like(indices):\n indices = np.array(indices)\n if not is_integer_dtype(indices):\n raise ValueError(\"Only integer dtyped indices are supported\")\n # TODO: handle fill_value\n mask = indices < 0\n if allow_fill and indices.min() < -1:\n raise ValueError(\n \"Invalid value in 'indices'. Must be between -1 \"\n \"and the length of the array.\"\n )\n if len(self) == 0 and (~mask).any():\n raise IndexError(\"cannot do a non-empty take\")\n if indices.max() >= len(self):\n raise IndexError(\"out of bounds value in 'indices'.\")\n if not allow_fill:\n indices[mask] = len(array) + indices[mask]\n mask = None\n elif not pd.isna(fill_value):\n # TODO: Needs fillna on pa.Array\n return self._pd_object_take(\n indices, allow_fill=True, fill_value=fill_value\n )\n indices_array = pa.array(indices, mask=mask)\n elif is_array_like(indices) and len(indices) == 0:\n indices_array = pa.array([], type=pa.int64())\n else:\n raise NotImplementedError(f\"take is not implemented for {type(indices)}\")\n return type(self)(array.take(indices_array))\n\n def astype(self, dtype, copy=True):\n \"\"\"\n Cast to a NumPy array with 'dtype'.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n copy : bool, default True\n Whether to copy the data, even if not necessary. 
If False,\n a copy is made only if the old dtype does not match the\n new dtype.\n\n Returns\n -------\n array : ndarray\n NumPy ndarray with 'dtype' for its dtype.\n \"\"\"\n if self.dtype == dtype:\n if copy:\n return copycopy(self)\n else:\n return self\n\n arrow_type = None\n arrow_class = None\n pandas_type = None\n if isinstance(dtype, FletcherChunkedDtype):\n arrow_type = dtype.arrow_dtype\n dtype = dtype.arrow_dtype.to_pandas_dtype()\n if isinstance(self, FletcherChunkedArray):\n arrow_class = type(self)\n else:\n arrow_class = FletcherChunkedArray\n elif isinstance(dtype, FletcherContinuousDtype):\n arrow_type = dtype.arrow_dtype\n dtype = dtype.arrow_dtype.to_pandas_dtype()\n if isinstance(self, FletcherContinuousArray):\n arrow_class = type(self)\n else:\n arrow_class = FletcherContinuousArray\n elif isinstance(dtype, pa.DataType):\n arrow_type = dtype\n dtype = dtype.to_pandas_dtype()\n arrow_class = type(self)\n elif isinstance(dtype, pd.StringDtype):\n pandas_type = dtype\n dtype = np.dtype(str)\n else:\n dtype = np.dtype(dtype)\n\n # NumPy's conversion of list->unicode is differently from Python's\n # default. We want to have the default Python output, so force it here.\n if (self.dtype._is_list) and dtype.kind == \"U\":\n result = np.array([str(x) for x in self.data.to_pylist()])\n if pandas_type is not None:\n return pd.array(result, dtype=pandas_type)\n else:\n return result\n\n if arrow_type is not None and arrow_class is not None:\n return arrow_class(np.asarray(self).astype(dtype), dtype=arrow_type)\n else:\n result = np.asarray(self).astype(dtype)\n if pandas_type is not None:\n return pd.array(result, dtype=pandas_type)\n else:\n return result\n\n def value_counts(self, dropna: bool = True) -> \"pd.Series\":\n \"\"\"\n Return a Series containing counts of each unique value.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include counts of missing values.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.value_counts\n \"\"\"\n vc = self.data.value_counts()\n\n # Index cannot hold ExtensionArrays yet\n index = pd.Index(type(self)(vc.field(0)).astype(object))\n # No missings, so we can adhere to the interface and return a numpy array.\n counts = np.array(vc.field(1))\n\n if dropna and self.data.null_count > 0:\n raise NotImplementedError(\"yo\")\n\n return pd.Series(counts, index=index)\n\n def isna(self) -> np.ndarray:\n \"\"\"\n Boolean NumPy array indicating if each value is missing.\n\n This should return a 1-D array the same length as 'self'.\n \"\"\"\n return np.array(self.data.is_null())\n\n\nclass FletcherContinuousArray(FletcherBaseArray):\n \"\"\"Pandas ExtensionArray implementation backed by Apache Arrow's pyarrow.Array.\"\"\"\n\n def __init__(self, array, dtype=None, copy: Optional[bool] = None):\n # Copy is not used at the moment. 
It's only affect will be when we\n # allow array to be a FletcherContinuousArray\n if is_array_like(array) or isinstance(array, list):\n self.data = pa.array(array, type=dtype)\n elif isinstance(array, pa.Array):\n # TODO: Assert dtype\n self.data = array\n elif isinstance(array, pa.ChunkedArray):\n # TODO: Assert dtype\n if array.num_chunks == 1:\n self.data = array.chunk(0)\n else:\n self.data = pa.concat_arrays(array.iterchunks())\n else:\n raise ValueError(\n \"Unsupported type passed for {}: {}\".format(\n self.__class__.__name__, type(array)\n )\n )\n self._dtype = FletcherContinuousDtype(self.data.type)\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n # type: (Sequence[ExtensionArray]) -> ExtensionArray\n \"\"\"Concatenate multiple array.\n\n Parameters\n ----------\n to_concat : sequence of this type\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n return cls(pa.concat_arrays([array.data for array in to_concat]))\n\n def __setitem__(self, key, value):\n # type: (Union[int, np.ndarray], Any) -> None\n \"\"\"Set one or more values inplace.\n\n Parameters\n ----------\n key : int, ndarray, or slice\n When called from, e.g. ``Series.__setitem__``, ``key`` will be\n one of\n\n * scalar int\n * ndarray of integers.\n * boolean ndarray\n * slice object\n\n value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object\n value or values to be set of ``key``.\n\n Returns\n -------\n None\n \"\"\"\n if PANDAS_GE_0_26_0:\n key = check_array_indexer(self, key)\n\n if self.dtype._is_list:\n # TODO: We can probably implement this for the scalar case?\n # TODO: Implement a list accessor and then the three mentioned methods\n raise ValueError(\n \"__setitem__ is not supported for list types \"\n \"due to the ambiguity of the arguments, use .fr_list.setvalue, \"\n \".fr_list.setslice or fr_list.setmask instead.\"\n )\n # Convert all possible input key types to an array of integers\n if is_bool_dtype(key):\n key_array = np.argwhere(key).flatten()\n elif isinstance(key, slice):\n key_array = np.array(range(len(self))[key])\n elif is_integer(key):\n key_array = np.array([key])\n else:\n key_array = np.asanyarray(key)\n\n if pd.api.types.is_scalar(value):\n value = np.broadcast_to(value, len(key_array))\n else:\n value = np.asarray(value)\n\n if len(key_array) != len(value):\n raise ValueError(\"Length mismatch between index and value.\")\n\n arr = self.data.to_pandas().values\n # In the case where we zero-copy Arrow to Pandas conversion, the\n # the resulting arrays are read-only.\n if not arr.flags.writeable:\n arr = arr.copy()\n arr[key_array] = value\n\n mask = None\n # ARROW-2806: Inconsistent handling of np.nan requires adding a mask\n if (\n pa.types.is_integer(self.dtype.arrow_dtype)\n or pa.types.is_date(self.dtype.arrow_dtype)\n or pa.types.is_floating(self.dtype.arrow_dtype)\n or pa.types.is_boolean(self.dtype.arrow_dtype)\n ):\n nan_values = pd.isna(value)\n if any(nan_values):\n nan_index = key_array[nan_values]\n mask = np.zeros_like(arr, dtype=bool)\n mask[nan_index] = True\n self.data = pa.array(arr, self.dtype.arrow_dtype, mask=mask)\n\n def __getitem__(self, item):\n # type (Any) -> Any\n \"\"\"Select a subset of self.\n\n Parameters\n ----------\n item : int, slice, or ndarray\n * int: The position in 'self' to get.\n * slice: A slice object, where 'start', 'stop', and 'step' are\n integers or None\n * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'\n\n Returns\n -------\n item : scalar or ExtensionArray\n\n Notes\n -----\n For scalar ``item``, 
return a scalar value suitable for the array's\n type. This should be an instance of ``self.dtype.type``.\n For slice ``key``, return an instance of ``ExtensionArray``, even\n if the slice is length 0 or 1.\n For a boolean mask, return an instance of ``ExtensionArray``, filtered\n to the values where ``item`` is True.\n \"\"\"\n if PANDAS_GE_0_26_0:\n item = check_array_indexer(self, item)\n\n # Arrow 0.18+ supports slices perfectly\n if isinstance(item, slice) and not ARROW_GE_0_18_0:\n start = item.start or 0\n stop = item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n step = item.step if item.step is not None else 1\n # Arrow can't handle slices with steps other than 1\n # https://issues.apache.org/jira/browse/ARROW-2714\n if step != 1:\n arr = np.asarray(self)[item]\n # ARROW-2806: Inconsistent handling of np.nan requires adding a mask\n if pa.types.is_integer(self.dtype.arrow_dtype) or pa.types.is_floating(\n self.dtype.arrow_dtype\n ):\n mask = pd.isna(arr)\n else:\n mask = None\n return type(self)(pa.array(arr, type=self.dtype.arrow_dtype, mask=mask))\n if stop - start == 0:\n return type(self)(pa.array([], type=self.data.type))\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item) or len(item) == 0:\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean arrays are valid indices.\"\n )\n elif is_integer(item):\n item = int(item)\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n value = self.data[item]\n if isinstance(value, pa.Array):\n return type(self)(value)\n else:\n return value.as_py()\n\n def copy(self):\n # type: () -> ExtensionArray\n \"\"\"\n Return a copy of the array.\n\n Currently is a shadow copy - pyarrow array are supposed to be immutable.\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n return type(self)(self.data)\n\n @property\n def nbytes(self):\n # type: () -> int\n \"\"\"Return the number of bytes needed to store this object in memory.\"\"\"\n size = 0\n for buf in self.data.buffers():\n if buf is not None:\n size += buf.size\n return size\n\n def factorize(self, na_sentinel=-1):\n # type: (int) -> Tuple[np.ndarray, ExtensionArray]\n \"\"\"Encode the extension array as an enumerated type.\n\n Parameters\n ----------\n na_sentinel : int, default -1\n Value to use in the `labels` array to indicate missing values.\n\n Returns\n -------\n labels : ndarray\n An integer NumPy array that's an indexer into the original\n ExtensionArray.\n uniques : ExtensionArray\n An ExtensionArray containing the unique values of `self`.\n .. 
note::\n uniques will *not* contain an entry for the NA value of\n the ExtensionArray if there are any missing values present\n in `self`.\n\n See Also\n --------\n pandas.factorize : Top-level factorize method that dispatches here.\n\n Notes\n -----\n :meth:`pandas.factorize` offers a `sort` keyword as well.\n \"\"\"\n if pa.types.is_dictionary(self.data.type):\n indices = self.data.indices.to_pandas()\n return indices.values, type(self)(self.data.dictionary)\n else:\n # Dictionaryencode and do the same as above\n encoded = self.data.dictionary_encode()\n indices = encoded.indices.to_pandas()\n if indices.dtype.kind == \"f\":\n indices[np.isnan(indices)] = na_sentinel\n indices = indices.astype(int)\n if not is_int64_dtype(indices):\n indices = indices.astype(np.int64)\n return indices.values, type(self)(encoded.dictionary)\n\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=None):\n \"\"\"\n Construct a new ExtensionArray from a sequence of scalars.\n\n Parameters\n ----------\n scalars : Sequence\n Each element will be an instance of the scalar type for this\n array, ``cls.dtype.type``.\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n if isinstance(scalars, FletcherContinuousArray):\n return scalars\n if dtype and isinstance(dtype, FletcherContinuousDtype):\n dtype = dtype.arrow_dtype\n return cls(pa.array(scalars, type=dtype, from_pandas=True))\n\n def fillna(self, value=None, method=None, limit=None):\n \"\"\"Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, array-like\n If a scalar value is passed it is used to fill all missing values.\n Alternatively, an array-like 'value' can be given. It's expected\n that the array-like have the same length as 'self'.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled.\n\n Returns\n -------\n filled : ExtensionArray with NA/NaN filled\n \"\"\"\n from pandas.api.types import is_array_like\n from pandas.util._validators import validate_fillna_kwargs\n import pandas.core.missing as pd_missing\n\n value, method = validate_fillna_kwargs(value, method)\n\n mask = self.isna()\n\n if is_array_like(value):\n if len(value) != len(self):\n raise ValueError(\n \"Length of 'value' does not match. 
Got ({}) \"\n \" expected {}\".format(len(value), len(self))\n )\n value = value[mask]\n\n if mask.any():\n if method is not None:\n # pandas 1.2+ doesn't expose pad_1d anymore\n if not hasattr(pd_missing, \"pad_1d\"):\n func = pd_missing.get_fill_func(method)\n else:\n func = (\n pd_missing.pad_1d if method == \"pad\" else pd_missing.backfill_1d\n )\n new_values = func(self.astype(object), limit=limit, mask=mask)\n new_values = self._from_sequence(new_values, self._dtype.arrow_dtype)\n else:\n # fill with value\n new_values = self.copy()\n new_values[mask] = value\n else:\n new_values = self.copy()\n return new_values\n\n def take(\n self,\n indices: Union[Sequence[int], np.ndarray],\n allow_fill: bool = False,\n fill_value: Optional[Any] = None,\n ) -> ExtensionArray:\n \"\"\"\n Take elements from an array.\n\n Parameters\n ----------\n indices : sequence of integers\n Indices to be taken.\n allow_fill : bool, default False\n How to handle negative values in `indices`.\n * False: negative values in `indices` indicate positional indices\n from the right (the default). This is similar to\n :func:`numpy.take`.\n * True: negative values in `indices` indicate\n missing values. These values are set to `fill_value`. Any other\n other negative values raise a ``ValueError``.\n fill_value : any, optional\n Fill value to use for NA-indices when `allow_fill` is True.\n This may be ``None``, in which case the default NA value for\n the type, ``self.dtype.na_value``, is used.\n For many ExtensionArrays, there will be two representations of\n `fill_value`: a user-facing \"boxed\" scalar, and a low-level\n physical NA value. `fill_value` should be the user-facing version,\n and the implementation should handle translating that to the\n physical version for processing the take if nescessary.\n\n Returns\n -------\n ExtensionArray\n\n Raises\n ------\n IndexError\n When the indices are out of bounds for the array.\n ValueError\n When `indices` contains negative values other than ``-1``\n and `allow_fill` is True.\n\n Notes\n -----\n ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,\n ``iloc``, when `indices` is a sequence of values. Additionally,\n it's called by :meth:`Series.reindex`, or any other method\n that causes realignemnt, with a `fill_value`.\n\n See Also\n --------\n numpy.take\n pandas.api.extensions.take\n \"\"\"\n return self._take_array(self.data, indices, allow_fill, fill_value)\n\n def flatten(self):\n \"\"\"\n Flatten the array.\n \"\"\"\n return type(self)(self.data.flatten())\n\n\nclass FletcherChunkedArray(FletcherBaseArray):\n \"\"\"Pandas ExtensionArray implementation backed by Apache Arrow.\"\"\"\n\n _can_hold_na = True\n\n def __init__(self, array, dtype=None, copy=None):\n # Copy is not used at the moment. 
It's only affect will be when we\n # allow array to be a FletcherChunkedArray\n if is_array_like(array) or isinstance(array, list):\n self.data = pa.chunked_array([pa.array(array, type=dtype)])\n elif isinstance(array, pa.Array):\n # ARROW-7008: pyarrow.chunked_array([array]) fails on array with all-None buffers\n if len(array) == 0 and all(b is None for b in array.buffers()):\n array = pa.array([], type=array.type)\n # TODO: Assert dtype\n self.data = pa.chunked_array([array])\n elif isinstance(array, pa.ChunkedArray):\n # TODO: Assert dtype\n self.data = array\n else:\n raise ValueError(\n \"Unsupported type passed for {}: {}\".format(\n self.__class__.__name__, type(array)\n )\n )\n self._dtype = FletcherChunkedDtype(self.data.type)\n self.offsets = self._calculate_chunk_offsets()\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n # type: (Sequence[ExtensionArray]) -> ExtensionArray\n \"\"\"Concatenate multiple array.\n\n Parameters\n ----------\n to_concat : sequence of this type\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n return cls(\n pa.chunked_array(\n [array for ea in to_concat for array in ea.data.iterchunks()]\n )\n )\n\n def _calculate_chunk_offsets(self) -> np.ndarray:\n \"\"\"Return an array holding the indices pointing to the first element of each chunk.\"\"\"\n return _calculate_chunk_offsets(self.data)\n\n def _get_chunk_indexer(self, array):\n \"\"\"Return an array with the chunk number for each index.\"\"\"\n if self.data.num_chunks == 1:\n return np.broadcast_to(0, len(array))\n return np.digitize(array, self.offsets[1:])\n\n def __setitem__(self, key, value):\n # type: (Union[int, np.ndarray], Any) -> None\n \"\"\"Set one or more values inplace.\n\n Parameters\n ----------\n key : int, ndarray, or slice\n When called from, e.g. 
``Series.__setitem__``, ``key`` will be\n one of\n\n * scalar int\n * ndarray of integers.\n * boolean ndarray\n * slice object\n\n value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object\n value or values to be set of ``key``.\n\n Returns\n -------\n None\n \"\"\"\n if PANDAS_GE_0_26_0:\n key = check_array_indexer(self, key)\n\n if self.dtype._is_list:\n # TODO: We can probably implement this for the scalar case?\n # TODO: Implement a list accessor and then the three mentioned methods\n raise ValueError(\n \"__setitem__ is not supported for list types \"\n \"due to the ambiguity of the arguments, use .fr_list.setvalue, \"\n \".fr_list.setslice or fr_list.setmask instead.\"\n )\n # Convert all possible input key types to an array of integers\n if is_bool_dtype(key):\n key_array = np.argwhere(key).flatten()\n elif isinstance(key, slice):\n key_array = np.array(range(len(self))[key])\n elif is_integer(key):\n key_array = np.array([key])\n else:\n key_array = np.asanyarray(key)\n\n if pd.api.types.is_scalar(value):\n value = np.broadcast_to(value, len(key_array))\n else:\n value = np.asarray(value)\n\n if len(key_array) != len(value):\n raise ValueError(\"Length mismatch between index and value.\")\n\n affected_chunks_index = self._get_chunk_indexer(key_array)\n affected_chunks_unique = np.unique(affected_chunks_index)\n\n all_chunks = list(self.data.iterchunks())\n\n for ix, offset in zip(\n affected_chunks_unique, self.offsets[affected_chunks_unique]\n ):\n chunk = all_chunks[ix]\n\n # Translate the array-wide indices to indices of the chunk\n key_chunk_indices = np.argwhere(affected_chunks_index == ix).flatten()\n array_chunk_indices = key_array[key_chunk_indices] - offset\n\n arr = chunk.to_pandas().values\n # In the case where we zero-copy Arrow to Pandas conversion, the\n # the resulting arrays are read-only.\n if not arr.flags.writeable:\n arr = arr.copy()\n arr[array_chunk_indices] = value[key_chunk_indices]\n\n mask = None\n # ARROW-2806: Inconsistent handling of np.nan requires adding a mask\n if (\n pa.types.is_integer(self.dtype.arrow_dtype)\n or pa.types.is_date(self.dtype.arrow_dtype)\n or pa.types.is_floating(self.dtype.arrow_dtype)\n or pa.types.is_boolean(self.dtype.arrow_dtype)\n ):\n nan_values = pd.isna(value[key_chunk_indices])\n if any(nan_values):\n nan_index = array_chunk_indices[nan_values]\n mask = np.zeros_like(arr, dtype=bool)\n mask[nan_index] = True\n pa_arr = pa.array(arr, self.dtype.arrow_dtype, mask=mask)\n all_chunks[ix] = pa_arr\n\n self.data = pa.chunked_array(all_chunks)\n\n def __getitem__(self, item):\n # type (Any) -> Any\n \"\"\"Select a subset of self.\n\n Parameters\n ----------\n item : int, slice, or ndarray\n * int: The position in 'self' to get.\n * slice: A slice object, where 'start', 'stop', and 'step' are\n integers or None\n * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'\n\n Returns\n -------\n item : scalar or ExtensionArray\n\n Notes\n -----\n For scalar ``item``, return a scalar value suitable for the array's\n type. 
This should be an instance of ``self.dtype.type``.\n For slice ``key``, return an instance of ``ExtensionArray``, even\n if the slice is length 0 or 1.\n For a boolean mask, return an instance of ``ExtensionArray``, filtered\n to the values where ``item`` is True.\n \"\"\"\n if PANDAS_GE_0_26_0:\n item = check_array_indexer(self, item)\n\n # Arrow 0.18+ supports slices perfectly\n if isinstance(item, slice) and not ARROW_GE_0_18_0:\n start = item.start or 0\n stop = item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n step = item.step if item.step is not None else 1\n # Arrow can't handle slices with steps other than 1\n # https://issues.apache.org/jira/browse/ARROW-2714\n if step != 1:\n arr = np.asarray(self)[item]\n # ARROW-2806: Inconsistent handling of np.nan requires adding a mask\n if pa.types.is_integer(self.dtype.arrow_dtype) or pa.types.is_floating(\n self.dtype.arrow_dtype\n ):\n mask = pd.isna(arr)\n else:\n mask = None\n return type(self)(pa.array(arr, type=self.dtype.arrow_dtype, mask=mask))\n if stop - start == 0:\n return type(self)(pa.array([], type=self.data.type))\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item) or (len(item) == 0):\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean arrays are valid indices.\"\n )\n elif is_integer(item):\n item = int(item)\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n value = self.data[item]\n if isinstance(value, pa.ChunkedArray):\n return type(self)(value)\n else:\n return value.as_py()\n\n def copy(self):\n # type: () -> ExtensionArray\n \"\"\"\n Return a copy of the array.\n\n Parameters\n ----------\n deep : bool, default False\n Also copy the underlying data backing this array.\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n return type(self)(self.data)\n\n @property\n def nbytes(self):\n # type: () -> int\n \"\"\"Return the number of bytes needed to store this object in memory.\"\"\"\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n if buf is not None:\n size += buf.size\n return size\n\n def factorize(self, na_sentinel=-1):\n # type: (int) -> Tuple[np.ndarray, ExtensionArray]\n \"\"\"Encode the extension array as an enumerated type.\n\n Parameters\n ----------\n na_sentinel : int, default -1\n Value to use in the `labels` array to indicate missing values.\n\n Returns\n -------\n labels : ndarray\n An integer NumPy array that's an indexer into the original\n ExtensionArray.\n uniques : ExtensionArray\n An ExtensionArray containing the unique values of `self`.\n .. 
note::\n uniques will *not* contain an entry for the NA value of\n the ExtensionArray if there are any missing values present\n in `self`.\n\n See Also\n --------\n pandas.factorize : Top-level factorize method that dispatches here.\n\n Notes\n -----\n :meth:`pandas.factorize` offers a `sort` keyword as well.\n \"\"\"\n if pa.types.is_dictionary(self.data.type):\n raise NotImplementedError()\n elif self.data.num_chunks == 1:\n # Dictionaryencode and do the same as above\n encoded = self.data.chunk(0).dictionary_encode()\n indices = encoded.indices.to_pandas()\n if indices.dtype.kind == \"f\":\n indices[np.isnan(indices)] = na_sentinel\n indices = indices.astype(int)\n if not is_int64_dtype(indices):\n indices = indices.astype(np.int64)\n return indices.values, type(self)(encoded.dictionary)\n else:\n np_array = self.data.to_pandas().values\n return pd.factorize(np_array, na_sentinel=na_sentinel)\n\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=None):\n \"\"\"\n Construct a new ExtensionArray from a sequence of scalars.\n\n Parameters\n ----------\n scalars : Sequence\n Each element will be an instance of the scalar type for this\n array, ``cls.dtype.type``.\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n if isinstance(scalars, FletcherChunkedArray):\n return scalars\n if dtype and isinstance(dtype, FletcherChunkedDtype):\n dtype = dtype.arrow_dtype\n return cls(pa.array(scalars, type=dtype, from_pandas=True))\n\n def fillna(self, value=None, method=None, limit=None):\n \"\"\"Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, array-like\n If a scalar value is passed it is used to fill all missing values.\n Alternatively, an array-like 'value' can be given. It's expected\n that the array-like have the same length as 'self'.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled.\n\n Returns\n -------\n filled : ExtensionArray with NA/NaN filled\n \"\"\"\n from pandas.api.types import is_array_like\n from pandas.util._validators import validate_fillna_kwargs\n import pandas.core.missing as pd_missing\n\n value, method = validate_fillna_kwargs(value, method)\n\n mask = self.isna()\n\n if is_array_like(value):\n if len(value) != len(self):\n raise ValueError(\n \"Length of 'value' does not match. 
Got ({}) \"\n \" expected {}\".format(len(value), len(self))\n )\n value = value[mask]\n\n if mask.any():\n if method is not None:\n # pandas 1.2+ doesn't expose pad_1d anymore\n if not hasattr(pd_missing, \"pad_1d\"):\n func = pd_missing.get_fill_func(method)\n else:\n func = (\n pd_missing.pad_1d if method == \"pad\" else pd_missing.backfill_1d\n )\n new_values = func(self.astype(object), limit=limit, mask=mask)\n new_values = self._from_sequence(new_values, self._dtype.arrow_dtype)\n else:\n # fill with value\n new_values = self.copy()\n new_values[mask] = value\n else:\n new_values = self.copy()\n return new_values\n\n def take(\n self,\n indices: Union[Sequence[int], np.ndarray],\n allow_fill: bool = False,\n fill_value: Optional[Any] = None,\n ) -> ExtensionArray:\n \"\"\"\n Take elements from an array.\n\n Parameters\n ----------\n indices : sequence of integers\n Indices to be taken.\n allow_fill : bool, default False\n How to handle negative values in `indices`.\n * False: negative values in `indices` indicate positional indices\n from the right (the default). This is similar to\n :func:`numpy.take`.\n * True: negative values in `indices` indicate\n missing values. These values are set to `fill_value`. Any other\n other negative values raise a ``ValueError``.\n fill_value : any, optional\n Fill value to use for NA-indices when `allow_fill` is True.\n This may be ``None``, in which case the default NA value for\n the type, ``self.dtype.na_value``, is used.\n For many ExtensionArrays, there will be two representations of\n `fill_value`: a user-facing \"boxed\" scalar, and a low-level\n physical NA value. `fill_value` should be the user-facing version,\n and the implementation should handle translating that to the\n physical version for processing the take if nescessary.\n\n Returns\n -------\n ExtensionArray\n\n Raises\n ------\n IndexError\n When the indices are out of bounds for the array.\n ValueError\n When `indices` contains negative values other than ``-1``\n and `allow_fill` is True.\n\n Notes\n -----\n ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,\n ``iloc``, when `indices` is a sequence of values. 
Additionally,\n it's called by :meth:`Series.reindex`, or any other method\n that causes realignemnt, with a `fill_value`.\n\n See Also\n --------\n numpy.take\n pandas.api.extensions.take\n \"\"\"\n if self.data.num_chunks == 1:\n return self._take_array(self.data.chunk(0), indices, allow_fill, fill_value)\n\n from pandas.core.algorithms import take\n\n data = self.astype(object)\n if allow_fill and fill_value is None:\n fill_value = self.dtype.na_value\n # fill value should always be translated from the scalar\n # type for the array, to the physical storage type for\n # the data, before passing to take.\n result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill)\n return self._from_sequence(result, dtype=self.data.type)\n\n def flatten(self):\n \"\"\"\n Flatten the array.\n \"\"\"\n return type(self)(\n pa.chunked_array(ch.flatten() for ch in self.data.iterchunks())\n )\n\n\ndef pandas_from_arrow(\n arrow_object: Union[pa.RecordBatch, pa.Table, pa.Array, pa.ChunkedArray],\n continuous: bool = False,\n):\n \"\"\"\n Convert Arrow object instance to their Pandas equivalent by using Fletcher.\n\n The conversion rules are:\n * {RecordBatch, Table} -> DataFrame\n * {Array, ChunkedArray} -> Series\n\n Parameters\n ----------\n arrow_object : RecordBatch, Table, Array or ChunkedArray\n object to be converted\n continuous : bool\n Use FletcherContinuousArray instead of FletcherChunkedArray\n \"\"\"\n if continuous:\n array_type = FletcherContinuousArray\n else:\n array_type = FletcherChunkedArray\n if isinstance(arrow_object, pa.RecordBatch):\n data: OrderedDict = OrderedDict()\n for ix, arr in enumerate(arrow_object):\n col_name = arrow_object.schema.names[ix]\n data[col_name] = array_type(arr)\n return pd.DataFrame(data)\n elif isinstance(arrow_object, pa.Table):\n data = OrderedDict()\n for name, col in zip(arrow_object.column_names, arrow_object.itercolumns()):\n data[name] = array_type(col)\n return pd.DataFrame(data)\n elif isinstance(arrow_object, (pa.ChunkedArray, pa.Array)):\n return pd.Series(array_type(arrow_object))\n else:\n raise NotImplementedError(\n \"Objects of type {} are not supported\".format(type(arrow_object))\n )\n\n\n__all__: List[str] = []\n" ]
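A minimal usage sketch for pandas_from_arrow, the converter defined at the end of the module above. It assumes the fletcher package is installed and exposes pandas_from_arrow at the top level, as its docstring suggests; the sample data is illustrative, not part of the record.

import pyarrow as pa
import fletcher as fr  # assumption: package installed and exporting pandas_from_arrow

# Table -> DataFrame: each column is wrapped in a FletcherChunkedArray.
table = pa.table({"a": [1, 2, None], "b": ["x", None, "z"]})
df = fr.pandas_from_arrow(table)
print(df.dtypes)  # a: fletcher_chunked[int64], b: fletcher_chunked[string]

# Array -> Series; continuous=True selects FletcherContinuousArray instead.
ser = fr.pandas_from_arrow(pa.array([1.0, None]), continuous=True)
print(ser.dtype)  # fletcher_continuous[double]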
[ [ "pandas.isna", "pandas.testing.assert_series_equal", "pandas.Series", "numpy.dtype" ], [ "pandas.api.types.is_integer", "pandas.Series", "numpy.asarray", "pandas.api.types.is_scalar", "pandas.DataFrame", "numpy.dtype", "numpy.zeros_like", "pandas.core.indexers.check_array_indexer", "pandas.isna", "numpy.digitize", "pandas.api.types.is_bool_dtype", "numpy.unique", "numpy.float16", "numpy.asanyarray", "pandas.api.types.is_integer_dtype", "numpy.isnan", "pandas.factorize", "pandas.array", "numpy.array", "pandas.core.missing.get_fill_func", "pandas.util._validators.validate_fillna_kwargs", "numpy.argwhere", "pandas.api.types.is_array_like", "pandas.api.types.is_int64_dtype", "pandas.core.algorithms.take" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
sgheb/ml-for-asset-managers
[ "53f9ee5a59a00004ac67920ad11e244ffc02a503" ]
[ "code_snippets/chap2.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KernelDensity\nfrom scipy.optimize import minimize\nfrom scipy.linalg import block_diag\nfrom sklearn.covariance import LedoitWolf\n\n\ndef fix_shape(x):\n if len(x.shape) == 1:\n x = x.reshape(-1, 1)\n return x\n\n\n# Snippet 2.1\ndef mpPDF(var, q, pts):\n \"\"\"Marcenko--Pastur PDF\"\"\"\n # q = T/N\n\n eMin, eMax = var * (1 - (1.0 / q) ** 0.5) ** 2, var * (1 + (1.0 / q) ** 0.5) ** 2\n eVal = np.linspace(eMin, eMax, pts)\n pdf = q / (2 * np.pi * var * eVal) * ((eMax - eVal) * (eVal - eMin)) ** 0.5\n pdf = pd.Series(pdf, index=eVal)\n return pdf\n\n\n# Snippet 2.2\ndef getPCA(matrix):\n \"\"\"Get eVal, eVec from a Hermitian matrix\"\"\"\n eVal, eVec = np.linalg.eigh(matrix)\n indices = eVal.argsort()[::-1] # arguments for sorting eVal desc\n eVal, eVec = eVal[indices], eVec[:, indices]\n eVal = np.diagflat(eVal)\n return eVal, eVec\n\n\ndef fitKDE(obs, bWidth=0.25, kernel=\"gaussian\", x=None):\n \"\"\"\n Fit kernel to a series of observations `obs` and derive the probability.\n\n `x` is the array of values on which the fit KDE will be evaluated\n \"\"\"\n obs = fix_shape(obs)\n kde = KernelDensity(kernel=kernel, bandwidth=bWidth).fit(obs)\n if x is None:\n x = np.unique(obs).reshape(-1, 1)\n x = fix_shape(x)\n logProb = kde.score_samples(x) # log(density)\n pdf = pd.Series(np.exp(logProb), index=x.flatten())\n return pdf\n\n\n# Snippet 2.3\ndef getRndCov(nCols, nFacts):\n w = np.random.normal(size=(nCols, nFacts))\n cov = np.dot(w, w.T)\n cov += np.diag(np.random.uniform(size=nCols))\n return cov\n\n\ndef cov2corr(cov):\n \"\"\"Derive the correlation matrix from covariance matrix\"\"\"\n std = np.sqrt(np.diag(cov))\n corr = cov / np.outer(std, std)\n corr[corr < -1], corr[corr > 1] = -1, 1 # numerical error\n return corr\n\n\n# Snippet 2.4\ndef errPDFs(var, eVal, q, bWidth, pts=1000):\n \"\"\"Fit error\"\"\"\n pdf0 = mpPDF(var, q, pts) # theoretical pdf\n pdf1 = fitKDE(\n eVal, bWidth, x=pdf0.index.values\n ) # empirical pdf with same x values as theoretical\n sse = np.sum((pdf1 - pdf0) ** 2) # sum of square error\n return sse\n\n\ndef findMaxEval(eVal, q, bWidth):\n \"\"\"Find max random eVal by fitting Marcenko's distribution\"\"\"\n out = minimize(\n lambda x, *args: errPDFs(x[0], *args),\n 0.5,\n args=(eVal, q, bWidth),\n bounds=((1e-5, 1 - 1e-5),),\n )\n if out[\"success\"]:\n var = out[\"x\"][0]\n else:\n var = 1\n eMax = var * (1 + (1.0 / q) ** 0.5) ** 2\n return eMax, var\n\n\n# Snippet 2.5\ndef denoisedCorr(eVal, eVec, nFacts):\n \"\"\"Remove noise from corr by fixing random eigenvalues\"\"\"\n eVal_ = np.diag(eVal).copy()\n eVal_[nFacts:] = eVal_[nFacts:].sum() / float(eVal_.shape[0] - nFacts)\n eVal_ = np.diag(eVal_)\n corr1 = np.dot(eVec, eVal_).dot(eVec.T)\n corr1 = cov2corr(corr1)\n return corr1\n\n\n# Snippet 2.6\ndef denoisedCorr2(eVal, eVec, nFacts, alpha=0):\n \"\"\"Remove noise from corr through targeted shrinkage\"\"\"\n eValL, eVecL = eVal[:nFacts, :nFacts], eVec[:, :nFacts]\n eValR, eVecR = eVal[nFacts:, nFacts:], eVec[:, nFacts:]\n corr0 = np.dot(eVecL, eValL).dot(eVecL.T)\n corr1 = np.dot(eVecR, eValR).dot(eVecR.T)\n corr2 = corr0 + alpha * corr1 + (1 - alpha) * np.diag(np.diag(corr1))\n return corr2\n\n\n# Snippet 2.7\ndef formBlockMatrix(nBlocks, bSize, bCorr):\n block = np.ones((bSize, bSize)) * bCorr\n block[range(bSize), range(bSize)] = 1\n corr = block_diag(*([block] * nBlocks))\n return corr\n\n\ndef formTrueMatrix(nBlocks, bSize, bCorr):\n corr0 = formBlockMatrix(nBlocks, bSize, 
bCorr)\n corr0 = pd.DataFrame(corr0)\n cols = corr0.columns.tolist()\n np.random.shuffle(cols)\n corr0 = corr0[cols].loc[cols].copy(deep=True)\n std0 = np.random.uniform(0.05, 0.2, corr0.shape[0])\n cov0 = corr2cov(corr0, std0)\n mu0 = np.random.normal(std0, std0, cov0.shape[0]).reshape(-1, 1)\n return mu0, cov0\n\n\n# Snippet 2.8\ndef simCovMu(mu0, cov0, nObs, shrink=False):\n x = np.random.multivariate_normal(mu0.flatten(), cov0, size=nObs)\n mu1 = x.mean(axis=0).reshape(-1, 1)\n if shrink:\n cov1 = LedoitWolf().fit(x).covariance_\n else:\n cov1 = np.cov(x, rowvar=0)\n return mu1, cov1\n\n\n# Snippet 2.9\ndef corr2cov(corr, std):\n cov = corr * np.outer(std, std)\n return cov\n\n\ndef deNoiseCov(cov0, q, bWidth):\n corr0 = cov2corr(cov0)\n eVal0, eVec0 = getPCA(corr0)\n eMax0, var0 = findMaxEval(np.diag(eVal0), q, bWidth)\n nFacts0 = eVal0.shape[0] - np.diag(eVal0)[::-1].searchsorted(eMax0)\n corr1 = denoisedCorr(eVal0, eVec0, nFacts0)\n cov1 = corr2cov(corr1, np.diag(cov0) ** 0.5)\n return cov1\n\n\n# Snippet 2.10\ndef optPort(cov, mu=None):\n inv = np.linalg.inv(cov)\n ones = np.ones(shape=(inv.shape[0], 1))\n if mu is None:\n mu = ones\n w = np.dot(inv, mu)\n w /= np.dot(ones.T, w)\n return w\n" ]
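A short worked example of the Marcenko-Pastur fit implemented by the snippets above (mpPDF, getPCA, fitKDE, findMaxEval). It calls only functions defined in this file plus NumPy; the seed and matrix sizes are illustrative assumptions.

import numpy as np

np.random.seed(0)
T, N = 10000, 1000                 # q = T/N = 10
x = np.random.normal(size=(T, N))  # pure noise, so all eigenvalues should be "random"
eVal, eVec = getPCA(np.corrcoef(x, rowvar=False))
eMax, var = findMaxEval(np.diag(eVal), q=T / N, bWidth=0.01)
print(eMax, var)  # eMax should land near the theoretical cutoff (1 + (1/q)**0.5)**2, var near 1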
[ [ "numpy.diag", "numpy.dot", "pandas.Series", "numpy.linspace", "sklearn.covariance.LedoitWolf", "pandas.DataFrame", "numpy.exp", "numpy.unique", "numpy.outer", "numpy.diagflat", "numpy.linalg.inv", "numpy.linalg.eigh", "numpy.cov", "sklearn.neighbors.KernelDensity", "numpy.sum", "scipy.linalg.block_diag", "numpy.random.shuffle", "numpy.ones", "numpy.random.normal", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "0.14", "0.15", "0.12", "0.10" ], "tensorflow": [] } ]
txyugood/PaddleTableTennis
[ "be4d33b5990da9c75fcd11f341ae09a73bfdbaba" ]
[ "applications/BasketballAction/predict/eval.py" ]
[ "\"\"\"\nget instance for lstm\n根据gts计算每个proposal_bmn的iou、ioa、label等信息\n\"\"\"\nimport os\nimport sys\nimport json\nimport random\nimport pickle\nimport numpy as np\n\nimport io\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding = 'utf-8')\n\ndataset = \"datasets/\"\n\nlabel_index_file = './configs_basketball/index_label_basketball_6.json'\neval_datasets = ['EuroCup2016']\nlabel_files = {'train': 'label_cls6_train.json',\n 'validation': 'label_cls6_val.json'}\n\nglobal fps, mode\nlabel_index = json.load(open(label_index_file, 'rb'))\n\ndef load_gts():\n global fps\n gts_data = {'fps': 0, 'gts': {}}\n for eval_data in eval_datasets:\n for item, value in label_files.items():\n label_file = '{}/{}/{}'.format(dataset, eval_data, value)\n gts = json.load(open(label_file, 'rb'))\n gts_data['fps'] = gts['fps']\n fps = gts['fps']\n for gt in gts['gts']:\n gt['mode'] = item\n basename = '{}/{}/mp4/{}'.format(dataset, eval_data, os.path.basename(gt['url']))\n gts_data['gts'][basename] = gt\n return gts_data['gts']\n \n\ndef computeIoU(e1, e2):\n \"\"\"\n clc iou and ioa\n \"\"\"\n if not (e1['label'] == e2['label'] and e1['basename'] == e2['basename']):\n return 0.\n area1 = e1[\"end\"] - e1[\"start\"]\n area2 = e2[\"end\"] - e2[\"start\"]\n x1 = np.maximum(e1[\"start\"], e2[\"start\"])\n x2 = np.minimum(e1[\"end\"], e2[\"end\"])\n inter = np.maximum(0.0, x2 - x1)\n iou = 0.0 if (area1 + area2 - inter) == 0 else inter * 1.0 / (area1 + area2 - inter)\n if not mode == 'proposal':\n iou = 0.0 if area2 == 0 else inter * 1.0 / area2\n return iou\n\n\ndef convert_proposal(boxes, basename, score_threshold=0.01):\n boxes = sorted(boxes, key=lambda x:float(x['score']), reverse=True)\n res = []\n for box in boxes:\n if not float(box['score']) >= score_threshold:\n continue\n res.append({'basename': basename,\n 'start': int(float(box['start']) / fps),\n 'end': int(float(box['end']) / fps),\n 'label': 0})\n return res\n\ndef convert_classify(boxes, basename, iou_threshold, score_threshold):\n boxes = sorted(boxes, key=lambda x:(float(x['classify_score']), float(x['iou_score'])), reverse=True)\n def convert_time_to_frame(time_type):\n return int(time_type)\n h, m, s = time_type.split(':')\n return int(h) * 3600 + int(m) * 60 + int(s)\n res = []\n for box in boxes:\n if not (box['iou_score'] >= iou_threshold and\n box['classify_score'] >= score_threshold):\n continue\n res.append({'basename': basename,\n 'start': convert_time_to_frame(box['start_time']),\n 'end': convert_time_to_frame(box['end_time']),\n 'label': box['label_id']})\n return res\n \ndef convert_groundtruth(boxes, basename, phase=None):\n res = []\n for box in boxes:\n for item in box['label_ids']:\n label = 0 if phase == 'proposal' else item\n res.append({'basename': basename,\n 'start': box['start_id'],\n 'end': box['end_id'],\n 'label': label})\n return res\ndef print_head(iou):\n print(\"\\nioa = {:.1f}\".format(iou))\n res_str = ''\n for item in ['label_name']:\n res_str += '{:<12s}'.format(item)\n for item in ['label_id', 'precision', 'recall', 'hit_prop', 'num_prop', 'hit_gts', 'num_gts']:\n res_str += '{:<10s}'.format(item)\n print(res_str)\n\ndef print_result(res_dict, label='avg'):\n if label == 'avg':\n res_str = '{:<22s}'.format(str(label))\n else:\n res_str = '{0:{2}<6s}{1:<10s}'.format(label_index[str(label)], str(label), chr(12288))\n\n for item in ['prec', 'recall']:\n res_str += '{:<10.4f}'.format(res_dict[item])\n for item in ['hit_prop', 'num_prop', 'hit_gts', 'num_gts']:\n res_str += '{:<10d}'.format(res_dict[item])\n 
print(res_str)\n\ndef evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = False):\n # pairwise IoU/IoA between every prediction and every ground truth\n iou_map = [computeIoU(resId, gtsId) for resId in res_boxes \\\n for gtsId in gts_boxes]\n iou_map = np.array(iou_map).reshape((len(res_boxes), len(gts_boxes)))\n hit_map_prop_total = np.max(iou_map, axis=1) # best overlap per prediction\n hit_map_index_total = np.argmax(iou_map, axis=1) # index of the matched gt\n\n count_keys = ['hit_prop', 'num_prop', 'hit_gts', 'num_gts']\n\n for iou_threshold in iou_range:\n if show_sub:\n print_head(iou_threshold)\n \n iou_prop = np.array([k >= iou_threshold for k in hit_map_prop_total])\n average_results = {}\n for label_id in label_range:\n sub_results = {}\n label_prop = np.array([k['label'] == label_id for k in res_boxes])\n label_gts = np.array([k['label'] == label_id for k in gts_boxes])\n sub_results['num_prop'] = sum(label_prop)\n sub_results['num_gts'] = sum(label_gts)\n if sub_results['num_prop'] == 0:\n hit_prop_index = []\n else:\n hit_prop_index = label_prop & iou_prop\n sub_results['hit_prop'] = sum(hit_prop_index)\n # count distinct ground truths hit, so duplicate detections are not rewarded\n sub_results['hit_gts'] = len(set(hit_map_index_total[hit_prop_index]))\n\n sub_results['prec'] = 0.0 if sub_results['num_prop'] == 0 \\\n else sub_results['hit_prop'] * 1.0 / sub_results['num_prop']\n sub_results['recall'] = 0.0 if sub_results['num_gts'] == 0 \\\n else sub_results['hit_gts'] * 1.0 / sub_results['num_gts']\n if show_sub:\n print_result(sub_results, label=label_id)\n for item in count_keys:\n if item not in average_results:\n average_results[item] = 0\n average_results[item] += sub_results[item]\n if len(label_range) == 1: # proposal mode has a single label, so no average is needed\n continue\n average_results['prec'] = 0.0 if average_results['num_prop'] == 0 \\\n else average_results['hit_prop'] * 1.0 / average_results['num_prop']\n average_results['recall'] = 0.0 if average_results['num_gts'] == 0 \\\n else average_results['hit_gts'] * 1.0 / average_results['num_gts']\n if show_sub:\n print_result(average_results)\n\n average_results['F1'] = 0.0 if (average_results['prec'] + average_results['recall'] == 0) \\\n else 2 * average_results['prec'] * average_results['recall'] / \\\n (average_results['prec'] + average_results['recall'])\n return average_results\n\ndef get_eval_results(predicts, gts_data, phase, iou_threshold = 0.3, score_threshold = 0.3, show_sub = False):\n global mode\n mode = phase\n res_boxes = []\n gts_boxes = []\n for ped_data in predicts:\n basename = ped_data['video_name']\n\n # keep only videos that belong to the evaluation datasets\n such_eval = False\n for eval_name in eval_datasets:\n if eval_name in basename:\n such_eval = True\n break\n if not such_eval:\n continue\n\n gts = gts_data[basename]['actions']\n if phase == 'proposal':\n res_boxes.extend(convert_proposal(ped_data['bmn_results'], basename, score_threshold))\n gts_boxes.extend(convert_groundtruth(gts, basename, phase='proposal'))\n label_range = [0]\n iou_range = np.arange(0.1, 1, 0.1)\n else:\n res_boxes.extend(convert_classify(ped_data['action_results'], basename, iou_threshold, score_threshold))\n gts_boxes.extend(convert_groundtruth(gts, basename))\n label_range = range(1, len(label_index))\n iou_range = np.arange(0.5, 0.6, 0.1)\n \n eval_results = evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = show_sub)\n \n return eval_results\n \n\nif __name__ == \"__main__\":\n result_file = sys.argv[1]\n predicts = json.load(open(result_file, 'r', encoding='utf-8'))\n gts_data = load_gts()\n\n get_eval_results(predicts, gts_data, 'proposal', \n score_threshold = 0.03,\n show_sub = True)\n #get_eval_results(predicts, gts_data, 'actions')\n\n # grid-search the thresholds that maximise F1 on the classification results\n best_F1 
= -0.1\n best_res = {}\n best_iou_threshold = 0.\n best_score_threshold = 0.\n for iou_threshold in np.arange(0.1, 0.9, 0.1):\n for score_threshold in np.arange(0.1, 1, 0.1):\n avg_res = get_eval_results(predicts, gts_data, 'actions', \n iou_threshold = iou_threshold,\n score_threshold = score_threshold,\n show_sub = False)\n if best_F1 < avg_res['F1']:\n best_F1 = avg_res['F1']\n best_res = avg_res\n best_iou_threshold = iou_threshold\n best_score_threshold = score_threshold\n print(\"best iou threshold = {:.1f}\".format(best_iou_threshold))\n print(\"best score threshold = {:.1f}\".format(best_score_threshold))\n print('best F1 score = {:.4f}'.format(best_F1))\n print_head(0.5)\n print_result(best_res)\n\n get_eval_results(predicts, gts_data, 'actions', iou_threshold = best_iou_threshold,\n score_threshold = best_score_threshold,\n show_sub = True)\n \n" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.arange", "numpy.max", "numpy.argmax", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
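The computeIoU helper above drives both evaluation modes: plain IoU for proposals and IoA (intersection divided by the ground-truth length) for classified actions. A minimal sketch of the same 1-D overlap arithmetic, with segment endpoints invented for illustration:

    def overlap_1d(s1, e1, s2, e2):
        # intersection of two segments on the time axis; (s2, e2) is the ground truth
        inter = max(0.0, min(e1, e2) - max(s1, s2))
        union = (e1 - s1) + (e2 - s2) - inter
        iou = 0.0 if union == 0 else inter / union
        ioa = 0.0 if (e2 - s2) == 0 else inter / (e2 - s2)
        return iou, ioa

    # prediction [10, 20] vs ground truth [15, 25]:
    # inter = 5, union = 15 -> IoU = 1/3; IoA = 5/10 = 0.5
    print(overlap_1d(10, 20, 15, 25))

The asymmetry matters: a long prediction that fully covers a short ground truth scores IoA = 1 even when its IoU is low, which is why the classification phase and the proposal phase use different matching criteria.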
sotuken2021/s
[ "2b37fdcaa60c1f5ac492edddd4a0960882fbc0aa" ]
[ "07ctc_att_mtl/attention.py" ]
[ "# -*- coding: utf-8 -*-\n\n#\n# Implementation of (location-aware) attention.\n# References\n# - D. Bahdanau, et al., \n# ``End-to-end attention-based large vocabulary speech\n# recognition,''\n# in Proc. ICASSP, 2016.\n# - J. Chorowski, et al.,\n# ``Attention-based models for speech recognition,''\n# in Proc. NIPS , 2015.\n#\n\n# import the modules required for the PyTorch computation\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass LocationAwareAttention(nn.Module):\n ''' Location aware attention\n dim_encoder: dimensionality of the encoder RNN output\n dim_decoder: dimensionality of the decoder RNN output\n dim_attention: dimensionality of the attention mechanism\n filter_size: size of the location filter (the filter\n convolved with the previous attention weights)\n filter_num: number of location filters\n temperature: temperature parameter used when computing\n the attention weights\n '''\n def __init__(self,\n dim_encoder,\n dim_decoder,\n dim_attention,\n filter_size, \n filter_num,\n temperature=1.0):\n\n super(LocationAwareAttention, self).__init__()\n\n # F: convolution layer applied to the previous attention weights\n self.loc_conv = nn.Conv1d(in_channels=1,\n out_channels=filter_num, \n kernel_size=2*filter_size+1,\n stride=1, \n padding=filter_size,\n bias=False)\n # of the following three layers, exactly one has bias=True and the others bias=False\n # W: projection layer applied to the decoder RNN output of the previous step\n self.dec_proj = nn.Linear(in_features=dim_decoder, \n out_features=dim_attention,\n bias=False)\n # V: projection layer applied to the encoder RNN output\n self.enc_proj = nn.Linear(in_features=dim_encoder, \n out_features=dim_attention,\n bias=False)\n # U: projection layer applied to the convolved attention weights\n self.att_proj = nn.Linear(in_features=filter_num, \n out_features=dim_attention,\n bias=True)\n # w: linear layer applied to Ws + Vh + Uf + b\n self.out = nn.Linear(in_features=dim_attention,\n out_features=1)\n\n # dimensionalities\n self.dim_encoder = dim_encoder\n self.dim_decoder = dim_decoder\n self.dim_attention = dim_attention\n\n # temperature parameter\n self.temperature = temperature\n\n # encoder RNN output (h) and its projection (Vh);\n # these are identical at every decoding step,\n # so they are computed only once and cached\n self.input_enc = None\n self.projected_enc = None\n # per-utterance sequence lengths of the encoder RNN output\n self.enc_lengths = None\n # maximum sequence length of the encoder RNN output\n # (= sequence length of the zero-padded encoder RNN output)\n self.max_enc_length = None\n # attention mask:\n # zeroes out the weights at and beyond each encoder\n # sequence length (the zero-padded part)\n self.mask = None\n\n\n def reset(self):\n ''' Reset the internal state.\n This function must be called first\n each time a new batch is processed.\n '''\n self.input_enc = None\n self.projected_enc = None\n self.enc_lengths = None\n self.max_enc_length = None\n self.mask = None\n\n \n def forward(self, \n input_enc,\n enc_lengths,\n input_dec=None,\n prev_att=None):\n ''' Forward computation\n input_enc: encoder RNN output [B x Tenc x Denc]\n enc_lengths: encoder output length of each utterance in the batch [B]\n input_dec: decoder RNN output of the previous step [B x Ddec]\n prev_att: attention weights of the previous step [B x Tenc]\n The values in brackets are tensor sizes\n B: number of utterances in the mini-batch (batch size)\n Tenc: encoder RNN output length (including the zero-padded part)\n Denc: dimensionality of the encoder RNN output (dim_encoder)\n Ddec: dimensionality of the decoder RNN output (dim_decoder)\n '''\n # get the batch size (number of utterances)\n batch_size = input_enc.size()[0]\n\n #\n # compute the encoder RNN output and its projection only once\n #\n if self.input_enc is None:\n # encoder RNN output (h)\n self.input_enc = input_enc\n # per-utterance sequence lengths\n self.enc_lengths = enc_lengths\n # maximum sequence length\n self.max_enc_length = input_enc.size()[1]\n # apply the projection (compute Vh)\n self.projected_enc = self.enc_proj(self.input_enc)\n \n #\n # project the decoder RNN output of the previous step (compute Ws)\n #\n # if there is no previous decoder RNN output, use a zero matrix as the initial value\n if input_dec is None:\n input_dec = torch.zeros(batch_size, self.dim_decoder)\n # place the created tensor on the same device (GPU/CPU)\n # as the encoder RNN output\n input_dec = input_dec.to(device=self.input_enc.device, \n dtype=self.input_enc.dtype)\n # project the previous decoder RNN output\n projected_dec = self.dec_proj(input_dec)\n\n #\n # project the attention weight information of the\n # previous step (compute Uf + b)\n #\n # create the attention mask\n if self.mask is None:\n self.mask = torch.zeros(batch_size, \n self.max_enc_length, \n dtype=torch.bool)\n # for each utterance in the batch, set the elements\n # at and beyond its sequence length (i.e. the\n # zero-padded part) to 1 (= to be masked)\n for i, length in enumerate(self.enc_lengths):\n length = length.item()\n self.mask[i, length:] = 1\n # place the created tensor on the same device\n # as the encoder RNN output\n self.mask = self.mask.to(device=self.input_enc.device)\n\n # if there are no previous attention weights,\n # use uniform weights as the initial value\n if prev_att is None:\n # create a tensor whose elements are all 1\n prev_att = torch.ones(batch_size, self.max_enc_length)\n # divide by the per-utterance sequence lengths;\n # prev_att is a 2-D tensor while enc_lengths is\n # a 1-D tensor, so reshape enc_lengths into a\n # 2-D tensor with view(batch_size, 1)\n # before dividing\n prev_att = prev_att \\\n / self.enc_lengths.view(batch_size, 1)\n # place the created tensor on the same device\n # as the encoder RNN output\n prev_att = prev_att.to(device=self.input_enc.device, \n dtype=self.input_enc.dtype)\n # mask so that the weights beyond each utterance length become zero\n prev_att.masked_fill_(self.mask, 0)\n\n # convolve the attention weights: {f} = F*a\n # Conv1d expects an input of size\n # (batch_size, in_channels, self.max_enc_length)\n # (in_channels is the number of input channels;\n # in this program in_channels=1), \n # so reshape with view to match\n convolved_att \\\n = self.loc_conv(prev_att.view(batch_size, \n 1, self.max_enc_length))\n \n # convolved_att has size\n # (batch_size, filter_num, self.max_enc_length),\n # while the Linear layer expects an input of size\n # (batch_size, self.max_enc_length, filter_num),\n # so swap dimensions 1 and 2 with transpose\n # before feeding it to att_proj\n projected_att = self.att_proj(convolved_att.transpose(1, 2))\n \n #\n # compute the attention weights\n # \n # at this point the tensor sizes are\n # self.projected_enc: (batch_size, self.max_enc_length, \n # self.dim_attention)\n # projected_dec: (batch_size, self.dim_attention)\n # projected_att: (batch_size, self.max_enc_length, self.dim_attention)\n # use view to align the number of dimensions of projected_dec\n projected_dec = projected_dec.view(batch_size,\n 1,\n self.dim_attention)\n\n # to compute the score, add the projected tensors,\n # apply tanh, and project once more:\n # w tanh(Ws + Vh + Uf + b)\n score = self.out(torch.tanh(projected_dec \\\n + self.projected_enc \n + projected_att))\n\n # score currently has size\n # (batch_size, self.max_enc_length, 1);\n # use view to restore the original attention size\n score = score.view(batch_size, self.max_enc_length)\n\n # apply the mask\n # (zero the weights on the zero-padded part\n # of the encoder RNN output);\n # exp(score) computed inside the softmax must\n # evaluate to zero there, so at the score stage\n # fill with -inf (the logarithm of 0) instead of 0\n score.masked_fill_(self.mask, -float('inf'))\n\n # compute the temperature softmax to obtain the attention weights\n att_weight = F.softmax(self.temperature * score, dim=1)\n\n # use att_weight to form the weighted sum of the encoder\n # RNN outputs, which yields the context vector\n # (view aligns the tensor sizes of input_enc\n # and att_weight)\n context \\\n = torch.sum(self.input_enc * \\\n att_weight.view(batch_size, self.max_enc_length, 1),\n dim=1)\n\n # return the context vector and the attention weights\n return context, att_weight\n\n" ]
[ [ "torch.nn.functional.softmax", "torch.ones", "torch.zeros", "torch.tanh", "torch.nn.Linear", "torch.nn.Conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
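Since the docstrings above fully specify the forward contract, a smoke test with random tensors is straightforward. A minimal sketch, assuming attention.py is importable; batch size, lengths, and dimensions are invented for illustration:

    import torch
    from attention import LocationAwareAttention

    B, Tenc, Denc, Ddec = 2, 50, 128, 64
    att = LocationAwareAttention(dim_encoder=Denc, dim_decoder=Ddec,
                                 dim_attention=32, filter_size=5, filter_num=16)

    enc_out = torch.randn(B, Tenc, Denc)   # zero-padded encoder output
    enc_lengths = torch.tensor([50, 35])   # true length of each utterance

    att.reset()                            # must be called once per batch
    context, att_weight = att(enc_out, enc_lengths)      # first decoding step
    context, att_weight = att(enc_out, enc_lengths,
                              input_dec=torch.randn(B, Ddec),
                              prev_att=att_weight)       # subsequent steps

    print(context.shape)     # torch.Size([2, 128])
    print(att_weight.shape)  # torch.Size([2, 50])

Note that reset() is required between batches, because the module caches input_enc, its projection Vh, and the padding mask from the first forward call of each batch.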
void-zxh/CS337-SRDGI
[ "e86413affd5867d42f9bbe66030d13b29bd2e067" ]
[ "Dynamic-channels/test.py" ]
[ "import argparse\r\nimport os\r\nimport time\r\n\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom dynamic_channels import sample_tiny_sub_channel\r\nfrom model import G\r\nfrom util import is_image, load_image, save_image\r\n\r\nparser = argparse.ArgumentParser(description='DeepRendering-implementation')\r\nparser.add_argument('--dataset', required=True, help='unity')\r\nparser.add_argument('--model', type=str, required=True, help='model file')\r\nparser.add_argument('--accuracy', type=int, default=1, help='accuracy level, i.e. the sub-channel size sampled from the generator')\r\nparser.add_argument('--n_channel_input', type=int, default=3, help='input channel')\r\nparser.add_argument('--n_channel_output', type=int, default=3, help='output channel')\r\nparser.add_argument('--n_generator_filters', type=int, default=64, help=\"number of generator filters\")\r\nopt = parser.parse_args()\r\n\r\nnetG_model = torch.load(opt.model)\r\nnetG = G(opt.n_channel_input * 4, opt.n_channel_output, opt.n_generator_filters)\r\nnetG.load_state_dict(netG_model['state_dict_G'])\r\nroot_dir = 'dataset/{}/test/'.format(opt.dataset)\r\nimage_dir = 'dataset/{}/test/albedo'.format(opt.dataset)\r\nimage_filenames = [x for x in os.listdir(image_dir) if is_image(x)]\r\ntime_list = []\r\n\r\nfor image_name in image_filenames:\r\n # the four G-buffers and the ground-truth image share the same file name\r\n albedo_image = load_image(root_dir + 'albedo/' + image_name)\r\n direct_image = load_image(root_dir + 'direct/' + image_name)\r\n normal_image = load_image(root_dir + 'normal/' + image_name)\r\n depth_image = load_image(root_dir + 'depth/' + image_name)\r\n gt_image = load_image(root_dir + 'gt/' + image_name)\r\n\r\n albedo = Variable(albedo_image).view(1, -1, 256, 256).cuda()\r\n direct = Variable(direct_image).view(1, -1, 256, 256).cuda()\r\n normal = Variable(normal_image).view(1, -1, 256, 256).cuda()\r\n depth = Variable(depth_image).view(1, -1, 256, 256).cuda()\r\n\r\n # re-sample the tiny sub-network at the requested accuracy level\r\n sample_tiny_sub_channel(netG, size=opt.accuracy, n_filters=opt.n_generator_filters)\r\n netG = netG.cuda()\r\n \r\n # NOTE: CUDA kernels run asynchronously, so these wall-clock readings\r\n # include queueing effects; calling torch.cuda.synchronize() before each\r\n # time.time() would give more accurate per-image latencies\r\n start_p = time.time()\r\n out = netG(torch.cat((albedo, direct, normal, depth), 1))\r\n end_p = time.time()\r\n out = out.cpu()\r\n out_img = out.data[0]\r\n time_list.append(end_p - start_p)\r\n\r\n if not os.path.exists(\"result\"):\r\n os.mkdir(\"result\")\r\n if not os.path.exists(os.path.join(\"result\", \"accuracy_{}\".format(opt.accuracy))):\r\n os.mkdir(os.path.join(\"result\", \"accuracy_{}\".format(opt.accuracy)))\r\n save_image(out_img, \"result/accuracy_{}/{}\".format(opt.accuracy, image_name))\r\n save_image(gt_image, \"result/accuracy_{}/GT{}\".format(opt.accuracy, image_name))\r\nprint(time_list)" ]
[ [ "torch.autograd.Variable", "torch.cat", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
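The script above is driven entirely by argparse, e.g. python test.py --dataset unity --model checkpoint.pth --accuracy 2 (paths and values hypothetical; a CUDA device is required since it calls .cuda() unconditionally). It prints the raw per-image latency list; when comparing accuracy levels a summary is usually more telling. A small follow-up sketch (the summarize helper is an invented addition, not part of the repo; it assumes time_list as produced by the loop above):

    import statistics

    def summarize(time_list):
        # drop the first measurement: it typically absorbs CUDA warm-up cost
        steady = time_list[1:] if len(time_list) > 1 else time_list
        return {'mean_ms': 1000 * statistics.mean(steady),
                'median_ms': 1000 * statistics.median(steady),
                'max_ms': 1000 * max(steady)}

    print(summarize([0.083, 0.012, 0.011, 0.013]))  # example values only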