Columns:
repo_name: string (length 6–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
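The rows below follow this schema: one repository per record, with parallel sequences of commit hashes, file paths, file contents, and the APIs detected in each file. For orientation, here is a minimal sketch of how such records could be loaded and inspected, assuming the export is stored as JSON Lines with one record per line; the file name "code_with_apis.jsonl" is hypothetical and the real export format may differ.

import json

# Hypothetical path; the actual location and serialization of this dataset may differ.
DATASET_PATH = "code_with_apis.jsonl"

def iter_records(path):
    """Yield one record per line, each carrying the columns listed above."""
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    for record in iter_records(DATASET_PATH):
        # repo_name is a plain string; hexsha, file_path, code, and apis are
        # parallel sequences, with apis holding one list of API names per file.
        print(record["repo_name"])
        for sha, path, source, api_list in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            print(f"  {path} @ {sha[:8]}: {len(source)} chars, APIs: {', '.join(api_list)}")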
cclauss/hummingbot
[ "441041faac0d5e9f06fafef0f625ab1f19642371" ]
[ "hummingbot/client/command/config_command.py" ]
[ "import asyncio\nfrom typing import (\n List,\n Any,\n)\nfrom decimal import Decimal\nimport pandas as pd\nfrom os.path import join\nfrom sqlalchemy.orm import Session\nfrom hummingbot.client.settings import (\n GLOBAL_CONFIG_PATH,\n CONF_FILE_PATH,\n)\nfrom hummingbot.client.config.global_config_map import global_config_map\nfrom hummingbot.client.config.config_validators import validate_bool, validate_decimal\nfrom hummingbot.client.config.config_helpers import (\n missing_required_configs,\n save_to_yml\n)\nfrom hummingbot.client.config.security import Security\nfrom hummingbot.client.config.config_var import ConfigVar\nfrom hummingbot.core.utils.async_utils import safe_ensure_future\nfrom hummingbot.core.utils import map_df_to_str\nfrom hummingbot.model.inventory_cost import InventoryCost\nfrom hummingbot.strategy.pure_market_making import (\n PureMarketMakingStrategy\n)\nfrom hummingbot.strategy.perpetual_market_making import (\n PerpetualMarketMakingStrategy\n)\nfrom hummingbot.user.user_balances import UserBalances\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from hummingbot.client.hummingbot_application import HummingbotApplication\n\n\nno_restart_pmm_keys_in_percentage = [\"bid_spread\", \"ask_spread\", \"order_level_spread\", \"inventory_target_base_pct\"]\nno_restart_pmm_keys = [\"order_amount\", \"order_levels\", \"filled_order_delay\", \"inventory_skew_enabled\", \"inventory_range_multiplier\"]\nglobal_configs_to_display = [\"0x_active_cancels\",\n \"autofill_import\",\n \"kill_switch_enabled\",\n \"kill_switch_rate\",\n \"telegram_enabled\",\n \"telegram_token\",\n \"telegram_chat_id\",\n \"send_error_logs\",\n \"script_enabled\",\n \"script_file_path\",\n \"ethereum_chain_name\",\n \"gateway_enabled\",\n \"gateway_cert_passphrase\",\n \"gateway_api_host\",\n \"gateway_api_port\",\n \"rate_oracle_source\",\n \"global_token\",\n \"global_token_symbol\",\n \"rate_limits_share_pct\",\n \"create_command_timeout\",\n \"other_commands_timeout\"]\n\n\nclass ConfigCommand:\n def config(self, # type: HummingbotApplication\n key: str = None,\n value: str = None):\n self.app.clear_input()\n if key is None:\n self.list_configs()\n return\n else:\n if key not in self.config_able_keys():\n self._notify(\"Invalid key, please choose from the list.\")\n return\n safe_ensure_future(self._config_single_key(key, value), loop=self.ev_loop)\n\n def list_configs(self, # type: HummingbotApplication\n ):\n columns = [\"Key\", \" Value\"]\n data = [[cv.key, cv.value] for cv in global_config_map.values()\n if cv.key in global_configs_to_display and not cv.is_secure]\n df = map_df_to_str(pd.DataFrame(data=data, columns=columns))\n self._notify(\"\\nGlobal Configurations:\")\n lines = [\" \" + line for line in df.to_string(index=False, max_colwidth=50).split(\"\\n\")]\n self._notify(\"\\n\".join(lines))\n\n if self.strategy_name is not None:\n data = [[cv.printable_key or cv.key, cv.value] for cv in self.strategy_config_map.values() if not cv.is_secure]\n df = map_df_to_str(pd.DataFrame(data=data, columns=columns))\n self._notify(\"\\nStrategy Configurations:\")\n lines = [\" \" + line for line in df.to_string(index=False, max_colwidth=50).split(\"\\n\")]\n self._notify(\"\\n\".join(lines))\n\n def config_able_keys(self # type: HummingbotApplication\n ) -> List[str]:\n \"\"\"\n Returns a list of configurable keys - using config command, excluding exchanges api keys\n as they are set from connect command.\n \"\"\"\n keys = [c.key for c in global_config_map.values() if c.prompt is not None and 
not c.is_connect_key]\n if self.strategy_config_map is not None:\n keys += [c.key for c in self.strategy_config_map.values() if c.prompt is not None]\n return keys\n\n async def check_password(self, # type: HummingbotApplication\n ):\n password = await self.app.prompt(prompt=\"Enter your password >>> \", is_password=True)\n if password != Security.password:\n self._notify(\"Invalid password, please try again.\")\n return False\n else:\n return True\n\n # Make this function static so unit testing can be performed.\n @staticmethod\n def update_running_mm(mm_strategy, key: str, new_value: Any):\n if key in no_restart_pmm_keys_in_percentage:\n setattr(mm_strategy, key, new_value / Decimal(\"100\"))\n return True\n elif key in no_restart_pmm_keys:\n setattr(mm_strategy, key, new_value)\n return True\n return False\n\n async def _config_single_key(self, # type: HummingbotApplication\n key: str,\n input_value):\n \"\"\"\n Configure a single variable only.\n Prompt the user to finish all configurations if there are remaining empty configs at the end.\n \"\"\"\n\n self.placeholder_mode = True\n self.app.hide_input = True\n\n try:\n config_var, config_map, file_path = None, None, None\n if key in global_config_map:\n config_map = global_config_map\n file_path = GLOBAL_CONFIG_PATH\n elif self.strategy_config_map is not None and key in self.strategy_config_map:\n config_map = self.strategy_config_map\n file_path = join(CONF_FILE_PATH, self.strategy_file_name)\n config_var = config_map[key]\n if input_value is None:\n self._notify(\"Please follow the prompt to complete configurations: \")\n if config_var.key == \"inventory_target_base_pct\":\n await self.asset_ratio_maintenance_prompt(config_map, input_value)\n elif config_var.key == \"inventory_price\":\n await self.inventory_price_prompt(config_map, input_value)\n else:\n await self.prompt_a_config(config_var, input_value=input_value, assign_default=False)\n if self.app.to_stop_config:\n self.app.to_stop_config = False\n return\n await self.update_all_secure_configs()\n missings = missing_required_configs(config_map)\n if missings:\n self._notify(\"\\nThere are other configuration required, please follow the prompt to complete them.\")\n missings = await self._prompt_missing_configs(config_map)\n save_to_yml(file_path, config_map)\n self._notify(\"\\nNew configuration saved:\")\n self._notify(f\"{key}: {str(config_var.value)}\")\n for config in missings:\n self._notify(f\"{config.key}: {str(config.value)}\")\n if isinstance(self.strategy, PureMarketMakingStrategy) or \\\n isinstance(self.strategy, PerpetualMarketMakingStrategy):\n updated = ConfigCommand.update_running_mm(self.strategy, key, config_var.value)\n if updated:\n self._notify(f\"\\nThe current {self.strategy_name} strategy has been updated \"\n f\"to reflect the new configuration.\")\n except asyncio.TimeoutError:\n self.logger().error(\"Prompt timeout\")\n except Exception as err:\n self.logger().error(str(err), exc_info=True)\n finally:\n self.app.hide_input = False\n self.placeholder_mode = False\n self.app.change_prompt(prompt=\">>> \")\n\n async def _prompt_missing_configs(self, # type: HummingbotApplication\n config_map):\n missings = missing_required_configs(config_map)\n for config in missings:\n await self.prompt_a_config(config)\n if self.app.to_stop_config:\n self.app.to_stop_config = False\n return\n if missing_required_configs(config_map):\n return missings + (await self._prompt_missing_configs(config_map))\n return missings\n\n async def asset_ratio_maintenance_prompt(self, 
# type: HummingbotApplication\n config_map,\n input_value = None):\n if input_value:\n config_map['inventory_target_base_pct'].value = Decimal(input_value)\n else:\n exchange = config_map['exchange'].value\n market = config_map[\"market\"].value\n base, quote = market.split(\"-\")\n balances = await UserBalances.instance().balances(exchange, base, quote)\n if balances is None:\n return\n base_ratio = await UserBalances.base_amount_ratio(exchange, market, balances)\n if base_ratio is None:\n return\n base_ratio = round(base_ratio, 3)\n quote_ratio = 1 - base_ratio\n base, quote = config_map[\"market\"].value.split(\"-\")\n\n cvar = ConfigVar(key=\"temp_config\",\n prompt=f\"On {exchange}, you have {balances.get(base, 0):.4f} {base} and \"\n f\"{balances.get(quote, 0):.4f} {quote}. By market value, \"\n f\"your current inventory split is {base_ratio:.1%} {base} \"\n f\"and {quote_ratio:.1%} {quote}.\"\n f\" Would you like to keep this ratio? (Yes/No) >>> \",\n required_if=lambda: True,\n type_str=\"bool\",\n validator=validate_bool)\n await self.prompt_a_config(cvar)\n if cvar.value:\n config_map['inventory_target_base_pct'].value = round(base_ratio * Decimal('100'), 1)\n else:\n if self.app.to_stop_config:\n self.app.to_stop_config = False\n return\n await self.prompt_a_config(config_map[\"inventory_target_base_pct\"])\n\n async def inventory_price_prompt(\n self, # type: HummingbotApplication\n config_map,\n input_value=None,\n ):\n key = \"inventory_price\"\n if input_value:\n config_map[key].value = Decimal(input_value)\n else:\n exchange = config_map[\"exchange\"].value\n market = config_map[\"market\"].value\n base_asset, quote_asset = market.split(\"-\")\n if global_config_map[\"paper_trade_enabled\"].value:\n balances = global_config_map[\"paper_trade_account_balance\"].value\n else:\n balances = await UserBalances.instance().balances(\n exchange, base_asset, quote_asset\n )\n if balances.get(base_asset) is None:\n return\n\n cvar = ConfigVar(\n key=\"temp_config\",\n prompt=f\"On {exchange}, you have {balances[base_asset]:.4f} {base_asset}. \"\n f\"What was the price for this amount in {quote_asset}? >>> \",\n required_if=lambda: True,\n type_str=\"decimal\",\n validator=lambda v: validate_decimal(\n v, min_value=Decimal(\"0\"), inclusive=True\n ),\n )\n await self.prompt_a_config(cvar)\n config_map[key].value = cvar.value\n\n try:\n quote_volume = balances[base_asset] * cvar.value\n except TypeError:\n # TypeError: unsupported operand type(s) for *: 'decimal.Decimal' and 'NoneType' - bad input / no input\n self._notify(\"Inventory price not updated due to bad input\")\n return\n\n session: Session = self.trade_fill_db.get_shared_session()\n InventoryCost.add_volume(\n session,\n base_asset=base_asset,\n quote_asset=quote_asset,\n base_volume=balances[base_asset],\n quote_volume=quote_volume,\n overwrite=True,\n )\n" ]
[ [ "pandas.DataFrame" ] ]
hunterhawk/Attention-on-Audio
[ "fcc7be4c9202978b74f0b1a5caabb0d950596e7b" ]
[ "test_mnist/rnn_mnist_movan.py" ]
[ "import torch\nfrom torch import nn\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\n\n\n# torch.manual_seed(1) # reproducible\n\n# Hyper Parameters\nEPOCH = 1 # train the training data n times, to save time, we just train 1 epoch\nBATCH_SIZE = 64\nTIME_STEP = 28 # rnn time step / image height\nINPUT_SIZE = 28 # rnn input size / image width\nLR = 0.01 # learning rate\nDOWNLOAD_MNIST = False # set to True if haven't download the data\n\n\n# Mnist digital dataset\ntrain_data = dsets.MNIST(\n root='./data/',\n train=True, # this is training data\n transform=transforms.ToTensor(), # Converts a PIL.Image or numpy.ndarray to\n # torch.FloatTensor of shape (C x H x W) and normalize in the range [0.0, 1.0]\n download=DOWNLOAD_MNIST, # download it if you don't have it\n)\n\n# plot one example\nprint(train_data.train_data.size()) # (60000, 28, 28)\nprint(train_data.train_labels.size()) # (60000)\nplt.imshow(train_data.train_data[0].numpy(), cmap='gray')\nplt.title('%i' % train_data.train_labels[0])\nplt.show()\n\n# Data Loader for easy mini-batch return in training\ntrain_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)\n\n# convert test data into Variable, pick 2000 samples to speed up testing\ntest_data = dsets.MNIST(root='./data/', train=False, transform=transforms.ToTensor())\ntest_x = test_data.test_data.type(torch.FloatTensor)[:2000]/255. # shape (2000, 28, 28) value in range(0,1)\ntest_y = test_data.test_labels.numpy()[:2000] # covert to numpy array\n\n\nclass RNN(nn.Module):\n def __init__(self):\n super(RNN, self).__init__()\n\n self.rnn = nn.LSTM( # if use nn.RNN(), it hardly learns\n input_size=INPUT_SIZE,\n hidden_size=64, # rnn hidden unit\n num_layers=1, # number of rnn layer\n batch_first=True, # input & output will has batch size as 1s dimension. e.g. (batch, time_step, input_size)\n )\n\n self.out = nn.Linear(64, 10)\n\n def forward(self, x):\n # x shape (batch, time_step, input_size)\n # r_out shape (batch, time_step, output_size)\n # h_n shape (n_layers, batch, hidden_size)\n # h_c shape (n_layers, batch, hidden_size)\n r_out, (h_n, h_c) = self.rnn(x, None) # None represents zero initial hidden state\n\n # choose r_out at the last time step\n out = self.out(r_out[:, -1, :])\n return out\n\n\nrnn = RNN()\nprint(rnn)\n\noptimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all cnn parameters\nloss_func = nn.CrossEntropyLoss() # the target label is not one-hotted\n\n# training and testing\nfor epoch in range(EPOCH):\n for step, (b_x, b_y) in enumerate(train_loader): # gives batch data\n b_x = b_x.view(-1, 28, 28) # reshape x to (batch, time_step, input_size)\n\n output = rnn(b_x) # rnn output\n loss = loss_func(output, b_y) # cross entropy loss\n optimizer.zero_grad() # clear gradients for this training step\n loss.backward() # backpropagation, compute gradients\n optimizer.step() # apply gradients\n\n if step % 50 == 0:\n test_output = rnn(test_x) # (samples, time_step, input_size)\n pred_y = torch.max(test_output, 1)[1].data.numpy()\n accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)\n print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)\n\n# print 10 predictions from test data\ntest_output = rnn(test_x[:10].view(-1, 28, 28))\npred_y = torch.max(test_output, 1)[1].data.numpy()\nprint(pred_y, 'prediction number')\nprint(test_y[:10], 'real number')\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "matplotlib.pyplot.title", "torch.nn.LSTM", "torch.utils.data.DataLoader", "torch.nn.Linear", "matplotlib.pyplot.show" ] ]
pranshumalviya2/playwith_rnn
[ "f9d4c851d9aef08ac265992daac137fa9b9b7a5e" ]
[ "Text - word/text_io.py" ]
[ "import numpy as np \nfrom keras.utils import np_utils\n# import sys\n\nclass inp(object):\n\tdef __init__(self, inp='karpathy', seq=10):\n\t\tself.inp = inp\n\t\tself.seq = seq\n\t\tself.inpFile = self.inp+\".txt\"\n\t\tself.outFile = self.inp+\"-out.txt\"\n\t\tself.content = open(self.inpFile).read().split(\" \")\n\t\tself.vocab = sorted(list(set(self.content)))\n\t\tself.vocab_indices = dict((c, i) for i, c in enumerate(self.vocab))\n\t\tself.indices_vocab = dict((i, c) for i, c in enumerate(self.vocab))\n\t\tself.dataX = []\n\t\tself.dataY = []\n\n\tdef get_content(self):\n\t\treturn self.content\n\n\tdef get_vocab(self):\n\t\treturn self.vocab,self.vocab_indices,self.indices_vocab\n\n\tdef text_seq(self):\n\t\tfor i in range(0, len(self.content) - self.seq, 1):\n\t\t\tseq_in = self.content[i:i + self.seq]\n\t\t\tseq_out = self.content[i + self.seq]\n\t\t\tself.dataX.append([self.vocab_indices[word] for word in seq_in])\n\t\t\tself.dataY.append(self.vocab_indices[seq_out])\n\n\tdef rnn_input(self):\n\t\tn_patterns = len(self.dataX)\n\t\tX = np.reshape(self.dataX, (n_patterns, self.seq, 1))\n\t\tX = X / float(len(self.vocab))\n\t\ty = np_utils.to_categorical(self.dataY)\n\t\treturn X,y\n\n\tdef save(self,new):\n\t\twith open(self.outFile, 'w') as f:\n\t\t\tf.write(new)\n\t\t\tf.close()" ]
[ [ "numpy.reshape" ] ]
kkraus14/cuxfilter
[ "99d7cf67802270d24db0051162df4feb798f2e15" ]
[ "python/cuxfilter/charts/datashader/plots.py" ]
[ "from ..core.non_aggregate import (\n BaseScatter,\n BaseLine,\n BaseStackedLine,\n BaseGraph,\n)\nfrom .custom_extensions import (\n InteractiveImage,\n CustomInspectTool,\n calc_connected_edges,\n)\n\nfrom distutils.version import LooseVersion\nimport datashader as ds\nfrom datashader import transfer_functions as tf\nfrom datashader.colors import Hot\nimport dask_cudf\nimport dask.dataframe as dd\nimport numpy as np\nimport cupy as cp\nimport pandas as pd\nimport bokeh\nimport cudf\nfrom bokeh import events\nfrom bokeh.plotting import figure\nfrom bokeh.models import (\n BoxSelectTool,\n ColorBar,\n LassoSelectTool,\n LinearColorMapper,\n LogColorMapper,\n BasicTicker,\n FixedTicker,\n)\nfrom bokeh.tile_providers import get_provider\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\n\nds_version = LooseVersion(ds.__version__)\n\n_color_mapper = {\"linear\": LinearColorMapper, \"log\": LogColorMapper}\n\n\ndef load_image(url):\n response = requests.get(url)\n return Image.open(BytesIO(response.content))\n\n\ndef _rect_vertical_mask(px):\n \"\"\"\n Produce a vertical rectangle mask with truth\n values in ``(2 * px + 1) * ((2 * px + 1)/2)``\n \"\"\"\n px = int(px)\n w = 2 * px + 1\n zero_bool = np.zeros((w, px), dtype=\"bool\")\n x_bool = np.ones((w, w - px), dtype=\"bool\")\n return np.concatenate((x_bool, zero_bool), axis=1)\n\n\ndef _rect_horizontal_mask(px):\n \"\"\"\n Produce a horizontal rectangle mask with truth\n values in ``((2 * px + 1)/2) * (2 * px + 1)``\n \"\"\"\n px = int(px)\n w = 2 * px + 1\n zero_bool = np.zeros((px, w), dtype=\"bool\")\n x_bool = np.ones((w - px, w), dtype=\"bool\")\n return np.concatenate((x_bool, zero_bool), axis=0)\n\n\ndef _compute_datashader_assets(\n data, x, aggregate_col, aggregate_fn, color_palette\n):\n aggregator = None\n cmap = {\"cmap\": color_palette}\n\n if isinstance(data[x].dtype, cudf.core.dtypes.CategoricalDtype):\n if ds_version >= \"0.11\":\n aggregator = ds.by(x, getattr(ds, aggregate_fn)(aggregate_col),)\n else:\n print(\"only count_cat supported by datashader <=0.10\")\n aggregator = ds.count_cat(x)\n cmap = {\n \"color_key\": {\n k: v\n for k, v in zip(list(data[x].cat.categories), color_palette,)\n }\n }\n else:\n if aggregate_fn:\n aggregator = getattr(ds, aggregate_fn)(aggregate_col)\n return aggregator, cmap\n\n\ndef _get_provider(tile_provider):\n if tile_provider is None:\n return None\n elif isinstance(tile_provider, str):\n return get_provider(tile_provider)\n elif isinstance(tile_provider, bokeh.models.tiles.WMTSTileSource):\n return tile_provider\n return None\n\n\ndef _get_legend_title(aggregate_fn, aggregate_col):\n if aggregate_fn == \"count\":\n return aggregate_fn\n else:\n return aggregate_fn + \" \" + aggregate_col\n\n\ndef _generate_legend(\n pixel_shade_type,\n color_palette,\n legend_title,\n constant_limit,\n color_bar=None,\n update=False,\n):\n mapper = _color_mapper[pixel_shade_type](\n palette=color_palette, low=constant_limit[0], high=constant_limit[1]\n )\n if update and color_bar:\n color_bar.color_mapper = mapper\n return color_bar\n\n color_bar = ColorBar(\n color_mapper=mapper,\n location=(0, 0),\n ticker=BasicTicker(desired_num_ticks=len(color_palette)),\n title=legend_title,\n background_fill_alpha=0,\n )\n return color_bar\n\n\nds.transfer_functions._mask_lookup[\"rect_vertical\"] = _rect_vertical_mask\nds.transfer_functions._mask_lookup[\"rect_horizontal\"] = _rect_horizontal_mask\n\n\nclass Scatter(BaseScatter):\n \"\"\"\n Description:\n \"\"\"\n\n reset_event = events.Reset\n 
data_y_axis = \"y\"\n data_x_axis = \"x\"\n no_colors_set = False\n constant_limit = None\n color_bar = None\n legend_added = False\n\n def format_source_data(self, data):\n \"\"\"\n Description:\n format source\n -------------------------------------------\n Input:\n source_dict = {\n 'X': [],\n 'Y': []\n }\n -------------------------------------------\n\n Ouput:\n \"\"\"\n self.source = data\n\n def show_legend(self):\n return self.legend and (\n self.pixel_shade_type in list(_color_mapper.keys())\n )\n\n def render_legend(self):\n if self.show_legend():\n update = self.color_bar is not None\n self.color_bar = _generate_legend(\n self.pixel_shade_type,\n self.color_palette,\n _get_legend_title(self.aggregate_fn, self.aggregate_col),\n self.constant_limit,\n color_bar=self.color_bar,\n update=update,\n )\n if (update and self.legend_added) is False:\n self.chart.add_layout(self.color_bar, self.legend_position)\n self.legend_added = True\n\n def generate_InteractiveImage_callback(self):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n\n def viewInteractiveImage(\n x_range, y_range, w, h, data_source, **kwargs\n ):\n dd = data_source[[self.x, self.y, self.aggregate_col]]\n dd[self.x] = self._to_xaxis_type(dd[self.x])\n dd[self.y] = self._to_yaxis_type(dd[self.y])\n\n x_range = self._to_xaxis_type(x_range)\n y_range = self._to_yaxis_type(y_range)\n\n cvs = ds.Canvas(\n plot_width=w, plot_height=h, x_range=x_range, y_range=y_range\n )\n aggregator, cmap = _compute_datashader_assets(\n dd,\n self.x,\n self.aggregate_col,\n self.aggregate_fn,\n self.color_palette,\n )\n agg = cvs.points(dd, self.x, self.y, aggregator,)\n\n if self.constant_limit is None or self.aggregate_fn == \"count\":\n self.constant_limit = [\n float(cp.nanmin(agg.data)),\n float(cp.nanmax(agg.data)),\n ]\n self.render_legend()\n\n span = {\"span\": self.constant_limit}\n if self.pixel_shade_type == \"eq_hist\":\n span = {}\n\n img = tf.shade(agg, how=self.pixel_shade_type, **cmap, **span)\n\n if self.pixel_spread == \"dynspread\":\n return tf.dynspread(\n img,\n threshold=self.pixel_density,\n max_px=self.point_size,\n shape=self.point_shape,\n )\n else:\n return tf.spread(\n img, px=self.point_size, shape=self.point_shape\n )\n\n return viewInteractiveImage\n\n def generate_chart(self):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n if self.color_palette is None:\n self.no_colors_set = True\n self.color_palette = Hot\n\n if len(self.title) == 0:\n self.title = (\n \"Scatter plot for \"\n + self.aggregate_col\n + \" \"\n + self.aggregate_fn\n )\n\n self.chart = figure(\n title=self.title,\n toolbar_location=\"right\",\n tools=\"pan, wheel_zoom, reset\",\n active_scroll=\"wheel_zoom\",\n active_drag=\"pan\",\n x_range=self.x_range,\n y_range=self.y_range,\n width=self.width,\n height=self.height,\n )\n\n self.chart.add_tools(BoxSelectTool())\n self.chart.add_tools(LassoSelectTool())\n\n self.tile_provider = _get_provider(self.tile_provider)\n if self.tile_provider is not None:\n self.chart.add_tile(self.tile_provider)\n self.chart.axis.visible = False\n # reset legend and color_bar\n self.legend_added = False\n self.color_bar = None\n\n self.chart.xgrid.grid_line_color = None\n self.chart.ygrid.grid_line_color = None\n\n self.interactive_image = InteractiveImage(\n self.chart,\n 
self.generate_InteractiveImage_callback(),\n data_source=self.source,\n timeout=self.timeout,\n x_dtype=self.x_dtype,\n y_dtype=self.y_dtype,\n )\n\n if self.legend_added is False:\n self.render_legend()\n\n def update_dimensions(self, width=None, height=None):\n \"\"\"\n Description:\n\n\n Input:\n\n\n\n Ouput:\n \"\"\"\n if width is not None:\n self.chart.plot_width = width\n if height is not None:\n self.chart.plot_height = height\n\n def reload_chart(self, data=None, patch_update=False):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n if data is not None:\n if len(data) == 0:\n data = cudf.DataFrame({k: cp.nan for k in data.columns})\n self.interactive_image.update_chart(data_source=data)\n\n def add_selection_geometry_event(self, callback):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n\n self.chart.on_event(events.SelectionGeometry, callback)\n\n def apply_theme(self, properties_dict):\n \"\"\"\n apply thematic changes to the chart based on the input\n properties dictionary.\n\n \"\"\"\n if self.no_colors_set:\n self.color_palette = properties_dict[\"chart_color\"][\n \"color_palette\"\n ]\n self.interactive_image.update_chart()\n self.chart.xgrid.grid_line_color = properties_dict[\"geo_charts_grids\"][\n \"xgrid\"\n ]\n self.chart.ygrid.grid_line_color = properties_dict[\"geo_charts_grids\"][\n \"ygrid\"\n ]\n\n # title\n self.chart.title.text_color = properties_dict[\"title\"][\"text_color\"]\n self.chart.title.text_font = properties_dict[\"title\"][\"text_font\"]\n self.chart.title.text_font_style = properties_dict[\"title\"][\n \"text_font_style\"\n ]\n self.chart.title.text_font_size = properties_dict[\"title\"][\n \"text_font_size\"\n ]\n\n if self.show_legend():\n self.color_bar.major_label_text_color = properties_dict[\"title\"][\n \"text_color\"\n ]\n self.color_bar.title_text_color = properties_dict[\"title\"][\n \"text_color\"\n ]\n\n # background, border, padding\n self.chart.background_fill_color = properties_dict[\n \"background_fill_color\"\n ]\n self.chart.border_fill_color = properties_dict[\"border_fill_color\"]\n self.chart.min_border = properties_dict[\"min_border\"]\n self.chart.outline_line_width = properties_dict[\"outline_line_width\"]\n self.chart.outline_line_alpha = properties_dict[\"outline_line_alpha\"]\n self.chart.outline_line_color = properties_dict[\"outline_line_color\"]\n\n # x axis title\n self.chart.xaxis.major_label_text_color = properties_dict[\"xaxis\"][\n \"major_label_text_color\"\n ]\n self.chart.xaxis.axis_line_width = properties_dict[\"xaxis\"][\n \"axis_line_width\"\n ]\n self.chart.xaxis.axis_line_color = properties_dict[\"xaxis\"][\n \"axis_line_color\"\n ]\n\n # y axis title\n self.chart.yaxis.major_label_text_color = properties_dict[\"yaxis\"][\n \"major_label_text_color\"\n ]\n self.chart.yaxis.axis_line_width = properties_dict[\"yaxis\"][\n \"axis_line_width\"\n ]\n self.chart.yaxis.axis_line_color = properties_dict[\"yaxis\"][\n \"axis_line_color\"\n ]\n\n # axis ticks\n self.chart.axis.major_tick_line_color = properties_dict[\"axis\"][\n \"major_tick_line_color\"\n ]\n self.chart.axis.minor_tick_line_color = properties_dict[\"axis\"][\n \"minor_tick_line_color\"\n ]\n self.chart.axis.minor_tick_out = properties_dict[\"axis\"][\n \"minor_tick_out\"\n ]\n self.chart.axis.major_tick_out = properties_dict[\"axis\"][\n 
\"major_tick_out\"\n ]\n self.chart.axis.major_tick_in = properties_dict[\"axis\"][\n \"major_tick_in\"\n ]\n\n\nclass Graph(BaseGraph):\n \"\"\"\n Description:\n \"\"\"\n\n reset_event = events.Reset\n data_y_axis = \"node_y\"\n data_x_axis = \"node_x\"\n no_colors_set = False\n image = None\n constant_limit_nodes = None\n constant_limit_edges = None\n color_bar = None\n legend_added = False\n\n def compute_colors(self):\n if self.node_color_palette is None:\n self.node_color_palette = bokeh.palettes.Purples9\n\n BREAKS = np.linspace(\n self.nodes[self.node_aggregate_col].min(),\n self.nodes[self.node_aggregate_col].max(),\n len(self.node_color_palette),\n )\n\n x = self.source.data[self.node_aggregate_col]\n inds = pd.cut(x, BREAKS, labels=False, include_lowest=True)\n colors = [self.node_color_palette[i] for i in inds]\n self.source.data[self.node_aggregate_col] = colors\n\n def show_legend(self):\n \"\"\"\n return if legend=True and pixel_shade_type is ['linear', 'log']\n \"\"\"\n return self.legend and (\n self.node_pixel_shade_type in list(_color_mapper.keys())\n )\n\n def render_legend(self):\n \"\"\"\n render legend\n \"\"\"\n if self.show_legend():\n update = self.color_bar is not None\n self.color_bar = _generate_legend(\n self.node_pixel_shade_type,\n self.node_color_palette,\n _get_legend_title(\n self.node_aggregate_fn, self.node_aggregate_col\n ),\n self.constant_limit_nodes,\n color_bar=self.color_bar,\n update=update,\n )\n if (update and self.legend_added) is False:\n self.chart.add_layout(self.color_bar, self.legend_position)\n self.legend_added = True\n\n def nodes_plot(self, canvas, nodes, name=None):\n \"\"\"\n plot nodes(scatter)\n \"\"\"\n aggregator, cmap = _compute_datashader_assets(\n nodes,\n self.node_id,\n self.node_aggregate_col,\n self.node_aggregate_fn,\n self.node_color_palette,\n )\n\n agg = canvas.points(\n nodes.sort_index(), self.node_x, self.node_y, aggregator\n )\n\n if (\n self.constant_limit_nodes is None\n or self.node_aggregate_fn == \"count\"\n ):\n self.constant_limit_nodes = [\n float(cp.nanmin(agg.data)),\n float(cp.nanmax(agg.data)),\n ]\n self.render_legend()\n\n span = {\"span\": self.constant_limit_nodes}\n if self.node_pixel_shade_type == \"eq_hist\":\n span = {}\n\n return getattr(tf, self.node_pixel_spread)(\n tf.shade(\n agg, how=self.node_pixel_shade_type, name=name, **cmap, **span\n ),\n threshold=self.node_pixel_density,\n max_px=self.node_point_size,\n shape=self.node_point_shape,\n )\n\n def edges_plot(self, canvas, nodes, name=None):\n \"\"\"\n plot edges(lines)\n \"\"\"\n aggregator, cmap = _compute_datashader_assets(\n self.connected_edges,\n self.node_x,\n self.edge_aggregate_col,\n self.edge_aggregate_fn,\n self.edge_color_palette,\n )\n\n agg = canvas.line(\n self.connected_edges, self.node_x, self.node_y, aggregator\n )\n\n if (\n self.constant_limit_edges is None\n or self.edge_aggregate_fn == \"count\"\n ):\n self.constant_limit_edges = [\n float(cp.nanmin(agg.data)),\n float(cp.nanmax(agg.data)),\n ]\n\n span = {\"span\": self.constant_limit_nodes}\n\n return getattr(tf, self.node_pixel_spread)(\n tf.shade(\n agg,\n name=name,\n how=\"linear\",\n alpha=255 - 255 * self.edge_transparency,\n **cmap,\n **span,\n ),\n max_px=1,\n )\n\n def format_source_data(self, dataframe):\n \"\"\"\n Description:\n format source\n -------------------------------------------\n Input:\n source_dict = {\n 'X': [],\n 'Y': []\n }\n -------------------------------------------\n\n Ouput:\n \"\"\"\n if isinstance(dataframe, 
cudf.core.DataFrame):\n self.nodes = dataframe\n else:\n self.nodes = dataframe.data\n self.edges = dataframe.edges\n\n if self.edges is not None:\n # update connected_edges value for datashaded edges\n self.connected_edges = calc_connected_edges(\n self.nodes,\n self.edges,\n self.node_x,\n self.node_y,\n self.node_id,\n self.edge_source,\n self.edge_target,\n self.edge_aggregate_col,\n self.x_dtype,\n self.y_dtype,\n self.edge_render_type,\n self.curve_params,\n )\n\n def generate_InteractiveImage_callback(self):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n\n def viewInteractiveImage(\n x_range,\n y_range,\n w,\n h,\n data_source=self.nodes,\n nodes_plot=self.nodes_plot,\n edges_plot=self.edges_plot,\n chart=self.chart,\n **kwargs,\n ):\n dd = data_source[\n [\n self.node_id,\n self.node_x,\n self.node_y,\n self.node_aggregate_col,\n ]\n ]\n dd[self.node_x] = self._to_xaxis_type(dd[self.node_x])\n dd[self.node_y] = self._to_yaxis_type(dd[self.node_y])\n\n x_range = self._to_xaxis_type(x_range)\n y_range = self._to_yaxis_type(y_range)\n\n cvs = ds.Canvas(\n plot_width=w, plot_height=h, x_range=x_range, y_range=y_range\n )\n plot = None\n if self.source is not None:\n self.source.data = {\n self.node_x: [],\n self.node_y: [],\n self.node_aggregate_col: [],\n self.node_aggregate_col + \"_color\": [],\n }\n np = nodes_plot(cvs, dd)\n if self.display_edges._active:\n ep = edges_plot(cvs, dd)\n plot = tf.stack(ep, np, how=\"over\")\n else:\n plot = np\n\n return plot\n\n return viewInteractiveImage\n\n def generate_chart(self):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n if self.node_color_palette is None:\n self.no_colors_set = True\n self.node_color_palette = Hot\n\n if len(self.title) == 0:\n self.title = \"Graph\"\n self.x_range = (\n self.x_range[0] - self.node_point_size,\n self.x_range[1] + self.node_point_size,\n )\n self.y_range = (\n self.y_range[0] - self.node_point_size,\n self.y_range[1] + self.node_point_size,\n )\n self.chart = figure(\n title=self.title,\n toolbar_location=\"right\",\n tools=\"pan, wheel_zoom, reset\",\n active_scroll=\"wheel_zoom\",\n active_drag=\"pan\",\n x_range=self.x_range,\n y_range=self.y_range,\n width=self.width,\n height=self.height,\n )\n\n self.tile_provider = _get_provider(self.tile_provider)\n if self.tile_provider is not None:\n self.chart.add_tile(self.tile_provider)\n self.chart.axis.visible = False\n # reset legend and color_bar\n self.legend_added = False\n self.color_bar = None\n # loading icon from a url\n impath = (\n \"https://raw.githubusercontent.com/rapidsai/cuxfilter/\"\n + \"branch-0.15/python/cuxfilter/charts/datashader/icons/graph.png\"\n )\n\n self.inspect_neighbors = CustomInspectTool(\n icon=load_image(impath),\n _active=True,\n tool_name=\"Inspect Neighboring Edges\",\n )\n # loading icon from a url\n impath = (\n \"https://raw.githubusercontent.com/rapidsai/cuxfilter/\"\n + \"branch-0.15/python/cuxfilter/charts/datashader/icons/XPan.png\"\n )\n self.display_edges = CustomInspectTool(\n icon=load_image(impath), _active=True, tool_name=\"Display Edges\"\n )\n\n def cb(attr, old, new):\n if new:\n self.connected_edges = calc_connected_edges(\n self.interactive_image.kwargs[\"data_source\"],\n self.edges,\n self.node_x,\n self.node_y,\n self.node_id,\n self.edge_source,\n self.edge_target,\n self.edge_aggregate_col,\n 
self.x_dtype,\n self.y_dtype,\n self.edge_render_type,\n self.curve_params,\n )\n self.interactive_image.update_chart()\n\n self.display_edges.on_change(\"_active\", cb)\n\n self.chart.add_tools(BoxSelectTool())\n self.chart.add_tools(LassoSelectTool())\n self.chart.add_tools(self.inspect_neighbors)\n self.chart.add_tools(self.display_edges)\n\n self.chart.xgrid.grid_line_color = None\n self.chart.ygrid.grid_line_color = None\n\n self.interactive_image = InteractiveImage(\n self.chart,\n self.generate_InteractiveImage_callback(),\n data_source=self.nodes,\n timeout=self.timeout,\n x_dtype=self.x_dtype,\n y_dtype=self.y_dtype,\n )\n\n if self.legend_added is False:\n self.render_legend()\n\n def update_dimensions(self, width=None, height=None):\n \"\"\"\n Description:\n\n\n Input:\n\n\n\n Ouput:\n \"\"\"\n if width is not None:\n self.chart.plot_width = width\n if height is not None:\n self.chart.plot_height = height\n\n def reload_chart(self, nodes, edges=None, patch_update=False):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n if nodes is not None:\n if len(nodes) == 0:\n nodes = cudf.DataFrame({k: cp.nan for k in self.nodes.columns})\n\n # update connected_edges value for datashaded edges\n # if display edge toggle is active\n if self.display_edges._active:\n self.connected_edges = calc_connected_edges(\n nodes,\n self.edges if edges is None else edges,\n self.node_x,\n self.node_y,\n self.node_id,\n self.edge_source,\n self.edge_target,\n self.edge_aggregate_col,\n self.x_dtype,\n self.y_dtype,\n self.edge_render_type,\n self.curve_params,\n )\n\n self.interactive_image.update_chart(data_source=nodes)\n\n def add_selection_geometry_event(self, callback):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n\n self.chart.on_event(events.SelectionGeometry, callback)\n\n def apply_theme(self, properties_dict):\n \"\"\"\n apply thematic changes to the chart based on the input\n properties dictionary.\n \"\"\"\n if self.no_colors_set:\n self.node_color_palette = properties_dict[\"chart_color\"][\n \"color_palette\"\n ]\n self.interactive_image.update_chart()\n self.chart.xgrid.grid_line_color = properties_dict[\"geo_charts_grids\"][\n \"xgrid\"\n ]\n self.chart.ygrid.grid_line_color = properties_dict[\"geo_charts_grids\"][\n \"ygrid\"\n ]\n\n # title\n self.chart.title.text_color = properties_dict[\"title\"][\"text_color\"]\n self.chart.title.text_font = properties_dict[\"title\"][\"text_font\"]\n self.chart.title.text_font_style = properties_dict[\"title\"][\n \"text_font_style\"\n ]\n self.chart.title.text_font_size = properties_dict[\"title\"][\n \"text_font_size\"\n ]\n if self.show_legend():\n self.color_bar.major_label_text_color = properties_dict[\"title\"][\n \"text_color\"\n ]\n self.color_bar.title_text_color = properties_dict[\"title\"][\n \"text_color\"\n ]\n\n # background, border, padding\n self.chart.background_fill_color = properties_dict[\n \"background_fill_color\"\n ]\n self.chart.border_fill_color = properties_dict[\"border_fill_color\"]\n self.chart.min_border = properties_dict[\"min_border\"]\n self.chart.outline_line_width = properties_dict[\"outline_line_width\"]\n self.chart.outline_line_alpha = properties_dict[\"outline_line_alpha\"]\n self.chart.outline_line_color = properties_dict[\"outline_line_color\"]\n\n # x axis title\n self.chart.xaxis.major_label_text_color = 
properties_dict[\"xaxis\"][\n \"major_label_text_color\"\n ]\n self.chart.xaxis.axis_line_width = properties_dict[\"xaxis\"][\n \"axis_line_width\"\n ]\n self.chart.xaxis.axis_line_color = properties_dict[\"xaxis\"][\n \"axis_line_color\"\n ]\n\n # y axis title\n self.chart.yaxis.major_label_text_color = properties_dict[\"yaxis\"][\n \"major_label_text_color\"\n ]\n self.chart.yaxis.axis_line_width = properties_dict[\"yaxis\"][\n \"axis_line_width\"\n ]\n self.chart.yaxis.axis_line_color = properties_dict[\"yaxis\"][\n \"axis_line_color\"\n ]\n\n # axis ticks\n self.chart.axis.major_tick_line_color = properties_dict[\"axis\"][\n \"major_tick_line_color\"\n ]\n self.chart.axis.minor_tick_line_color = properties_dict[\"axis\"][\n \"minor_tick_line_color\"\n ]\n self.chart.axis.minor_tick_out = properties_dict[\"axis\"][\n \"minor_tick_out\"\n ]\n self.chart.axis.major_tick_out = properties_dict[\"axis\"][\n \"major_tick_out\"\n ]\n self.chart.axis.major_tick_in = properties_dict[\"axis\"][\n \"major_tick_in\"\n ]\n\n\nclass Line(BaseLine):\n \"\"\"\n Description:\n \"\"\"\n\n reset_event = events.Reset\n data_y_axis = \"y\"\n data_x_axis = \"x\"\n use_data_tiles = False\n\n def calculate_source(self, data):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n data = cudf.DataFrame\n -------------------------------------------\n\n Ouput:\n \"\"\"\n self.format_source_data(data)\n\n def format_source_data(self, data):\n \"\"\"\n Description:\n format source\n -------------------------------------------\n Input:\n source_dict = {\n 'X': [],\n 'Y': []\n }\n -------------------------------------------\n\n Ouput:\n \"\"\"\n self.source = data\n\n self.x_range = (self.source[self.x].min(), self.source[self.x].max())\n self.y_range = (self.source[self.y].min(), self.source[self.y].max())\n\n if isinstance(data, dask_cudf.core.DataFrame):\n self.x_range = dd.compute(*self.x_range)\n self.y_range = dd.compute(*self.y_range)\n\n def generate_InteractiveImage_callback(self):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n\n def viewInteractiveImage(\n x_range, y_range, w, h, data_source, **kwargs\n ):\n dd = data_source[[self.x, self.y]]\n dd[self.x] = self._to_xaxis_type(dd[self.x])\n dd[self.y] = self._to_yaxis_type(dd[self.y])\n\n x_range = self._to_xaxis_type(x_range)\n y_range = self._to_yaxis_type(y_range)\n\n cvs = ds.Canvas(\n plot_width=w, plot_height=h, x_range=x_range, y_range=y_range\n )\n\n agg = cvs.line(source=dd, x=self.x, y=self.y)\n\n img = tf.shade(\n agg, cmap=[\"white\", self.color], how=self.pixel_shade_type\n )\n return img\n\n return viewInteractiveImage\n\n def generate_chart(self):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n if self.color is None:\n self.color = \"#8735fb\"\n\n if len(self.title) == 0:\n if self.x == self.y:\n self.title = \"Line plot for \" + self.x\n else:\n self.title = \"Line plot for (\" + self.x + \",\" + self.y + \")\"\n\n self.chart = figure(\n title=self.title,\n toolbar_location=\"right\",\n tools=\"pan, wheel_zoom, reset\",\n active_scroll=\"wheel_zoom\",\n active_drag=\"pan\",\n x_range=self.x_range,\n y_range=self.y_range,\n width=self.width,\n height=self.height,\n )\n\n self.chart.add_tools(BoxSelectTool())\n self.chart.axis.visible = False\n if self.x_axis_tick_formatter:\n self.chart.xaxis.formatter = 
self.x_axis_tick_formatter\n if self.y_axis_tick_formatter:\n self.chart.yaxis.formatter = self.y_axis_tick_formatter\n self.chart.xgrid.grid_line_color = None\n self.chart.ygrid.grid_line_color = None\n\n self.interactive_image = InteractiveImage(\n self.chart,\n self.generate_InteractiveImage_callback(),\n data_source=self.source,\n timeout=self.timeout,\n x_dtype=self.x_dtype,\n y_dtype=self.y_dtype,\n )\n\n def update_dimensions(self, width=None, height=None):\n \"\"\"\n Description:\n\n\n Input:\n\n\n\n Ouput:\n \"\"\"\n if width is not None:\n self.chart.plot_width = width\n if height is not None:\n self.chart.plot_height = height\n\n def reload_chart(self, data, patch_update=False):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n if data is not None:\n if len(data) == 0:\n data = cudf.DataFrame({k: cp.nan for k in data.columns})\n self.interactive_image.update_chart(data_source=data)\n\n def add_selection_geometry_event(self, callback):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n\n self.chart.on_event(events.SelectionGeometry, callback)\n\n def apply_theme(self, properties_dict):\n \"\"\"\n apply thematic changes to the chart based on the input\n properties dictionary.\n\n \"\"\"\n if self.no_color_set:\n self.color = properties_dict[\"chart_color\"][\"color\"]\n self.interactive_image.update_chart()\n self.chart.xgrid.grid_line_color = properties_dict[\"geo_charts_grids\"][\n \"xgrid\"\n ]\n self.chart.ygrid.grid_line_color = properties_dict[\"geo_charts_grids\"][\n \"ygrid\"\n ]\n\n # title\n self.chart.title.text_color = properties_dict[\"title\"][\"text_color\"]\n self.chart.title.text_font = properties_dict[\"title\"][\"text_font\"]\n self.chart.title.text_font_style = properties_dict[\"title\"][\n \"text_font_style\"\n ]\n self.chart.title.text_font_size = properties_dict[\"title\"][\n \"text_font_size\"\n ]\n\n # background, border, padding\n self.chart.background_fill_color = properties_dict[\n \"background_fill_color\"\n ]\n self.chart.border_fill_color = properties_dict[\"border_fill_color\"]\n self.chart.min_border = properties_dict[\"min_border\"]\n self.chart.outline_line_width = properties_dict[\"outline_line_width\"]\n self.chart.outline_line_alpha = properties_dict[\"outline_line_alpha\"]\n self.chart.outline_line_color = properties_dict[\"outline_line_color\"]\n\n # x axis title\n self.chart.xaxis.major_label_text_color = properties_dict[\"xaxis\"][\n \"major_label_text_color\"\n ]\n self.chart.xaxis.axis_line_width = properties_dict[\"xaxis\"][\n \"axis_line_width\"\n ]\n self.chart.xaxis.axis_line_color = properties_dict[\"xaxis\"][\n \"axis_line_color\"\n ]\n\n # y axis title\n self.chart.yaxis.major_label_text_color = properties_dict[\"yaxis\"][\n \"major_label_text_color\"\n ]\n self.chart.yaxis.axis_line_width = properties_dict[\"yaxis\"][\n \"axis_line_width\"\n ]\n self.chart.yaxis.axis_line_color = properties_dict[\"yaxis\"][\n \"axis_line_color\"\n ]\n\n # axis ticks\n self.chart.axis.major_tick_line_color = properties_dict[\"axis\"][\n \"major_tick_line_color\"\n ]\n self.chart.axis.minor_tick_line_color = properties_dict[\"axis\"][\n \"minor_tick_line_color\"\n ]\n self.chart.axis.minor_tick_out = properties_dict[\"axis\"][\n \"minor_tick_out\"\n ]\n self.chart.axis.major_tick_out = properties_dict[\"axis\"][\n \"major_tick_out\"\n ]\n 
self.chart.axis.major_tick_in = properties_dict[\"axis\"][\n \"major_tick_in\"\n ]\n\n\nclass StackedLines(BaseStackedLine):\n \"\"\"\n Description:\n \"\"\"\n\n reset_event = events.Reset\n data_y_axis = \"y\"\n data_x_axis = \"x\"\n use_data_tiles = False\n no_colors_set = False\n color_bar = None\n\n def calculate_source(self, data):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n data = cudf.DataFrame\n -------------------------------------------\n\n Ouput:\n \"\"\"\n self.format_source_data(data)\n\n def format_source_data(self, data):\n \"\"\"\n Description:\n format source\n -------------------------------------------\n Input:\n source_dict = {\n 'X': [],\n 'Y': []\n }\n -------------------------------------------\n\n Ouput:\n \"\"\"\n self.source = data\n if self.x_range is None:\n self.x_range = (\n self.source[self.x].min(),\n self.source[self.x].max(),\n )\n if self.y_range is None:\n # cudf_df[['a','b','c']].min().min() gives min value\n # between all values in columns a,b and c\n\n self.y_range = (\n self.source[self.y].min().min(),\n self.source[self.y].max().max(),\n )\n if isinstance(data, dask_cudf.core.DataFrame):\n self.x_range = dd.compute(*self.x_range)\n self.y_range = dd.compute(*self.y_range)\n\n def generate_InteractiveImage_callback(self):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n\n def viewInteractiveImage(\n x_range, y_range, w, h, data_source, **kwargs\n ):\n dd = data_source[[self.x] + self.y]\n dd[self.x] = self._to_xaxis_type(dd[self.x])\n for _y in self.y:\n dd[_y] = self._to_yaxis_type(dd[_y])\n\n x_range = self._to_xaxis_type(x_range)\n y_range = self._to_yaxis_type(y_range)\n\n cvs = ds.Canvas(\n plot_width=w, plot_height=h, x_range=x_range, y_range=y_range\n )\n aggs = dict((_y, cvs.line(dd, x=self.x, y=_y)) for _y in self.y)\n imgs = [\n tf.shade(aggs[_y], cmap=[\"white\", color])\n for _y, color in zip(self.y, self.colors)\n ]\n return tf.stack(*imgs)\n\n return viewInteractiveImage\n\n def generate_chart(self):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n\n if self.colors == []:\n self.no_colors_set = True\n self.colors = [\"#8735fb\"] * len(self.y)\n\n if len(self.title) == 0:\n self.title = \"Stacked Line plots on x-axis: \" + self.x\n\n self.chart = figure(\n title=self.title,\n toolbar_location=\"right\",\n tools=\"pan, wheel_zoom, reset\",\n active_scroll=\"wheel_zoom\",\n active_drag=\"pan\",\n x_range=self.x_range,\n y_range=self.y_range,\n width=self.width,\n height=self.height,\n **self.library_specific_params,\n )\n\n self.chart.add_tools(BoxSelectTool(dimensions=\"width\"))\n\n if self.legend:\n mapper = LinearColorMapper(\n palette=self.colors, low=1, high=len(self.y)\n )\n self.color_bar = ColorBar(\n color_mapper=mapper,\n location=(0, 0),\n ticker=FixedTicker(ticks=list(range(1, len(self.y) + 1))),\n major_label_overrides=dict(\n zip(list(range(1, len(self.y) + 1)), self.y)\n ),\n major_label_text_baseline=\"top\",\n major_label_text_align=\"left\",\n major_tick_in=0,\n major_tick_out=0,\n )\n self.chart.add_layout(self.color_bar, self.legend_position)\n\n self.chart.xgrid.grid_line_color = None\n self.chart.ygrid.grid_line_color = None\n\n if self.x_axis_tick_formatter:\n self.chart.xaxis.formatter = self.x_axis_tick_formatter\n if self.y_axis_tick_formatter:\n self.chart.yaxis.formatter = 
self.y_axis_tick_formatter\n\n self.interactive_image = InteractiveImage(\n self.chart,\n self.generate_InteractiveImage_callback(),\n data_source=self.source,\n timeout=self.timeout,\n x_dtype=self.x_dtype,\n y_dtype=self.y_dtype,\n )\n\n def update_dimensions(self, width=None, height=None):\n \"\"\"\n Description:\n\n\n Input:\n\n\n\n Ouput:\n \"\"\"\n if width is not None:\n self.chart.plot_width = width\n if height is not None:\n self.chart.plot_height = height\n\n def reload_chart(self, data, patch_update=False):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n if data is not None:\n if len(data) == 0:\n data = cudf.DataFrame({k: cp.nan for k in data.columns})\n self.interactive_image.update_chart(data_source=data)\n\n def add_selection_geometry_event(self, callback):\n \"\"\"\n Description:\n\n -------------------------------------------\n Input:\n\n -------------------------------------------\n\n Ouput:\n \"\"\"\n\n self.chart.on_event(events.SelectionGeometry, callback)\n\n def apply_theme(self, properties_dict):\n \"\"\"\n apply thematic changes to the chart based on the input\n properties dictionary.\n\n \"\"\"\n if self.no_colors_set:\n self.colors = [properties_dict[\"chart_color\"][\"color\"]] * len(\n self.y\n )\n self.interactive_image.update_chart()\n self.chart.xgrid.grid_line_color = properties_dict[\"geo_charts_grids\"][\n \"xgrid\"\n ]\n self.chart.ygrid.grid_line_color = properties_dict[\"geo_charts_grids\"][\n \"ygrid\"\n ]\n\n # title\n self.chart.title.text_color = properties_dict[\"title\"][\"text_color\"]\n self.chart.title.text_font = properties_dict[\"title\"][\"text_font\"]\n self.chart.title.text_font_style = properties_dict[\"title\"][\n \"text_font_style\"\n ]\n self.chart.title.text_font_size = properties_dict[\"title\"][\n \"text_font_size\"\n ]\n if self.legend:\n self.color_bar.major_label_text_color = properties_dict[\"title\"][\n \"text_color\"\n ]\n self.color_bar.title_text_color = properties_dict[\"title\"][\n \"text_color\"\n ]\n\n # background, border, padding\n self.chart.background_fill_color = properties_dict[\n \"background_fill_color\"\n ]\n self.chart.border_fill_color = properties_dict[\"border_fill_color\"]\n self.chart.min_border = properties_dict[\"min_border\"]\n self.chart.outline_line_width = properties_dict[\"outline_line_width\"]\n self.chart.outline_line_alpha = properties_dict[\"outline_line_alpha\"]\n self.chart.outline_line_color = properties_dict[\"outline_line_color\"]\n\n # x axis title\n self.chart.xaxis.major_label_text_color = properties_dict[\"xaxis\"][\n \"major_label_text_color\"\n ]\n self.chart.xaxis.axis_line_width = properties_dict[\"xaxis\"][\n \"axis_line_width\"\n ]\n self.chart.xaxis.axis_line_color = properties_dict[\"xaxis\"][\n \"axis_line_color\"\n ]\n\n # y axis title\n self.chart.yaxis.major_label_text_color = properties_dict[\"yaxis\"][\n \"major_label_text_color\"\n ]\n self.chart.yaxis.axis_line_width = properties_dict[\"yaxis\"][\n \"axis_line_width\"\n ]\n self.chart.yaxis.axis_line_color = properties_dict[\"yaxis\"][\n \"axis_line_color\"\n ]\n\n # axis ticks\n self.chart.axis.major_tick_line_color = properties_dict[\"axis\"][\n \"major_tick_line_color\"\n ]\n self.chart.axis.minor_tick_line_color = properties_dict[\"axis\"][\n \"minor_tick_line_color\"\n ]\n self.chart.axis.minor_tick_out = properties_dict[\"axis\"][\n \"minor_tick_out\"\n ]\n self.chart.axis.major_tick_out = 
properties_dict[\"axis\"][\n \"major_tick_out\"\n ]\n self.chart.axis.major_tick_in = properties_dict[\"axis\"][\n \"major_tick_in\"\n ]\n" ]
[ [ "pandas.cut" ] ]
ilkhem/WDTW
[ "b8bd27a4494aa52acb45ecd2427512e1a682edb7" ]
[ "code/parallel_pool.py" ]
[ "from multiprocessing import Pool, Lock\n\nimport numpy as np\nfrom chainer import cuda\n\nfrom gradient_descent import gradient_descent\nfrom sdtw import soft_dtw, soft_dtw_grad\nfrom sinkhorn import sinkhorn_fb\n\nGPUCOUNT = 7\nGPU_PRIORITY = [2, 6, 0, 1, 4, 5, 3]\n\n\ndef prepare_data(X, Y_list):\n # example of the iterable: [(x, y, gpuid #the id of the gpu, i #index of x in X, j #index of y in Y,\n # k #index of Y in Y_list), for Y in Y_list, for x in X, for y in Y, for gpuid in range(GPUCOUNT)]\n args_list = []\n\n gpuid = 0\n for i in range(X.shape[3]):\n for k, Y in enumerate(Y_list):\n for j in range(Y.shape[3]):\n args_list.append((X[:, :, :, i], Y[:, :, :, j], GPU_PRIORITY[gpuid], i, j, k))\n gpuid += 1\n if gpuid > GPUCOUNT - 1:\n gpuid = 0\n return args_list\n\n\ndef worker(x, y, gpuid, i, j, k):\n print('\\tWorking on gpu %d for indices (%d, %d, %d)' % (gpuid, i, j, k))\n l_ = locks[gpuid]\n with l_: # lock access to GPU gpuid\n cuda.get_device_from_id(gpuid).use() # current process will use GPU gpuid\n xg = cuda.to_gpu(x, device=gpuid) # copy data to current GPU\n yg = cuda.to_gpu(y, device=gpuid) # copy data to current GPU\n Mg, Jg = sinkhorn_fb(xg, yg) # compute sinkhorn distances, and gradient with respect to x\n M, J = cuda.to_cpu(Mg), cuda.to_cpu(Jg)\n return {(i, j, k): (M, J)} # return a dict\n\n\n# access to GPUs should be locked: only one process can access to a GPU at a time\ndef init_locks(l):\n global locks\n locks = l\n\n\ndef _single_gradient_step(X, Y_list):\n x_shape = X.shape\n y_shapes = [Y.shape for Y in Y_list]\n args_list = prepare_data(X, Y_list)\n\n locks = []\n for _ in range(GPUCOUNT):\n locks.append(Lock())\n pool = Pool(maxtasksperchild=1, processes=GPUCOUNT, initializer=init_locks, initargs=(locks,))\n res = pool.starmap(worker, args_list, chunksize=1)\n pool.close()\n pool.join()\n\n M_glob = [np.empty((x_shape[3], y_shape[3])) for y_shape in y_shapes]\n J_glob = [np.empty((*x_shape, y_shape[3])) for y_shape in y_shapes]\n\n for r in res:\n i, j, k = list(r.keys())[0]\n M, J = list(r.values())[0]\n M_glob[k][i, j] = M\n J_glob[k][:, :, :, i, j] = J.squeeze()\n\n final_gradient = np.empty(x_shape)\n final_energy = 0\n for M, J in zip(M_glob, J_glob):\n d, D = soft_dtw(M)\n D_bar = soft_dtw_grad(D)\n print(D_bar.shape, J.shape)\n G = np.stack([J[:, :, :, i, :].dot(D_bar[i]) for i in range(X.shape[3])], axis=-1)\n final_gradient += G\n final_energy += d\n\n return final_energy, final_gradient\n\n\ndef main(X, Y_list, lr, n_g_iter):\n x = X\n\n for _ in range(n_g_iter):\n f, g = _single_gradient_step(x, Y_list)\n print('\\t\\t Gradient Iteration %d: Energy = %f' % (_, f))\n x = gradient_descent(x, g, lr=lr, norm=True)\n\n return x\n\n\nif __name__ == '__main__':\n\n import sys\n from generate_data import generate_nice\n\n try:\n d1 = int(sys.argv[1])\n except:\n d1 = 80\n\n y_ = generate_nice(d1, d1, d1, 4, 5, 1e-6)\n X = y_[:, :, :, :, 0]\n Y_list = [y_[:, :, :, :, i] for i in range(1, y_.shape[4])]\n\n res_dict = main(X, Y_list, lr=0.1, n_g_iter=20)\n print('done')\n" ]
[ [ "numpy.empty" ] ]
Architecton/diplomska-naloga-koda
[ "85ddb01fc0dbf07befa73445da50656068045f29" ]
[ "algorithms/reliefseq.py" ]
[ "import numpy as np\nimport warnings\nfrom algorithms.relieff import Relieff\nfrom scipy.stats import rankdata\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\nclass ReliefSeq(BaseEstimator, TransformerMixin):\n\n \"\"\"sklearn compatible implementation of the ReliefSeq algorithm\n\n Brett A. McKinney, Bill C. White, Diane E. Grill, Peter W. Li, Richard B. Kennedy, Gregory A. Poland, Ann L. Oberg.\n ReliefSeq: A Gene-Wise Adaptive-K Nearest-Neighbor Feature Selection Tool \n for Finding Gene-Gene Interactions and Main Effects in mRNA-Seq Gene Expression Data.\n\n Author: Jernej Vivod\n\n \"\"\"\n\n def __init__(self, n_features_to_select=10, m=-1, k_max=20,\n dist_func=lambda x1, x2: np.sum(np.abs(x1-x2), 1), learned_metric_func=None):\n self.n_features_to_select = n_features_to_select # number of features to select.\n self.m = m # sample size of examples for the ReliefF algorithm\n self.k_max = k_max # maximal k value\n self.dist_func = dist_func # distance function\n self.learned_metric_func = learned_metric_func # learned metric function\n\n\n def fit(self, data, target):\n\n \"\"\"\n Rank features using ReliefSeq feature selection algorithm\n\n Args:\n data : Array[np.float64] -- matrix of examples\n target : Array[np.int] -- vector of target values of examples\n\n Returns:\n self\n \"\"\"\n\n # Get number of instances with class that has minimum number of instances.\n _, instances_by_class = np.unique(target, return_counts=True)\n min_instances = np.min(instances_by_class)\n\n # If class with minimal number of examples has less than k examples, issue warning\n # that parameter k was reduced.\n if min_instances < self.k_max:\n warnings.warn(\"Parameter k_max was reduced to {0} because one of the classes \" \\\n \"does not have {1} instances associated with it.\".format(min_instances, self.k_max), Warning)\n\n\n # Run ReliefSeq feature selection algorithm.\n if self.learned_metric_func != None:\n self.rank, self.weights = self._reliefseq(data, target, self.m, min(self.k_max, min_instances), \n self.dist_func, learned_metric_func=self.learned_metric_func)\n else:\n self.rank, self.weights = self._reliefseq(data, target, self.m, min(self.k_max, min_instances), \n self.dist_func, learned_metric_func=None)\n\n return self\n\n\n def transform(self, data):\n\n \"\"\"\n Perform feature selection using computed feature ranks\n\n Args:\n data : Array[np.float64] -- matrix of examples on which to perform feature selection\n\n Returns:\n Array[np.float64] -- result of performing feature selection\n \"\"\"\n\n # select n_features_to_select best features and return selected features.\n msk = self.rank <= self.n_features_to_select # Compute mask.\n return data[:, msk] # Perform feature selection.\n\n\n def fit_transform(self, data, target):\n\n \"\"\"\n Compute ranks of features and perform feature selection\n Args:\n data : Array[np.float64] -- matrix of examples on which to perform feature selection\n target : Array[np.int] -- vector of target values of examples\n\n Returns:\n Array[np.float64] -- result of performing feature selection\n \"\"\"\n\n self.fit(data, target) # Fit training data.\n return self.transform(data) # Perform feature selection.\n\n\n def _reliefseq(self, data, target, m, k_max, dist_func, learned_metric_func):\n\n \"\"\"Compute feature scores using ReliefSeq algorithm\n\n Args:\n data : Array[np.float64] -- matrix containing examples' data as rows\n target : Array[np.int] -- matrix containing the example's target variable value\n m : int -- Sample size to use 
when evaluating the feature scores\n            k_max : int -- k sweep upper limit\n            dist_func : Callable[[Array[np.float64], Array[np.float64]], Array[np.float64]] -- function for evaluating\n            distances between examples. The function should accept two examples or two matrices of examples and return the distances.\n            learned_metric_func : Callable or None -- function that accepts a distance\n            function and indices of two training examples and returns the distance between the examples in the learned\n            metric space.\n\n        Returns:\n            Array[np.int], Array[np.float64] -- Array of feature enumerations based on the scores, array of feature scores\n\n        \"\"\"\n\n        \n        # Initialize matrix of weights by k.\n        weights_mat = np.empty((data.shape[1], k_max), dtype=float)\n        \n        # Sweep k from 1 to k_max.\n        for k in np.arange(1, k_max+1):\n\n            # Initialize ReliefF algorithm implementation with next value of k.\n            clf = Relieff(m=m, k=k, dist_func=dist_func, learned_metric_func=learned_metric_func)\n\n            # Fit data and target.\n            clf.fit(data, target)\n\n            # Add weights to matrix.\n            weights_mat[:, k-1] = clf.weights\n        \n        # For each feature choose maximum weight over weights by different values of k.\n        weights = np.max(weights_mat, 1)\n\n        # Return feature rankings and weights.\n        return rankdata(-weights, method='ordinal'), weights\n\n\n" ]
[ [ "numpy.abs", "scipy.stats.rankdata", "numpy.min", "numpy.unique", "numpy.arange", "numpy.max", "numpy.empty" ] ]
jchowk/rbcodes
[ "75ca209ec0d64b1d14b50f53081760ee9a6b340b" ]
[ "GUIs/abstools/Absorber.py" ]
[ "\n\n\n'''\nAbsorber \n\nInputs:\nflux; wave; error; linelist; redshift; bin\n\n1st. Asborber will bin the flux,wave and error to clean the data\n2nd. will pull the actual lamd_rest from the atom.dat file for all lines\n-->this (2nd) will return a dictionary of lamd_rest,ion_name,fval,gamma\n\n3rd. Initialized entries for the Vstack plotting\n'''\n\n\nfrom IGM import rb_setline as rb_setline\nimport numpy as np\nimport numpy.polynomial.legendre as L\nc = 299792.458\n\nclass Absorber:\n \n #defining variables to be used in the transition plotting GUI\n def Transition(self,ion_dict,line_dat,wave,flux,error,z,mask,window_lim,nofrills=False):\n # Edit RB May21, 2020: added nofrills keyword to toggle between initializing the continuum fields. \n # Added to avoid issues of calling Absorber class very near to the edge of the detector.\n # Does not apply when calling abstools.\n\n # VARIABLE INITIALIZATION\n ion_dict['f'] = line_dat['fval']\n ion_dict['lam_0'] = line_dat['wave']\n ion_dict['name'] = line_dat['name']\n ion_dict['gamma'] = line_dat['gamma']\n ion_dict['z'] = z\n ion_dict['window_lim'] = window_lim \n\n '''Shifting to vel-space centered on lam_o'''\n ion_dict['lam_0_z'] = ion_dict['lam_0']*(1+z)\n ion_dict['vel'] = (wave-ion_dict['lam_0_z'])/ion_dict['lam_0_z']*c\n\n '''create window for flux,wave,error based on max and min velocity'''\n window = (ion_dict['vel']>window_lim[0]) & (ion_dict['vel']<window_lim[1])\n ion_dict['flux'] = flux[window]; ion_dict['wave']=wave[window]\n ion_dict['error'] = error[window]; ion_dict['vel'] = ion_dict['vel'][window]\n\n '''Initial Polyfit assuming a masked region of -200:200 and polyorder=4\n cont= continuum, pco= polynomial coefficients for cont fitting; weight= parameter to fit polys\n order = order of polynomial\n\n lets also give each ion object the Legendre function for ease of use during plotting'''\n\n if nofrills==False:\n ion_dict['wc'] = ((ion_dict['vel']<mask[0])|(ion_dict['vel']>mask[1])) &(ion_dict['error'] != 0) #error != 0 is a bad pixel mask\n ion_dict['weight'] = 1/(ion_dict['error']**2)\n ion_dict['order'] = 4 #order of poly fit\n ion_dict['pco']=L.Legendre.fit(ion_dict['wave'][ion_dict['wc']],ion_dict['flux'][ion_dict['wc']],ion_dict['order'],w=ion_dict['weight'][ion_dict['wc']])\n ion_dict['cont'] = ion_dict['pco'](ion_dict['wave'])\n\n\n '''Property initializations:'''\n ion_dict['N']=None; ion_dict['Nsig']=None; ion_dict['EW']=None; ion_dict['EWsig']=None\n ion_dict['med_vel'] = None; ion_dict['EWlims'] = [mask[0],mask[1]]; ion_dict['flag'] = 0\n #for text\n ion_dict['EW_text'] = None\n \n \n \n def __init__(self,z,wave,flux,error,lines=None,mask_init=[-200,200],window_lim=[-1000,1000],load_file = False,nofrills=False):\n mask = mask_init\n self.z =z\n self.ions = {}\n\n if lines:\n for line in lines:\n line_dat = rb_setline.rb_setline(line,method='closest')\n \n #if using Transition class uncomment below line. 
Also comment transition def, while uncommenting transition class, comment out lines 80-82\n #self.ions[line_dat['name']] =Transition(line_dat,wave,flux,error,self.z,mask_init,window_lim)\n \n self.ions[line_dat['name']] = {}\n ion_dict = self.ions[line_dat['name']]\n self.Transition(ion_dict,line_dat,wave,flux,error,z,mask,window_lim,nofrills=nofrills)\n \n #last dictionary item for full spectra data \n self.ions['Target'] = {}\n self.ions['Target']['flux'] = flux\n self.ions['Target']['wave'] = wave\n self.ions['Target']['error'] = error\n self.ions['Target']['z'] = z\n else:\n print('Input Linelist and rerun')\n \n \n \n \n \n# '''Initializing the properties of each specific transition''' \n# class Transition(object):\n# def __init__(self,line_dat,wave,flux,error,z,mask,window_lim):\n# #line_dat has properties lam_o, fval,name,gamma\n# self.gamma = line_dat['gamma']\n# self.f = line_dat['fval']\n# self.lam_0 = line_dat['wave']\n# self.name = line_dat['name']\n# self.z = z\n# self.window_lim = window_lim \n \n# '''Shifting to vel-space centered on lam_o'''\n# self.lam_0_z = self.lam_0*(1+z)\n# self.vel = (wave-self.lam_0_z)/self.lam_0_z*c\n \n# '''create window for flux,wave,error based on max and min velocity'''\n# vmin,vmax = -1000,1000\n# window = (self.vel>vmin) & (self.vel<vmax)\n# self.flux = flux[window]; self.wave=wave[window]\n# self.error = error[window]; self.vel = self.vel[window]\n \n# '''Initial Polyfit assuming a masked region of -200:200 and polyorder=4\n# cont= continuum, pco= polynomial coefficients for cont fitting; weight= parameter to fit polys\n# order = order of polynomial\n \n# lets also give each ion object the Legendre function for ease of use during plotting'''\n# #self.L = L\n# #wc = ((self.vel<mask[0])|(self.vel>mask[1]))\n# self.wc = ((self.vel<mask[0])|(self.vel>mask[1]))\n# self.weight = 1/(self.error**2)\n# self.order = 4 #order of poly fit\n# self.pco=L.Legendre.fit(self.wave[self.wc],self.flux[self.wc],self.order,w=self.weight[self.wc])\n# self.cont = self.pco(self.wave)\n# #resett the wc so the plotter can mask as many absorbers as needed for fixing\n# self.wc = [True]*len(self.wc)\n \n\n# '''Property initializations:'''\n# self.N=None; self.Nsig=None; self.EW=None; self.EWsig=None\n# self.med_vel = None; self.EWlims = [None,None]; self.flag = 0\n# #for text\n# self.EW_text = None\n" ]
[ [ "numpy.polynomial.legendre.Legendre.fit" ] ]
delwp-species-modelling/frontend
[ "70b2ffeac5cd28b4d384123bf894da9361fe41e6" ]
[ "ai/lib/filter_columns.py" ]
[ "\"\"\"\n lib/filter_columns.py\n\n FIT3162 - Team 10 - Final Year Computer Science Project\n Copyright Luke Silva, Aichi Tsuchihira, Harsil Patel 2019\n\n Script to filter columns to drop the unwanted column, rename the existing columns\n for legibility and merge the multiple reliability columns into a single one.\n\"\"\"\n\nimport sys\n\ntry:\n import pandas as pd\n import numpy as np\nexcept:\n print(\n \"Failed to load packages, make sure your conda environment is setup correctly\"\n )\n sys.exit(1)\n\n\ndef is_reliable(row):\n \"\"\"\n Method that operates on row combine multiple reliability columns into one\n :param row: A row of the dataset\n :return: An integer value stating the reliability of the row\n \"\"\"\n isReliable = not (\n row[\"RELIABILITY\"] in (np.nan, \"Unconfirmed\") and row[\"RATING_INT\"] in (2, 4)\n )\n\n return int(isReliable)\n\n\ndef filter_columns(data):\n \"\"\"\n Method to filter dataset's columns by dropping, renaming and merging them\n :param data: dataset whose columns are to be filtered\n :return: dataset with filtered columns\n \"\"\"\n COLUMNS_TO_KEEP = [\"is_reliable\", \"latitude\", \"longitude\"]\n data[\"is_reliable\"] = data.apply(is_reliable, axis=1)\n return data.rename(\n columns={\"LATITUDEDD_NUM\": \"latitude\", \"LONGITUDEDD_NUM\": \"longitude\"}\n ).loc[:, COLUMNS_TO_KEEP]\n\n\n# sv record count, record type\n\nif __name__ == \"__main__\":\n data = pd.read_csv(\"../../dataset/Agile_Antechinus.csv\")\n print(data.columns)\n\n ndata = filter_columns(data)\n print(ndata.columns)\n print(ndata[:5])\n" ]
[ [ "pandas.read_csv" ] ]
bigb8/coviddanmark
[ "927dfd7c0735edd8f2b7536a32f7ec9bd8166842" ]
[ "lib/readdataexample.py" ]
[ "import os\nimport sys\nimport datetime\n\nimport numpy as np\n\n\n\n#Example of reading data. Hospitalized patients in DK.\n\n\n# Initiation\n#Path to lib folder\nlibpath = os.path.realpath(__file__).split(\"plotter.\")[0] + os.sep\n\n#Path to data folder\ndatap = libpath.split(\"lib\")[0]+ os.sep + \"data DK\" + os.sep\n\n\n\n#List for reading data\nhosp = [] # Hospitalized\nyearday = [] #Day of year\n\n\nwith open(datap + \"hosp.txt\",'r') as f: # Open file\n for i,l in enumerate(f.readlines()): # Loop file\n if i == 0:\n pass #Header - skip it\n else:\n\n data = l.split(\";\") #Split line using \";\" delimiter\n hosp.append(int(data[0])) # Append datapoint to list\n\n #Get date for x axis\n y = int(data[1][:4])\n m = int(data[1][4:6])\n d = int(data[1][6:8])\n\n mydate = datetime.datetime(y, m, d, 12, 00, 00)\n dayofyear = mydate.timetuple().tm_yday\n yearday.append(dayofyear)\n\n\n\n\n#convert to Numpy\nhosp = np.array(hosp)\nyearday = np.array(yearday)\n\n\n\n\nprint(hosp,yearday)\n" ]
[ [ "numpy.array" ] ]
NMNS93/snakepot
[ "58640b5c0e8bc99338f9e4e25552669da763b063" ]
[ "src/train_val_pred.py" ]
[ "\"\"\"train_val_pred.py - Split dataframe based on target variable\"\"\"\n\nimport sys\nimport os\nfrom src.log import Logger\nlog = Logger('clean')\n\nimport argparse\nimport category_encoders as ce\nimport pandas as pd\n\nclass DataSplitter():\n def __init__(self, df, target, target_1, to_predict, perc_split):\n self.df = df\n self.target = target\n assert df[target].nunique() == 3\n self.target_1 = target_1\n self.to_predict = to_predict\n self.perc_split = perc_split\n self.encoder = ce.OneHotEncoder(cols=[target], use_cat_names=True, return_df=True, drop_invariant=True)\n self.encoded = self.encoder.fit_transform(self.df)\n\n def get_test_train(self):\n df = self._get_labelled()\n # Get dataframes for each of the binary outputs\n df_targ1 = df[df[self.target] == 1] \n df_targ0 = df[df[self.target] == 0]\n # Split each dataframe by the input fraction. Test dataset contains the percentage split\n df_targ1_test, df_targ1_train = self._perc_splitter(df_targ1, self.perc_split)\n df_targ0_test, df_targ0_train = self._perc_splitter(df_targ0, self.perc_split)\n # Combine training and test datasets\n test = pd.concat([df_targ1_test, df_targ0_test])\n training = pd.concat([df_targ1_train, df_targ0_train])\n return (test, training)\n\n def _get_labelled(self):\n col_to_predict = f'{self.target}_{self.to_predict}'\n col_to_train = f'{self.target}_{self.target_1}'\n encoded_target_1 = self.encoded[col_to_train]\n to_train_bool = (self.encoded[col_to_predict] == 0)\n df_to_train = self.df[to_train_bool].drop(columns=[self.target])\n df_to_train[self.target] = encoded_target_1[to_train_bool]\n return df_to_train.sample(frac=1,random_state=42)\n\n def get_to_predict(self):\n col_to_predict = f'{self.target}_{self.to_predict}'\n to_predict_bool = (self.encoded[col_to_predict] == 1)\n df_to_predict = self.df[to_predict_bool].drop(columns=[self.target])\n return df_to_predict.sample(frac=1, random_state=42)\n\n def _perc_splitter(self, df, perc):\n \"\"\"Splits a dataframe (df) into two by some fraction (perc).\n Returns:\n split_data(tuple): split_by_perc, data_remainder\"\"\"\n # Split the dataframe by percentage\n split_by_perc = df.sample(frac=perc)\n # Get the remainder dataframe using the split data index\n data_remainder = df.drop(index=split_by_perc.index)\n # Return result\n return split_by_perc, data_remainder\n\n\ndef main():\n # Read data\n parser = argparse.ArgumentParser()\n for argument in ['--cleaned', '--target_column', '--target_1', '--target_0', '--to_predict']:\n parser.add_argument(argument)\n parser.add_argument('--perc_split', type=float)\n args = parser.parse_args()\n\n log.info('BEGIN')\n\n # Read data\n cleaned = pd.read_csv(args.cleaned, index_col=0)\n\n # Encode target\n log.info('Encoding data and asserting 3 unique values in target column')\n ds = DataSplitter(cleaned, args.target_column, args.target_1, args.to_predict, args.perc_split)\n \n # Split and write validation data\n log.info('Getting unlabelled dataset')\n unlabelled = ds.get_to_predict()\n unlabelled.to_csv('unlabelled.csv')\n\n # Get test and train data. Write to output files.\n log.info('Getting training and test datasets')\n test, training = ds.get_test_train()\n training.to_csv('training.csv')\n test.to_csv('test.csv')\n\n log.info('END')\n\nif __name__ == '__main__':\n main()" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
stroblme/hqsp-stqft
[ "c2f8f8964648578755d3938bf8658e4c834548e8" ]
[ "frontend.py" ]
[ "from matplotlib import colors\r\nimport matplotlib as mpl\r\nimport numpy as np\r\nfrom numpy import pi, string_\r\nfrom scipy import signal as scipySignal\r\nimport matplotlib.pyplot as plt\r\nfrom math import log, floor\r\nfrom copy import deepcopy\r\nimport os\r\nimport pickle\r\nimport git\r\nfrom cycler import cycler\r\n# from matplotlib.gridspec import GridSpec\r\n\r\nimport librosa\r\nimport librosa.display\r\n\r\nfrom qbstyles import mpl_style\r\n\r\nclass frontend():\r\n # COLORMAP = 'gist_ncar'\r\n COLORMAP = 'twilight'\r\n SHADING='nearest'\r\n MAIN='#06574b'\r\n WHITE='#FFFFFF'\r\n GRAY='#BBBBBB'\r\n HIGHLIGHT='#9202e1'\r\n LIGHTGRAY='#EEEEEE'\r\n\r\n DARK=False\r\n clickEventHandled = True\r\n\r\n @staticmethod\r\n def enableInteractive():\r\n global plt\r\n plt.ion()\r\n\r\n @staticmethod\r\n def disableInteractive():\r\n global plt\r\n plt.ioff()\r\n\r\n @staticmethod\r\n def setTheme(dark=DARK):\r\n frontend.DARK = dark\r\n\r\n mpl_style(dark=frontend.DARK, minor_ticks=False)\r\n\r\n @staticmethod\r\n def primeTime():\r\n \r\n\r\n plt.show()\r\n frontend.disableInteractive()\r\n input(\"Press any key to close all figures\\n\")\r\n plt.close('all')\r\n\r\n @staticmethod\r\n def on_click(event):\r\n '''\r\n Taken from: https://stackoverflow.com/questions/9012081/matplotlib-grab-single-subplot-from-multiple-subplots\r\n '''\r\n\r\n if not frontend.clickEventHandled:\r\n return\r\n\r\n ax = event.inaxes\r\n if ax is not None:\r\n # Occurs when a region not in an axis is clicked...\r\n if int(event.button) == 1:\r\n # On left click, zoom the selected axes\r\n ax._orig_position = ax.get_position()\r\n ax.set_position([0.1, 0.1, 0.85, 0.85])\r\n for axis in event.canvas.figure.axes:\r\n # Hide all the other axes...\r\n if axis is not ax:\r\n axis.set_visible(False)\r\n event.canvas.draw()\r\n elif int(event.button) == 2:\r\n ax.remove()\r\n # ax.set_visible(False)\r\n event.canvas.draw()\r\n\r\n pass\r\n elif int(event.button) == 3:\r\n # On right click, restore the axes\r\n try:\r\n ax.set_position(ax._orig_position)\r\n for axis in event.canvas.figure.axes:\r\n axis.set_visible(True)\r\n except AttributeError as e:\r\n # If we haven't zoomed, ignore...\r\n print(e.with_traceback())\r\n pass\r\n\r\n event.canvas.draw()\r\n\r\n frontend.clickEventHandled = True\r\n\r\n def _show(self, yData:np.array, x1Data:np.array, title:str, xlabel:str, ylabel:str, x2Data:np.array=None, subplot:tuple=None, plotType:str='stem', log:bool=False, sr=None, xticks=None):\r\n # fighandle = plt.figure()\r\n SMALL_SIZE = 10\r\n MEDIUM_SIZE = 12\r\n BIGGER_SIZE = 14\r\n\r\n plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes\r\n plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title\r\n plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\r\n plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\r\n plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\r\n plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize\r\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\r\n\r\n\r\n mpl.rcParams['axes.prop_cycle'] = cycler('color',[frontend.MAIN, frontend.HIGHLIGHT])\r\n\r\n fig = plt.gcf()\r\n if subplot is not None:\r\n plt.subplot(*subplot,frameon=False)\r\n plt.subplots_adjust(wspace=0.58, top=0.9, left=0.081, bottom=0.16)\r\n fig.set_size_inches(3*int(subplot[1]),int(subplot[0])*4)\r\n else:\r\n # plt.xticks(fontsize=14)\r\n # plt.yticks(fontsize=14)\r\n plt.subplots_adjust(left=0.15, right=0.95, top=0.92)\r\n 
fig.set_size_inches(6,6)\r\n # plt.figure(figsize = (10, 6))\r\n\r\n fig.canvas.mpl_connect('button_press_event', frontend.on_click)\r\n plt.tight_layout()\r\n\r\n ax = plt.gca()\r\n \r\n if plotType == 'librosa' and sr!=None:\r\n plt.subplots_adjust(left=0.15, bottom=0.145, right=0.96, top=0.92)\r\n img = librosa.display.specshow(yData, x_axis='time', y_axis='linear', sr=sr, fmax=sr/2, ax=ax, cmap=frontend.COLORMAP)\r\n fig.colorbar(img, ax=ax, format='%+2.0f dB')\r\n\r\n else:\r\n if x2Data is None:\r\n if log and plotType != 'box':\r\n ax.set_yscale('log')\r\n plt.autoscale(False)\r\n if type(yData) == np.ndarray:\r\n plt.ylim(max(min(yData.min()*0.92,0.1),0.01),1)\r\n else:\r\n plt.ylim(max(min(min(yData)*0.92,0.1),0.01),1)\r\n plt.xlim(min(x1Data), max(x1Data))\r\n\r\n if plotType == 'stem':\r\n plt.stem(x1Data, yData, linefmt=frontend.MAIN, markerfmt=\"C1o\")\r\n elif plotType == 'box':\r\n ax.boxplot(yData)\r\n else:\r\n plt.plot(x1Data, yData, 'o--')\r\n\r\n else:\r\n # ax = plt.gca()\r\n m = plt.pcolormesh(x2Data, x1Data, yData, cmap=frontend.COLORMAP, shading=frontend.SHADING,linewidth=0, rasterized=True)\r\n # ax.set_rasterized(True)\r\n\r\n if xticks != None:\r\n ax.set_xticks(xticks)\r\n plt.title(title)\r\n plt.xlabel(xlabel)\r\n plt.ylabel(ylabel)\r\n\r\n return {'x1Data':x1Data, 'yData':yData, 'x2Data':x2Data, 'subplot':subplot, 'plotType':plotType, 'log':log, 'xlabel':xlabel, 'ylabel':ylabel, 'title':title}\r\n\r\nclass signal(frontend):\r\n def __init__(self, samplingRate:int=40, amplification:int=1, duration:int=2, nSamples:int=80, signalType:str='sin', path:str='') -> None:\r\n \"\"\"Signal Init\r\n\r\n Args:\r\n samplingRate (int, optional): [description]. Defaults to 40.\r\n amplification (int, optional): [description]. Defaults to 1.\r\n duration (int, optional): Duration of the created signal. Defaults to 2.\r\n nSamples ([type], optional): Sample length of the signal. Defaults to 80.\r\n \"\"\"\r\n # Set the class attributes\r\n self.amplification = amplification\r\n self.setSamplingRate(samplingRate)\r\n self.frequencies = list()\r\n self.phases = list()\r\n\r\n self.signalType = signalType\r\n \r\n # Set the number of samples based on duration and target num of samples such that it matches 2**n\r\n self.setNSamples(duration, nSamples)\r\n\r\n\r\n if signalType=='file':\r\n assert path!=''\r\n self.loadFile(path)\r\n\r\n self.lockSampling = True\r\n else:\r\n # Create the signal\r\n self.createEmptySignal(self.nSamples)\r\n\r\n self.lockSampling = False\r\n\r\n print(f\"Signal duration set to {self.duration}s, resulting in {self.nSamples} samples\")\r\n print(f\"Sampling Rate is {self.samplingRate} with an amplification of {self.amplification}\")\r\n self.t = np.arange(0,self.duration,self.samplingInterval)\r\n self.f = None\r\n\r\n def createEmptySignal(self, nSamples):\r\n self.y = np.zeros(nSamples)\r\n\r\n\r\n def loadFile(self, path, zeroPadding=True):\r\n \"\"\"Loads an audio sample from file\r\n\r\n Args:\r\n path ([type]): [description]\r\n zeroPadding (bool, optional): [description]. Defaults to True.\r\n \"\"\"\r\n samplingRate = librosa.get_samplerate(path)\r\n if samplingRate < self.samplingRate:\r\n print(f'Warning: provided sampling rate ({self.samplingRate}) is higher than the one of the audio ({samplingRate}). Will upsample.')\r\n elif samplingRate > self.samplingRate:\r\n print(f'Warning: provided sampling rate ({self.samplingRate}) is lower than the one of the audio ({samplingRate}). 
Will downsample.')\r\n \r\n duration = librosa.get_duration(filename=path)\r\n if duration < self.duration:\r\n if zeroPadding:\r\n print(f'Audio is not long enough ({duration}). Will use zero-padding to fill up distance to {self.duration}')\r\n\r\n self.createEmptySignal(self.nSamples)\r\n\r\n y_p, _ = librosa.load(path, sr=self.samplingRate)\r\n\r\n self.y[0:y_p.size] = y_p * self.amplification\r\n return\r\n else:\r\n self.setNSamples(duration=duration, nSamples=0)\r\n\r\n self.y, _ = librosa.load(path, sr=self.samplingRate, duration=self.duration) * self.amplification\r\n\r\n\r\n # mel_feat = librosa.feature.melspectrogram(y, sr=sr, n_fft=1024, hop_length=128, power=1.0, n_mels=60, fmin=40.0, fmax=sr/2)\r\n # all_wave.append(np.expand_dims(mel_feat, axis=2))\r\n # all_label.append(label)\r\n\r\n def setSamplingRate(self, samplingRate:int):\r\n \"\"\"Sets the sampling rate for the current signal instance\r\n\r\n Args:\r\n samplingRate ([type]): [description]\r\n \"\"\"\r\n self.samplingRate = samplingRate\r\n self.samplingInterval = 1/self.samplingRate\r\n\r\n def setNSamples(self, duration:int=2, nSamples:int=80):\r\n # Either use the duration or the number of samples depending on what's longer\r\n t_max = max(duration, nSamples*self.samplingInterval)\r\n\r\n # Get the closest min. int which is a power of 2\r\n nSamples = int(t_max/self.samplingInterval)\r\n nSamples_log2_min = floor(log(nSamples, 2))\r\n\r\n # Update the number of samples and the duration based on the previous modifications\r\n self.nSamples = 2**nSamples_log2_min\r\n self.duration = self.nSamples*self.samplingInterval\r\n\r\n return self.duration\r\n\r\n def addFrequency(self, frequency:float, phase:int=0):\r\n if frequency > self.samplingRate/2:\r\n print(\"WARNING: Nyquist not fulfilled!\")\r\n \r\n self.frequencies.append(frequency)\r\n self.phases.append(phase)\r\n\r\n def externalSample(self, y, t, f=None):\r\n self.y = y\r\n self.t = t\r\n self.f = f\r\n self.setNSamples(0,t.size)\r\n self.lockSampling=True\r\n\r\n def split(self, nSamplesWindow:int, overlapFactor:float=0, windowType:str=None):\r\n self.sample()\r\n\r\n if windowType == 'hanning':\r\n window = np.hanning(nSamplesWindow)\r\n if overlapFactor!=0.5: print(\"Suggest an overlap factor of 0.5 in combination with hanning window\")\r\n elif windowType == 'hamming':\r\n window = np.hamming(nSamplesWindow)\r\n if overlapFactor!=0.5: print(\"Suggest an overlap factor of 0.5 in combination with hamming window\")\r\n elif windowType == 'blackman':\r\n window = np.blackman(nSamplesWindow)\r\n if overlapFactor!=0.5: print(\"Suggest an overlap factor of 0.5 in combination with hamming window\")\r\n elif windowType == 'kaiser':\r\n window = np.kaiser(nSamplesWindow, 8.6-overlapFactor*5.2) #starting from 8.6=blackman over 6=hanning and 5=hamming downtp 0=rect\r\n print(f\"Using {8.6-overlapFactor*5.2} as beta value for window type 'kaiser'\")\r\n \r\n else:\r\n window = 1.\r\n\r\n hopSize = np.int32(np.floor(nSamplesWindow * (1-overlapFactor)))\r\n nParts = np.int32(np.ceil(len(self.y) / np.float32(hopSize)))\r\n \r\n y_split_list = list()\r\n\r\n for i in range(0,nParts-1): # -1 because e.g with an overlap of 0.5 we will get 2*N - 1 segments\r\n currentHop = hopSize * i # figure out the current segment offset\r\n \r\n segment = self.y[currentHop:currentHop+nSamplesWindow] # get the current segment\r\n \r\n #usefull when splitting and overlapping overshoots the available samples\r\n if segment.size < window.size:\r\n segment = self.y[-nSamplesWindow:]\r\n\r\n 
windowed = segment * window # multiply by the half cosine function\r\n \r\n y = deepcopy(self)\r\n y.externalSample(windowed, self.t[currentHop:currentHop+nSamplesWindow])\r\n y_split_list.append(y)\r\n\r\n print(f\"Signal divided into {nParts-1} parts with a window length of {nSamplesWindow} each\")\r\n\r\n\r\n return y_split_list\r\n\r\n def sample(self):\r\n if self.lockSampling:\r\n return self.y\r\n\r\n self.y = np.zeros(self.nSamples)\r\n if self.signalType == 'sin':\r\n for frequency, phase in zip(self.frequencies, self.phases):\r\n self.y += self.amplification*np.sin(2*np.pi*frequency*self.t-phase)\r\n elif self.signalType == 'chirp':\r\n f0 = -1\r\n f1 = -1\r\n\r\n for frequency, phase in zip(self.frequencies, self.phases):\r\n if f0 == -1:\r\n f0 = frequency\r\n elif f1 == -1 and f0 != -1:\r\n f1 = frequency\r\n if f0 != -1 and f1 != -1:\r\n self.y += self.amplification*scipySignal.chirp(self.t, f0=f0, f1=f1, t1=phase, method='linear')\r\n f0 = -1\r\n f1 = -1\r\n else:\r\n print('Must be either sin, chirp')\r\n return self.y\r\n \r\n def show(self, subplot=None, ignorePhaseShift:bool=False, xlabel:str=\"Time (s)\", ylabel:str=\"Amplitude\", title:str=\"\"):\r\n\r\n if self.signalType=='file':\r\n minSamples = self.y.size-1 # Use all samples\r\n else:\r\n self.sample() # Only sample if not file, as data is assumed to be loaded\r\n minF = min(self.frequencies)\r\n maxP = max(self.phases) if not ignorePhaseShift else 0\r\n maxT = (1/minF + maxP)*2 if minF != 0 else self.duration\r\n minSamples = int(maxT*self.samplingRate)\r\n xData = self.t[:minSamples]\r\n yData = self.y[:minSamples]\r\n\r\n if title==\"\":\r\n title=type(self).__name__\r\n\r\n return self._show(yData, xData, title, xlabel, ylabel, subplot=subplot, plotType=\"plot\")\r\n\r\nclass transform(frontend):\r\n def __init__(self, transformation, **kwargs):\r\n # allow getting called with none to access internal tools\r\n if transformation == None:\r\n print(\"Warning: Transformation called with 'None' parameter. 
Use with caution!\")\r\n self.transformation = None\r\n else:\r\n self.transformation = transformation(**kwargs)\r\n\r\n def forward(self, y, **kwargs):\r\n y_hat = self.transformation.transform(y, **kwargs)\r\n\r\n # n = np.arange(y_hat.shape[0])\r\n # F = y_hat.shape[0]/y.samplingRate\r\n # f = n/F\r\n f = self.calcFreqArray(y, y_hat)\r\n\r\n if len(y_hat.shape) == 2:\r\n # n = np.arange(y_hat.shape[1])\r\n # T = y_hat.shape[1]/y.duration\r\n # t = n/T\r\n t = self.calcTimeArray(y, y_hat)\r\n\r\n return y_hat, f, t\r\n else:\r\n return y_hat, f\r\n\r\n def backward(self, y_hat, **kwargs):\r\n y = self.transformation.transformInv(y_hat, **kwargs)\r\n\r\n return y, y_hat.t\r\n\r\n def calcFreqArray(self, y, y_hat):\r\n n = np.arange(y_hat.shape[0])\r\n F = y_hat.shape[0]/y.samplingRate\r\n f = n/F\r\n\r\n return f\r\n\r\n def calcTimeArray(self, y, y_hat):\r\n n = np.arange(y_hat.shape[1])\r\n T = y_hat.shape[1]/y.duration\r\n t = n/T\r\n\r\n return t\r\n\r\n def postProcess(self, y_hat, f, t=None, scale=None, autopower=True, normalize=True, fmin=None, fmax=None, samplingRate=None, nMels=None, fOut=None):\r\n if autopower:\r\n y_hat = np.float32(np.abs(y_hat))\r\n # y_hat = np.abs(y_hat)\r\n n = y_hat.shape[0]//2\r\n f = f[:n]\r\n # y_hat =(y_hat[:n]/n if t is None else y_hat[:n,:]/n) \r\n y_hat =(y_hat[:n] if t is None else y_hat[:n,:]) \r\n\r\n if fmax != None:\r\n if fmax >= f.max():\r\n print(f\"f_max {fmax} is not lower than the actual max frequency {f.max()}\")\r\n else:\r\n f_idx = int(np.where(f>fmax)[0][0])\r\n f = f[:f_idx]\r\n y_hat = y_hat[:f_idx,:] \r\n\r\n\r\n if scale == 'log':\r\n y_hat = 20*np.log10(y_hat)\r\n # plt.yscale('log',base=2)\r\n elif scale == 'mel':\r\n fSize = f.size if not autopower else f.size*2\r\n\r\n # apply mel filters with normalization:\r\n # np.inf -> max()=1\r\n # 1 -> max()=\r\n mel_basis = librosa.filters.mel(samplingRate, fSize, n_mels=nMels, fmin=fmin, fmax=fmax, norm=np.inf)\r\n\r\n y_hat = np.dot(mel_basis[:,1:], y_hat)\r\n f = np.dot(mel_basis[:,1:], f)\r\n # y_hat = 1127*np.log10(1+y_hat/700) # mel scale formula\r\n # plt.yscale('log',base=2)\r\n if normalize:\r\n y_hat = y_hat*(1/y_hat.max()) if y_hat.max() != 0 else y_hat\r\n # y_hat = y_hat*(1/sqrt(y_hat.shape[0]))\r\n\r\n if t is None:\r\n return y_hat, f\r\n else:\r\n return y_hat, f, t\r\n\r\n def swapaxes(self, y_hat):\r\n return np.swapaxes(y_hat, 0, 1)\r\n\r\n def show(self, yData, x1Data, x2Data=None, subplot=None, title=\"\", xlabel='', ylabel=''):\r\n # fighandle = plt.figure()\r\n\r\n\r\n if x2Data is None:\r\n yData = np.abs(yData)\r\n if xlabel == \"\":\r\n xlabel = 'Frequency (Hz)'\r\n if ylabel == \"\":\r\n ylabel = 'Amplitude (abs)'\r\n else:\r\n if xlabel == \"\":\r\n xlabel ='Time (s)'\r\n if ylabel == \"\":\r\n ylabel ='Frequency (Hz)'\r\n \r\n if title==\"\":\r\n title = type(self.transformation).__name__\r\n \r\n return self._show(yData, x1Data, title, xlabel, ylabel, x2Data=x2Data, subplot=subplot)\r\n\r\n\r\nclass grader(frontend):\r\n epsilon=1e-10\r\n def __init__(self):\r\n self.yValues = np.array([])\r\n self.xValues = np.array([])\r\n\r\n def correlate2d(self, a, b):\r\n y_hat_diff = scipySignal.correlate2d(a, b, mode='same')\r\n\r\n return y_hat_diff\r\n\r\n def calculateNoisePower(self, y, y_ref):\r\n diff = np.abs(np.power(y,2)-np.power(y_ref,2))\r\n\r\n # snr = np.divide(np.sum(np.abs(y_ref)),np.sum(diff)+self.epsilon)\r\n # snr = 1-(1/np.sum(np.power(y_ref,2)) * np.sum(diff))\r\n snr = 1-(1/len(y_ref) * np.sum(diff))\r\n\r\n return snr\r\n return 
10*np.log10(snr)\r\n\r\n def log(self, ylabel, xlabel):\r\n self.yValues = np.append(self.yValues, [ylabel])\r\n self.xValues = np.append(self.xValues, [xlabel]) \r\n\r\n def show(self, subplot=None):\r\n yData = self.yValues\r\n x1Data = self.xValues\r\n title = 'Grader'\r\n xlabel = 'Tick'\r\n ylabel = 'g'\r\n x2Data = None\r\n\r\n return self._show(yData, x1Data, title, xlabel, ylabel, x2Data=x2Data, subplot=subplot, plotType='plot', log=True)\r\n\r\n\r\nclass export():\r\n DATADIRECTORY = './data'\r\n\r\n TOPIC = \"topic\"\r\n DESCRIPTION = \"description\"\r\n IDENTIFIER = \"identifier\"\r\n BACKEND = \"backend\"\r\n JOBRESULT = \"jobresult\"\r\n FILTERRESULT = \"filteresult\"\r\n QCCIRCUIT = \"qccircuit\"\r\n SIGNAL = \"SIGNAL\"\r\n SIGNALPARAM = \"signalparam\"\r\n TRANSFORMPARAM = \"transformparam\"\r\n PLOTDATA = \"plotdata\"\r\n PLOTPARAM = \"plotparam\"\r\n GRADERX = \"graderx\"\r\n GRADERY = \"gradery\"\r\n GITHASH = \"githash\"\r\n GENERICDATA = \"gendata\"\r\n\r\n\r\n def __init__(self, topic=None, identifier=None, dataDir=DATADIRECTORY) -> None:\r\n self.details = dict()\r\n\r\n if topic is not None:\r\n self.setData(self.TOPIC, topic)\r\n if identifier is not None:\r\n self.setData(self.IDENTIFIER, identifier)\r\n\r\n self.DATADIRECTORY = dataDir\r\n\r\n\r\n def setData(self, dkey, data):\r\n self.details[dkey] = data\r\n\r\n def setParam(self, dkey, **kwargs):\r\n self.details[dkey] = dict()\r\n\r\n for key, value in kwargs.items():\r\n self.details[dkey][key] = value\r\n\r\n def nsa(self, fhandle, **params):\r\n self.setParam(self, dkey=type(fhandle).__name__, kwargs=params)\r\n\r\n return fhandle(**params)\r\n\r\n def getBasePath(self):\r\n path = self.DATADIRECTORY + \"/\" + self.details[self.TOPIC] + \"/\" + self.details[self.IDENTIFIER]\r\n return path\r\n\r\n def createTopicOnDemand(self):\r\n content = os.listdir(self.DATADIRECTORY)\r\n\r\n topic = self.details[self.TOPIC]\r\n\r\n for c in content:\r\n if c == topic:\r\n print(f\"Topic {topic} already exists in {self.DATADIRECTORY}\")\r\n return\r\n\r\n try:\r\n os.mkdir(self.DATADIRECTORY+\"/\"+topic)\r\n print(f\"Folder {topic} created in {self.DATADIRECTORY}\")\r\n except Exception as e:\r\n print(e)\r\n\r\n @staticmethod\r\n def checkWorkingTree(dataDir=DATADIRECTORY):\r\n try:\r\n repo = git.Repo(path=dataDir)\r\n except FileNotFoundError:\r\n print(\"Invalid directory\")\r\n return\r\n except git.InvalidGitRepositoryError:\r\n print(\"Try to initialize this directory as a git repo first\")\r\n return\r\n\r\n export.DATADIRECTORY=dataDir\r\n\r\n try:\r\n hcommit = repo.head.commit\r\n except ValueError:\r\n print(\"Try to make a commit in this repository first\")\r\n return\r\n\r\n d = hcommit.diff(None)\r\n if len(d) > 0:\r\n input(f\"Working Tree in {export.DATADIRECTORY} is dirty. You might want to commit first. Press any key to continue regardless\")\r\n\r\n\r\n def getGitCommitId(self):\r\n repo = git.Repo(search_parent_directories=True)\r\n sha = repo.head.object.hexsha\r\n self.details[self.GITHASH] = sha\r\n\r\n def safeDetails(self):\r\n path = self.getBasePath() + \".p\"\r\n\r\n pickle.dump(self.details, open(path, \"wb\"), protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n def doExport(self):\r\n self.createTopicOnDemand()\r\n self.getGitCommitId()\r\n\r\n self.safeDetails()\r\n# ----------------------------------------------------------\r\n# On-Import region\r\n# ----------------------------------------------------------\r\n\r\nfrontend.setTheme()" ]
[ [ "numpy.dot", "numpy.kaiser", "matplotlib.pyplot.autoscale", "matplotlib.pyplot.rc", "matplotlib.pyplot.plot", "scipy.signal.correlate2d", "numpy.hamming", "numpy.hanning", "numpy.where", "matplotlib.pyplot.gca", "numpy.swapaxes", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.stem", "numpy.arange", "matplotlib.pyplot.gcf", "numpy.sin", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots_adjust", "numpy.float32", "numpy.zeros", "matplotlib.pyplot.title", "numpy.power", "numpy.append", "numpy.log10", "numpy.floor", "matplotlib.pyplot.pcolormesh", "numpy.array", "matplotlib.pyplot.ion", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.abs", "numpy.blackman", "matplotlib.pyplot.ioff", "scipy.signal.chirp", "matplotlib.pyplot.xlabel" ] ]
kmamine/caer
[ "d78eccc40d259371a68f834addb6b5b53ce5f340" ]
[ "caer/transforms/position.py" ]
[ "# _____ ______ _____ \n# / ____/ /\\ | ____ | __ \\\n# | | / \\ | |__ | |__) | Caer - Modern Computer Vision\n# | | / /\\ \\ | __| | _ / Languages: Python, C, C++\n# | |___ / ____ \\ | |____ | | \\ \\ http://github.com/jasmcaus/caer\n# \\_____\\/_/ \\_ \\______ |_| \\_\\\n\n# Licensed under the MIT License <http://opensource.org/licenses/MIT>\n# SPDX-License-Identifier: MIT\n# Copyright (c) 2020-21 The Caer Authors <http://github.com/jasmcaus>\n\nimport numpy as np \nimport cv2 as cv \nimport random \nimport collections\n\nfrom ..adorad import Tensor, to_tensor\nfrom .._internal import _check_target_size\nfrom ..globals import (\n INTER_AREA, INTER_CUBIC, INTER_NEAREST, INTER_LINEAR\n)\n\npad_to_str = {\n 'constant': 0,\n 'edge': 1,\n 'reflect': 4,\n 'symmetric': 2\n}\n\nMAX_VALUES_BY_DTYPE = {\n np.dtype(\"uint8\"): 255,\n np.dtype(\"uint16\"): 65535,\n np.dtype(\"uint32\"): 4294967295,\n np.dtype(\"float32\"): 1.0,\n}\n\n__all__ = [\n 'hflip',\n 'vflip',\n 'hvflip',\n 'rand_flip',\n 'transpose',\n 'scale',\n 'rotate',\n 'translate',\n 'solarize',\n 'posterize',\n 'equalize',\n 'clip',\n 'pad'\n]\n\ndef _is_rgb_image(tens):\n tens = to_tensor(tens, override_checks=True)\n return tens.is_rgb()\n # return len(tens.shape) == 3 and tens.shape[-1] == 3\n\n\ndef _is_gray_image(tens):\n tens = to_tensor(tens, override_checks=True)\n return tens.is_gray()\n # return (len(tens.shape) == 2) or (len(tens.shape) == 3 and tens.shape[-1] == 1)\n\n\ndef _get_num_channels(tens):\n return tens.shape[2] if len(tens.shape) == 3 else 1\n\n\ndef hflip(tens) -> Tensor:\n r\"\"\"\n Flip an image horizontally. \n Args:\n tens (Tensor): Image to be flipped.\n\n Returns:\n Flipped image.\n\n \"\"\"\n\n tens = np.ascontiguousarray(tens[:, ::-1, ...])\n return to_tensor(tens, override_checks=True)\n\n\ndef vflip(tens) -> Tensor:\n r\"\"\"\n Flip an image vertically. \n Args:\n tens (Tensor): Image to be flipped.\n\n Returns:\n Flipped image.\n \n \"\"\"\n tens = np.ascontiguousarray(tens[::-1, ...])\n return to_tensor(tens, override_checks=True)\n\n\ndef hvflip(tens) -> Tensor:\n r\"\"\"\n Flip an image both horizontally and vertically. \n\n Args:\n tens (Tensor): Image to be flipped.\n\n Returns:\n Flipped image.\n \n \"\"\"\n return hflip(vflip(tens))\n\n\ndef rand_flip(tens) -> Tensor: \n r\"\"\"\n Randomly flip an image vertically or horizontally. 
\n\n Args:\n tens (Tensor): Image to be flipped.\n\n Returns:\n Flipped image.\n \n \"\"\"\n p = random.uniform(0, 1)\n\n if p > 0.5:\n return vflip(tens)\n else:\n return hflip(tens)\n\n\ndef transpose(tens) -> Tensor:\n if len(tens.shape) > 2:\n return tens.transpose(1, 0, 2)\n else:\n return tens.transpose(1, 0)\n\n\ndef rotate(tens, angle, rotPoint=None) -> Tensor:\n r\"\"\"\n Rotates an given image by an angle around a particular rotation point (if provided) or centre otherwise.\n \n \"\"\"\n # h, w = image.shape[:2]\n # (cX, cY) = (w/2, h/2)\n\n # # Computing the sine and cosine (rotation components of the matrix)\n # transMat = cv.getRotationMatrix2D((cX, cY), angle, scale=1.0)\n # cos = np.abs(transMat[0, 0])\n # sin = np.abs(transMat[0, 1])\n\n # # compute the new bounding dimensions of the image\n # nW = int((h*sin) + (w*cos))\n # nH = int((h*cos) + (w*sin))\n\n # # Adjusts the rotation matrix to take into account translation\n # transMat[0, 2] += (nW/2) - cX\n # transMat[1, 2] += (nH/2) - cY\n\n # # Performs the actual rotation and returns the image\n # return cv.warpAffine(image, transMat, (nW, nH))\n\n height, width = tens.shape[:2]\n\n # If no rotPoint is specified, we assume the rotation point to be around the centre\n if rotPoint is None:\n rotPoint = (width//2, height//2)\n\n rotMat = cv.getRotationMatrix2D(rotPoint, angle, scale=1.0)\n\n tens = cv.warpAffine(tens, rotMat, (width, height))\n return to_tensor(tens, override_checks=True)\n\n\ndef translate(image, x, y) -> Tensor:\n r\"\"\"Translates a given image across the x-axis and the y-axis\n\n Args:\n x (int): shifts the image right (positive) or left (negative)\n y (int): shifts the image down (positive) or up (negative)\n \n Returns:\n The translated image\n\n \"\"\"\n transMat = np.float32([[1, 0, x], [0, 1, y]])\n return cv.warpAffine(image, transMat, (image.shape[1], image.shape[0]))\n\n\ndef scale(tens, scale_factor, interpolation='bilinear') -> Tensor:\n interpolation_methods = {\n 'nearest': INTER_NEAREST, '0': INTER_NEAREST, 0: INTER_NEAREST, # 0\n 'bilinear': INTER_LINEAR, '1': INTER_LINEAR, 1: INTER_LINEAR, # 1\n 'bicubic': INTER_CUBIC, '2': INTER_CUBIC, 2: INTER_CUBIC, # 2\n 'area': INTER_AREA, '3': INTER_AREA, 3: INTER_AREA # 3\n }\n if interpolation not in interpolation_methods:\n raise ValueError('Specify a valid interpolation type - area/nearest/bicubic/bilinear')\n\n if scale_factor > 1:\n # Neater, more precise\n interpolation = 'bicubic'\n\n height, width = tens.shape[:2]\n new_height, new_width = int(height * scale_factor), int(width * scale_factor)\n\n tens = cv.resize(tens, (new_width,new_height), interpolation=interpolation)\n return to_tensor(tens, override_checks=True)\n\n\ndef pad(tens, padding, fill=0, padding_mode='constant') -> Tensor:\n r\"\"\"\n Pad the given image on all sides with specified padding mode and fill value.\n\n Args:\n tens (Tensor): image to be padded.\n padding (int or tuple): Padding on each border. If a single int is provided this \n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively.\n fill: Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. 
Default is constant.\n - constant: pads with a constant value, this value is specified with fill\n - edge: pads with the last value on the edge of the image\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n \n Returns:\n Tensor of shape ``(height, width, channels)``.\n\n \"\"\"\n if not isinstance(padding, (tuple, list)):\n raise TypeError('Got inappropriate padding argument')\n\n if not isinstance(fill, (str, tuple)):\n raise TypeError('Got inappropriate fill argument')\n\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode argument')\n\n if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:\n raise ValueError(f'Padding must be an int or a 2, or 4 element tuple, not a {len(padding)} element tuple')\n\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Padding mode should be either constant, edge, reflect or symmetric'\n\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n\n if isinstance(padding, collections.Sequence) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n\n if isinstance(padding, collections.Sequence) and len(padding) == 4:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n\n tens = cv.copyMakeBorder(tens,\n top = pad_top,\n bottom = pad_bottom,\n left = pad_left,\n right = pad_right,\n borderType = pad_to_str[padding_mode],\n value = fill)\n\n return to_tensor(tens, override_checks=True)\n\n \ndef crop(tens, x_min, y_min, x_max, y_max) -> Tensor:\n height, width = tens.shape[:2]\n if x_max <= x_min or y_max <= y_min:\n raise ValueError(\n \"We should have x_min < x_max and y_min < y_max. But we got\"\n \" (x_min = {x_min}, y_min = {y_min}, x_max = {x_max}, y_max = {y_max})\".format(\n x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max\n )\n )\n\n if x_min < 0 or x_max > width or y_min < 0 or y_max > height:\n raise ValueError(\n \"Values for crop should be non negative and equal or smaller than image sizes\"\n \"(x_min = {x_min}, y_min = {y_min}, x_max = {x_max}, y_max = {y_max}, \"\n \"height = {height}, width = {width})\".format(\n x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max, height=height, width=width\n )\n )\n\n return to_tensor(tens[y_min:y_max, x_min:x_max], override_checks=True)\n\n\ndef center_crop(image, target_size=None) -> Tensor:\n r\"\"\"Computes the centre crop of an image using `target_size`\n\n Args:\n image (Tensor): Valid image Tensor\n target_size (tuple): Size of the centre crop. 
Must be in the format `(width,height)`\n \n Returns:\n Cropped Centre (Tensor)\n \n Examples::\n\n >> tens = caer.data.bear() # Standard 640x427 image\n >> cropped = caer.center_crop(tens, target_size=(200,200))\n >> cropped.shape\n (200,200,3)\n\n \"\"\"\n return _compute_centre_crop(image, target_size)\n\n\ndef rand_crop(tens, crop_height, crop_width, h_start, w_start) -> Tensor:\n height, width = tens.shape[:2]\n if height < crop_height or width < crop_width:\n raise ValueError(\n \"Requested crop size ({crop_height}, {crop_width}) is \"\n \"larger than the image size ({height}, {width})\".format(\n crop_height=crop_height, crop_width=crop_width, height=height, width=width\n )\n )\n x1, y1, x2, y2 = _get_random_crop_coords(height, width, crop_height, crop_width, h_start, w_start)\n\n return to_tensor(tens[y1:y2, x1:x2], override_checks=True)\n\n\ndef _compute_centre_crop(tens, target_size) -> Tensor:\n _ = _check_target_size(target_size)\n\n # Getting org height and target\n org_h, org_w = tens.shape[:2]\n target_w, target_h = target_size\n\n # The following line is actually the right way of accessing height and width of an opencv-specific image (height, width). However for some reason, while the code runs, this is flipped (it now becomes (width,height)). Testing needs to be done to catch this little bug\n # org_h, org_w = tens.shape[:2]\n\n\n if target_h > org_h or target_w > org_w:\n raise ValueError('To compute centre crop, target size dimensions must be <= tens dimensions')\n\n diff_h = (org_h - target_h) // 2\n diff_w = (org_w - target_w ) // 2\n \n # tens[y:y+h, x:x+h]\n return to_tensor(tens[diff_h:diff_h + target_h, diff_w:diff_w + target_w], override_checks=True)\n\n\ndef _get_random_crop_coords(height, width, crop_height, crop_width, h_start, w_start):\n y1 = int((height - crop_height) * h_start)\n y2 = y1 + crop_height\n x1 = int((width - crop_width) * w_start)\n x2 = x1 + crop_width\n return x1, y1, x2, y2\n\n\ndef solarize(tens, threshold=128) -> Tensor:\n r\"\"\"\n Invert all pixel values above a threshold.\n\n Args:\n tens (Tensor): The image to solarize.\n threshold (int): All pixels above this grayscale level are inverted.\n\n Returns:\n Solarized image (Tensor)\n \n Examples::\n\n >> tens = caer.data.sunrise()\n >> solarized = caer.solarize(tens, threshold=128)\n >> solarized.shape\n (427,640,3)\n\n \"\"\"\n tens = to_tensor(tens, override_checks=True)\n max_val = MAX_VALUES_BY_DTYPE[tens.dtype]\n\n if tens.dtype == np.dtype(\"uint8\"):\n lut = [(i if i < threshold else max_val - i) for i in range(max_val + 1)]\n\n prev_shape = tens.shape\n tens = cv.LUT(tens, np.array(lut, dtype=tens.dtype))\n\n if len(prev_shape) != len(tens.shape):\n tens = np.expand_dims(tens, -1)\n\n return to_tensor(tens, override_checks=True)\n\n result_tens = tens.copy()\n cond = tens >= threshold\n result_tens[cond] = max_val - result_tens[cond]\n return to_tensor(result_tens, override_checks=True)\n\n\ndef posterize(tens, bits) -> Tensor:\n r\"\"\"Reduce the number of bits for each color channel in the image.\n\n Args:\n tens (Tensor): Image to posterize.\n bits (int): Number of high bits. 
Must be in range [0, 8]\n\n Returns:\n Image with reduced color channels (Tensor)\n \n Examples::\n\n >> tens = caer.data.sunrise()\n >> posterized = caer.posterize(tens, bits=4)\n >> posterized.shape\n (427,640,3)\n\n \"\"\"\n tens = to_tensor(tens, override_checks=True)\n bits = np.uint8(bits)\n\n if tens.dtype != np.uint8:\n raise TypeError(\"Image must have uint8 channel type\")\n\n if np.any((bits < 0) | (bits > 8)):\n raise ValueError(\"bits must be in range [0, 8]\")\n\n if not bits.shape or len(bits) == 1:\n if bits == 0:\n return np.zeros_like(tens)\n if bits == 8:\n return tens.copy()\n\n lut = np.arange(0, 256, dtype=np.uint8)\n mask = ~np.uint8(2 ** (8 - bits) - 1)\n lut &= mask\n\n return to_tensor(cv.LUT(tens, lut), override_checks=True)\n \n\n if not _is_rgb_image(tens):\n raise TypeError(\"If `bits` is iterable, image must be RGB\")\n\n result_tens = np.empty_like(tens)\n for i, channel_bits in enumerate(bits):\n if channel_bits == 0:\n result_tens[..., i] = np.zeros_like(tens[..., i])\n elif channel_bits == 8:\n result_tens[..., i] = tens[..., i].copy()\n else:\n lut = np.arange(0, 256, dtype=np.uint8)\n mask = ~np.uint8(2 ** (8 - channel_bits) - 1)\n lut &= mask\n\n result_tens[..., i] = cv.LUT(tens[..., i], lut)\n\n return to_tensor(result_tens, override_checks=True)\n\n\ndef clip(tens, dtype, maxval) -> Tensor:\n tens = np.clip(tens, 0, maxval).astype(dtype)\n return to_tensor(tens, override_checks=True)\n\n\ndef _equalize_cv(tens, mask=None) -> Tensor:\n if mask is None:\n tens = cv.equalizeHist(tens)\n return to_tensor(tens, override_checks=True)\n\n histogram = cv.calcHist([tens], [0], mask, [256], (0, 256)).ravel()\n i = 0\n for val in histogram:\n if val > 0:\n break\n i += 1\n i = min(i, 255)\n\n total = np.sum(histogram)\n if histogram[i] == total:\n tens = np.full_like(tens, i)\n return to_tensor(tens, override_checks=True)\n\n scale = 255.0 / (total - histogram[i])\n _sum = 0\n\n lut = np.zeros(256, dtype=np.uint8)\n i += 1\n for i in range(i, len(histogram)):\n _sum += histogram[i]\n lut[i] = clip(round(_sum * scale), np.dtype(\"uint8\"), 255)\n\n tens = cv.LUT(tens, lut)\n return to_tensor(tens, override_checks=True)\n\n\ndef equalize(tens, mask=None, by_channels=True) -> Tensor:\n r\"\"\"Equalize the image histogram.\n\n Args:\n tens (Tensor): RGB or grayscale image.\n mask (Tensor): An optional mask. If given, only the pixels selected by the mask are included in the analysis. Maybe 1 channel or 3 channel array.\n by_channels (bool): If True, use equalization by channels separately, else convert image to YCbCr representation and use equalization by `Y` channel.\n\n Returns:\n Equalized image (Tensor)\n \n\n Examples::\n\n >> tens = caer.data.beverages() \n >> equalized = caer.equalize(tens, mask=None) \n >> equalized.shape \n (427,640,3)\n \n \"\"\"\n if tens.dtype != np.uint8:\n raise TypeError(\"Image must have uint8 channel type\")\n\n if mask is not None:\n if _is_rgb_image(mask) and _is_gray_image(tens):\n raise ValueError(\"Wrong mask shape. Image shape: {}. Mask shape: {}\".format(tens.shape, mask.shape))\n\n if not by_channels and not _is_gray_image(mask):\n raise ValueError(\"When `by_channels=False`, only 1-channel mask is supported. 
Mask shape: {}\".format(mask.shape))\n\n if mask is not None:\n mask = mask.astype(np.uint8)\n\n if _is_gray_image(tens): \n return to_tensor(_equalize_cv(tens, mask), override_checks=True)\n\n if not by_channels:\n result_tens = cv.cvtColor(tens, cv.COLOR_RGB2YCrCb)\n result_tens[..., 0] = _equalize_cv(result_tens[..., 0], mask)\n tens = cv.cvtColor(result_tens, cv.COLOR_YCrCb2RGB) \n return to_tensor(tens, override_checks=True)\n\n result_tens = np.empty_like(tens)\n for i in range(3):\n if mask is None:\n _mask = None\n elif _is_gray_image(mask):\n _mask = mask\n else:\n _mask = mask[..., i]\n\n result_tens[..., i] = _equalize_cv(tens[..., i], _mask)\n\n return to_tensor(result_tens, override_checks=True)" ]
[ [ "numpy.expand_dims", "numpy.clip", "numpy.ascontiguousarray", "numpy.uint8", "numpy.empty_like", "numpy.arange", "numpy.dtype", "numpy.full_like", "numpy.zeros_like", "numpy.any", "numpy.float32", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
szjarek/articles-AzureML-with-Python
[ "d414a898855e74b79cdf0d06ded3d31844197aed" ]
[ "2-aml-pytorch-samples/code/train/train.py" ]
[ "import os\nimport gzip\nimport struct\nimport numpy as np\n\nimport argparse\nimport mlflow\n\nimport torch\nimport torch.optim as optim\n\nfrom torch.nn import functional as F\nfrom torch import nn\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\n\nfrom azureml.core import Run\nfrom azureml.core.model import Model\n\ndef load_dataset(dataset_path):\n def unpack_mnist_data(filename: str, label=False):\n with gzip.open(filename) as gz:\n struct.unpack('I', gz.read(4))\n n_items = struct.unpack('>I', gz.read(4))\n if not label:\n n_rows = struct.unpack('>I', gz.read(4))[0]\n n_cols = struct.unpack('>I', gz.read(4))[0]\n res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype=np.uint8)\n res = res.reshape(n_items[0], n_rows * n_cols) / 255.0\n else:\n res = np.frombuffer(gz.read(n_items[0]), dtype=np.uint8)\n res = res.reshape(-1)\n return res\n \n X_train = unpack_mnist_data(os.path.join(dataset_path, 'train-images.gz'), False)\n y_train = unpack_mnist_data(os.path.join(dataset_path, 'train-labels.gz'), True)\n X_test = unpack_mnist_data(os.path.join(dataset_path, 'test-images.gz'), False)\n y_test = unpack_mnist_data(os.path.join(dataset_path, 'test-labels.gz'), True)\n\n return X_train.reshape(-1,28,28,1), y_train, X_test.reshape(-1,28,28,1), y_test\n\nclass NetMNIST(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5) \n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n \n def forward(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2,2))\n x = F.max_pool2d(F.dropout(F.relu(self.conv2(x)), p=0.2), (2,2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\nclass DatasetMnist(torch.utils.data.Dataset):\n def __init__(self, X, y=None):\n self.X, self.y = X,y\n\n self.transform = transforms.Compose([\n transforms.ToTensor()])\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, index):\n item = self.transform(self.X[index])\n if self.y is None:\n return item.float()\n \n label = self.y[index]\n return item.float(), np.long(label)\n\ndef get_aml_workspace():\n run = Run.get_context()\n ws = run.experiment.workspace\n return ws\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--data', type=str, required=True)\n args = parser.parse_known_args()[0]\n\n return args\n\ndef train_epoch(model, device, train_loader, optimizer, epoch):\n model.train()\n\n epoch_loss = 0.0\n epoch_acc = 0.0\n \n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n \n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n \n epoch_loss += loss.item()\n _, preds = torch.max(output.data, 1)\n epoch_acc += (preds == target).sum().item()\n \n if batch_idx % 200 == 0 and batch_idx != 0:\n print(f\"[{epoch:2d}:{batch_idx:5d}] \\tBatch loss: {loss.item():.5f}, Epoch loss: {epoch_loss:.5f}\")\n \n epoch_acc /= len(train_loader.dataset)\n \n print(f\"[{epoch:2d} EPOCH] \\tLoss: {epoch_loss:.6f} \\tAcc: {epoch_acc:.6f}\")\n mlflow.log_metrics({\n 'loss': epoch_loss,\n 'accuracy': epoch_acc})\n\ndef train_model(X, y, model_filename, epochs=5, batch_size=64): \n RANDOM_SEED = 101\n\n use_cuda = torch.cuda.is_available()\n torch.manual_seed(RANDOM_SEED)\n device = torch.device(\"cuda\" if use_cuda else 
\"cpu\")\n print(f\"Device: {device}\")\n\n if use_cuda:\n cuda_kwargs = {'num_workers': 1,\n 'pin_memory': True,\n 'shuffle': True}\n else:\n cuda_kwargs = {}\n \n train_dataset = DatasetMnist(X, y)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, **cuda_kwargs)\n\n model = NetMNIST().to(device)\n optimizer = optim.Adam(model.parameters())\n\n for epoch in range(1, epochs+1):\n train_epoch(model, device, train_loader, optimizer, epoch)\n\n torch.save(model.state_dict(), model_filename)\n\ndef evaluate_model(X, y, model_filename, batch_size=64):\n test_dataset = DatasetMnist(X)\n test_loader = DataLoader(test_dataset, batch_size=batch_size)\n\n model = NetMNIST()\n model.load_state_dict(torch.load(model_filename))\n preds = []\n with torch.no_grad():\n for batch in test_loader:\n batch_preds = model(batch).numpy()\n preds.extend(np.argmax(batch_preds, axis=1))\n\n accscore = (preds == y).sum().item() \n accscore /= len(test_dataset)\n\n mlflow.log_metric('test_accuracy', accscore)\n\ndef register_model(ws, model_filename):\n model = Model.register(\n workspace=ws,\n model_name=model_filename,\n model_path=model_filename,\n model_framework=Model.Framework.PYTORCH,\n model_framework_version=torch.__version__\n )\n\ndef main():\n args = parse_arguments()\n\n ws = get_aml_workspace()\n mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())\n mlflow.start_run()\n\n X_train, y_train, X_test, y_test = load_dataset(args.data)\n model_filename = \"mnist.pt_model\"\n\n train_model(X_train, y_train, model_filename)\n evaluate_model(X_test, y_test, model_filename)\n register_model(ws, model_filename)\n \nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.long", "torch.max", "torch.nn.functional.log_softmax", "torch.nn.functional.dropout", "torch.nn.functional.nll_loss", "torch.manual_seed", "torch.load", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.nn.Linear", "numpy.argmax", "torch.no_grad", "torch.cuda.is_available", "torch.device" ] ]
borten-fb/snorkel2
[ "12b1768c294e2c00388c4c6d93b089e0fcab45a0" ]
[ "snorkel/classification/training/loggers/tensorboard_writer.py" ]
[ "from typing import Any\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom snorkel.types import Config\n\nfrom .log_writer import LogWriter\n\n\nclass TensorBoardWriter(LogWriter):\n \"\"\"A class for logging to Tensorboard during training process.\n\n See ``LogWriter`` for more attributes.\n\n Parameters\n ----------\n kwargs\n Passed to ``LogWriter`` initializer\n\n Attributes\n ----------\n writer\n tensorboardX ``SummaryWriter`` for logging and visualization\n \"\"\"\n\n def __init__(self, **kwargs: Any) -> None:\n super().__init__(**kwargs)\n self.writer = SummaryWriter(self.log_dir)\n\n def add_scalar(self, name: str, value: float, step: float) -> None:\n \"\"\"Log a scalar variable to TensorBoard.\n\n Parameters\n ----------\n name\n Name of the scalar collection\n value\n Value of scalar\n step\n Step axis value\n \"\"\"\n self.writer.add_scalar(name, value, step)\n\n def write_config(\n self, config: Config, config_filename: str = \"config.json\"\n ) -> None:\n \"\"\"Dump the config to file and add it to TensorBoard.\n\n Parameters\n ----------\n config\n JSON-compatible config to write to TensorBoard\n config_filename\n File to write config to\n \"\"\"\n super().write_config(config, config_filename)\n self.writer.add_text(tag=\"config\", text_string=str(config))\n\n def cleanup(self) -> None:\n \"\"\"Close the ``SummaryWriter``.\"\"\"\n self.writer.close()\n" ]
[ [ "torch.utils.tensorboard.SummaryWriter" ] ]
Aditya-Kapadiya/Iguanas
[ "dcc2c1e71f00574c3427fa530191e7079834c11b" ]
[ "iguanas/pipeline/tests/test_linear_pipeline.py" ]
[ "import pytest\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom iguanas.rule_generation import RuleGeneratorDT, RuleGeneratorOpt\nfrom iguanas.rule_optimisation import BayesianOptimiser\nfrom iguanas.rules import Rules\nfrom iguanas.metrics import FScore, JaccardSimilarity, Precision\nfrom iguanas.rule_selection import SimpleFilter, CorrelatedFilter, GreedyFilter\nfrom iguanas.correlation_reduction import AgglomerativeClusteringReducer\nfrom iguanas.rbs import RBSOptimiser, RBSPipeline\nfrom iguanas.pipeline import LinearPipeline, ClassAccessor\n\nf1 = FScore(1)\njs = JaccardSimilarity()\np = Precision()\n\n\[email protected]\ndef _create_data():\n np.random.seed(0)\n X = pd.DataFrame({\n 'A': np.random.randint(0, 2, 100),\n 'B': np.random.randint(0, 10, 100),\n 'C': np.random.normal(0.7, 0.2, 100),\n 'D': (np.random.uniform(0, 1, 100) > 0.6).astype(int)\n })\n y = pd.Series((np.random.uniform(0, 1, 100) >\n 0.9).astype(int), name='label')\n sample_weight = (y+1)*10\n return X, y, sample_weight\n\n\[email protected]\ndef _instantiate_classes():\n rf = RandomForestClassifier(n_estimators=10, random_state=0)\n rg_dt = RuleGeneratorDT(\n metric=f1.fit,\n n_total_conditions=4,\n tree_ensemble=rf\n )\n rg_opt = RuleGeneratorOpt(\n metric=f1.fit,\n n_total_conditions=4,\n num_rules_keep=10,\n )\n rule_strings = {\n 'Rule1': \"(X['A']>0)&(X['C']>0)\",\n 'Rule2': \"(X['B']>0)&(X['D']>0)\",\n 'Rule3': \"(X['D']>0)\",\n 'Rule4': \"(X['C']>0)\"\n }\n rules = Rules(rule_strings=rule_strings)\n rule_lambdas = rules.as_rule_lambdas(as_numpy=False, with_kwargs=True)\n ro = BayesianOptimiser(\n rule_lambdas=rule_lambdas,\n lambda_kwargs=rules.lambda_kwargs,\n metric=f1.fit,\n n_iter=5\n )\n sf = SimpleFilter(\n threshold=0.05,\n operator='>=',\n metric=f1.fit\n )\n cf = CorrelatedFilter(\n correlation_reduction_class=AgglomerativeClusteringReducer(\n threshold=0.9,\n strategy='bottom_up',\n similarity_function=js.fit\n )\n )\n gf = GreedyFilter(\n metric=f1.fit,\n sorting_metric=p.fit\n )\n rbs = RBSOptimiser(\n RBSPipeline(\n config=[],\n final_decision=0,\n ),\n metric=f1.fit,\n pos_pred_rules=ClassAccessor('gf', 'rules_to_keep'),\n neg_pred_rules=[],\n n_iter=10\n )\n return rg_dt, rg_opt, ro, sf, cf, gf, rbs\n\n\ndef test_fit_predict_rule_gen_dt(_create_data, _instantiate_classes):\n X, y, sample_weight = _create_data\n rg_dt, _, _, sf, cf, gf, rbs = _instantiate_classes\n steps = [\n ('rg_dt', rg_dt),\n ('sf', sf),\n ('cf', cf),\n ('gf', gf),\n ('rbs', rbs)\n ]\n rg_dt._today = '20211220'\n lp = LinearPipeline(steps)\n # Test fit/predict/fit_predict, no sample_weight\n lp.fit(X, y)\n assert len(lp.get_params()['sf']['rules_to_keep']) == 43\n assert len(lp.get_params()['cf']['rules_to_keep']) == 41\n assert len(lp.get_params()['gf']['rules_to_keep']) == 10\n assert lp.get_params()['rbs']['rules_to_keep'] == [\n 'RGDT_Rule_20211220_26', 'RGDT_Rule_20211220_6', 'RGDT_Rule_20211220_11',\n 'RGDT_Rule_20211220_41', 'RGDT_Rule_20211220_36',\n 'RGDT_Rule_20211220_40', 'RGDT_Rule_20211220_5'\n ]\n y_pred = lp.predict(X)\n assert y_pred.mean() == 0.13\n assert f1.fit(y_pred, y) == 0.7826086956521738\n y_pred = lp.fit_predict(X, y)\n assert y_pred.mean() == 0.13\n assert f1.fit(y_pred, y) == 0.7826086956521738\n # Test fit/predict/fit_predict, sample_weight given\n lp.fit(X, y, sample_weight)\n assert len(lp.get_params()['sf']['rules_to_keep']) == 40\n assert len(lp.get_params()['cf']['rules_to_keep']) == 38\n assert 
len(lp.get_params()['gf']['rules_to_keep']) == 10\n assert lp.get_params()['rbs']['rules_to_keep'] == [\n 'RGDT_Rule_20211220_25', 'RGDT_Rule_20211220_8',\n 'RGDT_Rule_20211220_11', 'RGDT_Rule_20211220_38',\n 'RGDT_Rule_20211220_36', 'RGDT_Rule_20211220_37',\n 'RGDT_Rule_20211220_7'\n ]\n y_pred = lp.predict(X)\n assert y_pred.mean() == 0.1\n assert f1.fit(y_pred, y, sample_weight) == 0.8421052631578948\n y_pred = lp.fit_predict(X, y, sample_weight)\n assert y_pred.mean() == 0.1\n assert f1.fit(y_pred, y, sample_weight) == 0.8421052631578948\n\n\ndef test_fit_predict_rule_gen_opt(_create_data, _instantiate_classes):\n X, y, sample_weight = _create_data\n _, rg_opt, _, sf, cf, gf, rbs = _instantiate_classes\n steps = [\n ('rg_opt', rg_opt),\n ('sf', sf),\n ('cf', cf),\n ('gf', gf),\n ('rbs', rbs)\n ]\n rg_opt._today = '20211220'\n lp = LinearPipeline(steps)\n # Test fit/predict/fit_predict, no sample_weight\n lp.fit(X, y)\n assert len(lp.get_params()['sf']['rules_to_keep']) == 26\n assert len(lp.get_params()['cf']['rules_to_keep']) == 26\n assert len(lp.get_params()['gf']['rules_to_keep']) == 3\n assert lp.get_params()['rbs']['rules_to_keep'] == [\n 'RGO_Rule_20211220_25', 'RGO_Rule_20211220_27', 'RGO_Rule_20211220_41'\n ]\n y_pred = lp.predict(X)\n assert y_pred.mean() == 0.11\n assert f1.fit(y_pred, y) == 0.5714285714285713\n y_pred = lp.fit_predict(X, y)\n assert y_pred.mean() == 0.11\n assert f1.fit(y_pred, y) == 0.5714285714285713\n # Test fit/predict/fit_predict, sample_weight given\n lp.fit(X, y, sample_weight)\n assert len(lp.get_params()['sf']['rules_to_keep']) == 26\n assert len(lp.get_params()['cf']['rules_to_keep']) == 26\n assert len(lp.get_params()['gf']['rules_to_keep']) == 5\n assert lp.get_params()['rbs']['rules_to_keep'] == [\n 'RGO_Rule_20211220_31', 'RGO_Rule_20211220_24', 'RGO_Rule_20211220_34'\n ]\n y_pred = lp.predict(X)\n assert y_pred.mean() == 0.13\n assert f1.fit(y_pred, y, sample_weight) == 0.6153846153846154\n y_pred = lp.fit_predict(X, y, sample_weight)\n assert y_pred.mean() == 0.13\n assert f1.fit(y_pred, y, sample_weight) == 0.6153846153846154\n\n\ndef test_fit_predict_rule_opt(_create_data, _instantiate_classes):\n X, y, sample_weight = _create_data\n _, _, ro, sf, cf, gf, rbs = _instantiate_classes\n steps = [\n ('ro', ro),\n ('sf', sf),\n ('cf', cf),\n ('gf', gf),\n ('rbs', rbs)\n ]\n lp = LinearPipeline(steps)\n # Test fit/predict/fit_predict, no sample_weight\n lp.fit(X, y)\n assert len(lp.get_params()['sf']['rules_to_keep']) == 2\n assert len(lp.get_params()['cf']['rules_to_keep']) == 2\n assert len(lp.get_params()['gf']['rules_to_keep']) == 1\n assert lp.get_params()['rbs']['rules_to_keep'] == ['Rule4']\n y_pred = lp.predict(X)\n assert y_pred.mean() == 0.95\n assert f1.fit(y_pred, y) == 0.1904761904761905\n y_pred = lp.fit_predict(X, y)\n assert y_pred.mean() == 0.95\n assert f1.fit(y_pred, y) == 0.1904761904761905\n # Test fit/predict/fit_predict, sample_weight given\n lp.fit(X, y, sample_weight)\n assert len(lp.get_params()['sf']['rules_to_keep']) == 4\n assert len(lp.get_params()['cf']['rules_to_keep']) == 4\n assert len(lp.get_params()['gf']['rules_to_keep']) == 1\n assert lp.get_params()['rbs']['rules_to_keep'] == ['Rule4']\n y_pred = lp.predict(X)\n assert y_pred.mean() == 0.95\n assert f1.fit(y_pred, y, sample_weight) == 0.32\n y_pred = lp.fit_predict(X, y, sample_weight)\n assert y_pred.mean() == 0.95\n assert f1.fit(y_pred, y, sample_weight) == 0.32\n\n\ndef test_fit_transform(_create_data, _instantiate_classes):\n X, y, sample_weight = 
_create_data\n _, _, ro, sf, cf, gf, _ = _instantiate_classes\n steps = [\n ('ro', ro),\n ('sf', sf),\n ('cf', cf),\n ('gf', gf),\n ]\n lp = LinearPipeline(steps)\n # Test fit_transform, no sample_weight\n X_rules = lp.fit_transform(X, y)\n assert X_rules.columns.tolist() == ['Rule4']\n np.testing.assert_equal(X_rules.mean().values, np.array([0.95]))\n assert len(lp.get_params()['sf']['rules_to_keep']) == 2\n assert len(lp.get_params()['cf']['rules_to_keep']) == 2\n assert len(lp.get_params()['gf']['rules_to_keep']) == 1\n # Test fit_transform, sample_weight given\n X_rules = lp.fit_transform(X, y, sample_weight)\n assert X_rules.columns.tolist() == ['Rule4']\n np.testing.assert_equal(X_rules.mean().values, np.array([0.95]))\n assert len(lp.get_params()['sf']['rules_to_keep']) == 4\n assert len(lp.get_params()['cf']['rules_to_keep']) == 4\n assert len(lp.get_params()['gf']['rules_to_keep']) == 1\n\n\ndef test_fit_predict_use_init_data(_create_data, _instantiate_classes):\n X, y, sample_weight = _create_data\n rg_dt, _, _, _, _, _, _ = _instantiate_classes\n ro = BayesianOptimiser(\n rule_lambdas=ClassAccessor(\n class_tag='rg_dt',\n class_attribute='rule_lambdas'\n ),\n lambda_kwargs=ClassAccessor(\n class_tag='rg_dt',\n class_attribute='lambda_kwargs'\n ),\n metric=f1.fit,\n n_iter=5\n )\n rg_dt._today = '20220201'\n expected_rule_strings = {\n 'RGDT_Rule_20220201_0': \"(X['A']==False)&(X['B']<=6)&(X['C']>0.6177501837002377)\",\n 'RGDT_Rule_20220201_1': \"(X['A']==False)&(X['B']<=2)&(X['D']==False)\",\n 'RGDT_Rule_20220201_2': \"(X['A']==False)&(X['B']<=2)&(X['B']>=2)&(X['D']==False)\",\n 'RGDT_Rule_20220201_3': \"(X['A']==False)&(X['B']<=4)&(X['C']>0.84748)&(X['D']==True)\",\n 'RGDT_Rule_20220201_4': \"(X['A']==False)&(X['B']<=3)&(X['B']>=0)&(X['C']<=0.6610314524532597)\",\n 'RGDT_Rule_20220201_5': \"(X['A']==False)&(X['B']<=6)&(X['B']>=2)&(X['C']>0.82191)\",\n 'RGDT_Rule_20220201_6': \"(X['A']==False)&(X['B']<=7)&(X['C']>0.85766)&(X['D']==False)\",\n 'RGDT_Rule_20220201_7': \"(X['A']==False)&(X['B']<=7)&(X['B']>=1)&(X['D']==False)\",\n 'RGDT_Rule_20220201_8': \"(X['A']==False)&(X['B']>=1)&(X['C']>0.84748)&(X['D']==True)\",\n 'RGDT_Rule_20220201_9': \"(X['A']==False)&(X['B']>=2)&(X['D']==False)\",\n 'RGDT_Rule_20220201_10': \"(X['A']==False)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\",\n 'RGDT_Rule_20220201_11': \"(X['A']==False)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\",\n 'RGDT_Rule_20220201_12': \"(X['A']==False)&(X['C']<=0.65965)&(X['D']==False)\",\n 'RGDT_Rule_20220201_13': \"(X['A']==False)&(X['C']<=0.663337811583689)&(X['D']==False)\",\n 'RGDT_Rule_20220201_14': \"(X['A']==False)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\",\n 'RGDT_Rule_20220201_15': \"(X['A']==False)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)&(X['D']==False)\",\n 'RGDT_Rule_20220201_16': \"(X['A']==False)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)&(X['D']==False)\",\n 'RGDT_Rule_20220201_17': \"(X['A']==False)&(X['C']>0.80832)&(X['D']==True)\",\n 'RGDT_Rule_20220201_18': \"(X['A']==False)&(X['C']>0.6177501837002377)&(X['D']==False)\",\n 'RGDT_Rule_20220201_19': \"(X['A']==False)&(X['C']>0.6177501837002377)\",\n 'RGDT_Rule_20220201_20': \"(X['A']==False)&(X['C']>0.6177501837002377)&(X['D']==False)\",\n 'RGDT_Rule_20220201_21': \"(X['A']==False)&(X['C']>0.6177501837002377)&(X['D']==False)\",\n 'RGDT_Rule_20220201_22': \"(X['A']==True)&(X['B']<=6)&(X['C']<=0.663337811583689)\",\n 'RGDT_Rule_20220201_23': 
\"(X['A']==True)&(X['B']<=6)&(X['C']<=0.50532)\",\n 'RGDT_Rule_20220201_24': \"(X['A']==True)&(X['B']<=7)&(X['C']>0.2740656256979518)&(X['D']==False)\",\n 'RGDT_Rule_20220201_25': \"(X['A']==True)&(X['B']>=0)&(X['C']<=0.6610314524532597)&(X['D']==False)\",\n 'RGDT_Rule_20220201_26': \"(X['A']==True)&(X['C']<=0.5315)&(X['C']>0.46073)&(X['D']==False)\",\n 'RGDT_Rule_20220201_27': \"(X['A']==True)&(X['C']<=0.663337811583689)&(X['D']==False)\",\n 'RGDT_Rule_20220201_28': \"(X['A']==True)&(X['C']<=0.9194077847929631)&(X['C']>0.2740656256979518)&(X['D']==False)\",\n 'RGDT_Rule_20220201_29': \"(X['A']==True)&(X['C']<=0.65082)&(X['C']>0.63671)&(X['D']==False)\",\n 'RGDT_Rule_20220201_30': \"(X['B']<=7)&(X['C']>0.2740656256979518)\",\n 'RGDT_Rule_20220201_31': \"(X['B']<=5)&(X['C']<=0.51004)&(X['C']>0.45317)&(X['D']==False)\",\n 'RGDT_Rule_20220201_32': \"(X['B']<=7)&(X['B']>=2)&(X['C']<=0.52794)&(X['D']==False)\",\n 'RGDT_Rule_20220201_33': \"(X['B']<=7)&(X['B']>=2)&(X['C']>0.52794)&(X['D']==False)\",\n 'RGDT_Rule_20220201_34': \"(X['B']<=3)&(X['B']>=0)&(X['C']<=0.6610314524532597)&(X['D']==False)\",\n 'RGDT_Rule_20220201_35': \"(X['B']<=6)&(X['C']<=0.663337811583689)&(X['D']==False)\",\n 'RGDT_Rule_20220201_36': \"(X['B']<=8)&(X['C']<=0.65965)&(X['C']>0.62615)&(X['D']==False)\",\n 'RGDT_Rule_20220201_37': \"(X['B']>=0)&(X['C']<=0.6610314524532597)&(X['D']==False)\",\n 'RGDT_Rule_20220201_38': \"(X['B']>=4)&(X['C']<=0.9194077847929631)&(X['C']>0.2740656256979518)&(X['D']==False)\",\n 'RGDT_Rule_20220201_39': \"(X['C']<=0.663337811583689)\",\n 'RGDT_Rule_20220201_40': \"(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\",\n 'RGDT_Rule_20220201_41': \"(X['C']<=0.65965)&(X['C']>0.64871)\",\n 'RGDT_Rule_20220201_42': \"(X['C']<=0.87533)&(X['C']>0.84487)\"\n }\n expected_rule_strings_weights = {\n 'RGDT_Rule_20220201_0': \"(X['A']==False)&(X['B']<=6)&(X['C']>0.6177501837002377)\",\n 'RGDT_Rule_20220201_1': \"(X['A']==False)&(X['B']<=2)&(X['D']==False)\",\n 'RGDT_Rule_20220201_2': \"(X['A']==False)&(X['B']<=7)&(X['B']>=1)&(X['D']==False)\",\n 'RGDT_Rule_20220201_3': \"(X['A']==False)&(X['B']<=4)&(X['C']>0.80832)&(X['D']==True)\",\n 'RGDT_Rule_20220201_4': \"(X['A']==False)&(X['B']<=4)&(X['C']>0.84748)&(X['D']==True)\",\n 'RGDT_Rule_20220201_5': \"(X['A']==False)&(X['B']<=3)&(X['B']>=0)&(X['C']<=0.6610314524532597)\",\n 'RGDT_Rule_20220201_6': \"(X['A']==False)&(X['B']<=6)&(X['B']>=2)&(X['C']>0.82191)\",\n 'RGDT_Rule_20220201_7': \"(X['A']==False)&(X['B']<=7)&(X['C']>0.85766)&(X['D']==False)\",\n 'RGDT_Rule_20220201_8': \"(X['A']==False)&(X['B']<=7)&(X['C']>0.2740656256979518)&(X['D']==False)\",\n 'RGDT_Rule_20220201_9': \"(X['A']==False)&(X['B']>=2)&(X['D']==False)\",\n 'RGDT_Rule_20220201_10': \"(X['A']==False)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\",\n 'RGDT_Rule_20220201_11': \"(X['A']==False)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\",\n 'RGDT_Rule_20220201_12': \"(X['A']==False)&(X['C']<=0.65965)&(X['D']==False)\",\n 'RGDT_Rule_20220201_13': \"(X['A']==False)&(X['C']<=0.66179)&(X['D']==False)\",\n 'RGDT_Rule_20220201_14': \"(X['A']==False)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\",\n 'RGDT_Rule_20220201_15': \"(X['A']==False)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)&(X['D']==False)\",\n 'RGDT_Rule_20220201_16': \"(X['A']==False)&(X['C']>0.3866744253009945)&(X['D']==False)\",\n 'RGDT_Rule_20220201_17': \"(X['A']==False)&(X['C']>0.6177501837002377)\",\n 'RGDT_Rule_20220201_18': 
\"(X['A']==False)&(X['C']>0.3866744253009945)&(X['D']==False)\",\n 'RGDT_Rule_20220201_19': \"(X['A']==True)&(X['B']<=6)&(X['C']<=0.663337811583689)\",\n 'RGDT_Rule_20220201_20': \"(X['A']==True)&(X['B']<=7)&(X['B']>=1)&(X['D']==False)\",\n 'RGDT_Rule_20220201_21': \"(X['A']==True)&(X['B']<=6)&(X['C']<=0.50532)\",\n 'RGDT_Rule_20220201_22': \"(X['A']==True)&(X['B']<=7)&(X['C']>0.2740656256979518)&(X['D']==False)\",\n 'RGDT_Rule_20220201_23': \"(X['A']==True)&(X['B']>=0)&(X['C']<=0.6610314524532597)&(X['D']==False)\",\n 'RGDT_Rule_20220201_24': \"(X['A']==True)&(X['C']<=0.663337811583689)&(X['D']==False)\",\n 'RGDT_Rule_20220201_25': \"(X['A']==True)&(X['C']<=0.9194077847929631)&(X['C']>0.2740656256979518)&(X['D']==False)\",\n 'RGDT_Rule_20220201_26': \"(X['B']<=7)&(X['C']>0.2740656256979518)\",\n 'RGDT_Rule_20220201_27': \"(X['B']<=8)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)&(X['D']==False)\",\n 'RGDT_Rule_20220201_28': \"(X['B']<=3)&(X['B']>=0)&(X['C']<=0.6610314524532597)&(X['D']==False)\",\n 'RGDT_Rule_20220201_29': \"(X['B']<=7)&(X['B']>=2)&(X['C']>0.52794)&(X['D']==False)\",\n 'RGDT_Rule_20220201_30': \"(X['B']<=8)&(X['B']>=2)&(X['C']<=0.84487)&(X['D']==False)\",\n 'RGDT_Rule_20220201_31': \"(X['B']<=6)&(X['C']<=0.663337811583689)&(X['D']==False)\",\n 'RGDT_Rule_20220201_32': \"(X['B']<=8)&(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)&(X['D']==False)\",\n 'RGDT_Rule_20220201_33': \"(X['B']>=0)&(X['C']<=0.6610314524532597)&(X['D']==False)\",\n 'RGDT_Rule_20220201_34': \"(X['B']>=4)&(X['C']<=0.9194077847929631)&(X['C']>0.2740656256979518)&(X['D']==False)\",\n 'RGDT_Rule_20220201_35': \"(X['C']<=0.663337811583689)\",\n 'RGDT_Rule_20220201_36': \"(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\",\n 'RGDT_Rule_20220201_37': \"(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\",\n 'RGDT_Rule_20220201_38': \"(X['C']<=0.85767)&(X['C']>0.84748)&(X['D']==True)\",\n 'RGDT_Rule_20220201_39': \"(X['C']<=1.1179668118672983)&(X['C']>0.3866744253009945)\"\n }\n steps = [\n ('rg_dt', rg_dt),\n ('ro', ro)\n ]\n lp = LinearPipeline(\n steps=steps,\n use_init_data=['ro']\n )\n # No sample_weight\n lp.fit(X, y)\n assert lp.get_params()['ro']['rule_strings'] == expected_rule_strings\n X_rules = lp.fit_transform(X, y)\n assert lp.get_params()['ro']['rule_strings'] == expected_rule_strings\n assert X_rules.sum().sum() == 871\n assert lp.rules.rule_strings == expected_rule_strings\n # sample_weight provided\n lp.fit(X, y, sample_weight)\n assert lp.get_params()[\n 'ro']['rule_strings'] == expected_rule_strings_weights\n X_rules = lp.fit_transform(X, y, sample_weight)\n assert lp.get_params()[\n 'ro']['rule_strings'] == expected_rule_strings_weights\n assert X_rules.sum().sum() == 1178\n assert lp.rules.rule_strings == expected_rule_strings_weights\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "numpy.random.seed", "numpy.random.normal", "numpy.random.uniform", "numpy.array", "numpy.random.randint" ] ]
meghasin/RTX
[ "4e7b73263f60840d206ce759111214c28bfc999d" ]
[ "data/KGmetadata/dumpdata.py" ]
[ "# This script will dump indexes like node names, edge names, etc.\nimport numpy as np\nnp.warnings.filterwarnings('ignore')\nfrom neo4j.v1 import GraphDatabase, basic_auth\nimport requests_cache\nimport os\nimport sys\nimport re\n\n#requests_cache.install_cache('orangeboard')\n# specifiy the path of orangeboard database\npathlist = os.path.realpath(__file__).split(os.path.sep)\nRTXindex = pathlist.index(\"RTX\")\ndbpath = os.path.sep.join([*pathlist[:(RTXindex+1)],'data','orangeboard'])\nrequests_cache.install_cache(dbpath)\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__))+\"/../../code\") # code directory\nfrom RTXConfiguration import RTXConfiguration\n\nremove_tab_newlines = re.compile(r\"\\s+\")\n\ndef dump_name_description_KG2(file_name, session, write_mode):\n\t\"\"\"\n\tdump node names and descriptions of all nodes\n\t:param file_name: name of file to save to (TSV)\n\t:param session: neo4j session\n\t:param write_mode: 'w' for overwriting the file, 'a' for appending to the file at the end (or creating a new on if it DNE)\n\t:return: None\n\t\"\"\"\n\tquery = \"match (n) return properties(n) as p, labels(n) as l\"\n\tres = session.run(query)\n\twith open(file_name, write_mode, encoding=\"utf-8\") as fid:\n\t\tfor item in res:\n\t\t\tprop_dict = item['p']\n\t\t\tlabels = item['l']\n\t\t\tif 'id' in prop_dict and 'name' in prop_dict:\n\t\t\t\tif prop_dict['id'] and prop_dict['name'] and labels:\n\t\t\t\t\tlabel = list(set(labels) - {'Base'}).pop()\n\t\t\t\t\tif label:\n\t\t\t\t\t\tfid.write('%s\\t' % prop_dict['id'])\n\t\t\t\t\t\t#fid.write('%s\\t' % ' '.join(prop_dict['name'].split('\\n'))) # FIXME: ugly workaround for node CHEMBL.COMPOUND:CHEMBL2259757 that has a tab in its name\n\t\t\t\t\t\tfid.write('%s\\t' % remove_tab_newlines.sub(\" \", prop_dict['name'])) # better approach\n\t\t\t\t\t\tfid.write('%s\\n' % label)\n\t\t\t\tif label == \"protein\" and 'id' in prop_dict and 'symbol' in prop_dict: # If it's a protein, also do the symbol\n\t\t\t\t\tif prop_dict['id'] and prop_dict['symbol'] and label:\n\t\t\t\t\t\tfid.write('%s\\t' % prop_dict['id'])\n\t\t\t\t\t\tfid.write('%s\\t' % prop_dict['symbol'])\n\t\t\t\t\t\tfid.write('%s\\n' % label)\n\treturn\n\n\ndef dump_name_description_KG1(file_name, session, write_mode):\n\t\"\"\"\n\tdump node names and descriptions of all nodes\n\t:param file_name: name of file to save to (TSV)\n\t:param session: neo4j session\n\t:param write_mode: 'w' for overwriting the file, 'a' for appending to the file at the end (or creating a new on if it DNE)\n\t:return: None\n\t\"\"\"\n\tquery = \"match (n) return properties(n) as p, labels(n) as l\"\n\tres = session.run(query)\n\twith open(file_name, write_mode) as fid:\n\t\tfor item in res:\n\t\t\tprop_dict = item['p']\n\t\t\tlabels = item['l']\n\t\t\tfid.write('%s\\t' % prop_dict['id'])\n\t\t\tfid.write('%s\\t' % prop_dict['name'])\n\t\t\tlabel = list(set(labels) - {'Base'}).pop()\n\t\t\tfid.write('%s\\n' % label)\n\t\t\tif label == \"protein\": # If it's a protein, also do the symbol\n\t\t\t\tfid.write('%s\\t' % prop_dict['id'])\n\t\t\t\tfid.write('%s\\t' % prop_dict['symbol'])\n\t\t\t\tfid.write('%s\\n' % label)\n\treturn\n\ndef dump_node_labels_KG1(file_name, session, write_mode):\n\t\"\"\"\n\tDump the types of nodes\n\t:param file_name: to write to\n\t:param session: neo4j session\n\t:param write_mode: 'w' for overwriting the file, 'a' for appending to the file at the end (or creating a new on if it DNE)\n\t:return: None\n\t\"\"\"\n\tquery = \"match (n) return distinct labels(n)\"\n\tres 
= session.run(query)\n\twith open(file_name, write_mode) as fid:\n\t\tfor i in res:\n\t\t\tlabel = list(set(i[\"labels(n)\"]).difference({\"Base\"})) # get rid of the extra base label\n\t\t\tlabel = label.pop() # this assumes only a single relationship type, but that's ok since that's how neo4j works\n\t\t\tfid.write('%s\\n' % label)\n\treturn\n\ndef dump_edge_types_KG1(file_name, session, write_mode):\n\t\"\"\"\n\tDump the types of nodes\n\t:param file_name: to write to\n\t:param session: neo4j session\n\t:param write_mode: 'w' for overwriting the file, 'a' for appending to the file at the end (or creating a new on if it DNE)\n\t:return: None\n\t\"\"\"\n\tquery = \"match ()-[r]-() return distinct type(r)\"\n\tres = session.run(query)\n\twith open(file_name, write_mode) as fid:\n\t\tfor i in res:\n\t\t\ttype = i[\"type(r)\"]\n\t\t\tfid.write('%s\\n' % type)\n\treturn\n\n# Actually dump the data for KG1\nrtxConfig = RTXConfiguration()\nrtxConfig.live = 'Production'\ndriver = GraphDatabase.driver(rtxConfig.neo4j_bolt, auth=basic_auth(rtxConfig.neo4j_username, rtxConfig.neo4j_password))\nsession = driver.session()\ndump_name_description_KG1('NodeNamesDescriptions_KG1.tsv', session, 'w')\n#dump_node_labels_KG1('NodeLabels.tsv', session, 'w') # TODO: these are apparently unused?\n#dump_edge_types_KG1('EdgeTypes.tsv', session, 'w') # TODO: these are apparently unused?\n\n\n# now dump data for KG2\ndel rtxConfig\nrtxConfig = RTXConfiguration()\nrtxConfig.live = 'KG2'\ndriver = GraphDatabase.driver(rtxConfig.neo4j_bolt, auth=basic_auth(rtxConfig.neo4j_username, rtxConfig.neo4j_password))\nsession = driver.session()\ndump_name_description_KG2('NodeNamesDescriptions_KG2.tsv', session, 'w')\n#dump_node_labels_KG2('NodeLabels.tsv', session, 'w') # TODO: these are apparently unused?\n#dump_edge_types_KG2('EdgeTypes.tsv', session, 'w') # TODO: these are apparently unused?\n" ]
[ [ "numpy.warnings.filterwarnings" ] ]
abbyxxn/maskrcnn-benchmark-3d
[ "84c5829288cd10a21d67673e7083141e4c65554f" ]
[ "maskrcnn_benchmark/modeling/roi_heads/box3d_head/box3d_head.py" ]
[ "import os\n\nimport numpy as np\nimport torch\n\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom .roi_box3d_feature_extractors import make_roi_box3d_feature_extractor\nfrom .roi_box3d_feature_extractors import make_roi_pc_feature_extractor\nfrom .roi_box3d_predictors import make_roi_box3d_predictor\nfrom .roi_box3d_predictors import make_roi_box3d_predictor_dimension\nfrom .roi_box3d_predictors import make_roi_box3d_predictor_rotation\nfrom .roi_box3d_predictors import make_roi_box3d_predictor_localization_pc\nfrom .roi_box3d_predictors import make_roi_box3d_predictor_localization_conv\n\n\ndef keep_only_positive_boxes(boxes):\n \"\"\"\n Given a set of BoxList containing the `labels` field,\n return a set of BoxList for which `labels > 0`.\n Arguments:\n boxes (list of BoxList)\n \"\"\"\n assert isinstance(boxes, (list, tuple))\n assert isinstance(boxes[0], BoxList)\n assert boxes[0].has_field(\"labels\")\n positive_boxes = []\n positive_inds = []\n for boxes_per_image in boxes:\n labels = boxes_per_image.get_field(\"labels\")\n inds_mask = labels > 0\n inds = inds_mask.nonzero().squeeze(1)\n positive_boxes.append(boxes_per_image[inds])\n positive_inds.append(inds_mask)\n return positive_boxes, positive_inds\n\n\nclass ROIBox3DHead(torch.nn.Module):\n \"\"\"\n Generic Box3d Head class.\n \"\"\"\n\n # TODO change rotation_angle_sin_add_cos to rotation_regression\n def __init__(self, cfg):\n super(ROIBox3DHead, self).__init__()\n self.cfg = cfg.clone()\n self.feature_extractor = make_roi_box3d_feature_extractor(cfg)\n self.pc_feature_extractor = make_roi_pc_feature_extractor(cfg)\n self.predictor = make_roi_box3d_predictor(cfg)\n self.predictor_dimension = make_roi_box3d_predictor_dimension(cfg)\n self.predictor_rotation = make_roi_box3d_predictor_rotation(cfg)\n self.predictor_localization_conv = make_roi_box3d_predictor_localization_conv(cfg)\n self.predictor_localization_pc = make_roi_box3d_predictor_localization_pc(cfg)\n\n def forward(self, features, proposals, targets=None, img_original_ids=None):\n \"\"\"\n Arguments:\n features (list[Tensor]): feature-maps from possibly several levels\n proposals (list[BoxList]): proposal boxes\n targets (list[BoxList], optional): the ground-truth targets.\n img_original_ids (list[str]): the image original filename index\n Returns:\n x (Tensor): the result of the feature extractor\n proposals (list[BoxList]): during training, the subsampled proposals\n are returned. During testing, the predicted boxlists are returned\n losses (dict[Tensor]): During training, returns the losses for the\n head. 
During testing, returns an empty dict.\n \"\"\"\n\n if self.training:\n # during training, only focus on positive boxes\n all_proposals = proposals\n proposals, positive_inds = keep_only_positive_boxes(proposals)\n\n if self.training and self.cfg.MODEL.ROI_BOX3D_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:\n x = features\n x = x[torch.cat(positive_inds, dim=0)]\n else:\n x = self.feature_extractor(features, proposals)\n\n # extract pseudo pc features and concatenate with roi features\n pc_features = self.pc_feature_prepare(proposals, img_original_ids)\n pc_features = torch.cat(pc_features)\n fusion_feature = torch.cat((x, pc_features), 1)\n\n # two fc for all\n roi_fusion_feature = self.predictor(fusion_feature)\n\n box3d_dim_regression = self.predictor_dimension(roi_fusion_feature)\n box3d_rotation_logits, box3d_rotation_regression = self.predictor_rotation(roi_fusion_feature)\n box3d_localization_conv_regression = self.predictor_localization_conv(roi_fusion_feature)\n box3d_localization_pc_regression = self.predictor_localization_pc(pc_features)\n\n # inference\n if not self.training:\n post_processor_list = [box3d_dim_regression, box3d_rotation_logits, box3d_rotation_regression,\n box3d_localization_conv_regression, box3d_localization_pc_regression]\n post_processor_tuple = tuple(post_processor_list)\n result = self.post_processor(post_processor_tuple, proposals, img_original_ids)\n return x, result, {}\n\n # training\n loss_box3d_dim, loss_box3d_rot_conf, loss_box3d_rot_reg, loss_box3d_localization = self.loss_evaluator(\n proposals,\n box3d_dim_regression=box3d_dim_regression,\n box3d_rotation_logits=box3d_rotation_logits,\n box3d_rotation_regression=box3d_rotation_regression,\n box3d_localization_conv_regression=box3d_localization_conv_regression,\n box3d_localization_pc_regression=box3d_localization_pc_regression,\n targets=targets, img_original_ids=img_original_ids)\n\n loss_dict = dict()\n loss_dict[\"loss_box3d_dim\"] = loss_box3d_dim\n loss_dict[\"loss_box3d_rot_conf\"] = loss_box3d_rot_conf\n loss_dict[\"loss_box3d_rot_reg\"] = loss_box3d_rot_reg\n loss_dict[\"loss_box3d_loc_reg\"] = loss_box3d_localization\n\n return x, all_proposals, loss_dict\n\n def pc_feature_prepare(self, proposals, img_original_ids):\n pseudo_pc_features = []\n PTH = \"/home/abby/Repositories/maskrcnn-benchmark/datasets/kitti/object/training/pseudo_pc\"\n for proposal_per_image, img_ori_id in zip(proposals, img_original_ids):\n pseudo_pc_path = os.path.join(PTH, img_ori_id + \".npz\")\n pseudo_pc = np.load(pseudo_pc_path)\n pseudo_pc = pseudo_pc['pseudo_pc']\n assert (pseudo_pc.shape[1], pseudo_pc.shape[2] == proposal_per_image.size[1], proposal_per_image.size[0]), \\\n \"{}, {}\".format(pseudo_pc.shape, proposal_per_image.size)\n device = proposal_per_image.bbox.device\n pseudo_pc = torch.as_tensor(pseudo_pc, device=device)\n pseudo_pc_feature = self.pc_feature_extractor(proposal_per_image, pseudo_pc)\n pseudo_pc_features.append(pseudo_pc_feature)\n return pseudo_pc_features\n\n def depth_feature_prepare(self, proposals, img_original_ids):\n stereo_depth_features = []\n stereo_depth_features_precise = []\n PTH = \"/home/abby/Repositories/maskrcnn-benchmark/datasets/kitti/object/training/depth\"\n for proposal_per_image, img_ori_id in zip(proposals, img_original_ids):\n stereo_depth_path = os.path.join(PTH, img_ori_id + \".npz\")\n stereo_depth = np.load(stereo_depth_path)\n stereo_depth = stereo_depth['depth']\n assert (\n stereo_depth.shape[0], stereo_depth.shape[1] == proposal_per_image.size[1], 
proposal_per_image.size[0]), \\\n \"{}, {}\".format(stereo_depth.shape, proposal_per_image.size)\n device = proposal_per_image.bbox.device\n stereo_depth = torch.as_tensor(stereo_depth, device=device)\n stereo_depth = stereo_depth.reshape(1, stereo_depth.shape[0], stereo_depth.shape[1])\n stereo_depth_feature, stereo_depth_feature_precise = self.depth_feature_extractor(proposal_per_image,\n stereo_depth)\n stereo_depth_features.append(stereo_depth_feature)\n stereo_depth_features_precise.append(stereo_depth_feature_precise)\n return stereo_depth_features, stereo_depth_features_precise\n\n\ndef build_roi_box3d_head(cfg):\n \"\"\"\n Constructs a new box head.\n By default, uses ROIBox3DHead, but if it turns out not to be enough, just register a new class\n and make it a parameter in the config\n \"\"\"\n return ROIBox3DHead(cfg)\n" ]
[ [ "numpy.load", "torch.as_tensor", "torch.cat" ] ]
educational-technology-collective/slicing-analysis
[ "3008875be5ef9c532479006640499c157b21b2b6" ]
[ "gardner/extraction/forum_feature_extractor.py" ]
[ "# Copyright (C) 2016 The Regents of the University of Michigan\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\n\"\"\"\n\nTakes CSVs of Coursera quiz scores as input (from joined Coursera quiz_submission_metadata and quiz_metadata tables) and outputs a set of quiz features.\n\nFor the SQL queries used to generate the CSV files of raw input data, see ./sql/quiz_sql_query.txt\n\n\nUsage: python3 forum_feature_extractor.py\\\n-i /path/to/raw_data_directory\\\n-d /path/to/course_date_file\n-o /path/to/output_directory\n-n course_name [must match name in coursera_course_dates.csv; ex. \"introfinance\"]\n\non JG local:\npython3 forum_feature_extractor.py -i raw_data/thermo/ -d coursera_course_dates.csv -o proc_data/thermo/ -n introthermodynamics\n\"\"\"\n\nimport argparse, datetime, re, os\nimport pandas as pd\nimport numpy as np\nfrom extraction.extraction_utils import course_len, timestamp_week, fetch_start_end_date\nfrom extraction.quiz_feature_extractor import fetch_course_runs, get_users_and_weeks, gen_user_week_df, generate_appended_csv\n# from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom textstat.textstat import textstat\nimport nltk\n\nMILLISECONDS_IN_SECOND = 1000\n\n\ndef read_forum_and_comment_data(dir, run):\n \"\"\"\n Read forum and comments data for a given run; combine into single dataframe.\n :param dir: input directory with CSV files of forum data.\n :param run: run number; must match number in filename exactly (i.e., '006' not '6').\n :return: pd.DataFrame of forum data (including both posts and comments) for run.\n \"\"\"\n forum_file = [x for x in os.listdir(dir) if x.endswith('{0}_forum_text.csv'.format(run))][0]\n forum_df = pd.read_csv(os.path.join(dir, forum_file))\n # read in universal newline mode; this is due to pandas issue documented here: https://github.com/pandas-dev/pandas/issues/11166\n # forum_df = pd.read_csv(open(os.path.join(input_dir, forum_file), 'rU'), encoding='utf-8', engine='c')\n return forum_df\n\n\ndef gen_thread_order(df):\n \"\"\"\n Add column with order of post within each thread, by timestamp.\n :param df: pd.DataFrame of forum post data.\n :return: pd.DataFrame of forum data with thread_order column.\n \"\"\"\n df.sort_values(by = ['thread_id', 'post_time'], inplace = True)\n df['thread_order'] = df.groupby(['thread_id'])['thread_id'].rank(method='first')\n return df\n\n\ndef gen_threads_started(df):\n \"\"\"\n Generate counts of threads started, by user and week.\n :param df: pd.DataFrame of forum post data.\n :return: pd.DataFrame of 'session_user_id', 'week', and threads_started.\n \"\"\"\n df_starts = df[df.thread_order == 1].groupby(['session_user_id', 'week']).size().rename('threads_started').reset_index()\n return df_starts\n\n\ndef gen_num_replies(df):\n \"\"\"\n Generate feature with number of posts by user which were replies to other users (i.e., not to themselves, and not first post in thread).\n :param df: 
pd.DataFrame of forum post data.\n :return: pd.DataFrame of 'session_user_id', 'week', and num_replies.\n \"\"\"\n df.sort_values(by=['thread_id', 'post_time'], inplace=True)\n df['previous_post_user_id'] = df.groupby('thread_id')['session_user_id'].shift()\n df_reply = df[(df.thread_order != 1) & (df.session_user_id != df.previous_post_user_id)]\n df_out = df_reply.groupby(['session_user_id', 'week']).size().rename('num_replies').reset_index()\n return df_out\n\n\ndef gen_sentiment_feats(df):\n \"\"\"\n Generate features based on sentiment: post_net_sentiment and net_sentiment_diff_from_thread_avg. See https://github.com/cjhutto/vaderSentiment for vader sentiment details.\n :param df: pd.DataFrame of forum post data.\n :return: pd.DataFrame of 'session_user_id', 'week', post_net_sentiment, and net_sentiment_diff_from_thread_avg. Note that users who do not post should have NaNs, not zeros.\n \"\"\"\n import nltk\n # nltk.download('vader_lexicon')\n from nltk.sentiment.vader import SentimentIntensityAnalyzer\n sid = SentimentIntensityAnalyzer()\n # compute post net sentiment and initialize df_out\n df['post_net_sentiment'] = df['post_text'].apply(lambda x: sid.polarity_scores(x).get('compound'))\n df_out = df.groupby(['session_user_id', 'week'])['post_net_sentiment'].mean().rename('avg_net_sentiment').reset_index()\n # compute net_sentiment_diff_from_thread_avg and merge onto df_out\n thread_avg_sentiment = df.groupby('thread_id')['post_net_sentiment'].mean().rename('thread_avg_net_sentiment').reset_index()\n df = df.merge(thread_avg_sentiment, how = 'left')\n df['net_sentiment_diff_from_thread_avg'] = df['post_net_sentiment'] - df['thread_avg_net_sentiment']\n thread_sentiment_sd = df.groupby('thread_id')['post_net_sentiment'].std().rename('thread_sentiment_sd').reset_index()\n df = df.merge(thread_sentiment_sd, how = 'left')\n df['positive_post'] = (df['post_net_sentiment'] - df['thread_avg_net_sentiment'])/df['thread_avg_net_sentiment'] >= 1\n df['negative_post'] = (df['post_net_sentiment'] - df['thread_avg_net_sentiment']) / df['thread_avg_net_sentiment'] <= -1\n df['neutral_post'] = abs((df['post_net_sentiment'] - df['thread_avg_net_sentiment']) / df['thread_avg_net_sentiment']) < 1\n df_post_sentiment_counts = df.groupby(['session_user_id', 'week'])[['positive_post', 'negative_post', 'neutral_post']]\\\n .sum()\\\n .rename(columns = lambda x: x + '_count')\\\n .reset_index()\n df_out = df_out.merge(df_post_sentiment_counts)\n df_avg_diff = df.groupby(['session_user_id', 'week'])['net_sentiment_diff_from_thread_avg'].mean().rename('avg_net_sentiment_diff_from_thread_avg').reset_index()\n df_out = df_out.merge(df_avg_diff).drop('avg_net_sentiment', axis = 1)\n return df_out\n\n\ndef flesch_reading_ease(text):\n try:\n return textstat.flesch_reading_ease(text)\n except:\n return np.nan\n\n\ndef flesch_kincaid_grade(text):\n try:\n return textstat.flesch_kincaid_grade(text)\n except:\n return np.nan\n\n\ndef gen_flesch_scores(df, reading_ease_bins = [-np.inf] + [x for x in range(10, 100, 10)] + [np.inf], grade_level_bins = [-np.inf] + [x for x in range(20)] + [np.inf]):\n \"\"\"\n Generate features based on flesch readability scores: 'flesch_reading_ease' and 'flesch_kincaid_grade'.\n :param df: pd.DataFrame of forum post data.\n :return: pd.DataFrame of 'session_user_id', 'week', 'avg_flesch_reading_ease' and 'avg_flesch_kincaid_grade'. 
Note that users who do not post should have NaNs, not zeros.\n \"\"\"\n reading_ease_bin_labs = [\"_\".join([\"reading_ease_bin\", str(reading_ease_bins[x]), str(reading_ease_bins[x+1])]) for x in range(len(reading_ease_bins)-1)]\n grade_level_bin_labs = [\"_\".join([\"grade_level_bin\", str(grade_level_bins[x]), str(grade_level_bins[x + 1])]) for x in range(len(grade_level_bins) - 1)]\n reading_ease_scores = df['post_text'].apply(flesch_reading_ease)\n reading_ease_bin_scores = pd.cut(reading_ease_scores, bins = reading_ease_bins, labels = reading_ease_bin_labs)\n temp = pd.concat([df[['session_user_id', 'week']], pd.get_dummies(reading_ease_bin_scores)], axis = 1)\n flesch_kinkaid_scores = df['post_text'].apply(flesch_kincaid_grade)\n grade_level_bin_scores = pd.cut(flesch_kinkaid_scores, bins = grade_level_bins, labels = grade_level_bin_labs)\n df_out = pd.concat([temp, pd.get_dummies(grade_level_bin_scores)], axis = 1).groupby(['session_user_id', 'week']).sum().reset_index()\n return df_out\n\n\ndef gen_bigram_counts(df):\n \"\"\"\n Generate counts of unique bigrams used per user per week.\n :param df: pd.DataFrame of forum post data.\n :return: pd.DataFrame of 'session_user_id', 'week', 'unique_bigrams_week'.\n \"\"\"\n import nltk\n nltk.download('punkt')\n # TODO: look at cleaning text (removing tags, html, etc.)\n # TODO: is there a more efficient way to do this?\n df['post_bigrams'] = df['post_text'].apply(lambda x: [bg for bg in nltk.bigrams(nltk.word_tokenize(x))])\n df_out = df.groupby(['session_user_id', 'week'])['post_bigrams'].apply(sum).rename('all_bigrams_week').reset_index()\n df_out['unique_bigrams_week'] = df_out['all_bigrams_week'].apply(lambda x: len(set(x)))\n df_out.drop('all_bigrams_week', axis = 1, inplace = True)\n return df_out\n\n\ndef gen_forum_features(forum_df, course_start, course_end, dropout_fp = \"/output/user_dropout_weeks.csv\"):\n forum_df['week'] = (forum_df['post_time']*1000).apply(timestamp_week, args = (course_start, course_end))\n forum_df['post_text'] = forum_df['post_text'].apply(str)\n users, weeks = get_users_and_weeks(forum_df, dropout_fp, week_col='week')\n forum_df = gen_thread_order(forum_df)\n # initialize output dataframe with one entry per user per week\n df_out = gen_user_week_df(users, weeks)\n # compute feature: threads_started\n feat_temp = gen_threads_started(forum_df)\n df_out = df_out.merge(feat_temp, how = 'left')\n df_out['threads_started'].fillna(0, inplace = True)\n # compute feature: avg post length in characters\n forum_df['post_len_char'] = forum_df['post_text'].apply(len)\n feat_temp = forum_df.groupby(['session_user_id', 'week'])['post_len_char'].agg('sum').rename('week_post_len_char').reset_index()\n df_out = df_out.merge(feat_temp, how = 'left')\n df_out['week_post_len_char'].fillna(0, inplace = True)\n # compute feature: number of posts\n feat_temp = forum_df.groupby(['session_user_id', 'week']).size().rename('num_posts').reset_index()\n df_out = df_out.merge(feat_temp, how='left')\n df_out['num_posts'].fillna(0, inplace = True)\n # compute feature: num_replies: count of posts which were responses to other users (i.e., not first post and not self-response)\n feat_temp = gen_num_replies(forum_df)\n df_out = df_out.merge(feat_temp, how='left')\n df_out['num_replies'].fillna(0, inplace = True)\n #compute feature: votes_net : sum of upvotes minus downvotes (this is what 'votes' field is) for all posts that week\n feat_temp = forum_df.groupby(['session_user_id', 'week'])['votes'].sum().rename('votes_net').reset_index()\n 
df_out = df_out.merge(feat_temp, how='left')\n df_out['votes_net'].fillna(0, inplace=True)\n # compute feature: avg_sentiment\n # compute feature: net_sentiment_diff_from_thread_avg\n feat_temp = gen_sentiment_feats(forum_df)\n df_out = df_out.merge(feat_temp, how='left')\n df_out['positive_post_count'].fillna(0, inplace = True)\n df_out['negative_post_count'].fillna(0, inplace=True)\n df_out['neutral_post_count'].fillna(0, inplace=True)\n df_out['avg_net_sentiment_diff_from_thread_avg'].fillna(0, inplace = True)\n # compute feature: flesch reading ease score and grade level score\n feat_temp = gen_flesch_scores(forum_df)\n df_out = df_out.merge(feat_temp, how='left')\n for c in [x for x in df_out.columns if x.startswith(\"reading_ease_bin\") or x.startswith(\"grade_level_bin\")]:\n df_out[c].fillna(0, inplace = True)\n # compute feature: number of unique bigrams\n feat_temp = gen_bigram_counts(forum_df)\n df_out = df_out.merge(feat_temp, how='left')\n df_out['unique_bigrams_week'].fillna(0, inplace = True)\n return df_out\n\n\ndef write_forum_output(forum_feature_df, output_dir, run, appended = True, week_only = False, week = 2):\n week_df = forum_feature_df[forum_feature_df.week == week]\n if week_df.shape[0] == 0:\n return # no data for this week\n app_week_df = generate_appended_csv(forum_feature_df, week)\n # write output; fill NaN values with NA so R will be happy :-D\n if week_only:\n week_df.set_index('session_user_id').fillna('NA').to_csv(os.path.join(output_dir, 'week_{0}_forum_only_feats.csv'.format(week)))\n if appended:\n app_week_df.set_index('session_user_id').fillna('NA').to_csv(os.path.join(output_dir, 'week_{0}_forum_appended_feats.csv'.format(week)))\n return\n\n\ndef main(course_name, run, output_dir = '/output', date_file = 'coursera_course_dates.csv'):\n \"\"\"\n Main workhorse function; builds full forum dataset for course_name.\n :param course_name: course short name; should match name in coursera_course_dates.csv\n :param date_file: course dates CSV file\n :param output_dir: output directory; should be /proc_data/shortname\n :param run: run numbers in 3-digit string format\n :return: None; writes output to output_dir subdirectories\n \"\"\"\n input_dir = os.path.join('/input', course_name, run)\n date_file_path = os.path.join(input_dir, date_file)\n print('fetching data for run {0}'.format(run))\n # fetch start/end dates\n course_start, course_end = fetch_start_end_date(course_name, run, date_file_path)\n # read in forum data; this combines comments and posts\n forum_df = read_forum_and_comment_data(output_dir, run)\n # generate derived features\n forum_feature_df = gen_forum_features(forum_df, course_start, course_end)\n assert forum_feature_df.isnull().sum().sum() == 0\n # write features to output_dir, by course week\n write_forum_output(forum_feature_df, output_dir, run)\n return None\n\n\nif __name__ == '__main__':\n # build parser\n parser = argparse.ArgumentParser(description='Create quiz features from CSV files.')\n parser.add_argument('-i', metavar=\"Input data file path\",\n nargs=1, type=str, required=True, dest='input_dir')\n parser.add_argument('-d', metavar=\"Course dates CSV\",\n nargs=1, type=str, required=True, dest='date_file')\n parser.add_argument('-o', metavar=\"output directory path (will be created if does not exist)\", nargs=1, type=str,\n required=True, dest='out_dir')\n parser.add_argument('-n', metavar=\"course short name; should match name in coursera_course_dates.csv\", nargs=1, type=str,\n required=True, dest='course_name')\n # collect 
input from parser\n args = parser.parse_args()\n INPUT_DIR = args.input_dir[0]\n date_file = args.date_file[0]\n course_name = args.course_name[0]\n OUTPUT_DIR = args.out_dir[0]\n # fetch list of run numbers from input_dir\n runs = fetch_course_runs(INPUT_DIR)\n # generate features for each run and export as CSVs into OUTPUT_DIR\n main(course_name, date_file, input_dir=INPUT_DIR, output_dir=OUTPUT_DIR, runs = runs)\n\n\n" ]
[ [ "pandas.cut", "pandas.get_dummies" ] ]
saransh09/openfl-1
[ "beba571929a56771f2fc1671154a3dbe60b38785" ]
[ "tests/openfl/utilities/optimizers/func_for_optimization.py" ]
[ "# Copyright (C) 2021-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Numpy optimizers test functions module.\"\"\"\nfrom typing import Dict\n\nimport numpy as np\n\n\ndef rosenbrock_func(point: Dict[str, np.ndarray]) -> float:\n \"\"\"\n Calculate Rosenbrock function.\n\n More details: https://en.wikipedia.org/wiki/Rosenbrock_function\n \"\"\"\n return (1 - point['x'])**2 + 100 * (point['y'] - point['x']**2)**2\n\n\ndef _get_rosenbrock_grads(point: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n \"\"\"Calculate gradients for Rosenbrock function.\"\"\"\n grads = {'x': np.array([0]), 'y': np.array([0])}\n grads['x'] = -2 * (1 - point['x']) - 400 * point['x'] * (point['y'] - point['x']**2)\n grads['y'] = grads['y'] + 200 * (point['y'] - point['x']**2)\n return grads\n\n\ndef mc_cormick_func(point: Dict[str, np.ndarray]) -> float:\n \"\"\"\n Calculate McCormick function.\n\n More details: https://en.wikipedia.org/wiki/Test_functions_for_optimization\n \"\"\"\n return (np.sin(point['x'] + point['y'])\n + (point['x'] - point['y'])**2\n - 1.5 * point['x'] + 2.5 * point['y'] + 1)\n\n\ndef _get_mc_cormick_grads(point: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n \"\"\"Calculate gradients for McCormick function.\"\"\"\n grads = {'x': np.array([0]), 'y': np.array([0])}\n grads['x'] = np.cos(point['x'] + point['y']) + 2 * (point['x'] - point['y']) - 1.5\n grads['y'] = np.cos(point['x'] + point['y']) - 2 * (point['x'] - point['y']) + 2.5\n return grads\n\n\nrosenbrock_func.get_grads = _get_rosenbrock_grads\nrosenbrock_func.true_answer = {'x': np.array([1.0]), 'y': np.array([1.0])}\n\nmc_cormick_func.get_grads = _get_mc_cormick_grads\nmc_cormick_func.true_answer = {'x': np.array([-0.54719]), 'y': np.array([-1.54719])}\n\n__all__ = [\n 'rosenbrock_func',\n 'mc_cormick_func',\n]\n" ]
[ [ "numpy.array", "numpy.cos", "numpy.sin" ] ]
msai-amin/DP-FL
[ "be275996ef8030d3eb473d8f6d609969bb071909" ]
[ "opacus/privacy_engine.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport os\nimport types\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\n\nfrom . import privacy_analysis\nfrom .dp_model_inspector import DPModelInspector\nfrom .layers.dp_ddp import (\n DifferentiallyPrivateDistributedDataParallel,\n average_gradients,\n)\nfrom .per_sample_gradient_clip import PerSampleGradientClipper\nfrom .utils import clipping\n\n\nDEFAULT_ALPHAS = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))\n\n\ndef get_noise_multiplier(\n target_epsilon: float,\n target_delta: float,\n sample_rate: float,\n epochs: int,\n alphas: List[float],\n sigma_min: Optional[float] = 0.01,\n sigma_max: Optional[float] = 10.0,\n) -> float:\n r\"\"\"\n Computes the noise level sigma to reach a total budget of (target_epsilon, target_delta)\n at the end of epochs, with a given sample_rate\n\n Args:\n target_epsilon: the privacy budget's epsilon\n target_delta: the privacy budget's delta\n sample_rate: the sampling rate (usually batch_size / n_data)\n epochs: the number of epochs to run\n alphas: the list of orders at which to compute RDP\n\n Returns:\n The noise level sigma to ensure privacy budget of (target_epsilon, target_delta)\n\n \"\"\"\n eps = float(\"inf\")\n while eps > target_epsilon:\n sigma_max = 2 * sigma_max\n rdp = privacy_analysis.compute_rdp(\n sample_rate, sigma_max, epochs / sample_rate, alphas\n )\n eps = privacy_analysis.get_privacy_spent(alphas, rdp, target_delta)[0]\n if sigma_max > 2000:\n raise ValueError(\"The privacy budget is too low.\")\n\n while sigma_max - sigma_min > 0.01:\n sigma = (sigma_min + sigma_max) / 2\n rdp = privacy_analysis.compute_rdp(\n sample_rate, sigma, epochs / sample_rate, alphas\n )\n eps = privacy_analysis.get_privacy_spent(alphas, rdp, target_delta)[0]\n\n if eps < target_epsilon:\n sigma_max = sigma\n else:\n sigma_min = sigma\n\n return sigma\n\n\nclass PrivacyEngine:\n r\"\"\"\n The main component of Opacus is the ``PrivacyEngine``.\n\n To train a model with differential privacy, all you need to do\n is to define a ``PrivacyEngine`` and later attach it to your\n optimizer before running.\n\n\n Example:\n This example shows how to define a ``PrivacyEngine`` and to attach\n it to your optimizer.\n\n >>> import torch\n >>> model = torch.nn.Linear(16, 32) # An example model\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.05)\n >>> privacy_engine = PrivacyEngine(model, sample_rate=0.01, noise_multiplier=1.3, max_grad_norm=1.0)\n >>> privacy_engine.attach(optimizer) # That's it! 
Now it's business as usual.\n \"\"\"\n\n def __init__(\n self,\n module: nn.Module,\n *, # As per PEP 3102, this forces clients to specify kwargs explicitly, not positionally\n sample_rate: Optional[float] = None,\n batch_size: Optional[int] = None,\n sample_size: Optional[int] = None,\n max_grad_norm: Union[float, List[float]],\n noise_multiplier: Optional[float] = None,\n alphas: List[float] = DEFAULT_ALPHAS,\n secure_rng: bool = False,\n batch_first: bool = True,\n target_delta: float = 1e-6,\n target_epsilon: Optional[float] = None,\n epochs: Optional[float] = None,\n loss_reduction: str = \"mean\",\n **misc_settings,\n ):\n r\"\"\"\n Args:\n module: The Pytorch module to which we are attaching the privacy engine\n alphas: A list of RDP orders\n noise_multiplier: The ratio of the standard deviation of the Gaussian noise to\n the L2-sensitivity of the function to which the noise is added\n max_grad_norm: The maximum norm of the per-sample gradients. Any gradient with norm\n higher than this will be clipped to this value.\n batch_size: Training batch size. Used in the privacy accountant.\n sample_size: The size of the sample (dataset). Used in the privacy accountant.\n sample_rate: Sample rate used to build batches. Used in the privacy accountant.\n secure_rng: If on, it will use ``torchcsprng`` for secure random number generation.\n Comes with a significant performance cost, therefore it's recommended that you\n turn it off when just experimenting.\n batch_first: Flag to indicate if the input tensor to the corresponding module\n has the first dimension representing the batch. If set to True, dimensions on\n input tensor will be ``[batch_size, ..., ...]``.\n target_delta: The target delta. If unset, we will set it for you.\n loss_reduction: Indicates if the loss reduction (for aggregating the gradients)\n is a sum or a mean operation. Can take values \"sum\" or \"mean\"\n **misc_settings: Other arguments to the init\n \"\"\"\n\n self.steps = 0\n self.module = module\n\n self.batch_size = batch_size\n self.sample_size = sample_size\n self.sample_rate = sample_rate\n self._set_sample_rate()\n\n if isinstance(module, DifferentiallyPrivateDistributedDataParallel):\n rank = torch.distributed.get_rank()\n n_replicas = torch.distributed.get_world_size()\n self.sample_rate *= n_replicas\n else:\n rank = 0\n n_replicas = 1\n\n if noise_multiplier is None:\n if target_epsilon is None or target_delta is None or epochs is None:\n raise ValueError(\n \"If noise_multiplier is not specified, (target_epsilon, target_delta, epochs) should be given to the engine.\"\n )\n self.noise_multiplier = get_noise_multiplier(\n target_epsilon, target_delta, self.sample_rate, epochs, alphas\n )\n else:\n self.noise_multiplier = noise_multiplier\n\n self.max_grad_norm = max_grad_norm\n self.alphas = alphas\n self.target_delta = target_delta\n self.secure_rng = secure_rng\n self.batch_first = batch_first\n self.loss_reduction = loss_reduction\n self.misc_settings = misc_settings\n self.n_replicas = n_replicas\n self.rank = rank\n\n self.device = next(module.parameters()).device\n self.steps = 0\n\n if self.noise_multiplier < 0:\n raise ValueError(\n f\"noise_multiplier={self.noise_multiplier} is not a valid value. Please provide a float >= 0.\"\n )\n\n if isinstance(self.max_grad_norm, float) and self.max_grad_norm <= 0:\n raise ValueError(\n f\"max_grad_norm={self.max_grad_norm} is not a valid value. 
Please provide a float > 0.\"\n )\n\n if not self.target_delta:\n if self.sample_size:\n warnings.warn(\n \"target_delta unset. Setting it to an order of magnitude less than 1/sample_size.\"\n )\n self.target_delta = 0.1 * (1 / self.sample_size)\n else:\n raise ValueError(\"Please provide a target_delta.\")\n\n if self.secure_rng:\n self.seed = None\n try:\n import torchcsprng as csprng\n except ImportError as e:\n msg = (\n \"To use secure RNG, you must install the torchcsprng package! \"\n \"Check out the instructions here: https://github.com/pytorch/csprng#installation\"\n )\n raise ImportError(msg) from e\n\n self.seed = None\n self.random_number_generator = csprng.create_random_device_generator(\n \"/dev/urandom\"\n )\n else:\n warnings.warn(\n \"Secure RNG turned off. This is perfectly fine for experimentation as it allows \"\n \"for much faster training performance, but remember to turn it on and retrain \"\n \"one last time before production with ``secure_rng`` turned on.\"\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n self.seed = int.from_bytes(os.urandom(8), byteorder=\"big\", signed=True)\n self.random_number_generator = self._set_seed(self.seed)\n\n self.validator = DPModelInspector()\n self.clipper = None # lazy initialization in attach\n\n def state_dict(self):\n return {\n \"steps\": self.steps,\n }\n\n def load_state_dict(self, state_dict):\n self.steps = state_dict[\"steps\"]\n\n def detach(self):\n r\"\"\"\n Detaches the privacy engine from optimizer.\n\n To detach the ``PrivacyEngine`` from optimizer, this method returns\n the model and the optimizer to their original states (i.e. all\n added attributes/methods will be removed).\n \"\"\"\n optim = self.optimizer\n optim.privacy_engine = None\n self.clipper.close()\n optim.step = types.MethodType(optim.original_step, optim)\n del optim.virtual_step\n\n def attach(self, optimizer: torch.optim.Optimizer):\n r\"\"\"\n Attaches the privacy engine to the optimizer.\n\n Attaches to the ``PrivacyEngine`` an optimizer object,and injects\n itself into the optimizer's step. To do that it,\n\n 1. Validates that the model does not have unsupported layers.\n\n 2. Adds a pointer to this object (the ``PrivacyEngine``) inside the optimizer.\n\n 3. Moves optimizer's original ``step()`` function to ``original_step()``.\n\n 4. 
Monkeypatches the optimizer's ``step()`` function to call ``step()`` on\n the query engine automatically whenever it would call ``step()`` for itself.\n\n Args:\n optimizer: The optimizer to which the privacy engine will attach\n \"\"\"\n\n self.validator.validate(self.module)\n norm_clipper = (\n clipping.ConstantFlatClipper(self.max_grad_norm)\n if not isinstance(self.max_grad_norm, list)\n else clipping.ConstantPerLayerClipper(self.max_grad_norm)\n )\n\n if self.misc_settings.get(\"experimental\", False):\n norm_clipper = clipping._Dynamic_Clipper_(\n [self.max_grad_norm],\n self.misc_settings.get(\"clip_per_layer\", False),\n self.misc_settings.get(\n \"clipping_method\", clipping.ClippingMethod.STATIC\n ),\n self.misc_settings.get(\"clipping_ratio\", 0.0),\n self.misc_settings.get(\"clipping_momentum\", 0.0),\n )\n\n self.clipper = PerSampleGradientClipper(\n self.module,\n norm_clipper,\n self.batch_first,\n self.loss_reduction,\n )\n\n def dp_zero_grad(self):\n self.privacy_engine.zero_grad()\n self.original_zero_grad()\n\n def dp_step(self, closure=None):\n self.privacy_engine.step()\n if isinstance(\n self.privacy_engine.module, DifferentiallyPrivateDistributedDataParallel\n ):\n average_gradients(self.privacy_engine.module)\n self.original_step(closure)\n\n optimizer.privacy_engine = self\n optimizer.original_step = optimizer.step\n optimizer.step = types.MethodType(dp_step, optimizer)\n\n optimizer.original_zero_grad = optimizer.zero_grad\n optimizer.zero_grad = types.MethodType(dp_zero_grad, optimizer)\n\n def virtual_step(self):\n self.privacy_engine.virtual_step()\n\n optimizer.virtual_step = types.MethodType(virtual_step, optimizer)\n\n # create a cross reference for detaching\n self.optimizer = optimizer\n\n def get_renyi_divergence(self):\n rdp = torch.tensor(\n privacy_analysis.compute_rdp(\n self.sample_rate, self.noise_multiplier, 1, self.alphas\n )\n )\n return rdp\n\n def get_privacy_spent(\n self, target_delta: Optional[float] = None\n ) -> Tuple[float, float]:\n \"\"\"\n Computes the (epsilon, delta) privacy budget spent so far.\n\n This method converts from an (alpha, epsilon)-DP guarantee for all alphas that\n the ``PrivacyEngine`` was initialized with. It returns the optimal alpha together\n with the best epsilon.\n\n Args:\n target_delta: The Target delta. If None, it will default to the privacy\n engine's target delta.\n\n Returns:\n Pair of epsilon and optimal order alpha.\n \"\"\"\n if target_delta is None:\n if self.target_delta is None:\n raise ValueError(\n \"If self.target_delta is not specified, target_delta should be set as argument to get_privacy_spent.\"\n )\n target_delta = self.target_delta\n rdp = self.get_renyi_divergence() * self.steps\n eps, best_alpha = privacy_analysis.get_privacy_spent(\n self.alphas, rdp, target_delta\n )\n return float(eps), float(best_alpha)\n\n def zero_grad(self):\n \"\"\"\n Resets clippers status.\n\n Clipper keeps internal gradient per sample in the batch in each\n ``forward`` call of the module, they need to be cleaned before the\n next round.\n\n If these variables are not cleaned the per sample gradients keep\n being concatenated accross batches. If accumulating gradients\n is intented behavious, e.g. simulating a large batch, prefer\n using ``virtual_step()`` function.\n \"\"\"\n if self.clipper is not None:\n self.clipper.zero_grad()\n\n def step(self):\n \"\"\"\n Takes a step for the privacy engine.\n\n Notes:\n You should not call this method directly. 
Rather, by attaching your\n ``PrivacyEngine`` to the optimizer, the ``PrivacyEngine`` would have\n the optimizer call this method for you.\n\n Raises:\n ValueError: If the last batch of training epoch is greater than others.\n This ensures the clipper consumed the right amount of gradients.\n In the last batch of a training epoch, we might get a batch that is\n smaller than others but we should never get a batch that is too large\n\n \"\"\"\n self.steps += 1\n self.clipper.clip_and_accumulate()\n clip_values, batch_size = self.clipper.pre_step()\n\n params = (p for p in self.module.parameters() if p.requires_grad)\n for p, clip_value in zip(params, clip_values):\n noise = self._generate_noise(clip_value, p)\n if self.loss_reduction == \"mean\" and self.rank == 0:\n noise /= batch_size\n p.grad += noise\n\n def to(self, device: Union[str, torch.device]):\n \"\"\"\n Moves the privacy engine to the target device.\n\n Args:\n device : The device on which Pytorch Tensors are allocated.\n See: https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.device\n\n Example:\n This example shows the usage of this method, on how to move the model\n after instantiating the ``PrivacyEngine``.\n\n >>> model = torch.nn.Linear(16, 32) # An example model. Default device is CPU\n >>> privacy_engine = PrivacyEngine(model, sample_rate=0.01, noise_multiplier=0.8, max_grad_norm=0.5)\n >>> device = \"cuda:3\" # GPU\n >>> model.to(device) # If we move the model to GPU, we should call the to() method of the privacy engine (next line)\n >>> privacy_engine.to(device)\n\n Returns:\n The current ``PrivacyEngine``\n \"\"\"\n self.device = device\n return self\n\n def virtual_step(self):\n r\"\"\"\n Takes a virtual step.\n\n Virtual batches enable training with arbitrary large batch sizes, while\n keeping the memory consumption constant. This is beneficial, when training\n models with larger batch sizes than standard models.\n\n Example:\n Imagine you want to train a model with batch size of 2048, but you can only\n fit batch size of 128 in your GPU. Then, you can do the following:\n\n >>> for i, (X, y) in enumerate(dataloader):\n >>> logits = model(X)\n >>> loss = criterion(logits, y)\n >>> loss.backward()\n >>> if i % 16 == 15:\n >>> optimizer.step() # this will call privacy engine's step()\n >>> optimizer.zero_grad()\n >>> else:\n >>> optimizer.virtual_step() # this will call privacy engine's virtual_step()\n\n The rough idea of virtual step is as follows:\n\n 1. Calling ``loss.backward()`` repeatedly stores the per-sample gradients\n for all mini-batches. If we call ``loss.backward()`` ``N`` times on\n mini-batches of size ``B``, then each weight's ``.grad_sample`` field will\n contain ``NxB`` gradients. Then, when calling ``step()``, the privacy engine\n clips all ``NxB`` gradients and computes the average gradient for an effective\n batch of size ``NxB``. A call to ``optimizer.zero_grad()`` erases the\n per-sample gradients.\n\n 2. By calling ``virtual_step()`` after ``loss.backward()``,the ``B``\n per-sample gradients for this mini-batch are clipped and summed up into a\n gradient accumulator. The per-sample gradients can then be discarded. After\n ``N`` iterations (alternating calls to ``loss.backward()`` and\n ``virtual_step()``), a call to ``step()`` will compute the average gradient\n for an effective batch of size ``NxB``.\n\n The advantage here is that this is memory-efficient: it discards the per-sample\n gradients after every mini-batch. 
We can thus handle batches of arbitrary size.\n \"\"\"\n self.clipper.clip_and_accumulate()\n\n def _generate_noise(\n self, max_grad_norm: float, reference: nn.parameter.Parameter\n ) -> torch.Tensor:\n r\"\"\"\n Generates a tensor of Gaussian noise of the same shape as ``reference``.\n\n The generated tensor has zero mean and standard deviation\n sigma = ``noise_multiplier x max_grad_norm ``\n\n Args:\n max_grad_norm : The maximum norm of the per-sample gradients.\n reference : The reference, based on which the dimention of the\n noise tensor will be determined\n\n Returns:\n the generated noise with noise zero and standard\n deviation of ``noise_multiplier x max_grad_norm ``\n \"\"\"\n if self.noise_multiplier > 0 and max_grad_norm > 0:\n return torch.normal(\n 0,\n self.noise_multiplier * max_grad_norm,\n reference.grad.shape,\n device=self.device,\n generator=self.random_number_generator,\n )\n return torch.zeros(reference.grad.shape, device=self.device)\n\n def _set_seed(self, seed: int):\n r\"\"\"\n Allows to manually set the seed allowing for a deterministic run. Useful if you want to\n debug.\n\n WARNING: MANUALLY SETTING THE SEED BREAKS THE GUARANTEE OF SECURE RNG.\n For this reason, this method will raise a ValueError if you had ``secure_rng`` turned on.\n\n Args:\n seed : The **unsecure** seed\n \"\"\"\n if self.secure_rng:\n raise ValueError(\n \"Seed was manually set on a ``PrivacyEngine`` with ``secure_rng`` turned on.\"\n \"This fundamentally breaks secure_rng, and cannot be allowed. \"\n \"If you do need reproducibility with a fixed seed, first instantiate the PrivacyEngine \"\n \"with ``secure_seed`` turned off.\"\n )\n self.seed = seed\n\n return (\n torch.random.manual_seed(self.seed)\n if self.device.type == \"cpu\"\n else torch.cuda.manual_seed(self.seed)\n )\n\n def _set_sample_rate(self):\n r\"\"\"\n Determine the ``sample_rate``.\n\n If a ``sample_rate`` is provided, it will be used.\n If no ``sample_rate``is provided, the used ``sample_rate`` will be equal to\n ``batch_size`` /  ``sample_size``.\n \"\"\"\n if self.batch_size and not isinstance(self.batch_size, int):\n raise ValueError(\n f\"batch_size={self.batch_size} is not a valid value. Please provide a positive integer.\"\n )\n\n if self.sample_size and not isinstance(self.sample_size, int):\n raise ValueError(\n f\"sample_size={self.sample_size} is not a valid value. Please provide a positive integer.\"\n )\n\n if self.sample_rate is None:\n if self.batch_size is None or self.sample_size is None:\n raise ValueError(\n \"You must provide (batch_size and sample_sizes) or sample_rate.\"\n )\n else:\n self.sample_rate = self.batch_size / self.sample_size\n if self.batch_size is not None or self.sample_size is not None:\n warnings.warn(\n \"The sample rate will be defined from ``batch_size`` and ``sample_size``.\"\n \"The returned privacy budget will be incorrect.\"\n )\n else:\n warnings.warn(\n \"A ``sample_rate`` has been provided.\"\n \"Thus, the provided ``batch_size``and ``sample_size`` will be ignored.\"\n )\n\n if self.sample_rate > 1.0:\n raise ValueError(\n f\"sample_rate={self.sample_rate} is not a valid value. Please provide a float between 0 and 1.\"\n )\n" ]
[ [ "torch.normal", "torch.cuda.manual_seed", "torch.zeros", "torch.random.manual_seed", "torch.distributed.get_rank", "torch.distributed.get_world_size" ] ]
sage-home/sage-model
[ "73680d91494389dbc13d3edd2fbeadc023694756" ]
[ "plotting/galaxy_properties.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nExample script for plotting the data from the Mini-Millennium simulation.\n\nBy extending the model lists (e.g., IMFs, snapshots, etc), you can plot multiple\nsimulations on the same axis.\n\nRefer to the online documentation (sage-model.readthedocs.io) for full information on how\nto add your own data format, property calculations and plots.\n\nAuthor: Jacob Seiler.\n\"\"\"\n\n# These contain example functions to calculate (and plot) properties such as the\n# stellar mass function, quiescent fraction, bulge fraction etc.\nimport sage_analysis.example_calcs\nimport sage_analysis.example_plots\n\n# Function to determine what we will calculate/plot for each model.\nfrom sage_analysis.utils import generate_func_dict\n\n# Class that handles the calculation of properties.\nfrom sage_analysis.model import Model\n# Data Classes that handle reading the different SAGE output formats.\nfrom sage_analysis.sage_binary import SageBinaryData\n\n# This has h5py dependance. If the user is only using binary, then we don't mind not\n# importing.\ntry:\n from sage_analysis.sage_hdf5 import SageHdf5Data\nexcept ImportError:\n print(\"h5py not found. If you're reading in HDF5 output from SAGE, please install \"\n \"this package.\")\n\nimport numpy as np\nimport os\n\n# Sometimes we divide a galaxy that has zero mass (e.g., no cold gas). Ignore these\n# warnings as they spam stdout. Also remember the old settings.\nold_error_settings = np.seterr()\nnp.seterr(all=\"ignore\")\n\n\nif __name__ == \"__main__\":\n\n # We support the plotting of an arbitrary number of models. To do so, simply add the\n # extra variables specifying the path to the model directory and other variables.\n # E.g., 'model1_snapshot = ...\", \"model1_IMF = ...\".\n model0_snapshot = 63 # Snapshot we're plotting properties at.\n model0_IMF = \"Chabrier\" # Chabrier or Salpeter.\n model0_label = \"Mini-Millennium\" # Goes on the axis.\n model0_sage_file = \"../input/millennium.par\"\n model0_simulation = \"Mini-Millennium\" # Used to set cosmology.\n model0_first_file = 0 # File range we're plotting.\n model0_last_file = 0 # Closed interval, [first_file, last_file].\n\n # Then extend each of these lists for all the models that you want to plot.\n # E.g., 'IMFs = [model0_IMF, model1_IMF, ..., modelN_IMF]\n IMFs = [model0_IMF]\n labels = [model0_label]\n snapshots = [model0_snapshot]\n sage_files = [model0_sage_file]\n simulations = [model0_simulation]\n first_files = [model0_first_file]\n last_files = [model0_last_file]\n\n # A couple of extra variables...\n plot_output_format = \"png\"\n plot_output_path = \"./plots\" # Will be created if path doesn't exist.\n\n # These toggles specify which plots you want to be made.\n plot_toggles = {\"SMF\" : 1, # Stellar mass function.\n \"BMF\" : 1, # Baryonic mass function.\n \"GMF\" : 1, # Gas mass function (cold gas).\n \"BTF\" : 1, # Baryonic Tully-Fisher.\n \"sSFR\" : 1, # Specific star formation rate.\n \"gas_frac\" : 1, # Fraction of galaxy that is cold gas.\n \"metallicity\" : 1, # Metallicity scatter plot.\n \"bh_bulge\" : 1, # Black hole-bulge relationship.\n \"quiescent\" : 1, # Fraction of galaxies that are quiescent.\n \"bulge_fraction\" : 1, # Fraction of galaxies that are bulge/disc dominated.\n \"baryon_fraction\" : 1, # Fraction of baryons in galaxy/reservoir.\n \"reservoirs\" : 1, # Mass in each reservoir.\n \"spatial\" : 1} # Spatial distribution of galaxies.\n\n ############## DO NOT TOUCH BELOW #############\n ### IF NOT ADDING EXTRA PROPERTIES OR 
PLOTS ###\n ############## DO NOT TOUCH BELOW #############\n\n # Generate directory for output plots.\n if not os.path.exists(plot_output_path):\n os.makedirs(plot_output_path)\n\n # Generate a dictionary for each model containing the required information.\n # We store these in `model_dicts` which will be a list of dictionaries.\n model_dicts = []\n for IMF, label, snapshot, sage_file, sim, first_file, last_file in zip(IMFs, labels, snapshots, sage_files, simulations, first_files, last_files):\n this_model_dict = {\"IMF\": IMF,\n \"label\": label,\n \"snapshot\": snapshot,\n \"sage_file\": sage_file,\n \"simulation\": sim,\n \"first_file\": first_file,\n \"last_file\": last_file}\n\n model_dicts.append(this_model_dict)\n\n # Go through each model and calculate all the required properties.\n models = []\n for model_dict in model_dicts:\n\n # Instantiate a Model class. This holds the data paths and methods to calculate\n # the required properties.\n my_model = Model(model_dict)\n my_model.plot_output_format = plot_output_format\n\n # Each SAGE output has a specific class written to read in the data.\n if my_model.sage_output_format == \"sage_binary\":\n my_model.data_class = SageBinaryData(my_model)\n elif my_model.sage_output_format == \"sage_hdf5\":\n my_model.data_class = SageHdf5Data(my_model)\n\n my_model.data_class.set_cosmology(my_model)\n\n # Some properties require the stellar mass function to normalize their values. For\n # these, the SMF plot toggle is explicitly required.\n try:\n if plot_toggles[\"SMF\"]:\n my_model.calc_SMF = True\n else:\n my_model.calc_SMF = False\n except KeyError: # Maybe we've removed \"SMF\" from plot_toggles...\n my_model.calc_SMF = False\n\n # Then populate the `calculation_methods` dictionary. This dictionary will control\n # which properties each model will calculate. The dictionary is populated using\n # the plot_toggles defined above.\n # Our functions are inside the `example_calcs.py` module and are named \"calc_<toggle>\". If\n # your functions are in a different module or different function prefix, change it\n # here.\n # ALL FUNCTIONS MUST HAVE A FUNCTION SIGNATURE `func(Model, gals, optional_kwargs=...)`.\n calculation_functions = generate_func_dict(plot_toggles, module_name=\"sage_analysis.example_calcs\",\n function_prefix=\"calc_\")\n\n # Finally, before we calculate the properties, we need to decide how each property\n # is stored. Properties can be binned (e.g., how many galaxies with mass between 10^8.0\n # and 10^8.1), scatter plotted (e.g., for 1000 galaxies plot the specific star\n # formation rate versus stellar mass) or a single number (e.g., the sum\n # of the star formation rate at a snapshot). Properties can be accessed using\n # `Model.properties[\"property_name\"]`; e.g., `Model.properties[\"SMF\"]`.\n\n # First let's do the properties binned on stellar mass. 
The bins themselves can be\n # accessed using `Model.bins[\"bin_name\"]`; e.g., `Model.bins[\"stellar_mass_bins\"]\n stellar_properties = [\"SMF\", \"red_SMF\", \"blue_SMF\", \"BMF\", \"GMF\",\n \"centrals_MF\", \"satellites_MF\", \"quiescent_galaxy_counts\",\n \"quiescent_centrals_counts\", \"quiescent_satellites_counts\",\n \"fraction_bulge_sum\", \"fraction_bulge_var\",\n \"fraction_disk_sum\", \"fraction_disk_var\"]\n my_model.init_binned_properties(8.0, 12.0, 0.1, \"stellar_mass_bins\",\n stellar_properties)\n\n # Properties binned on halo mass.\n halo_properties = [\"fof_HMF\"]\n component_properties = [\"halo_{0}_fraction_sum\".format(component) for component in\n [\"baryon\", \"stars\", \"cold\", \"hot\", \"ejected\", \"ICS\", \"bh\"]]\n my_model.init_binned_properties(10.0, 14.0, 0.1, \"halo_mass_bins\",\n halo_properties+component_properties)\n\n # Now properties that will be extended as lists.\n scatter_properties = [\"BTF_mass\", \"BTF_vel\", \"sSFR_mass\", \"sSFR_sSFR\",\n \"gas_frac_mass\", \"gas_frac\", \"metallicity_mass\",\n \"metallicity\", \"bh_mass\", \"bulge_mass\", \"reservoir_mvir\",\n \"reservoir_stars\", \"reservoir_cold\", \"reservoir_hot\",\n \"reservoir_ejected\", \"reservoir_ICS\", \"x_pos\",\n \"y_pos\", \"z_pos\"]\n my_model.init_scatter_properties(scatter_properties)\n\n # Finally those properties that are stored as a single number.\n single_properties = []\n my_model.init_single_properties(single_properties)\n\n # To be more memory concious, we calculate the required properties on a\n # file-by-file basis. This ensures we do not keep ALL the galaxy data in memory.\n my_model.calc_properties_all_files(calculation_functions, debug=False)\n\n models.append(my_model)\n\n # Similar to the calculation functions, all of the plotting functions are in the\n # `plots.py` module and are labelled `plot_<toggle>`.\n plot_functions = generate_func_dict(plot_toggles, module_name=\"sage_analysis.example_plots\",\n function_prefix=\"plot_\")\n\n # Now do the plotting.\n for func_name in plot_functions.keys():\n func = plot_functions[func_name][0]\n keyword_args = plot_functions[func_name][1]\n\n func(models, plot_output_path, plot_output_format, **keyword_args)\n\n # Set the error settings to the previous ones so we don't annoy the user.\n np.seterr(divide=old_error_settings[\"divide\"], over=old_error_settings[\"over\"],\n under=old_error_settings[\"under\"], invalid=old_error_settings[\"invalid\"])\n" ]
[ [ "numpy.seterr" ] ]
marobinette/defundthepolice
[ "691cfb6b50fa973e0a91ff685664b229a92e2eee" ]
[ "main_st.py" ]
[ "import json\nimport glob\nimport os\nimport textwrap\nimport math\nimport logging\n\nimport pandas as pd\nimport streamlit as st\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom viz import bar_graph, pie_chart\n\nBAR_CHART = \"Bar Chart\"\nPIE_CHART = \"Pie Chart\"\nSTATES_FOLDER = \"data/states/\"\nst.set_option(\"deprecation.showfileUploaderEncoding\", False)\n\nCHART_DICT = {BAR_CHART: bar_graph, PIE_CHART: pie_chart}\n\n\ndef show_menu():\n st.sidebar.title(\"Social Media Toolkit Generator\")\n st.sidebar.header(\"Defund the Police\")\n\n st.sidebar.markdown(\n \"“Defund the police” means reallocating or redirecting funding away from the \"\n \"police department to other government agencies funded by the local municipality.\"\n )\n\n st.sidebar.markdown(\n \"The goal of this tool is to highlight how much money local communities spend on \"\n \"Police, and then how reallocating funds can make a direct impact into their community\"\n )\n\n # TODO add more \"apps\" such as county compare tool\n\n\ndef draw_image(text, bg_color, text_color, font):\n # TODO make advanced pannel for deep customizations\n image_width = 600\n image_height = 335\n img = Image.new(\"RGB\", (image_width, image_height), color=bg_color)\n canvas = ImageDraw.Draw(img)\n font = ImageFont.truetype(font, size=24)\n pad = -25\n # print(text)\n for line in text:\n # print(line)\n\n # canvas.textsize(text, font=font)\n # canvas.text((10,10), text, fill=(255, 255, 0))\n text_width, text_height = canvas.textsize(line, font=font)\n\n x_pos = int((image_width - text_width) / 2)\n y_pos = int((image_height - text_height) / 2) + pad\n canvas.text((x_pos, y_pos), line, font=font, fill=text_color)\n pad += text_height + 5\n\n return img\n\n\ndef calc_percent(row, total_budget):\n\n return round((float(row[\"budget\"] / float(total_budget)) * 100), 2)\n\n\ndef select_police_row(budget_df):\n try:\n police_df = budget_df.loc[budget_df[\"item\"].str.contains(\"Police\")]\n police_json = police_df.reset_index().to_json(orient=\"records\")\n police_data = json.loads(police_json)[0]\n except Exception as no_police_row:\n logging.error(no_police_row)\n try:\n police_df = budget_df.loc[budget_df[\"item\"].str.contains(\"Safety\")]\n police_json = police_df.reset_index().to_json(orient=\"records\")\n police_data = json.loads(police_json)[0]\n except Exception as no_safety_row:\n logging.error(no_safety_row)\n st.warning(\"No column named police, manually select\")\n police_col = st.selectbox(\"Select Police budget\", list(budget_df[\"item\"]))\n police_df = budget_df.loc[budget_df[\"item\"].str.contains(police_col)]\n police_json = police_df.reset_index().to_json(orient=\"records\")\n police_data = json.loads(police_json)[0]\n\n return police_data\n\n\ndef create_budget_json(state, county):\n # read budget.csv\n budget_csv_path = STATES_FOLDER + state + \"/\" + county + \"/budget.csv\"\n budget_df = pd.read_csv(budget_csv_path, index_col=False)\n # st.write(budget_df)\n\n # add percentages to budget_df\n total_budget = budget_df[\"budget\"].sum()\n budget_df[\"percent\"] = budget_df.apply(\n lambda row: calc_percent(row, total_budget), axis=1\n )\n\n police_data = select_police_row(budget_df)\n return police_data, budget_df\n\n\ndef make_investment_image(investment, reinvest_money, bg_color, text_color, font):\n\n if investment == \"Education\":\n cpu_cost = 500.0\n laptops = int(math.ceil(reinvest_money / cpu_cost))\n\n laptops_string = str(f\"{laptops:,}\")\n text = \"That translates to \" + laptops_string + \" laptops for our 
community\"\n wrapped_string = textwrap.wrap(text, width=30)\n image = draw_image(wrapped_string, bg_color, text_color, font)\n\n st.image(image, use_column_width=True)\n st.write(\"*500 dollar laptops\")\n\n # TODO add in extra investments\n\n\ndef get_concat_v_cut(im1, im2):\n dst = Image.new(\"RGB\", (min(im1.width, im2.width), im1.height + im2.height))\n dst.paste(im1, (0, 0))\n dst.paste(im2, (0, im1.height))\n return dst\n\n\ndef bar_chart_banner(bar_chart, state, county, bg_color, font, text, text_color):\n\n # lets make simple image\n image_width = 600\n image_height = 200\n img = Image.new(\"RGB\", (image_width, image_height), color=bg_color)\n canvas = ImageDraw.Draw(img)\n font = ImageFont.truetype(font, size=24)\n pad = -25\n starter = 40\n # print(text)\n for line in text:\n # print(line)\n\n # canvas.textsize(text, font=font)\n # canvas.text((10,10), text, fill=(255, 255, 0))\n text_width, text_height = canvas.textsize(line, font=font)\n\n x_pos = int((image_width - text_width) / 2)\n y_pos = starter + pad\n canvas.text((x_pos, y_pos), line, font=font, fill=text_color)\n pad += text_height + 5\n\n dst = get_concat_v_cut(img, bar_chart)\n st.image(dst)\n\n\ndef main():\n show_menu()\n st.header(\"Select Community\")\n\n # Select state\n states = os.listdir(STATES_FOLDER)\n state = st.selectbox(\"Select State\", states)\n\n # select county\n counties = os.listdir(STATES_FOLDER + state)\n county = st.selectbox(\"Select County\", counties)\n\n police_data, budget_df = create_budget_json(state, county)\n st.write(budget_df)\n\n # Show budget for year\n money = \"$\" + f'{police_data[\"budget\"]:,}'\n header_string = (\n # \"For \"\n # + str(police_data[\"year\"])\n # + \" \"\n str(county)\n + \" County, \"\n + str(state)\n + \" has a police budget of \"\n + str(money)\n )\n wrapped_string = textwrap.wrap(header_string, width=30)\n # st.header(wrapped_string)\n\n fonts = [\"fonts/Chunk_Five_Print.otf\"]\n\n fonts.extend(glob.glob(\"fonts/*\"))\n font = st.selectbox(\"Select Font\", fonts)\n\n bg_color = st.beta_color_picker(\"Background color\", \"#496D89\")\n st.write(\"The current background color is\", bg_color)\n\n text_color = st.beta_color_picker(\"Text color\", \"#FFFFFF\")\n st.write(\"The current text color is\", text_color)\n\n image = draw_image(wrapped_string, bg_color, text_color, font)\n\n st.image(image, use_column_width=True)\n\n st.write(\"source: \" + str(police_data[\"source\"]))\n defund = st.slider(\"Defund %\", 0, 100, 20)\n\n defund_decmial = float(defund / 100)\n reinvest_money = float(police_data[\"budget\"]) * defund_decmial\n reinvest_money_string = \"$\" + f\"{int(reinvest_money):,}\"\n\n investments = [\"Education\", \"Healthcare\", \"Social Programs\"]\n realocate = st.selectbox(\"Reinvest\", investments)\n\n realoc_str = (\n \"By defunding the police by \"\n + str(defund)\n + \"% we can invest \"\n + reinvest_money_string\n + \" into \"\n + realocate\n )\n\n wrapped_string = textwrap.wrap(realoc_str, width=30)\n # st.header(realoc_str)\n image = draw_image(wrapped_string, bg_color, text_color, font)\n\n st.image(image, use_column_width=True)\n\n # based on input show what we can do...\n make_investment_image(realocate, reinvest_money, bg_color, text_color, font)\n\n # TODO make this another \"app\" in sidebar for users to select\n # TODO have way to select different visualizations\n chart_types = [BAR_CHART, PIE_CHART]\n selected_chart = st.selectbox(\"Chart Types\", chart_types)\n CHART_DICT.get(selected_chart)(budget_df)\n # bar_chart = 
viz.bar_graph(budget_df)\n # st.altair_chart(altair_chart(budget_df), use_container_width=True)\n\n wrapped_string = textwrap.wrap(header_string + \"\\n\" + realoc_str, width=30)\n uploaded_file = st.file_uploader(\"Choose an Image File\")\n if uploaded_file is not None:\n try:\n bar_chart = Image.open(uploaded_file)\n bar_chart_banner(\n bar_chart, state, county, bg_color, font, wrapped_string, text_color\n )\n except Exception as error:\n st.error(error)\n\n hide_streamlit_style = \"\"\"\n <title> Half Explot </title>\n <style>\n #MainMenu {visibility: hidden;}\n footer {visibility: hidden;}\n .sidebar .sidebar-content {background-image: linear-gradient(180deg,#4CA1AF,#2c3e50);}\n .btn-outline-secondary {\n border-color: #09ab3b85;\n color: #f9f9f9;\n }\n body {\n color: #fafafa;\n text-align: left;\n background-color: #262730;\n }\n </style>\n \"\"\"\n st.markdown(hide_streamlit_style, unsafe_allow_html=True)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv" ] ]
c74p/Capstone-SalesForecasting
[ "9c7950dc2fe839a04a0521f7da1ed3936496cd94" ]
[ "notebooks/EDA/S04-customers.py" ]
[ "import cauldron as cd\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'figure.max_open_warning': 0}) # NOQA\nimport seaborn as sns\nsns.set() # NOQA, need this for styling\nimport pandas as pd\n\nimport os, sys # NOQA\nsys.path.append('../../src/data')\nimport make_dataset # NOQA, need the lines above to get directories right\n\n# Import df from shared Cauldron memory\ndf = cd.shared.df\n\ncd.display.markdown(\n \"\"\"\n ## Sales vs Customers\n\n As noted above, **customers** has a correlation of 0.90 with **sales**.\n It's pretty obvious on the chart below; the more customers, the more sales.\n Note also that as we bring in more customers, the relationship gets less\n strong, until it starts to break down around 5,000 customers in a given\n store (clearly only a few stores could even fit 5,000 customers in a day).\n\n We don't know the specific definition of 'customer' in this case, or how\n they're counted. Is it someone who bought, or just someone who came into\n the store? Do internet visitors/buyers count? In any case, we'll want to\n work with the marketing team to bring more people through the doors\n (virtual and physical).\n\n For now, since the correlation with sales is so strong, and since our\n neural network model will manage the relationship between customers and\n sales implicitly for us, let's continue to focus on **sales** and keep\n **customers** as a secondary focus.\n \"\"\"\n)\n\n# Prep data for display\navg_sales_by_customers = df.groupby('customers').sales.mean()\n\n# Create and display the chart\nfig, ax = plt.subplots()\nax.plot(avg_sales_by_customers)\nax.set_title('Average Sales by Number of Customers')\nax.set_xlabel('Number of Customers')\nax.set_ylabel('Average Sales')\nax.set_xticklabels(['{:,.0f}'.format(x) for x in ax.get_xticks()])\nax.set_yticklabels(['${:,.0f}'.format(x) for x in ax.get_yticks()])\ncd.display.pyplot(fig)\n" ]
[ [ "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.subplots" ] ]
kblondal/RMG-Py
[ "ee14e35321c1dc3cd1900c6d2ebb27931d1bb542" ]
[ "arkane/encorr/mbac.py" ]
[ "#!/usr/bin/env python3\n\n###############################################################################\n# #\n# RMG - Reaction Mechanism Generator #\n# #\n# Copyright (c) 2002-2019 Prof. William H. Green ([email protected]), #\n# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the 'Software'), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n###############################################################################\n\n\"\"\"\nThis module provides methods for applying Melius-type bond additivity\ncorrections (M-BAC) as described in:\nAnantharaman and Melius, J. Phys. Chem. A 2005, 109, 1734-1747\n\"\"\"\n\nimport numpy as np\nimport pybel\n\nfrom rmgpy.molecule import Molecule, Atom, Bond, get_element\n\nimport arkane.encorr.data as data\nfrom arkane.exceptions import BondAdditivityCorrectionError\n\n################################################################################\n\n\natom_spins = {\n 'H': 0.5, 'C': 1.0, 'N': 1.5, 'O': 1.0, 'F': 0.5, 'Si': 1.0, 'P': 1.5, 'S': 1.0, 'Cl': 0.5, 'Br': 0.5, 'I': 0.5\n}\n\n\ndef get_bac(model_chemistry, coords, nums, multiplicity=1, mol_corr=0.0):\n \"\"\"\n Given the model chemistry, molecular coordinates, atomic numbers,\n and dictionaries of BAC parameters, return the total BAC\n (should be SUBTRACTED from energy).\n\n Note that a molecular correction term other than 0 destroys the size\n consistency of the quantum chemistry method. 
This correction also\n requires the multiplicity of the molecule.\n \"\"\"\n alpha = 3.0 # Angstrom^-1\n\n # Get BAC parameters\n try:\n params = data.mbac[model_chemistry]\n except KeyError:\n raise BondAdditivityCorrectionError(\n 'Missing Melius-type BAC parameters for model chemistry {}'.format(model_chemistry)\n )\n atom_corr = params['atom_corr']\n bond_corr_length = params['bond_corr_length']\n bond_corr_neighbor = params['bond_corr_neighbor']\n\n # Get single-bonded RMG molecule\n mol = geo_to_mol(coords, nums)\n\n # Molecular correction\n spin = 0.5 * (multiplicity - 1)\n bac_mol = mol_corr * (spin - sum(atom_spins[atom.element.symbol] for atom in mol.atoms))\n\n # Atomic correction\n bac_atom = sum(atom_corr[atom.element.symbol] for atom in mol.atoms)\n\n # Bond correction\n bac_bond = 0.0\n for bond in mol.get_all_edges():\n atom1 = bond.atom1\n atom2 = bond.atom2\n symbol1 = atom1.element.symbol\n symbol2 = atom2.element.symbol\n\n # Bond length correction\n length_corr = (bond_corr_length[symbol1] * bond_corr_length[symbol2]) ** 0.5\n length = np.linalg.norm(atom1.coords - atom2.coords)\n bac_bond += length_corr * np.exp(-alpha * length)\n\n # Neighbor correction\n for other_atom, other_bond in mol.get_bonds(atom1).items(): # Atoms adjacent to atom1\n if other_bond is not bond:\n other_symbol = other_atom.element.symbol\n bac_bond += bond_corr_neighbor[symbol1] + bond_corr_neighbor[other_symbol]\n for other_atom, other_bond in mol.get_bonds(atom2).items(): # Atoms adjacent to atom2\n if other_bond is not bond:\n other_symbol = other_atom.element.symbol\n bac_bond += bond_corr_neighbor[symbol2] + bond_corr_neighbor[other_symbol]\n\n return (bac_mol + bac_atom + bac_bond) * 4184.0 # Convert kcal/mol to J/mol\n\n\ndef geo_to_mol(coords, nums):\n \"\"\"\n Convert molecular geometry specified by atomic coordinates and\n atomic numbers to RMG molecule.\n\n Use Open Babel for most cases because it's better at recognizing\n long bonds. Use RMG for hydrogen because Open Babel can't do it for\n mysterious reasons.\n \"\"\"\n if list(nums) == [1, 1]:\n mol = Molecule()\n mol.from_xyz(nums, coords)\n else:\n xyz = '{}\\n\\n'.format(len(nums))\n xyz += '\\n'.join('{0} {1[0]: .10f} {1[1]: .10f} {1[2]: .10f}'.format(n, c) for n, c in zip(nums, coords))\n mol = pybel.readstring('xyz', xyz)\n mol = pybel_to_rmg(mol)\n return mol\n\n\ndef pybel_to_rmg(pybel_mol):\n \"\"\"\n Convert Pybel molecule to RMG molecule but ignore charge,\n multiplicity, and bond orders.\n \"\"\"\n mol = Molecule()\n for pybel_atom in pybel_mol:\n element = get_element(pybel_atom.atomicnum)\n atom = Atom(element=element, coords=np.array(pybel_atom.coords))\n mol.vertices.append(atom)\n for obbond in pybel.ob.OBMolBondIter(pybel_mol.OBMol):\n begin_idx = obbond.GetBeginAtomIdx() - 1 # Open Babel indexes atoms starting at 1\n end_idx = obbond.GetEndAtomIdx() - 1\n bond = Bond(mol.vertices[begin_idx], mol.vertices[end_idx])\n mol.add_bond(bond)\n return mol\n" ]
[ [ "numpy.array", "numpy.exp", "numpy.linalg.norm" ] ]
t-groth/tinygrad
[ "4aaa670d1fd6b6ebcbc472ed77277b2fd3abe387" ]
[ "tinygrad/optim.py" ]
[ "import numpy as np\n\nclass Optimizer:\n def __init__(self, params):\n self.params = params\n\nclass SGD(Optimizer):\n def __init__(self, params, lr=0.001):\n super(SGD, self).__init__(params)\n self.lr = lr\n\n def step(self):\n for t in self.params:\n t.data -= self.lr * t.grad\n\n# 80% sure this is right?\nclass Adam(Optimizer):\n def __init__(self, params, lr=0.001, b1=0.9, b2=0.999, eps=1e-8):\n super(Adam, self).__init__(params)\n self.lr = lr\n self.b1 = b1\n self.b2 = b2\n self.eps = eps\n self.t = 0\n\n self.m = [np.zeros_like(t.data) for t in self.params]\n self.v = [np.zeros_like(t.data) for t in self.params]\n\n def step(self):\n for i,t in enumerate(self.params):\n self.t += 1\n self.m[i] = self.b1 * self.m[i] + (1 - self.b1) * t.grad\n self.v[i] = self.b2 * self.v[i] + (1 - self.b2) * np.square(t.grad)\n mhat = self.m[i] / (1. - self.b1**self.t)\n vhat = self.v[i] / (1. - self.b2**self.t)\n t.data -= self.lr * mhat / (np.sqrt(vhat) + self.eps)\n\n" ]
[ [ "numpy.square", "numpy.zeros_like", "numpy.sqrt" ] ]
stonebig/pyRiemann
[ "131cc82e6faaf8b515fa44e7af64159a3e9840e9" ]
[ "examples/artifacts/plot_correct_ajdc_EEG.py" ]
[ "\"\"\"\n===============================================================================\nArtifact Correction by AJDC-based Blind Source Separation\n===============================================================================\n\nBlind source separation (BSS) based on approximate joint diagonalization of\nFourier cospectra (AJDC), applied to artifact correction of EEG [1].\n\"\"\"\n# Authors: Quentin Barthélemy & David Ojeda.\n# EEG signal kindly shared by Marco Congedo.\n#\n# License: BSD (3-clause)\n\nimport gzip\nimport numpy as np\nfrom scipy.signal import welch\nfrom matplotlib import pyplot as plt\n\nfrom mne import create_info\nfrom mne.io import RawArray\nfrom mne.viz import plot_topomap\nfrom mne.preprocessing import ICA\n\nfrom pyriemann.spatialfilters import AJDC\nfrom pyriemann.utils.viz import plot_cospectra\n\n\n###############################################################################\n\n\ndef read_header(fname):\n \"\"\"Read the header of sample-blinks.txt\"\"\"\n with gzip.open(fname, 'rt') as f:\n content = f.readline().split()\n return content[:-1], int(content[-1])\n\n\n###############################################################################\n# Load EEG data\n# -------------\n\nfname = '../data/sample-blinks.txt.gz'\nsignal_raw = np.loadtxt(fname, skiprows=1).T\nch_names, sfreq = read_header(fname)\nch_count = len(ch_names)\nduration = signal_raw.shape[1] / sfreq\n\n\n###############################################################################\n# Channel space\n# -------------\n\n# Plot signal X\nch_info = create_info(ch_names=ch_names, ch_types=['eeg'] * ch_count,\n sfreq=sfreq)\nch_info.set_montage('standard_1020')\nsignal = RawArray(signal_raw, ch_info, verbose=False)\nsignal.plot(duration=duration, start=0, n_channels=ch_count,\n scalings={'eeg': 3e1}, color={'eeg': 'steelblue'},\n title='Original EEG signal', show_scalebars=False)\n\n\n###############################################################################\n# AJDC: Second-Order Statistics (SOS)-based BSS, diagonalizing cospectra\n# ----------------------------------------------------------------------\n\n# Compute and diagonalize Fourier cospectral matrices between 1 and 32 Hz\nwindow, overlap = sfreq, 0.5\nfmin, fmax = 1, 32\najdc = AJDC(window=window, overlap=overlap, fmin=fmin, fmax=fmax, fs=sfreq,\n dim_red={'max_cond': 100})\najdc.fit(signal_raw[np.newaxis, np.newaxis, ...])\nfreqs = ajdc.freqs_\n\n# Plot cospectra in channel space, after trace-normalization by frequency: each\n# cospectrum, associated to a frequency, is a covariance matrix\nplot_cospectra(ajdc._cosp_channels, freqs, ylabels=ch_names,\n title='Cospectra, in channel space')\n\n\n###############################################################################\n\n# Plot diagonalized cospectra in source space\nsr_count = ajdc.n_sources_\nsr_names = ['S' + str(s).zfill(2) for s in range(sr_count)]\nplot_cospectra(ajdc._cosp_sources, freqs, ylabels=sr_names,\n title='Diagonalized cospectra, in source space')\n\n\n###############################################################################\n# Source space\n# ------------\n\n# Estimate sources S applying forward filters B to signal X: S = B X\nsource_raw = ajdc.transform(signal_raw[np.newaxis, ...])[0]\n\n# Plot sources S\nsr_info = create_info(ch_names=sr_names, ch_types=['misc'] * sr_count,\n sfreq=sfreq)\nsource = RawArray(source_raw, sr_info, verbose=False)\nsource.plot(duration=duration, start=0, n_channels=sr_count,\n scalings={'misc': 2e2}, title='EEG sources 
estimated by AJDC',\n show_scalebars=False)\n\n\n###############################################################################\n# Artifact identification\n# -----------------------\n\n# Identify artifact by eye: blinks are well separated in source S0\nblink_idx = 0\n\n# Get normal spectrum, ie power spectrum after trace-normalization\nblink_spectrum_norm = ajdc._cosp_sources[:, blink_idx, blink_idx]\nblink_spectrum_norm /= np.linalg.norm(blink_spectrum_norm)\n\n# Get absolute spectrum, ie raw power spectrum of the source\nf, spectrum = welch(source.get_data(picks=[blink_idx]), fs=sfreq,\n nperseg=window, noverlap=int(window * overlap))\nblink_spectrum_abs = spectrum[0, (f >= fmin) & (f <= fmax)]\nblink_spectrum_abs /= np.linalg.norm(blink_spectrum_abs)\n\n# Get topographic map\nblink_filter = ajdc.backward_filters_[:, blink_idx]\n\n# Plot spectrum and topographic map of the blink source separated by AJDC\nfig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\naxs[0].set(title='Power spectrum of the blink source estimated by AJDC',\n xlabel='Frequency (Hz)', ylabel='Power spectral density')\naxs[0].plot(freqs, blink_spectrum_abs, label='Absolute power')\naxs[0].plot(freqs, blink_spectrum_norm, label='Normal power')\naxs[0].legend()\naxs[1].set_title('Topographic map of the blink source estimated by AJDC')\nplot_topomap(blink_filter, pos=ch_info, axes=axs[1], extrapolate='box')\nplt.show()\n\n\n###############################################################################\n# Artifact correction by BSS denoising\n# ------------------------------------\n\n# BSS denoising: blink source is suppressed in source space using activation\n# matrix D, and then applying backward filters A to come back to channel space\n# Denoised signal: Xd = A D S\nsignal_denois_raw = ajdc.inverse_transform(source_raw[np.newaxis, ...],\n supp=[blink_idx])[0]\n\n# Plot denoised signal Xd\nsignal_denois = RawArray(signal_denois_raw, ch_info, verbose=False)\nsignal_denois.plot(duration=duration, start=0, n_channels=ch_count,\n scalings={'eeg': 3e1}, color={'eeg': 'steelblue'},\n title='Denoised EEG signal by AJDC', show_scalebars=False)\n\n\n###############################################################################\n# Comparison with Independent Component Analysis (ICA)\n# ----------------------------------------------------\n\n# Infomax-based ICA is a Higher-Order Statistics (HOS)-based BSS, minimizing\n# mutual information\nica = ICA(n_components=ajdc.n_sources_, method='infomax', random_state=42)\nica.fit(signal, picks='eeg')\n\n# Plot sources separated by ICA\nica.plot_sources(signal, title='EEG sources estimated by ICA')\n\n# Can you find the blink source?\n\n###############################################################################\n\n# Plot topographic maps of sources separated by ICA\nica.plot_components(title='Topographic maps of EEG sources estimated by ICA')\n\n\n###############################################################################\n# References\n# ----------\n# [1] Q. Barthélemy, L. Mayaud, Y. Renard, D. Kim, S.-W. Kang, J. Gunkelman and\n# M. Congedo, \"Online denoising of eye-blinks in electroencephalography\",\n# Neurophysiol Clin, 2017\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.linalg.norm", "numpy.loadtxt" ] ]
hyyh28/gym-minigrid
[ "678b049e410eb9c07a9a77e15e239a8115edf057" ]
[ "gym_minigrid/minigrid.py" ]
[ "import math\nimport hashlib\nimport gym\nfrom enum import IntEnum\nimport numpy as np\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nfrom .rendering import *\n\n# Size in pixels of a tile in the full-scale human view\nTILE_PIXELS = 32\n\n# Map of color names to RGB values\nCOLORS = {\n 'red' : np.array([255, 0, 0]),\n 'green' : np.array([0, 255, 0]),\n 'blue' : np.array([0, 0, 255]),\n 'purple': np.array([112, 39, 195]),\n 'yellow': np.array([255, 255, 0]),\n 'grey' : np.array([100, 100, 100])\n}\n\nCOLOR_NAMES = sorted(list(COLORS.keys()))\n\n# Used to map colors to integers\nCOLOR_TO_IDX = {\n 'red' : 0,\n 'green' : 1,\n 'blue' : 2,\n 'purple': 3,\n 'yellow': 4,\n 'grey' : 5\n}\n\nIDX_TO_COLOR = dict(zip(COLOR_TO_IDX.values(), COLOR_TO_IDX.keys()))\n\n# Map of object type to integers\nOBJECT_TO_IDX = {\n 'unseen' : 0,\n 'empty' : 1,\n 'wall' : 2,\n 'floor' : 3,\n 'door' : 4,\n 'key' : 5,\n 'ball' : 6,\n 'box' : 7,\n 'goal' : 8,\n 'lava' : 9,\n 'agent' : 10,\n}\n\nIDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))\n\n# Map of state names to integers\nSTATE_TO_IDX = {\n 'open' : 0,\n 'closed': 1,\n 'locked': 2,\n}\n\n# Map of agent direction indices to vectors\nDIR_TO_VEC = [\n # Pointing right (positive X)\n np.array((1, 0)),\n # Down (positive Y)\n np.array((0, 1)),\n # Pointing left (negative X)\n np.array((-1, 0)),\n # Up (negative Y)\n np.array((0, -1)),\n]\n\nclass WorldObj:\n \"\"\"\n Base class for grid world objects\n \"\"\"\n\n def __init__(self, type, color):\n assert type in OBJECT_TO_IDX, type\n assert color in COLOR_TO_IDX, color\n self.type = type\n self.color = color\n self.contains = None\n\n # Initial position of the object\n self.init_pos = None\n\n # Current position of the object\n self.cur_pos = None\n\n def can_overlap(self):\n \"\"\"Can the agent overlap with this?\"\"\"\n return False\n\n def can_pickup(self):\n \"\"\"Can the agent pick this up?\"\"\"\n return False\n\n def can_contain(self):\n \"\"\"Can this contain another object?\"\"\"\n return False\n\n def see_behind(self):\n \"\"\"Can the agent see behind this object?\"\"\"\n return True\n\n def toggle(self, env, pos):\n \"\"\"Method to trigger/toggle an action this object performs\"\"\"\n return False\n\n def encode(self):\n \"\"\"Encode the a description of this object as a 3-tuple of integers\"\"\"\n return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], 0)\n\n @staticmethod\n def decode(type_idx, color_idx, state):\n \"\"\"Create an object from a 3-tuple state description\"\"\"\n\n obj_type = IDX_TO_OBJECT[type_idx]\n color = IDX_TO_COLOR[color_idx]\n\n if obj_type == 'empty' or obj_type == 'unseen':\n return None\n\n # State, 0: open, 1: closed, 2: locked\n is_open = state == 0\n is_locked = state == 2\n\n if obj_type == 'wall':\n v = Wall(color)\n elif obj_type == 'floor':\n v = Floor(color)\n elif obj_type == 'ball':\n v = Ball(color)\n elif obj_type == 'key':\n v = Key(color)\n elif obj_type == 'box':\n v = Box(color)\n elif obj_type == 'door':\n v = Door(color, is_open, is_locked)\n elif obj_type == 'goal':\n v = Goal()\n elif obj_type == 'lava':\n v = Lava()\n else:\n assert False, \"unknown object type in decode '%s'\" % obj_type\n\n return v\n\n def render(self, r):\n \"\"\"Draw this object with the given renderer\"\"\"\n raise NotImplementedError\n\nclass Goal(WorldObj):\n def __init__(self):\n super().__init__('goal', 'green')\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n fill_coords(img, point_in_rect(0, 1, 0, 1), 
COLORS[self.color])\n\nclass Floor(WorldObj):\n \"\"\"\n Colored floor tile the agent can walk over\n \"\"\"\n\n def __init__(self, color='blue'):\n super().__init__('floor', color)\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n # Give the floor a pale color\n color = COLORS[self.color] / 2\n fill_coords(img, point_in_rect(0.031, 1, 0.031, 1), color)\n\n\nclass Lava(WorldObj):\n def __init__(self):\n super().__init__('lava', 'red')\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n c = (255, 128, 0)\n\n # Background color\n fill_coords(img, point_in_rect(0, 1, 0, 1), c)\n\n # Little waves\n for i in range(3):\n ylo = 0.3 + 0.2 * i\n yhi = 0.4 + 0.2 * i\n fill_coords(img, point_in_line(0.1, ylo, 0.3, yhi, r=0.03), (0,0,0))\n fill_coords(img, point_in_line(0.3, yhi, 0.5, ylo, r=0.03), (0,0,0))\n fill_coords(img, point_in_line(0.5, ylo, 0.7, yhi, r=0.03), (0,0,0))\n fill_coords(img, point_in_line(0.7, yhi, 0.9, ylo, r=0.03), (0,0,0))\n\nclass Wall(WorldObj):\n def __init__(self, color='grey'):\n super().__init__('wall', color)\n\n def see_behind(self):\n return False\n\n def render(self, img):\n fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])\n\nclass Door(WorldObj):\n def __init__(self, color, is_open=False, is_locked=False):\n super().__init__('door', color)\n self.is_open = is_open\n self.is_locked = is_locked\n\n def can_overlap(self):\n \"\"\"The agent can only walk over this cell when the door is open\"\"\"\n return self.is_open\n\n def see_behind(self):\n return self.is_open\n\n def toggle(self, env, pos):\n # If the player has the right key to open the door\n if self.is_locked:\n if isinstance(env.carrying, Key) and env.carrying.color == self.color:\n self.is_locked = False\n self.is_open = True\n return True\n return False\n\n self.is_open = not self.is_open\n return True\n\n def encode(self):\n \"\"\"Encode the a description of this object as a 3-tuple of integers\"\"\"\n\n # State, 0: open, 1: closed, 2: locked\n if self.is_open:\n state = 0\n elif self.is_locked:\n state = 2\n elif not self.is_open:\n state = 1\n\n return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], state)\n\n def render(self, img):\n c = COLORS[self.color]\n\n if self.is_open:\n fill_coords(img, point_in_rect(0.88, 1.00, 0.00, 1.00), c)\n fill_coords(img, point_in_rect(0.92, 0.96, 0.04, 0.96), (0,0,0))\n return\n\n # Door frame and door\n if self.is_locked:\n fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)\n fill_coords(img, point_in_rect(0.06, 0.94, 0.06, 0.94), 0.45 * np.array(c))\n\n # Draw key slot\n fill_coords(img, point_in_rect(0.52, 0.75, 0.50, 0.56), c)\n else:\n fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)\n fill_coords(img, point_in_rect(0.04, 0.96, 0.04, 0.96), (0,0,0))\n fill_coords(img, point_in_rect(0.08, 0.92, 0.08, 0.92), c)\n fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), (0,0,0))\n\n # Draw door handle\n fill_coords(img, point_in_circle(cx=0.75, cy=0.50, r=0.08), c)\n\nclass Key(WorldObj):\n def __init__(self, color='blue'):\n super(Key, self).__init__('key', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n c = COLORS[self.color]\n\n # Vertical quad\n fill_coords(img, point_in_rect(0.50, 0.63, 0.31, 0.88), c)\n\n # Teeth\n fill_coords(img, point_in_rect(0.38, 0.50, 0.59, 0.66), c)\n fill_coords(img, point_in_rect(0.38, 0.50, 0.81, 0.88), c)\n\n # Ring\n fill_coords(img, point_in_circle(cx=0.56, cy=0.28, r=0.190), c)\n fill_coords(img, point_in_circle(cx=0.56, cy=0.28, 
r=0.064), (0,0,0))\n\nclass Ball(WorldObj):\n def __init__(self, color='blue'):\n super(Ball, self).__init__('ball', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n fill_coords(img, point_in_circle(0.5, 0.5, 0.31), COLORS[self.color])\n\nclass Box(WorldObj):\n def __init__(self, color, contains=None):\n super(Box, self).__init__('box', color)\n self.contains = contains\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n c = COLORS[self.color]\n\n # Outline\n fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), c)\n fill_coords(img, point_in_rect(0.18, 0.82, 0.18, 0.82), (0,0,0))\n\n # Horizontal slit\n fill_coords(img, point_in_rect(0.16, 0.84, 0.47, 0.53), c)\n\n def toggle(self, env, pos):\n # Replace the box by its contents\n env.grid.set(*pos, self.contains)\n return True\n\nclass Grid:\n \"\"\"\n Represent a grid and operations on it\n \"\"\"\n\n # Static cache of pre-renderer tiles\n tile_cache = {}\n\n def __init__(self, width, height):\n assert width >= 3\n assert height >= 3\n\n self.width = width\n self.height = height\n\n self.grid = [None] * width * height\n\n def __contains__(self, key):\n if isinstance(key, WorldObj):\n for e in self.grid:\n if e is key:\n return True\n elif isinstance(key, tuple):\n for e in self.grid:\n if e is None:\n continue\n if (e.color, e.type) == key:\n return True\n if key[0] is None and key[1] == e.type:\n return True\n return False\n\n def __eq__(self, other):\n grid1 = self.encode()\n grid2 = other.encode()\n return np.array_equal(grid2, grid1)\n\n def __ne__(self, other):\n return not self == other\n\n def copy(self):\n from copy import deepcopy\n return deepcopy(self)\n\n def set(self, i, j, v):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n self.grid[j * self.width + i] = v\n\n def get(self, i, j):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n return self.grid[j * self.width + i]\n\n def horz_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.width - x\n for i in range(0, length):\n self.set(x + i, y, obj_type())\n\n def vert_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.height - y\n for j in range(0, length):\n self.set(x, y + j, obj_type())\n\n def wall_rect(self, x, y, w, h):\n self.horz_wall(x, y, w)\n self.horz_wall(x, y+h-1, w)\n self.vert_wall(x, y, h)\n self.vert_wall(x+w-1, y, h)\n\n def rotate_left(self):\n \"\"\"\n Rotate the grid to the left (counter-clockwise)\n \"\"\"\n\n grid = Grid(self.height, self.width)\n\n for i in range(self.width):\n for j in range(self.height):\n v = self.get(i, j)\n grid.set(j, grid.height - 1 - i, v)\n\n return grid\n\n def slice(self, topX, topY, width, height):\n \"\"\"\n Get a subset of the grid\n \"\"\"\n\n grid = Grid(width, height)\n\n for j in range(0, height):\n for i in range(0, width):\n x = topX + i\n y = topY + j\n\n if x >= 0 and x < self.width and \\\n y >= 0 and y < self.height:\n v = self.get(x, y)\n else:\n v = Wall()\n\n grid.set(i, j, v)\n\n return grid\n\n @classmethod\n def render_tile(\n cls,\n obj,\n agent_dir=None,\n highlight=False,\n tile_size=TILE_PIXELS,\n subdivs=3\n ):\n \"\"\"\n Render a tile and cache the result\n \"\"\"\n\n # Hash map lookup key for the cache\n key = (agent_dir, highlight, tile_size)\n key = obj.encode() + key if obj else key\n\n if key in cls.tile_cache:\n return cls.tile_cache[key]\n\n img = np.zeros(shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8)\n\n # Draw the 
grid lines (top and left edges)\n fill_coords(img, point_in_rect(0, 0.031, 0, 1), (100, 100, 100))\n fill_coords(img, point_in_rect(0, 1, 0, 0.031), (100, 100, 100))\n\n if obj != None:\n obj.render(img)\n\n # Overlay the agent on top\n if agent_dir is not None:\n tri_fn = point_in_triangle(\n (0.12, 0.19),\n (0.87, 0.50),\n (0.12, 0.81),\n )\n\n # Rotate the agent based on its direction\n tri_fn = rotate_fn(tri_fn, cx=0.5, cy=0.5, theta=0.5*math.pi*agent_dir)\n fill_coords(img, tri_fn, (255, 0, 0))\n\n # Highlight the cell if needed\n if highlight:\n highlight_img(img)\n\n # Downsample the image to perform supersampling/anti-aliasing\n img = downsample(img, subdivs)\n\n # Cache the rendered tile\n cls.tile_cache[key] = img\n\n return img\n\n def render(\n self,\n tile_size,\n agent_pos=None,\n agent_dir=None,\n highlight_mask=None\n ):\n \"\"\"\n Render this grid at a given scale\n :param r: target renderer object\n :param tile_size: tile size in pixels\n \"\"\"\n\n if highlight_mask is None:\n highlight_mask = np.zeros(shape=(self.width, self.height), dtype=np.bool)\n\n # Compute the total grid size\n width_px = self.width * tile_size\n height_px = self.height * tile_size\n\n img = np.zeros(shape=(height_px, width_px, 3), dtype=np.uint8)\n\n # Render the grid\n for j in range(0, self.height):\n for i in range(0, self.width):\n cell = self.get(i, j)\n\n agent_here = np.array_equal(agent_pos, (i, j))\n tile_img = Grid.render_tile(\n cell,\n agent_dir=agent_dir if agent_here else None,\n highlight=highlight_mask[i, j],\n tile_size=tile_size\n )\n\n ymin = j * tile_size\n ymax = (j+1) * tile_size\n xmin = i * tile_size\n xmax = (i+1) * tile_size\n img[ymin:ymax, xmin:xmax, :] = tile_img\n\n return img\n\n def encode(self, vis_mask=None):\n \"\"\"\n Produce a compact numpy encoding of the grid\n \"\"\"\n\n if vis_mask is None:\n vis_mask = np.ones((self.width, self.height), dtype=bool)\n\n array = np.zeros((self.width, self.height, 3), dtype='uint8')\n\n for i in range(self.width):\n for j in range(self.height):\n if vis_mask[i, j]:\n v = self.get(i, j)\n\n if v is None:\n array[i, j, 0] = OBJECT_TO_IDX['empty']\n array[i, j, 1] = 0\n array[i, j, 2] = 0\n\n else:\n array[i, j, :] = v.encode()\n\n return array\n\n @staticmethod\n def decode(array):\n \"\"\"\n Decode an array grid encoding back into a grid\n \"\"\"\n\n width, height, channels = array.shape\n assert channels == 3\n\n vis_mask = np.ones(shape=(width, height), dtype=np.bool)\n\n grid = Grid(width, height)\n for i in range(width):\n for j in range(height):\n type_idx, color_idx, state = array[i, j]\n v = WorldObj.decode(type_idx, color_idx, state)\n grid.set(i, j, v)\n vis_mask[i, j] = (type_idx != OBJECT_TO_IDX['unseen'])\n\n return grid, vis_mask\n\n def process_vis(grid, agent_pos):\n mask = np.zeros(shape=(grid.width, grid.height), dtype=np.bool)\n\n mask[agent_pos[0], agent_pos[1]] = True\n\n for j in reversed(range(0, grid.height)):\n for i in range(0, grid.width-1):\n if not mask[i, j]:\n continue\n\n cell = grid.get(i, j)\n if cell and not cell.see_behind():\n continue\n\n mask[i+1, j] = True\n if j > 0:\n mask[i+1, j-1] = True\n mask[i, j-1] = True\n\n for i in reversed(range(1, grid.width)):\n if not mask[i, j]:\n continue\n\n cell = grid.get(i, j)\n if cell and not cell.see_behind():\n continue\n\n mask[i-1, j] = True\n if j > 0:\n mask[i-1, j-1] = True\n mask[i, j-1] = True\n\n for j in range(0, grid.height):\n for i in range(0, grid.width):\n if not mask[i, j]:\n grid.set(i, j, None)\n\n return mask\n\nclass 
MiniGridEnv(gym.Env):\n \"\"\"\n 2D grid world game environment\n \"\"\"\n\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 10\n }\n\n # Enumeration of possible actions\n class Actions(IntEnum):\n # Turn left, turn right, move forward\n left = 0\n right = 1\n forward = 2\n\n # Pick up an object\n pickup = 3\n # Drop an object\n drop = 4\n # Toggle/activate an object\n toggle = 5\n\n # Done completing task\n done = 6\n\n def __init__(\n self,\n grid_size=None,\n width=None,\n height=None,\n max_steps=100,\n see_through_walls=False,\n seed=1337,\n agent_view_size=7\n ):\n # Can't set both grid_size and width/height\n if grid_size:\n assert width == None and height == None\n width = grid_size\n height = grid_size\n\n # Action enumeration for this environment\n self.actions = MiniGridEnv.Actions\n\n # Actions are discrete integer values\n self.action_space = spaces.Discrete(len(self.actions))\n\n # Number of cells (width and height) in the agent view\n assert agent_view_size % 2 == 1\n assert agent_view_size >= 3\n self.agent_view_size = agent_view_size\n\n # Observations are dictionaries containing an\n # encoding of the grid and a textual 'mission' string\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(self.agent_view_size * self.agent_view_size * 3,),\n dtype='uint8'\n )\n #self.observation_space = spaces.Dict({\n # 'image': self.observation_space\n #})\n\n # Range of possible rewards\n self.reward_range = (0, 1)\n\n # Window to use for human rendering mode\n self.window = None\n\n # Environment configuration\n self.width = width\n self.height = height\n self.max_steps = max_steps\n self.see_through_walls = see_through_walls\n self._max_episode_steps = self.max_steps\n\n # Current position and direction of the agent\n self.agent_pos = None\n self.agent_dir = None\n\n # Initialize the RNG\n self.seed(seed=seed)\n\n # Initialize the state\n self.reset()\n\n def reset(self):\n # Current position and direction of the agent\n self.agent_pos = None\n self.agent_dir = None\n\n # Generate a new random grid at the start of each episode\n # To keep the same grid for each episode, call env.seed() with\n # the same seed before calling env.reset()\n self._gen_grid(self.width, self.height)\n\n # These fields should be defined by _gen_grid\n assert self.agent_pos is not None\n assert self.agent_dir is not None\n\n # Check that the agent doesn't overlap with an object\n start_cell = self.grid.get(*self.agent_pos)\n assert start_cell is None or start_cell.can_overlap()\n\n # Item picked up, being carried, initially nothing\n self.carrying = None\n\n # Step count since episode start\n self.step_count = 0\n\n # Return first observation\n obs = self.gen_obs()\n return obs\n\n def seed(self, seed=1337):\n # Seed the random number generator\n self.np_random, _ = seeding.np_random(seed)\n return [seed]\n\n def hash(self, size=16):\n \"\"\"Compute a hash that uniquely identifies the current state of the environment.\n :param size: Size of the hashing\n \"\"\"\n sample_hash = hashlib.sha256()\n\n to_encode = [self.grid.encode(), self.agent_pos, self.agent_dir]\n for item in to_encode:\n sample_hash.update(str(item).encode('utf8'))\n\n return sample_hash.hexdigest()[:size]\n\n @property\n def steps_remaining(self):\n return self.max_steps - self.step_count\n\n def __str__(self):\n \"\"\"\n Produce a pretty string of the environment's grid along with the agent.\n A grid cell is represented by 2-character string, the first one for\n the object and the second 
one for the color.\n \"\"\"\n\n # Map of object types to short string\n OBJECT_TO_STR = {\n 'wall' : 'W',\n 'floor' : 'F',\n 'door' : 'D',\n 'key' : 'K',\n 'ball' : 'A',\n 'box' : 'B',\n 'goal' : 'G',\n 'lava' : 'V',\n }\n\n # Short string for opened door\n OPENDED_DOOR_IDS = '_'\n\n # Map agent's direction to short string\n AGENT_DIR_TO_STR = {\n 0: '>',\n 1: 'V',\n 2: '<',\n 3: '^'\n }\n\n str = ''\n\n for j in range(self.grid.height):\n\n for i in range(self.grid.width):\n if i == self.agent_pos[0] and j == self.agent_pos[1]:\n str += 2 * AGENT_DIR_TO_STR[self.agent_dir]\n continue\n\n c = self.grid.get(i, j)\n\n if c == None:\n str += ' '\n continue\n\n if c.type == 'door':\n if c.is_open:\n str += '__'\n elif c.is_locked:\n str += 'L' + c.color[0].upper()\n else:\n str += 'D' + c.color[0].upper()\n continue\n\n str += OBJECT_TO_STR[c.type] + c.color[0].upper()\n\n if j < self.grid.height - 1:\n str += '\\n'\n\n return str\n\n def _gen_grid(self, width, height):\n assert False, \"_gen_grid needs to be implemented by each environment\"\n\n def _reward(self):\n \"\"\"\n Compute the reward to be given upon success\n \"\"\"\n\n return 1 - 0.9 * (self.step_count / self.max_steps)\n\n def _rand_int(self, low, high):\n \"\"\"\n Generate random integer in [low,high[\n \"\"\"\n\n return self.np_random.randint(low, high)\n\n def _rand_float(self, low, high):\n \"\"\"\n Generate random float in [low,high[\n \"\"\"\n\n return self.np_random.uniform(low, high)\n\n def _rand_bool(self):\n \"\"\"\n Generate random boolean value\n \"\"\"\n\n return (self.np_random.randint(0, 2) == 0)\n\n def _rand_elem(self, iterable):\n \"\"\"\n Pick a random element in a list\n \"\"\"\n\n lst = list(iterable)\n idx = self._rand_int(0, len(lst))\n return lst[idx]\n\n def _rand_subset(self, iterable, num_elems):\n \"\"\"\n Sample a random subset of distinct elements of a list\n \"\"\"\n\n lst = list(iterable)\n assert num_elems <= len(lst)\n\n out = []\n\n while len(out) < num_elems:\n elem = self._rand_elem(lst)\n lst.remove(elem)\n out.append(elem)\n\n return out\n\n def _rand_color(self):\n \"\"\"\n Generate a random color name (string)\n \"\"\"\n\n return self._rand_elem(COLOR_NAMES)\n\n def _rand_pos(self, xLow, xHigh, yLow, yHigh):\n \"\"\"\n Generate a random (x,y) position tuple\n \"\"\"\n\n return (\n self.np_random.randint(xLow, xHigh),\n self.np_random.randint(yLow, yHigh)\n )\n\n def place_obj(self,\n obj,\n top=None,\n size=None,\n reject_fn=None,\n max_tries=math.inf\n ):\n \"\"\"\n Place an object at an empty position in the grid\n\n :param top: top-left position of the rectangle where to place\n :param size: size of the rectangle where to place\n :param reject_fn: function to filter out potential positions\n \"\"\"\n\n if top is None:\n top = (0, 0)\n else:\n top = (max(top[0], 0), max(top[1], 0))\n\n if size is None:\n size = (self.grid.width, self.grid.height)\n\n num_tries = 0\n\n while True:\n # This is to handle with rare cases where rejection sampling\n # gets stuck in an infinite loop\n if num_tries > max_tries:\n raise RecursionError('rejection sampling failed in place_obj')\n\n num_tries += 1\n\n pos = np.array((\n self._rand_int(top[0], min(top[0] + size[0], self.grid.width)),\n self._rand_int(top[1], min(top[1] + size[1], self.grid.height))\n ))\n\n # Don't place the object on top of another object\n if self.grid.get(*pos) != None:\n continue\n\n # Don't place the object where the agent is\n if np.array_equal(pos, self.agent_pos):\n continue\n\n # Check if there is a filtering criterion\n if 
reject_fn and reject_fn(self, pos):\n continue\n\n break\n\n self.grid.set(*pos, obj)\n\n if obj is not None:\n obj.init_pos = pos\n obj.cur_pos = pos\n\n return pos\n\n def put_obj(self, obj, i, j):\n \"\"\"\n Put an object at a specific position in the grid\n \"\"\"\n\n self.grid.set(i, j, obj)\n obj.init_pos = (i, j)\n obj.cur_pos = (i, j)\n\n def place_agent(\n self,\n top=None,\n size=None,\n rand_dir=True,\n max_tries=math.inf\n ):\n \"\"\"\n Set the agent's starting point at an empty position in the grid\n \"\"\"\n\n self.agent_pos = None\n pos = self.place_obj(None, top, size, max_tries=max_tries)\n self.agent_pos = pos\n\n if rand_dir:\n self.agent_dir = self._rand_int(0, 4)\n\n return pos\n\n @property\n def dir_vec(self):\n \"\"\"\n Get the direction vector for the agent, pointing in the direction\n of forward movement.\n \"\"\"\n\n assert self.agent_dir >= 0 and self.agent_dir < 4\n return DIR_TO_VEC[self.agent_dir]\n\n @property\n def right_vec(self):\n \"\"\"\n Get the vector pointing to the right of the agent.\n \"\"\"\n\n dx, dy = self.dir_vec\n return np.array((-dy, dx))\n\n @property\n def front_pos(self):\n \"\"\"\n Get the position of the cell that is right in front of the agent\n \"\"\"\n\n return self.agent_pos + self.dir_vec\n\n def get_view_coords(self, i, j):\n \"\"\"\n Translate and rotate absolute grid coordinates (i, j) into the\n agent's partially observable view (sub-grid). Note that the resulting\n coordinates may be negative or outside of the agent's view size.\n \"\"\"\n\n ax, ay = self.agent_pos\n dx, dy = self.dir_vec\n rx, ry = self.right_vec\n\n # Compute the absolute coordinates of the top-left view corner\n sz = self.agent_view_size\n hs = self.agent_view_size // 2\n tx = ax + (dx * (sz-1)) - (rx * hs)\n ty = ay + (dy * (sz-1)) - (ry * hs)\n\n lx = i - tx\n ly = j - ty\n\n # Project the coordinates of the object relative to the top-left\n # corner onto the agent's own coordinate system\n vx = (rx*lx + ry*ly)\n vy = -(dx*lx + dy*ly)\n\n return vx, vy\n\n def get_view_exts(self):\n \"\"\"\n Get the extents of the square set of tiles visible to the agent\n Note: the bottom extent indices are not included in the set\n \"\"\"\n\n # Facing right\n if self.agent_dir == 0:\n topX = self.agent_pos[0]\n topY = self.agent_pos[1] - self.agent_view_size // 2\n # Facing down\n elif self.agent_dir == 1:\n topX = self.agent_pos[0] - self.agent_view_size // 2\n topY = self.agent_pos[1]\n # Facing left\n elif self.agent_dir == 2:\n topX = self.agent_pos[0] - self.agent_view_size + 1\n topY = self.agent_pos[1] - self.agent_view_size // 2\n # Facing up\n elif self.agent_dir == 3:\n topX = self.agent_pos[0] - self.agent_view_size // 2\n topY = self.agent_pos[1] - self.agent_view_size + 1\n else:\n assert False, \"invalid agent direction\"\n\n botX = topX + self.agent_view_size\n botY = topY + self.agent_view_size\n\n return (topX, topY, botX, botY)\n\n def relative_coords(self, x, y):\n \"\"\"\n Check if a grid position belongs to the agent's field of view, and returns the corresponding coordinates\n \"\"\"\n\n vx, vy = self.get_view_coords(x, y)\n\n if vx < 0 or vy < 0 or vx >= self.agent_view_size or vy >= self.agent_view_size:\n return None\n\n return vx, vy\n\n def in_view(self, x, y):\n \"\"\"\n check if a grid position is visible to the agent\n \"\"\"\n\n return self.relative_coords(x, y) is not None\n\n def agent_sees(self, x, y):\n \"\"\"\n Check if a non-empty grid position is visible to the agent\n \"\"\"\n\n coordinates = self.relative_coords(x, y)\n if 
coordinates is None:\n return False\n vx, vy = coordinates\n\n obs = self.gen_obs()\n obs_grid, _ = Grid.decode(obs['image'])\n obs_cell = obs_grid.get(vx, vy)\n world_cell = self.grid.get(x, y)\n\n return obs_cell is not None and obs_cell.type == world_cell.type\n\n def step(self, action):\n self.step_count += 1\n\n reward = 0\n done = False\n\n # Get the position in front of the agent\n fwd_pos = self.front_pos\n\n # Get the contents of the cell in front of the agent\n fwd_cell = self.grid.get(*fwd_pos)\n\n # Rotate left\n if action == self.actions.left:\n self.agent_dir -= 1\n if self.agent_dir < 0:\n self.agent_dir += 4\n\n # Rotate right\n elif action == self.actions.right:\n self.agent_dir = (self.agent_dir + 1) % 4\n\n # Move forward\n elif action == self.actions.forward:\n if fwd_cell == None or fwd_cell.can_overlap():\n self.agent_pos = fwd_pos\n if fwd_cell != None and fwd_cell.type == 'goal':\n done = True\n reward = self._reward()\n if fwd_cell != None and fwd_cell.type == 'lava':\n done = True\n\n # Pick up an object\n elif action == self.actions.pickup:\n if fwd_cell and fwd_cell.can_pickup():\n if self.carrying is None:\n self.carrying = fwd_cell\n self.carrying.cur_pos = np.array([-1, -1])\n self.grid.set(*fwd_pos, None)\n\n # Drop an object\n elif action == self.actions.drop:\n if not fwd_cell and self.carrying:\n self.grid.set(*fwd_pos, self.carrying)\n self.carrying.cur_pos = fwd_pos\n self.carrying = None\n\n # Toggle/activate an object\n elif action == self.actions.toggle:\n if fwd_cell:\n fwd_cell.toggle(self, fwd_pos)\n\n # Done action (not used by default)\n elif action == self.actions.done:\n pass\n\n else:\n assert False, \"unknown action\"\n\n if self.step_count >= self.max_steps:\n done = True\n\n obs = self.gen_obs()\n\n return obs, reward, done, {}\n\n def gen_obs_grid(self):\n \"\"\"\n Generate the sub-grid observed by the agent.\n This method also outputs a visibility mask telling us which grid\n cells the agent can actually see.\n \"\"\"\n\n topX, topY, botX, botY = self.get_view_exts()\n\n grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size)\n\n for i in range(self.agent_dir + 1):\n grid = grid.rotate_left()\n\n # Process occluders and visibility\n # Note that this incurs some performance cost\n if not self.see_through_walls:\n vis_mask = grid.process_vis(agent_pos=(self.agent_view_size // 2 , self.agent_view_size - 1))\n else:\n vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool)\n\n # Make it so the agent sees what it's carrying\n # We do this by placing the carried object at the agent's position\n # in the agent's partially observable view\n agent_pos = grid.width // 2, grid.height - 1\n if self.carrying:\n grid.set(*agent_pos, self.carrying)\n else:\n grid.set(*agent_pos, None)\n\n return grid, vis_mask\n\n def gen_obs(self):\n \"\"\"\n Generate the agent's view (partially observable, low-resolution encoding)\n \"\"\"\n\n grid, vis_mask = self.gen_obs_grid()\n\n # Encode the partially observable view into a numpy array\n image = grid.encode(vis_mask)\n\n assert hasattr(self, 'mission'), \"environments must define a textual mission string\"\n\n # Observations are dictionaries containing:\n # - an image (partially observable view of the environment)\n # - the agent's direction/orientation (acting as a compass)\n # - a textual mission string (instructions for the agent)\n obs = {\n 'image': image,\n 'direction': self.agent_dir,\n 'mission': self.mission\n }\n\n return obs['image'].reshape(self.agent_view_size * 
self.agent_view_size * 3,)\n\n def get_obs_render(self, obs, tile_size=TILE_PIXELS//2):\n \"\"\"\n Render an agent observation for visualization\n \"\"\"\n\n grid, vis_mask = Grid.decode(obs)\n\n # Render the whole grid\n img = grid.render(\n tile_size,\n agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1),\n agent_dir=3,\n highlight_mask=vis_mask\n )\n\n return img\n\n def render(self, mode='human', close=False, highlight=True, tile_size=TILE_PIXELS):\n \"\"\"\n Render the whole-grid human view\n \"\"\"\n\n if close:\n if self.window:\n self.window.close()\n return\n\n if mode == 'human' and not self.window:\n import gym_minigrid.window\n self.window = gym_minigrid.window.Window('gym_minigrid')\n self.window.show(block=False)\n\n # Compute which cells are visible to the agent\n _, vis_mask = self.gen_obs_grid()\n\n # Compute the world coordinates of the bottom-left corner\n # of the agent's view area\n f_vec = self.dir_vec\n r_vec = self.right_vec\n top_left = self.agent_pos + f_vec * (self.agent_view_size-1) - r_vec * (self.agent_view_size // 2)\n\n # Mask of which cells to highlight\n highlight_mask = np.zeros(shape=(self.width, self.height), dtype=np.bool)\n\n # For each cell in the visibility mask\n for vis_j in range(0, self.agent_view_size):\n for vis_i in range(0, self.agent_view_size):\n # If this cell is not visible, don't highlight it\n if not vis_mask[vis_i, vis_j]:\n continue\n\n # Compute the world coordinates of this cell\n abs_i, abs_j = top_left - (f_vec * vis_j) + (r_vec * vis_i)\n\n if abs_i < 0 or abs_i >= self.width:\n continue\n if abs_j < 0 or abs_j >= self.height:\n continue\n\n # Mark this cell to be highlighted\n highlight_mask[abs_i, abs_j] = True\n\n # Render the whole grid\n img = self.grid.render(\n tile_size,\n self.agent_pos,\n self.agent_dir,\n highlight_mask=highlight_mask if highlight else None\n )\n\n if mode == 'human':\n self.window.show_img(img)\n self.window.set_caption(self.mission)\n\n return img\n\n def close(self):\n if self.window:\n self.window.close()\n return\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.array_equal", "numpy.ones" ] ]
pandali1/pyGAT
[ "3d9812ce101ac74ce963d155baad35d20b5a8d34" ]
[ "layers.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GraphAttentionLayer(nn.Module):\n \"\"\"\n Simple GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(GraphAttentionLayer, self).__init__()\n self.dropout = dropout\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n #转换矩阵,计算WH\n self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))\n nn.init.xavier_uniform_(self.W.data, gain=1.414)\n #用于计算e_ij\n self.a = nn.Parameter(torch.empty(size=(2*out_features, 1)))\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\n\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n\n def forward(self, h, adj):\n Wh = torch.mm(h, self.W) # 计算WH h.shape: (N, in_features), Wh.shape: (N, out_features)\n e = self._prepare_attentional_mechanism_input(Wh) #计算每个节点与其他节点的注意力值\n\n zero_vec = -9e15*torch.ones_like(e)\n attention = torch.where(adj > 0, e, zero_vec) #将注意力矩阵进行剪切,如果两个节点不相连的话,将其值重置为负无穷\n attention = F.softmax(attention, dim=1) #归一化\n attention = F.dropout(attention, self.dropout, training=self.training)#dropout\n h_prime = torch.matmul(attention, Wh)\n\n if self.concat:\n return F.elu(h_prime)\n else:\n return h_prime\n\n def _prepare_attentional_mechanism_input(self, Wh):\n # Wh.shape (N, out_feature)\n # self.a.shape (2 * out_feature, 1)\n # Wh1&2.shape (N, 1)\n # e.shape (N, N)\n Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])\n Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])\n # broadcast add\n e = Wh1 + Wh2.T\n return self.leakyrelu(e)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n\n\nclass SpecialSpmmFunction(torch.autograd.Function):\n \"\"\"Special function for only sparse region backpropataion layer.\"\"\"\n @staticmethod\n def forward(ctx, indices, values, shape, b):\n assert indices.requires_grad == False\n a = torch.sparse_coo_tensor(indices, values, shape)\n ctx.save_for_backward(a, b)\n ctx.N = shape[0]\n return torch.matmul(a, b)\n\n @staticmethod\n def backward(ctx, grad_output):\n a, b = ctx.saved_tensors\n grad_values = grad_b = None\n if ctx.needs_input_grad[1]:\n grad_a_dense = grad_output.matmul(b.t())\n edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]\n grad_values = grad_a_dense.view(-1)[edge_idx]\n if ctx.needs_input_grad[3]:\n grad_b = a.t().matmul(grad_output)\n return None, grad_values, None, grad_b\n\n\nclass SpecialSpmm(nn.Module):\n def forward(self, indices, values, shape, b):\n return SpecialSpmmFunction.apply(indices, values, shape, b)\n\n \nclass SpGraphAttentionLayer(nn.Module):\n \"\"\"\n Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(SpGraphAttentionLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))\n nn.init.xavier_normal_(self.W.data, gain=1.414)\n \n self.a = nn.Parameter(torch.zeros(size=(1, 2*out_features)))\n nn.init.xavier_normal_(self.a.data, gain=1.414)\n\n self.dropout = nn.Dropout(dropout)\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n self.special_spmm = SpecialSpmm()\n\n def forward(self, input, adj):\n dv = 'cuda' if input.is_cuda else 'cpu'\n\n N = input.size()[0]\n 
edge = adj.nonzero().t()\n\n h = torch.mm(input, self.W)\n # h: N x out\n assert not torch.isnan(h).any()\n\n # Self-attention on the nodes - Shared attention mechanism\n edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()\n # edge: 2*D x E\n\n edge_e = torch.exp(-self.leakyrelu(self.a.mm(edge_h).squeeze()))\n assert not torch.isnan(edge_e).any()\n # edge_e: E\n\n e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), torch.ones(size=(N,1), device=dv))\n # e_rowsum: N x 1\n\n edge_e = self.dropout(edge_e)\n # edge_e: E\n\n h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)\n assert not torch.isnan(h_prime).any()\n # h_prime: N x out\n \n h_prime = h_prime.div(e_rowsum)\n # h_prime: N x out\n assert not torch.isnan(h_prime).any()\n\n if self.concat:\n # if this layer is not last layer,\n return F.elu(h_prime)\n else:\n # if this layer is last layer,\n return h_prime\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n" ]
[ [ "torch.nn.functional.softmax", "torch.mm", "torch.nn.Dropout", "torch.empty", "torch.Size", "torch.nn.functional.dropout", "torch.zeros", "torch.ones", "torch.cat", "torch.isnan", "torch.nn.init.xavier_normal_", "torch.sparse_coo_tensor", "torch.matmul", "torch.nn.LeakyReLU", "torch.where", "torch.nn.init.xavier_uniform_", "torch.nn.functional.elu", "torch.ones_like" ] ]
Brenn10/jetyak_uav
[ "cd8b0622f8a8d293edd00508b2210aa76a238739" ]
[ "jetyak_uav_utils/scripts/nodes/filter/filter_node.py" ]
[ "#!/usr/bin/python\n\nimport numpy as np\nimport rospy as rp\nfrom std_srvs.srv import Trigger, TriggerResponse\nfrom sensor_msgs.msg import Imu\nfrom geometry_msgs.msg import PoseStamped, Vector3Stamped\nfrom data_point import DataPoint\nfrom fusion_ekf import FusionEKF\n\nclass FilterNode():\n\n\tdef __init__(self,):\n\t\trp.init_node(\"tag_pose_filter\")\n\t\tself.lastTag = DataPoint()\n\t\tself.gravityConst = 9.832\n\t\tself.biasX = -0.149\n\t\tself.biasY = 0.041\n\n\t\t# Number of States\n\t\tn = 17\n\n\t\t# Initial State Transition Matrix\n\t\tF = np.asmatrix(np.eye(n))\n\n\t\t# Process Matrix\n\t\tself.P = np.asmatrix(1.0e4 * np.eye(n))\n\n\t\t# Transition Matrix for Tag measurements\n\t\tHtag = np.matrix(np.zeros((7, n)))\n\t\tHtag[0:3, 0:3] = np.matrix(np.eye(3))\n\t\tHtag[3:7, 9:13] = np.matrix(np.eye(4))\n\n\t\t# Covariance Matrix for Tag measurements\n\t\tRtag = np.asmatrix(1.0e-6 * np.eye(7))\n\n\t\t# Transition Matrix for IMU measurements\n\t\tHimu = np.matrix(np.zeros((7, n)))\n\t\tHimu[0:3, 6:9] = np.matrix(np.eye(3))\n\t\tHimu[3:7, 13:17] = np.matrix(np.eye(4))\n\n\t\t# Covariance Matrix for IMU measurements\n\t\tRimu = np.asmatrix(1.0e-3 * np.eye(7))\n\n\t\t# Process Noise Level\n\t\tN = 1.0\n\n\t\t# Initialize Kalman Filter\n\t\tself.fusionF = FusionEKF(n, F, self.P, Htag, Himu, Rtag, Rimu, N)\n\n\t\t# Set up Subscribers\n\t\tself.imu_sub = rp.Subscriber(\"/dji_sdk/imu\", Imu, self.imu_callback)\n\t\tself.tag_sub = rp.Subscriber(\"tag_pose\", PoseStamped, self.tag_callback)\n\n\t\t# Set up Publishers\n\t\tself.tagVel_pub = rp.Publisher(\"tag_velocity\", Vector3Stamped, queue_size = 1)\n\t\tself.tag_pub = rp.Publisher(\"filtered_tag\", PoseStamped, queue_size = 1)\n\n\t\t# Set up Service Servers\n\t\tself.reset_service = rp.Service(\"reset_filter\", Trigger, self.reset_callback)\n\n\t\trp.spin()\n\n\tdef tag_callback(self, msg):\n\t\t# Prepare msg for process\n\t\ttagD = DataPoint()\n\t\ttagD.setID('tagPose')\n\t\ttagD.setZ(np.matrix([[msg.pose.position.x],\n\t\t\t\t\t\t\t[msg.pose.position.y],\n\t\t\t\t\t\t\t[msg.pose.position.z],\n\t\t\t\t\t\t\t[msg.pose.orientation.x],\n\t\t\t\t\t\t\t[msg.pose.orientation.y],\n\t\t\t\t\t\t\t[msg.pose.orientation.z],\n\t\t\t\t\t\t\t[msg.pose.orientation.w]]))\n\t\ttagD.setTime(msg.header.stamp.to_sec())\n\n\t\t# Process Data\n\t\tif self.checkOutliers(tagD):\n\t\t\tself.fusionF.process(tagD)\n\t\t\n\t\t# Get Filtered State\n\t\tif self.fusionF.isInit:\n\t\t\tfState = self.fusionF.getState()\n\n\t\t\t# Publish Filtered State\n\t\t\tpubMsg = PoseStamped()\n\t\t\tpubMsg.header.stamp = msg.header.stamp\n\t\t\tpubMsg.header.frame_id = msg.header.frame_id\n\t\t\tpubMsg.pose.position.x = fState.item(0)\n\t\t\tpubMsg.pose.position.y = fState.item(1)\n\t\t\tpubMsg.pose.position.z = fState.item(2)\n\t\t\tpubMsg.pose.orientation.x = fState.item(9)\n\t\t\tpubMsg.pose.orientation.y = fState.item(10)\n\t\t\tpubMsg.pose.orientation.z = fState.item(11)\n\t\t\tpubMsg.pose.orientation.w = fState.item(12)\n\t\t\tself.tag_pub.publish(pubMsg)\n\n\t\t\t# Publish Tag's Velocity\n\t\t\tvelMsg = Vector3Stamped()\n\t\t\tvelMsg.header.stamp = msg.header.stamp\n\t\t\tvelMsg.vector.x = fState.item(3)\n\t\t\tvelMsg.vector.y = fState.item(4)\n\t\t\tvelMsg.vector.z = fState.item(5)\n\t\t\tself.tagVel_pub.publish(velMsg)\t\t\n\n\tdef imu_callback(self, msg):\n\t\t# Prepare msg for process\n\t\timuD = DataPoint()\n\t\timuD.setID('imuAcc')\n\t\timuD.setZ(np.matrix([[(msg.linear_acceleration.x - self.biasX) / 
self.gravityConst],\n\t\t\t\t\t\t\t[(msg.linear_acceleration.y - self.biasY) / self.gravityConst],\n\t\t\t\t\t\t\t[(msg.linear_acceleration.z - self.gravityConst) / self.gravityConst],\n\t\t\t\t\t\t\t[msg.angular_velocity.x],\n\t\t\t\t\t\t\t[msg.angular_velocity.y],\n\t\t\t\t\t\t\t[msg.angular_velocity.z]]))\n\t\timuD.setTime(msg.header.stamp.to_sec())\n\n\t\t# Process Data\n\t\tself.fusionF.process(imuD)\n\n\tdef checkOutliers(self, newTag):\n\t\tif (self.lastTag.getTime() == None):\n\t\t\tself.lastTag = newTag\n\t\t\treturn True\n\t\telse:\n\t\t\tdt = newTag.getTime() - self.lastTag.getTime()\n\t\t\tvX = (newTag.getZ().item(0) - self.lastTag.getZ().item(0)) / dt\n\t\t\tvY = (newTag.getZ().item(1) - self.lastTag.getZ().item(1)) / dt\n\t\t\tvZ = (newTag.getZ().item(2) - self.lastTag.getZ().item(2)) / dt\n\n\t\t\tv = np.sqrt(pow(vX, 2) + pow(vY, 2) + pow(vZ, 2))\n\t\t\t\n\t\t\tif v < 5.0:\n\t\t\t\tself.lastTag = newTag\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\n\tdef reset_callback(self, req):\n\t\tprint(\"Resetting\")\n\t\tself.fusionF.resetFilter(self.P)\n\t\tsuccessful = True\n\t\t\n\t\treturn TriggerResponse(successful,\"Successfully reset filter\")\n\n# Start Node\nfiltered = FilterNode()\n" ]
[ [ "numpy.matrix", "numpy.eye", "numpy.zeros" ] ]
5cr009e/qianjinqiu
[ "68b5cd55440d0bebbd5a023b7dcce97a2bc86c75" ]
[ "Fund/FundVisualizer.py" ]
[ "#coding:utf-8\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport platform\n\n\ndef setup_mpl():\n fonts_dict = {\n \"Linux\": \"WenQuanYi Zen Hei\",\n \"Windows\": \"SimHei\",\n }\n # print(platform.platform())\n for system in [\"Linux\", \"Windows\"]:\n if system in platform.platform():\n matplotlib.rcParams['font.sans-serif'] = fonts_dict[system]\n matplotlib.rcParams['font.family'] = fonts_dict[system]\n matplotlib.rcParams['axes.unicode_minus'] = False\n break\n \n\nclass FundVisualizer:\n def __init__(self):\n setup_mpl() \n self.fig, self.axes = plt.subplots(nrows=2, ncols=1)\n\n def plot(self, fund, keys=['单位净值', '累计净值']):\n fund.data[keys].plot(ax=self.axes[0])\n fund.data['日增长率'].plot(ax=self.axes[1])\n plt.show() " ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
xnchu/pyspedas
[ "62657581c0b6ed980fcd99ac34455a8b7a77dede" ]
[ "pyspedas/mms/feeps/mms_feeps_pad.py" ]
[ "import warnings\nimport numpy as np\nfrom pytplot import get_data, store_data, options\nfrom pyspedas.mms.feeps.mms_feeps_pitch_angles import mms_feeps_pitch_angles\nfrom pyspedas.mms.feeps.mms_feeps_active_eyes import mms_feeps_active_eyes\nfrom pyspedas.mms.feeps.mms_feeps_pad_spinavg import mms_feeps_pad_spinavg\n\ndef mms_feeps_pad(bin_size=16.3636, probe='1', energy=[70, 600], level='l2', suffix='', datatype='electron', data_units='intensity', data_rate='srvy', angles_from_bfield=False):\n \"\"\"\n This function will calculate pitch angle distributions using data from the MMS Fly's Eye Energetic Particle Sensor (FEEPS)\n \n Parameters:\n probe: str\n probe #, e.g., '4' for MMS4\n\n data_units: str\n 'intensity' \n\n datatype: str\n 'electron' or 'ion'\n\n data_rate: str\n instrument data rate, e.g., 'srvy' or 'brst'\n\n level: str\n data level\n\n suffix: str\n suffix of the loaded data\n\n energy: list of float\n energy range to include in the calculation\n\n bin_size: float\n size of the pitch angle bins\n \n angles_from_bfield: bool\n calculate the pitch angles from the B-field data instead of reading from the CDFs\n\n Returns:\n List of tplot variables created.\n \"\"\"\n\n # account for angular response (finite field of view) of instruments\n # electrons can use +/- 21.4 deg on each pitch angle as average response angle; ions can start with +/-10 deg, but both need to be further refined\n if datatype == 'electron':\n dangresp = 21.4 # deg\n elif datatype == 'ion': \n dangresp = 10.0 # deg\n\n if energy[0] < 32.0:\n print('Please select a starting energy of 32 keV or above')\n return\n\n units_label = ''\n if data_units == 'intensity':\n units_label = '1/(cm^2-sr-s-keV)'\n elif data_units == 'counts':\n units_label = '[counts/s]'\n \n if not isinstance(probe, str): probe=str(probe)\n\n prefix = 'mms' + probe\n n_pabins = 180/bin_size\n pa_bins = [180.*pa_bin/n_pabins for pa_bin in range(0, int(n_pabins)+1)]\n pa_label = [180.*pa_bin/n_pabins+bin_size/2. 
for pa_bin in range(0, int(n_pabins))]\n\n if data_rate == 'brst' and angles_from_bfield == False:\n # v5.5+ = mms1_epd_feeps_srvy_l2_electron_pitch_angle\n pa_times, pa_data = get_data(prefix+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_pitch_angle'+suffix)\n else:\n pa_var, idx_maps = mms_feeps_pitch_angles(probe=probe, level=level, data_rate=data_rate, datatype=datatype, suffix=suffix)\n pa_times, pa_data = get_data(pa_var)\n\n if pa_data is None:\n print(\"Error, couldn't find the PA variable\")\n return\n\n eyes = mms_feeps_active_eyes([pa_times.min(), pa_times.max()], probe, data_rate, datatype, level)\n\n pa_data_map = {}\n\n if data_rate == 'srvy':\n if datatype == 'electron': \n pa_data_map['top-electron'] = idx_maps['electron-top']\n pa_data_map['bottom-electron'] = idx_maps['electron-bottom']\n if datatype == 'ion':\n pa_data_map['top-ion'] = idx_maps['ion-top']\n pa_data_map['bottom-ion'] = idx_maps['ion-bottom']\n elif data_rate == 'brst':\n # note: the following are indices of the top/bottom sensors in pa_data\n # they should be consistent with pa_dlimits.labels\n pa_data_map['top-electron'] = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n pa_data_map['bottom-electron'] = [9, 10, 11, 12, 13, 14, 15, 16, 17]\n # and ions:\n pa_data_map['top-ion'] = [0, 1, 2]\n pa_data_map['bottom-ion'] = [3, 4, 5]\n\n sensor_types = ['top', 'bottom']\n\n if datatype == 'electron':\n dflux = np.zeros([len(pa_times), len(pa_data_map['top-electron'])+len(pa_data_map['bottom-electron'])])\n dpa = np.zeros([len(pa_times), len(pa_data_map['top-electron'])+len(pa_data_map['bottom-electron'])])\n elif datatype == 'ion':\n dflux = np.zeros([len(pa_times), len(pa_data_map['top-ion'])+len(pa_data_map['bottom-ion'])])\n dpa = np.zeros([len(pa_times), len(pa_data_map['top-ion'])+len(pa_data_map['bottom-ion'])])\n\n for s_type in sensor_types:\n pa_map = pa_data_map[s_type+'-'+datatype]\n particle_idxs = [eye-1 for eye in eyes[s_type]]\n for isen, sensor_num in enumerate(particle_idxs):\n var_name = 'mms'+str(probe)+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+s_type+'_'+data_units+'_sensorid_'+str(sensor_num+1)+'_clean_sun_removed'+suffix\n times, data, energies = get_data(var_name)\n data[data == 0] = 'nan' # remove any 0s before averaging\n if np.isnan(energies[0]): # assumes all energies are NaNs if the first is\n continue\n # energy indices to use:\n indx = np.where((energies >= energy[0]) & (energies <= energy[1]))\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n dflux[:, pa_map[isen]] = np.nanmean(data[:, indx[0]], axis=1)\n dpa[:, pa_map[isen]] = pa_data[:, pa_map[isen]]\n\n # we need to replace the 0.0s left in after populating dpa with NaNs; these \n # 0.0s are left in there because these points aren't covered by sensors loaded\n # for this datatype/data_rate\n dpa[dpa == 0] = 'nan'\n\n pa_flux = np.zeros([len(pa_times), int(n_pabins)])\n delta_pa = (pa_bins[1]-pa_bins[0])/2.0\n\n # Now loop through PA bins and time, find the telescopes where there is data in those bins and average it up!\n for pa_idx, pa_time in enumerate(pa_times):\n for ipa in range(0, int(n_pabins)):\n if not np.isnan(dpa[pa_idx, :][0]):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n ind = np.where((dpa[pa_idx, :] + dangresp >= pa_label[ipa]-delta_pa) & (dpa[pa_idx, :]-dangresp < pa_label[ipa]+delta_pa))\n if ind[0].size != 0:\n if len(ind[0]) > 1:\n pa_flux[pa_idx, ipa] = np.nanmean(dflux[pa_idx, ind[0]], axis=0)\n else:\n 
pa_flux[pa_idx, ipa] = dflux[pa_idx, ind[0]]\n\n pa_flux[pa_flux == 0] = 'nan' # fill any missed bins with NAN\n\n en_range_string = str(int(energy[0])) + '-' + str(int(energy[1])) + 'keV'\n new_name = 'mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_'+ en_range_string +'_pad'+suffix\n\n store_data(new_name, data={'x': times, 'y': pa_flux, 'v': pa_label})\n options(new_name, 'ylog', False)\n options(new_name, 'zlog', True)\n options(new_name, 'spec', True)\n options(new_name, 'Colormap', 'jet')\n options(new_name, 'ztitle', units_label)\n options(new_name, 'ytitle', 'MMS' + str(probe) + ' ' + datatype + ' PA (deg)')\n\n # create the spin-averaged PAD\n spin_avg_var = mms_feeps_pad_spinavg(probe=probe, data_units=data_units, datatype=datatype, data_rate=data_rate, level=level, suffix=suffix, energy=energy)\n \n return [new_name, spin_avg_var]\n" ]
[ [ "numpy.isnan", "numpy.where", "numpy.nanmean" ] ]
usc-isi-i2/dsbox-cleaning
[ "3cb5146dbf89f0ea2f8bf71a843eb1cfa63f7917" ]
[ "dsbox/datapreprocessing/cleaner/encoder.py" ]
[ "import logging\nfrom typing import NamedTuple, Dict, List, Set, Union\n\nimport d3m\nimport d3m.metadata.base as mbase\nimport numpy as np\nimport pandas as pd\nfrom common_primitives import utils\nfrom d3m.container import DataFrame as d3m_DataFrame\nfrom d3m.metadata import hyperparams as metadata_hyperparams\nfrom d3m.metadata import hyperparams, params\nfrom d3m.metadata.hyperparams import Enumeration, UniformInt, UniformBool\nfrom d3m.primitive_interfaces.base import CallResult\nfrom d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase\n\nfrom . import config\n\n_logger = logging.getLogger(__name__)\n\nInput = d3m.container.DataFrame\nOutput = d3m.container.DataFrame\n\n\nclass EncParams(params.Params):\n mapping: Dict\n cat_columns: List[str]\n empty_columns: List[int]\n\n\nclass EncHyperparameter(hyperparams.Hyperparams):\n n_limit = UniformInt(lower=5, upper=100, default=12,\n description='Limits the maximum number of columns generated from a single categorical column',\n semantic_types=['http://schema.org/Integer',\n 'https://metadata.datadrivendiscovery.org/types/TuningParameter'])\n use_columns = hyperparams.Set(\n elements=hyperparams.Hyperparameter[int](-1),\n default=(),\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.\",\n )\n exclude_columns = hyperparams.Set(\n elements=hyperparams.Hyperparameter[int](-1),\n default=(),\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"A set of column indices to not operate on. Applicable only if \\\"use_columns\\\" is not provided.\",\n )\n return_result = hyperparams.Enumeration(\n values=['append', 'replace', 'new'],\n default='replace',\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.\",\n )\n use_semantic_types = hyperparams.UniformBool(\n default=False,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe\"\n )\n add_index_columns = hyperparams.UniformBool(\n default=True,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"Also include primary index columns if input data has them. Applicable only if \\\"return_result\\\" is set to \\\"new\\\".\",\n )\n\n\nclass Encoder(UnsupervisedLearnerPrimitiveBase[Input, Output, EncParams, EncHyperparameter]):\n \"\"\"\n An one-hot encoder, which\n 1. n_limit: max number of distinct values to one-hot encode,\n remaining values with fewer occurence are put in [colname]_other_ column.\n\n 2. feed in data by set_training_data, then apply fit() function to tune the encoder.\n\n 3. 
produce(): input data would be encoded and return.\n \"\"\"\n metadata = hyperparams.base.PrimitiveMetadata({\n \"id\": \"18f0bb42-6350-3753-8f2d-d1c3da70f279\",\n \"version\": config.VERSION,\n \"name\": \"ISI DSBox Data Encoder\",\n \"description\": \"Encode data, such as one-hot encoding for categorical data\",\n \"python_path\": \"d3m.primitives.data_preprocessing.Encoder.DSBOX\",\n \"primitive_family\": \"DATA_PREPROCESSING\",\n \"algorithm_types\": [\"ENCODE_ONE_HOT\"],\n \"source\": {\n \"name\": config.D3M_PERFORMER_TEAM,\n \"contact\": config.D3M_CONTACT,\n \"uris\": [config.REPOSITORY]\n },\n \"keywords\": [\"preprocessing\", \"encoding\"],\n \"installation\": [config.INSTALLATION],\n })\n\n def __repr__(self):\n return \"%s(%r)\" % ('Encoder', self.__dict__)\n\n def __init__(self, *, hyperparams: EncHyperparameter) -> None:\n\n super().__init__(hyperparams=hyperparams)\n self.hyperparams = hyperparams\n self._mapping: Dict = {}\n self._input_data: Input = None\n self._input_data_copy = None\n self._fitted = False\n self._cat_columns = []\n self._col_index = None\n self._empty_columns = []\n\n def set_training_data(self, *, inputs: Input) -> None:\n self._input_data = inputs\n self._fitted = False\n\n def _trim_features(self, feature, n_limit):\n\n topn = feature.dropna().unique()\n if n_limit:\n if feature.dropna().nunique() > n_limit:\n topn = list(feature.value_counts().head(n_limit).index)\n topn.append('other_')\n topn = [x for x in topn if x]\n return feature.name, topn\n\n def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:\n\n if self._fitted:\n return\n\n if self._input_data is None:\n raise ValueError('Missing training(fitting) data.')\n\n # Look at attribute columns only\n # print('fit in', self._input_data.columns)\n data = self._input_data.copy()\n all_attributes = utils.list_columns_with_semantic_types(metadata=data.metadata, semantic_types=[\n \"https://metadata.datadrivendiscovery.org/types/Attribute\"])\n\n # Remove columns with all empty values, structural type str\n numeric = utils.list_columns_with_semantic_types(\n data.metadata, ['http://schema.org/Integer', 'http://schema.org/Float'])\n numeric = [x for x in numeric if x in all_attributes]\n\n self._empty_columns = []\n _logger.debug(f'Numeric columns: {numeric}')\n for element in numeric:\n if data.metadata.query((mbase.ALL_ELEMENTS, element)).get('structural_type', ()) == str:\n if pd.isnull(pd.to_numeric(data.iloc[:, element])).sum() == data.shape[0]:\n _logger.debug(f'Empty numeric str column: {element}')\n self._empty_columns.append(element)\n\n # Remove columns with all empty values, structural numeric\n is_empty = pd.isnull(data).sum(axis=0) == data.shape[0]\n for i in all_attributes:\n if is_empty.iloc[i] and i not in self._empty_columns:\n _logger.debug(f'Empty numeric str column: {element}')\n self._empty_columns.append(i)\n\n _logger.debug('Removing entirely empty columns: {}'.format(data.columns[self._empty_columns]))\n\n data = utils.remove_columns(data, self._empty_columns)\n\n categorical_attributes = utils.list_columns_with_semantic_types(metadata=data.metadata,\n semantic_types=[\n \"https://metadata.datadrivendiscovery.org/types/OrdinalData\",\n \"https://metadata.datadrivendiscovery.org/types/CategoricalData\"])\n all_attributes = utils.list_columns_with_semantic_types(metadata=data.metadata, semantic_types=[\n \"https://metadata.datadrivendiscovery.org/types/Attribute\"])\n\n self._cat_col_index = 
list(set(all_attributes).intersection(categorical_attributes))\n self._cat_columns = data.columns[self._cat_col_index].tolist()\n\n _logger.debug('Encoding columns: {}'.format(self._cat_columns))\n\n mapping = {}\n for column_name in self._cat_columns:\n col = data[column_name]\n temp = self._trim_features(col, self.hyperparams['n_limit'])\n if temp:\n mapping[temp[0]] = temp[1]\n self._mapping = mapping\n self._fitted = True\n return CallResult(None, has_finished=True)\n\n def produce(self, *, inputs: Input, timeout: float = None, iterations: int = None) -> CallResult[Output]:\n \"\"\"\n Convert and output the input data into encoded format,\n using the trained (fitted) encoder.\n Notice that [colname]_other_ and [colname]_nan columns\n are always kept for one-hot encoded columns.\n \"\"\"\n\n self._input_data_copy = inputs.copy()\n\n # Remove columns with all empty values\n _logger.debug('Removing entirely empty columns: {}'.format(self._input_data_copy.columns[self._empty_columns]))\n self._input_data_copy = utils.remove_columns(self._input_data_copy, self._empty_columns)\n\n # Return if there is nothing to encode\n if len(self._cat_columns) == 0:\n return CallResult(self._input_data_copy, True, 1)\n\n _logger.debug('Encoding columns: {}'.format(self._cat_columns))\n\n data_encode = self._input_data_copy[list(self._mapping.keys())]\n\n # Get rid of false SettingWithCopyWarning\n data_encode.is_copy = None\n\n res = []\n for column_name in self._cat_columns:\n feature = data_encode[column_name].copy()\n other_ = lambda x: 'Other' if (x and x not in self._mapping[column_name]) else x\n nan_ = lambda x: x if x else np.nan\n feature.loc[feature.notnull()] = feature[feature.notnull()].apply(other_)\n feature = feature.apply(nan_)\n new_column_names = ['{}_{}'.format(column_name, i) for i in self._mapping[column_name] + ['nan']]\n encoded = pd.get_dummies(feature, dummy_na=True, prefix=column_name)\n missed = [name for name in new_column_names if name not in list(encoded.columns)]\n for m in missed:\n # print('missing', m)\n encoded[m] = 0\n encoded = encoded[new_column_names]\n res.append(encoded)\n # data_encode.loc[:,column_name] = feature\n\n # Drop columns that will be encoded\n # data_rest = self._input_data_copy.drop(self._mapping.keys(), axis=1)\n columns_names = self._input_data_copy.columns.tolist()\n drop_indices = [columns_names.index(col) for col in self._mapping.keys()]\n drop_indices = sorted(drop_indices)\n\n all_categorical = False\n try:\n self._input_data_copy = utils.remove_columns(self._input_data_copy, drop_indices)\n except ValueError:\n _logger.warning(\"[warn] All the attributes are categorical!\")\n all_categorical = True\n\n # metadata for columns that are not one hot encoded\n # self._col_index = [self._input_data_copy.columns.get_loc(c) for c in data_rest.columns]\n # data_rest.metadata = utils.select_columns_metadata(self._input_data_copy.metadata, self._col_index)\n\n # encode data\n # encoded = d3m_DataFrame(pd.get_dummies(data_encode, dummy_na=True, prefix=self._cat_columns, prefix_sep='_',\n # columns=self._cat_columns))\n encoded = d3m_DataFrame(pd.concat(res, axis=1))\n\n # update metadata for existing columns\n\n for index in range(len(encoded.columns)):\n old_metadata = dict(encoded.metadata.query((mbase.ALL_ELEMENTS, index)))\n old_metadata[\"structural_type\"] = int\n old_metadata[\"semantic_types\"] = (\n 'http://schema.org/Integer', 'https://metadata.datadrivendiscovery.org/types/Attribute')\n encoded.metadata = 
encoded.metadata.update((mbase.ALL_ELEMENTS, index), old_metadata)\n ## merge/concat both the dataframes\n if not all_categorical:\n output = utils.horizontal_concat(self._input_data_copy, encoded)\n else:\n output = encoded\n return CallResult(output, True, 1)\n\n def get_params(self) -> EncParams:\n if not self._fitted:\n raise ValueError(\"Fit not performed.\")\n return EncParams(\n mapping=self._mapping,\n cat_columns=self._cat_columns,\n empty_columns=self._empty_columns)\n\n def set_params(self, *, params: EncParams) -> None:\n self._fitted = True\n self._mapping = params['mapping']\n self._cat_columns = params['cat_columns']\n self._empty_columns = params['empty_columns']\n\n @classmethod\n def _get_columns_to_fit(cls, inputs: Input, hyperparams: EncHyperparameter):\n if not hyperparams['use_semantic_types']:\n return inputs, list(range(len(inputs.columns)))\n\n inputs_metadata = inputs.metadata\n\n def can_produce_column(column_index: int) -> bool:\n return cls._can_produce_column(inputs_metadata, column_index, hyperparams)\n\n columns_to_produce, columns_not_to_produce = common_utils.get_columns_to_use(inputs_metadata,\n use_columns=hyperparams[\n 'use_columns'],\n exclude_columns=hyperparams[\n 'exclude_columns'],\n can_use_column=can_produce_column)\n return inputs.iloc[:, columns_to_produce], columns_to_produce\n\n @classmethod\n def _can_produce_column(cls, inputs_metadata: mbase.DataMetadata, column_index: int,\n hyperparams: EncHyperparameter) -> bool:\n column_metadata = inputs_metadata.query((mbase.ALL_ELEMENTS, column_index))\n\n semantic_types = column_metadata.get('semantic_types', [])\n if len(semantic_types) == 0:\n cls.logger.warning(\"No semantic types found in column metadata\")\n return False\n if \"https://metadata.datadrivendiscovery.org/types/Attribute\" in semantic_types:\n return True\n\n return False\n" ]
[ [ "pandas.concat", "pandas.to_numeric", "pandas.isnull", "pandas.get_dummies" ] ]
yuantn/MI-AOD
[ "e57114d60f9ce5e43839cdf7068a90ee58092ec8" ]
[ "mmdet/models/dense_heads/ssd_head.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import xavier_init\nfrom mmdet.core import (build_anchor_generator, build_assigner,\n build_bbox_coder, build_sampler, multi_apply)\nfrom ..builder import HEADS\nfrom ..losses import smooth_l1_loss\nfrom .MIAOD_head import MIAODHead\nimport numpy as np\n\n\n# TODO: add loss evaluator for SSD\[email protected]_module()\nclass SSDHead(MIAODHead):\n \"\"\"SSD head used in https://arxiv.org/abs/1512.02325.\n\n Args:\n C (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n anchor_generator (dict): Config dict for anchor generator\n background_label (int | None): Label ID of background, set as 0 for\n RPN and C for other heads. It will automatically set as\n C if None is given.\n bbox_coder (dict): Config of bounding box coder.\n reg_decoded_bbox (bool): If true, the regression loss would be\n applied on decoded bounding boxes. Default: False\n train_cfg (dict): Training config of anchor head.\n test_cfg (dict): Testing config of anchor head.\n \"\"\" # noqa: W605\n\n def __init__(self, C=20, in_channels=(512, 1024, 512, 256, 256, 256),\n anchor_generator=dict(type='SSDAnchorGenerator', scale_major=False, input_size=300,\n strides=[8, 16, 32, 64, 100, 300],\n ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),\n basesize_ratio_range=(0.1, 0.9)),\n background_label=20,\n bbox_coder=dict(type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0]),\n reg_decoded_bbox=False, train_cfg=None, test_cfg=None):\n super(MIAODHead, self).__init__()\n if train_cfg is not None:\n self.param_lambda = train_cfg.param_lambda\n self.in_channels = in_channels\n self.C = C\n self.cls_out_channels = C + 1 # add background class\n self.anchor_generator = build_anchor_generator(anchor_generator)\n N = self.anchor_generator.num_base_anchors\n self.l_imgcls = nn.BCELoss()\n\n f_r_convs = []\n f_1_convs = []\n f_2_convs = []\n f_mil_convs = []\n for i in range(len(in_channels)):\n f_r_convs.append(nn.Conv2d(in_channels[i], N[i] * 4, kernel_size=3, padding=1))\n f_1_convs.append(nn.Conv2d(in_channels[i], N[i] * (C + 1), kernel_size=3, padding=1))\n f_2_convs.append(nn.Conv2d(in_channels[i], N[i] * (C + 1), kernel_size=3, padding=1))\n f_mil_convs.append(nn.Conv2d(in_channels[i], N[i] * (C + 1), kernel_size=3, padding=1))\n self.f_r_convs = nn.ModuleList(f_r_convs)\n self.f_1_convs = nn.ModuleList(f_1_convs)\n self.f_2_convs = nn.ModuleList(f_2_convs)\n self.f_mil_convs = nn.ModuleList(f_mil_convs)\n self.background_label = (C if background_label is None else background_label)\n # background_label should be either 0 or C\n assert (self.background_label == 0 or self.background_label == C)\n self.bbox_coder = build_bbox_coder(bbox_coder)\n self.reg_decoded_bbox = reg_decoded_bbox\n self.use_sigmoid_cls = False\n self.cls_focal_loss = False\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n # set sampling=False for archor_target\n self.sampling = False\n if self.train_cfg:\n self.assigner = build_assigner(self.train_cfg.assigner)\n # SSD sampling=False so use PseudoSampler\n sampler_cfg = dict(type='PseudoSampler')\n self.sampler = build_sampler(sampler_cfg, context=self)\n self.fp16_enabled = False\n\n def init_weights(self):\n \"\"\"Initialize weights of the head.\"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform', bias=0)\n\n def forward(self, x):\n \"\"\"Forward 
features from the upstream network.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple:\n y_f (list[Tensor]): Classification scores for all scale\n levels, each is a 4D-tensor, the channels number is\n N * C.\n y_head_f_r (list[Tensor]): Box energies / deltas for all scale\n levels, each is a 4D-tensor, the channels number is\n N * 4.\n \"\"\"\n\n # add another cls\n y_head_f_1 = []\n y_head_f_2 = []\n y_head_f_r = []\n y_head_cls = []\n for x_single, reg_conv, cls_conv1, cls_conv2, mil_conv in \\\n zip(x, self.f_r_convs, self.f_1_convs, self.f_2_convs, self.f_mil_convs):\n y_head_f_1_single = cls_conv1(x_single)\n y_head_f_2_single = cls_conv2(x_single)\n y_head_f_r_single = reg_conv(x_single)\n y_head_f_mil = mil_conv(x_single)\n\n y_head_cls_term2 = (y_head_f_1_single + y_head_f_2_single) / 2\n y_head_cls_term2 = y_head_cls_term2.detach()\n y_head_f_mil = y_head_f_mil.permute(0, 2, 3, 1).reshape(y_head_f_1_single.shape[0],\n -1, self.cls_out_channels)\n y_head_cls_term2 = y_head_cls_term2.permute(0, 2, 3, 1).reshape(y_head_f_1_single.shape[0],\n -1, self.cls_out_channels)\n y_head_cls_single = y_head_f_mil.softmax(2) * y_head_cls_term2.sigmoid().max(2, keepdim=True)[0].softmax(1)\n\n y_head_f_1.append(y_head_f_1_single)\n y_head_f_2.append(y_head_f_2_single)\n y_head_f_r.append(y_head_f_r_single)\n y_head_cls.append(y_head_cls_single)\n return y_head_f_1, y_head_f_2, y_head_f_r, y_head_cls\n\n def l_det(self, y_head_f_single, y_head_f_r_single, x_i_single, y_cls_single, label_weights,\n y_loc_single, bbox_weights, num_total_samples):\n \"\"\"Compute loss of a single image.\n\n Args:\n y_head_f_single (Tensor): Box scores for eachimage\n Has shape (num_total_anchors, C).\n y_head_f_r_single (Tensor): Box energies / deltas for each image\n level with shape (num_total_anchors, 4).\n x_i_single (Tensor): Box reference for each scale level with shape\n (num_total_anchors, 4).\n y_cls_single (Tensor): Labels of each anchors with shape\n (num_total_anchors,).\n label_weights (Tensor): Label weights of each anchor with shape\n (num_total_anchors,)\n y_loc_single (Tensor): BBox regression targets of each anchor wight\n shape (num_total_anchors, 4).\n bbox_weights (Tensor): BBox regression loss weights of each anchor\n with shape (num_total_anchors, 4).\n num_total_samples (int): If sampling, num total samples equal to\n the number of total anchors; Otherwise, it is the number of\n positive anchors.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n l_det_cls_all = F.cross_entropy(\n y_head_f_single, y_cls_single, reduction='none') * label_weights\n\n # FG cat_id: [0, C -1], BG cat_id: C\n pos_inds = ((y_cls_single >= 0) & (y_cls_single < self.background_label)).nonzero().reshape(-1)\n neg_inds = (y_cls_single == self.background_label).nonzero().view(-1)\n\n # fore/background partition\n if pos_inds.dim() == 0:\n return l_det_cls_all.sum()[None]*0, l_det_cls_all.sum()\n num_pos_samples = pos_inds.size(0)\n num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples\n if num_neg_samples > neg_inds.size(0):\n num_neg_samples = neg_inds.size(0)\n topk_l_det_cls_neg, _ = l_det_cls_all[neg_inds].topk(num_neg_samples)\n l_det_cls_pos = l_det_cls_all[pos_inds].sum()\n l_det_cls_neg = topk_l_det_cls_neg.sum()\n # loss for pos and neg (our loss only use pos)\n l_det_cls = (l_det_cls_pos + l_det_cls_neg) / num_total_samples\n\n if self.reg_decoded_bbox:\n y_head_f_r_single = self.bbox_coder.decode(x_i_single, 
y_head_f_r_single)\n l_det_loc = smooth_l1_loss(y_head_f_r_single, y_loc_single, bbox_weights,\n beta=self.train_cfg.smoothl1_beta, avg_factor=num_total_samples)\n return l_det_cls[None], l_det_loc\n\n def L_det(self, y_f, y_f_r, y_head_cls, y_loc_img, y_cls_img, img_metas, y_loc_img_ignore=None):\n \"\"\"Compute losses of the head.\n\n Args:\n y_f (list[Tensor]): Box scores for each scale level\n Has shape (N, N * C, H, W)\n y_f_r (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, N * 4, H, W)\n y_loc_img (list[Tensor]): each item are the truth boxes for each\n image in [tl_x, tl_y, br_x, br_y] format.\n y_cls_img (list[Tensor]): class indices corresponding to each box\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n y_loc_img_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in y_f]\n assert len(featmap_sizes) == self.anchor_generator.num_levels\n device = y_f[0].device\n x_i, valid_flag_list = self.get_anchors(featmap_sizes, img_metas, device=device)\n cls_reg_targets = self.get_targets(x_i, valid_flag_list, y_loc_img, img_metas,\n y_loc_img_ignore_list=y_loc_img_ignore,\n y_cls_img_list=y_cls_img,\n label_channels=1, unmap_outputs=False)\n if cls_reg_targets is None:\n return None\n (y_cls, label_weights_list, y_loc, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets\n num_images = len(img_metas)\n all_y_f = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, -1, self.cls_out_channels) for s in y_f], 1)\n all_y_cls = torch.cat(y_cls, -1).view(num_images, -1)\n all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1)\n all_y_f_r = torch.cat([b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in y_f_r], -2)\n all_y_loc = torch.cat(y_loc, -2).view(num_images, -1, 4)\n all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4)\n # concat all level anchors to a single tensor\n all_x_i = []\n for i in range(num_images):\n all_x_i.append(torch.cat(x_i[i]))\n # check NaN and Inf\n assert torch.isfinite(all_y_f).all().item(), 'classification scores become infinite or NaN!'\n assert torch.isfinite(all_y_f_r).all().item(), 'bbox predications become infinite or NaN!'\n l_det_cls, l_det_loc = multi_apply(self.l_det, all_y_f, all_y_f_r, all_x_i,\n all_y_cls, all_label_weights, all_y_loc, all_bbox_weights,\n num_total_samples=num_total_pos)\n # compute mil loss\n y_head_cls_1level, y_cls_1level = self.get_img_gtlabel_score(y_cls_img, y_head_cls)\n l_imgcls = self.l_imgcls(y_head_cls_1level, y_cls_1level)\n return dict(l_det_cls=l_det_cls, l_det_loc=l_det_loc, l_imgcls=[l_imgcls])\n\n def l_wave_dis(self, y_head_f_1_single, y_head_f_2_single, y_head_cls_single):\n w_i = y_head_cls_single.detach()\n l_det_cls_all = (abs(y_head_f_1_single.softmax(-1) - y_head_f_2_single.softmax(-1)) *\n w_i.reshape(-1, self.cls_out_channels)).mean(dim=1).sum() * self.param_lambda\n l_det_loc = torch.tensor([0.0], device=y_head_f_1_single.device)\n return l_det_cls_all[None], l_det_loc\n\n # Re-weighting and minimizing instance uncertainty\n def L_wave_min(self, y_f, y_f_r, y_head_cls, y_loc_img, y_cls_img, img_metas, y_loc_img_ignore=None):\n featmap_sizes = [featmap.size()[-2:] for featmap in y_f[0]]\n assert len(featmap_sizes) == self.anchor_generator.num_levels\n device = y_f[0][0].device\n x_i, valid_flag_list 
= self.get_anchors(featmap_sizes, img_metas, device=device)\n cls_reg_targets = self.get_targets(x_i, valid_flag_list, y_loc_img, img_metas,\n y_loc_img_ignore_list=y_loc_img_ignore,\n y_cls_img_list=y_cls_img,\n label_channels=1, unmap_outputs=False)\n if cls_reg_targets is None:\n return None\n (y_cls, label_weights_list, y_loc, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets\n num_images = len(img_metas)\n all_y_f_1 = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, -1, self.cls_out_channels) for s in y_f[0]], 1)\n all_y_f_2 = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, -1, self.cls_out_channels) for s in y_f[1]], 1)\n all_y_cls = torch.cat(y_cls, -1).view(num_images, -1)\n all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1)\n all_y_f_r = torch.cat([b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in y_f_r], -2)\n all_y_loc = torch.cat(y_loc, -2).view(num_images, -1, 4)\n all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4)\n # concat all level anchors to a single tensor\n all_x_i = []\n for i in range(num_images):\n all_x_i.append(torch.cat(x_i[i]))\n all_y_head_cls = torch.cat([s for s in y_head_cls], 1)\n l_wave_dis, l_det_loc = multi_apply(self.l_wave_dis, all_y_f_1, all_y_f_2, all_y_head_cls)\n if np.array([y_loc_img[i].sum() for i in range(len(y_loc_img))]).sum() < 0:\n l_det_cls = [torch.tensor(0.0, device=device)]\n l_det_loc = [torch.tensor(0.0, device=device)]\n for (i, value) in enumerate(l_det_loc):\n if value.isnan():\n l_det_loc[i].data = torch.tensor(0.0, device=device)\n # compute mil loss\n y_head_cls_1level, y_pseudo = self.get_img_pseudolabel_score(y_f, y_head_cls)\n if (y_pseudo.sum(1) == 0).sum() > 0: # ignore hard images\n l_imgcls = self.l_imgcls(y_head_cls_1level, y_pseudo) * 0\n else:\n l_imgcls = self.l_imgcls(y_head_cls_1level, y_pseudo)\n else:\n l_det_cls1, l_det_loc1 = multi_apply(self.l_det, all_y_f_1, all_y_f_r, all_x_i,\n all_y_cls, all_label_weights, all_y_loc, all_bbox_weights,\n num_total_samples=num_total_pos)\n l_det_cls2, l_det_loc2 = multi_apply(self.l_det, all_y_f_2, all_y_f_r, all_x_i,\n all_y_cls, all_label_weights, all_y_loc, all_bbox_weights,\n num_total_samples=num_total_pos)\n l_det_cls = list(map(lambda m, n: (m + n) / 2, l_det_cls1, l_det_cls2))\n l_det_loc = list(map(lambda m, n: (m + n) / 2, l_det_loc1, l_det_loc2))\n l_wave_dis = list(map(lambda m: m * 0.0, l_wave_dis))\n # compute mil loss\n y_head_cls_1level, y_cls_1level = self.get_img_gtlabel_score(y_cls_img, y_head_cls)\n l_imgcls = self.l_imgcls(y_head_cls_1level, y_cls_1level)\n return dict(l_det_cls=l_det_cls, l_det_loc=l_det_loc, l_wave_dis=l_wave_dis, l_imgcls=[l_imgcls])\n\n def l_wave_dis_minus(self, y_head_f_1_single, y_head_f_2_single, y_head_cls_single):\n w_i = y_head_cls_single.detach()\n l_det_cls_all = ((1 - abs(y_head_f_1_single.softmax(-1) - y_head_f_2_single.softmax(-1))) *\n w_i.reshape(-1, self.cls_out_channels)).mean(dim=1).sum() * self.param_lambda\n l_det_loc = torch.tensor([0.0], device=y_head_f_1_single.device)\n return l_det_cls_all[None], l_det_loc\n\n # Re-weighting and maximizing instance uncertainty\n def L_wave_max(self, y_f, y_f_r, y_head_cls, y_loc_img, y_cls_img, img_metas, y_loc_img_ignore=None):\n featmap_sizes = [featmap.size()[-2:] for featmap in y_f[0]]\n assert len(featmap_sizes) == self.anchor_generator.num_levels\n device = y_f[0][0].device\n x_i, valid_flag_list = self.get_anchors(featmap_sizes, img_metas, device=device)\n cls_reg_targets = 
self.get_targets(x_i, valid_flag_list, y_loc_img, img_metas,\n y_loc_img_ignore_list=y_loc_img_ignore,\n y_cls_img_list=y_cls_img,\n label_channels=1, unmap_outputs=False)\n if cls_reg_targets is None:\n return None\n (y_cls, label_weights_list, y_loc, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets\n num_images = len(img_metas)\n all_y_f_1 = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, -1, self.cls_out_channels) for s in y_f[0]], 1)\n all_y_f_2 = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, -1, self.cls_out_channels) for s in y_f[1]], 1)\n all_y_cls = torch.cat(y_cls, -1).view(num_images, -1)\n all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1)\n all_y_f_r = torch.cat([b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in y_f_r], -2)\n all_y_loc = torch.cat(y_loc, -2).view(num_images, -1, 4)\n all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4)\n # concat all level anchors to a single tensor\n all_x_i = []\n for i in range(num_images):\n all_x_i.append(torch.cat(x_i[i]))\n all_y_head_cls = torch.cat([s for s in y_head_cls], 1)\n l_wave_dis_minus, l_det_loc = multi_apply(self.l_wave_dis_minus, all_y_f_1, all_y_f_2, all_y_head_cls)\n if np.array([y_loc_img[i].sum() for i in range(len(y_loc_img))]).sum() < 0:\n l_det_cls = [torch.tensor(0.0, device=device)]\n l_det_loc = [torch.tensor(0.0, device=device)]\n for (i, value) in enumerate(l_det_loc):\n if value.isnan():\n l_det_loc[i].data = torch.tensor(0.0, device=device)\n l_imgcls = torch.tensor(0.0, device=device)\n else:\n l_det_cls1, l_det_loc1 = multi_apply(self.l_det, all_y_f_1, all_y_f_r, all_x_i,\n all_y_cls, all_label_weights, all_y_loc, all_bbox_weights,\n num_total_samples=num_total_pos)\n l_det_cls2, l_det_loc2 = multi_apply(self.l_det, all_y_f_2, all_y_f_r, all_x_i,\n all_y_cls, all_label_weights, all_y_loc, all_bbox_weights,\n num_total_samples=num_total_pos)\n l_det_cls = list(map(lambda m, n: (m + n) / 2, l_det_cls1, l_det_cls2))\n l_det_loc = list(map(lambda m, n: (m + n) / 2, l_det_loc1, l_det_loc2))\n l_wave_dis_minus = list(map(lambda m: m * 0.0, l_wave_dis_minus))\n # compute mil loss\n y_head_cls_1level, y_cls_1level = self.get_img_gtlabel_score(y_cls_img, y_head_cls)\n l_imgcls = self.l_imgcls(y_head_cls_1level, y_cls_1level)\n return dict(l_det_cls=l_det_cls, l_det_loc=l_det_loc, l_wave_dis_minus=l_wave_dis_minus, l_imgcls=[l_imgcls])\n" ]
[ [ "torch.cat", "torch.nn.ModuleList", "torch.nn.functional.cross_entropy", "torch.nn.Conv2d", "torch.tensor", "torch.nn.BCELoss", "torch.isfinite" ] ]
yassineAlouini/TPU-Exploration
[ "d252d5953fc12ce1bbad4493f344c66027160d2c" ]
[ "notebooks/pytorch_xla_resnet18_cifar10_training.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Works for Colab only it seems? No, works with the correct version of Python.\n\n\n\"\"\"PyTorch/XLA ResNet18/CIFAR10 Training\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/github/pytorch/xla/blob/master/contrib/colab/resnet18-training.ipynb\n\n## PyTorch/XLA ResNet18/CIFAR10 (GPU or TPU)\n\n### [RUNME] Install Colab compatible PyTorch/XLA wheels and dependencies\n\"\"\"\n\n# !pip install cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.8.1-cp37-cp37m-linux_x86_64.whl\n\n\"\"\"Only run the below commented cell if you would like a nightly release\"\"\"\n\n# VERSION = \"nightly\" #@param [\"nightly\", \"20200516\"] # or YYYYMMDD format\n# !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py\n# !python pytorch-xla-env-setup.py --version $VERSION\n# import os \n# os.environ['LD_LIBRARY_PATH']='/usr/local/lib'\n# !echo $LD_LIBRARY_PATH\n\n# !sudo ln -s /usr/local/lib/libmkl_intel_lp64.so /usr/local/lib/libmkl_intel_lp64.so.1\n# !sudo ln -s /usr/local/lib/libmkl_intel_thread.so /usr/local/lib/libmkl_intel_thread.so.1\n# !sudo ln -s /usr/local/lib/libmkl_core.so /usr/local/lib/libmkl_core.so.1\n\n# !ldconfig\n# !ldd /usr/local/lib/python3.7/dist-packages/torch/lib/libtorch.so\n\n# PyTorch/XLA GPU Setup (only if GPU runtime)\nimport os\nif os.environ.get('COLAB_GPU', '0') == '1':\n os.environ['GPU_NUM_DEVICES'] = '1'\n os.environ['XLA_FLAGS'] = '--xla_gpu_cuda_data_dir=/usr/local/cuda/'\n\n\"\"\"### Define Parameters\n\n\n\"\"\"\n\n# Result Visualization Helper\nfrom matplotlib import pyplot as plt\n\nM, N = 4, 6\nRESULT_IMG_PATH = '/tmp/test_result.jpg'\nCIFAR10_LABELS = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck']\n\ndef plot_results(images, labels, preds):\n images, labels, preds = images[:M*N], labels[:M*N], preds[:M*N]\n inv_norm = transforms.Normalize(\n mean=(-0.4914/0.2023, -0.4822/0.1994, -0.4465/0.2010),\n std=(1/0.2023, 1/0.1994, 1/0.2010))\n\n num_images = images.shape[0]\n fig, axes = plt.subplots(M, N, figsize=(16, 9))\n fig.suptitle('Correct / Predicted Labels (Red text for incorrect ones)')\n\n for i, ax in enumerate(fig.axes):\n ax.axis('off')\n if i >= num_images:\n continue\n img, label, prediction = images[i], labels[i], preds[i]\n img = inv_norm(img)\n img = img.permute(1, 2, 0) # (C, M, N) -> (M, N, C)\n label, prediction = label.item(), prediction.item()\n if label == prediction:\n ax.set_title(u'\\u2713', color='blue', fontsize=22)\n else:\n ax.set_title(\n 'X {}/{}'.format(CIFAR10_LABELS[label],\n CIFAR10_LABELS[prediction]), color='red')\n ax.imshow(img)\n plt.savefig(RESULT_IMG_PATH, transparent=True)\n\n# Define Parameters\nFLAGS = {}\nFLAGS['data_dir'] = \"/tmp/cifar\"\nFLAGS['batch_size'] = 128\nFLAGS['num_workers'] = 4\nFLAGS['learning_rate'] = 0.02\nFLAGS['momentum'] = 0.9\nFLAGS['num_epochs'] = 20\nFLAGS['num_cores'] = 8 if os.environ.get('TPU_NAME', None) else 1\nFLAGS['log_steps'] = 20\nFLAGS['metrics_debug'] = False\n\nimport numpy as np\nimport os\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch_xla\nimport torch_xla.core.xla_model as xm\nimport torch_xla.debug.metrics as met\nimport torch_xla.distributed.parallel_loader as pl\nimport torch_xla.distributed.xla_multiprocessing as xmp\nimport torch_xla.utils.utils as xu\nimport torchvision\nfrom 
torchvision import datasets, transforms\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(\n in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n in_planes,\n self.expansion * planes,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(\n 3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512 * block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = torch.flatten(out, 1)\n out = self.linear(out)\n return F.log_softmax(out, dim=1)\n\n\ndef ResNet18():\n return ResNet(BasicBlock, [2, 2, 2, 2])\n\nSERIAL_EXEC = xmp.MpSerialExecutor()\n# Only instantiate model weights once in memory.\nWRAPPED_MODEL = xmp.MpModelWrapper(ResNet18())\n\ndef train_resnet18():\n torch.manual_seed(1)\n\n def get_dataset():\n norm = transforms.Normalize(\n mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010))\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n norm,\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n norm,\n ])\n train_dataset = datasets.CIFAR10(\n root=FLAGS['data_dir'],\n train=True,\n download=True,\n transform=transform_train)\n test_dataset = datasets.CIFAR10(\n root=FLAGS['data_dir'],\n train=False,\n download=True,\n transform=transform_test)\n \n return train_dataset, test_dataset\n \n # Using the serial executor avoids multiple processes\n # to download the same data.\n train_dataset, test_dataset = SERIAL_EXEC.run(get_dataset)\n\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=True)\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=FLAGS['batch_size'],\n sampler=train_sampler,\n num_workers=FLAGS['num_workers'],\n drop_last=True)\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=FLAGS['batch_size'],\n shuffle=False,\n 
num_workers=FLAGS['num_workers'],\n drop_last=True)\n\n # Scale learning rate to num cores\n learning_rate = FLAGS['learning_rate'] * xm.xrt_world_size()\n\n # Get loss function, optimizer, and model\n device = xm.xla_device()\n model = WRAPPED_MODEL.to(device)\n optimizer = optim.SGD(model.parameters(), lr=learning_rate,\n momentum=FLAGS['momentum'], weight_decay=5e-4)\n loss_fn = nn.NLLLoss()\n\n def train_loop_fn(loader):\n tracker = xm.RateTracker()\n model.train()\n for x, (data, target) in enumerate(loader):\n optimizer.zero_grad()\n output = model(data)\n loss = loss_fn(output, target)\n loss.backward()\n xm.optimizer_step(optimizer)\n tracker.add(FLAGS['batch_size'])\n if x % FLAGS['log_steps'] == 0:\n print('[xla:{}]({}) Loss={:.5f} Rate={:.2f} GlobalRate={:.2f} Time={}'.format(\n xm.get_ordinal(), x, loss.item(), tracker.rate(),\n tracker.global_rate(), time.asctime()), flush=True)\n\n def test_loop_fn(loader):\n total_samples = 0\n correct = 0\n model.eval()\n data, pred, target = None, None, None\n for data, target in loader:\n output = model(data)\n pred = output.max(1, keepdim=True)[1]\n correct += pred.eq(target.view_as(pred)).sum().item()\n total_samples += data.size()[0]\n\n accuracy = 100.0 * correct / total_samples\n print('[xla:{}] Accuracy={:.2f}%'.format(\n xm.get_ordinal(), accuracy), flush=True)\n return accuracy, data, pred, target\n\n # Train and eval loops\n accuracy = 0.0\n data, pred, target = None, None, None\n for epoch in range(1, FLAGS['num_epochs'] + 1):\n para_loader = pl.ParallelLoader(train_loader, [device])\n train_loop_fn(para_loader.per_device_loader(device))\n xm.master_print(\"Finished training epoch {}\".format(epoch))\n\n para_loader = pl.ParallelLoader(test_loader, [device])\n accuracy, data, pred, target = test_loop_fn(para_loader.per_device_loader(device))\n if FLAGS['metrics_debug']:\n xm.master_print(met.metrics_report(), flush=True)\n\n return accuracy, data, pred, target\n\n# Start training processes\ndef _mp_fn(rank, flags):\n global FLAGS\n FLAGS = flags\n torch.set_default_tensor_type('torch.FloatTensor')\n accuracy, data, pred, target = train_resnet18()\n if rank == 0:\n # Retrieve tensors that are on TPU core 0 and plot.\n plot_results(data.cpu(), pred.cpu(), target.cpu())\n\nxmp.spawn(_mp_fn, args=(FLAGS,), nprocs=FLAGS['num_cores'],\n start_method='fork')\n\n\"\"\"## Visualize Predictions\"\"\"\n\n# from google.colab.patches import cv2_imshow\nimport matplotlib.pylab as plt\nimport cv2\nimg = cv2.imread(RESULT_IMG_PATH, cv2.IMREAD_UNCHANGED)\nplt.imshow(img)\nplt.show()" ]
[ [ "torch.set_default_tensor_type", "torch.nn.NLLLoss", "matplotlib.pylab.show", "torch.nn.Sequential", "torch.nn.functional.log_softmax", "torch.manual_seed", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.flatten", "torch.nn.Linear", "torch.nn.functional.relu", "matplotlib.pylab.imshow", "matplotlib.pylab.subplots", "torch.nn.BatchNorm2d", "matplotlib.pylab.savefig" ] ]
jianhanlim/ipr-imagecaptioning
[ "428204dd2b22cacd59ff98950f8fc7520101a427" ]
[ "addition_bi/base_model.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport cPickle as pickle\nimport copy\nimport json\nfrom tqdm import tqdm\n\nfrom utils.nn import NN\nfrom utils.coco.coco import COCO\nfrom utils.coco.pycocoevalcap.eval import COCOEvalCap\nfrom utils.misc import ImageLoader, CaptionData, TopN, generate_binary_key\nfrom datetime import datetime as dt\nfrom concurrent.futures import ThreadPoolExecutor\nimport logging\n\nfrom tensorflow.contrib.model_pruning.python import pruning\n\n\nclass BaseModel(object):\n def __init__(self, config):\n self.config = config\n self.is_train = True if config.phase == 'train' else False\n self.train_cnn = self.is_train and config.train_cnn\n self.image_shape = [224, 224, 3]\n if self.config.cnn == 'inceptionv4':\n self.image_shape = [299, 299, 3]\n self.nn = NN(config)\n self.global_step = tf.Variable(0,name = 'global_step',trainable = False)\n self.saver_cnn = None\n self.saver = None\n self.build()\n self.saveExecutor = ThreadPoolExecutor(max_workers=1)\n\n def build(self):\n raise NotImplementedError()\n \n def prune(self, sess, train_data):\n \"\"\" Prune the model. \"\"\"\n print(\"Pruning the model...\")\n config = self.config\n \n # Get, Print, and Edit Pruning Hyperparameters\n pruning_hparams = pruning.get_pruning_hparams()\n print(\"Pruning Hyperparameters:\", pruning_hparams)\n\n # Change hyperparameters to meet our needs\n pruning_hparams.begin_pruning_step = 0\n pruning_hparams.end_pruning_step = 250\n pruning_hparams.pruning_frequency = 1\n pruning_hparams.sparsity_function_end_step = 250\n pruning_hparams.target_sparsity = .5\n\n # Create a pruning object using the pruning specification, sparsity seems to have priority over the hparam\n p = pruning.Pruning(pruning_hparams, global_step=self.global_step, sparsity=.5)\n prune_op = p.conditional_mask_update_op()\n \n sess.run(tf.global_variables_initializer())\n sess.run(self.zero_ops)\n tf.get_default_graph().finalize()\n if not os.path.exists(config.summary_dir):\n os.mkdir(config.summary_dir)\n train_writer = tf.summary.FileWriter(config.summary_dir,\n sess.graph)\n\n # initialize a real key\n key = generate_binary_key(config.num_lstm_units, config.key, config.seed)\n signkey = generate_binary_key(config.num_lstm_units, config.signkey, -1)\n\n for _ in tqdm(list(range(config.num_epochs)), desc='epoch'):\n for _ in tqdm(list(range(train_data.num_batches)), desc='batch'):\n before_time = dt.now()\n batch = train_data.next_batch()\n after_time = dt.now()\n image_files, sentences, masks, images = batch\n\n feed_dict = {self.images: images,\n self.sentences: sentences,\n self.masks: masks,\n self.key: key,\n self.signkey: signkey}\n\n _, global_step, summary = sess.run([self.accum_ops, self.increment_global_step, self.summary], feed_dict=feed_dict)\n\n if (global_step + 1) % config.accumulate_grads == 0:\n sess.run(prune_op)\n sess.run(self.opt_op)\n sess.run(self.zero_ops)\n logging.debug(\"Weight sparsities:\", sess.run(tf.contrib.model_pruning.get_weight_sparsity()))\n\n train_writer.add_summary(summary, global_step)\n\n if (global_step + 1) % config.save_period == 0:\n self.save(sess)\n\n\n search_time = dt.now()\n logging.debug(\"Load Images Time: {}\".format((after_time - before_time).total_seconds()))\n logging.debug(\"Search Time: {}\".format((search_time - after_time).total_seconds()))\n\n train_data.reset()\n train_data.start_executor()\n\n self.save(sess)\n train_writer.close()\n print(\"Final sparsity by layer (should be 
0)\", sess.run(tf.contrib.model_pruning.get_weight_sparsity()))\n print(\"Pruning complete.\")\n \n def train(self, sess, train_data):\n \"\"\" Train the model. \"\"\"\n print(\"Training the model...\")\n config = self.config\n\n if not os.path.exists(config.summary_dir):\n os.mkdir(config.summary_dir)\n train_writer = tf.summary.FileWriter(config.summary_dir,\n sess.graph)\n\n # initialize a real key\n key = generate_binary_key(config.num_lstm_units, config.key, config.seed)\n signkey = generate_binary_key(config.num_lstm_units, config.signkey, -1)\n\n for _ in tqdm(list(range(config.num_epochs)), desc='epoch'):\n for _ in tqdm(list(range(train_data.num_batches)), desc='batch'):\n before_time = dt.now()\n batch = train_data.next_batch()\n after_time = dt.now()\n image_files, sentences, masks, images = batch\n\n feed_dict = {self.images: images,\n self.sentences: sentences,\n self.masks: masks,\n self.key: key,\n self.signkey: signkey}\n\n _, global_step, summary = sess.run([self.accum_ops, self.increment_global_step, self.summary], feed_dict=feed_dict)\n\n if (global_step + 1) % config.accumulate_grads == 0:\n sess.run(self.opt_op)\n sess.run(self.zero_ops)\n\n train_writer.add_summary(summary, global_step)\n\n if (global_step + 1) % config.save_period == 0:\n self.save(sess)\n\n search_time = dt.now()\n logging.debug(\"Load Images Time: {}\".format((after_time - before_time).total_seconds()))\n logging.debug(\"Search Time: {}\".format((search_time - after_time).total_seconds()))\n\n train_data.reset()\n train_data.start_executor()\n\n self.save(sess)\n train_writer.close()\n print(\"Training complete.\")\n\n def eval(self, sess, eval_gt_coco, eval_data, vocabulary):\n \"\"\" Evaluate the model using the COCO val2014 data. \"\"\"\n print(\"Evaluating the model ...\")\n config = self.config\n\n results = []\n if not os.path.exists(config.eval_result_dir):\n os.mkdir(config.eval_result_dir)\n \n signkey_accuracy_final = 0\n # Generate the captions for the images\n idx = 0\n for k in tqdm(list(range(eval_data.num_batches)), desc='batch'):\n before_time = dt.now()\n batch = eval_data.next_batch()\n after_time = dt.now()\n caption_data, signkey_accuracy = self.beam_search(sess, batch, vocabulary)\n signkey_accuracy_final += signkey_accuracy\n \n search_time = dt.now()\n print(\"Load Images Time: {}\".format((after_time - before_time).total_seconds()))\n print(\"Search Time: {}\".format((search_time - after_time).total_seconds()))\n\n fake_cnt = 0 if k<eval_data.num_batches-1 \\\n else eval_data.fake_count\n for l in range(eval_data.batch_size-fake_cnt):\n word_idxs = caption_data[l][0].sentence\n score = caption_data[l][0].score\n caption = vocabulary.get_sentence(word_idxs)\n results.append({'image_id': eval_data.image_ids[idx],\n 'caption': caption})\n idx += 1\n\n # Save the result in an image file, if requested\n if config.save_eval_result_as_image:\n image_file = batch[0][l]\n image_name = image_file.split(os.sep)[-1]\n image_name = os.path.splitext(image_name)[0]\n img = plt.imread(image_file)\n plt.imshow(img)\n plt.axis('off')\n plt.title(caption)\n plt.savefig(os.path.join(config.eval_result_dir,\n image_name+'_result.jpg'))\n\n fp = open(config.eval_result_file, 'wb')\n json.dump(results, fp)\n fp.close()\n\n # Evaluate these captions\n eval_result_coco = eval_gt_coco.loadRes(config.eval_result_file)\n scorer = COCOEvalCap(eval_gt_coco, eval_result_coco)\n scorer.evaluate()\n print(\"Evaluation complete.\")\n print(\"Sign Key Accuracy: \")\n 
print(signkey_accuracy_final/eval_data.num_batches)\n\n def test(self, sess, test_data, vocabulary):\n \"\"\" Test the model using any given images. \"\"\"\n print(\"Testing the model ...\")\n config = self.config\n\n if not os.path.exists(config.test_result_dir):\n os.mkdir(config.test_result_dir)\n\n captions = []\n scores = []\n\n # Generate the captions for the images\n for k in tqdm(list(range(test_data.num_batches)), desc='path'):\n batch = test_data.next_batch()\n caption_data, _ = self.beam_search(sess, batch, vocabulary)\n\n fake_cnt = 0 if k<test_data.num_batches-1 \\\n else test_data.fake_count\n for l in range(test_data.batch_size-fake_cnt):\n word_idxs = caption_data[l][0].sentence\n score = caption_data[l][0].score\n caption = vocabulary.get_sentence(word_idxs)\n captions.append(caption)\n scores.append(score)\n\n # Save the result in an image file\n # Save the result in an image file, if requested\n if config.save_eval_result_as_image:\n image_file = batch[0][l]\n image_name = image_file.split(os.sep)[-1]\n image_name = os.path.splitext(image_name)[0]\n img = plt.imread(image_file)\n plt.imshow(img)\n plt.axis('off')\n plt.title(caption)\n plt.savefig(os.path.join(config.test_result_dir,\n image_name+'_result.jpg'))\n\n # Save the captions to a file\n results = pd.DataFrame({'image_files':test_data.image_files,\n 'caption':captions,\n 'prob':scores})\n results.to_csv(config.test_result_file)\n print(\"Testing complete.\")\n\n def beam_search(self, sess, batch, vocabulary):\n \"\"\"Use beam search to generate the captions for a batch of images.\"\"\"\n # Feed in the images to get the contexts and the initial LSTM states\n config = self.config\n image_files, images = batch\n #images = self.image_loader.load_images(image_files)\n\n # initialize a key\n key = generate_binary_key(config.num_lstm_units, config.key, config.seed)\n \n # attack key\n if config.attack_key > 0:\n different = int(config.num_lstm_units * config.attack_key)\n key[:different] = key[:different] * -1\n \n # attack key - prune\n if config.attack_prune > 0:\n different = int(config.num_lstm_units * config.attack_prune)\n key[:different] = 0\n \n signkey = generate_binary_key(config.num_lstm_units, config.signkey, -1)\n signkey_accuracy = 0\n \n contexts, initial_memory, initial_output = sess.run(\n [self.conv_feats, self.initial_memory, self.initial_output],\n feed_dict = {self.images: images})\n\n partial_caption_data = []\n complete_caption_data = []\n for k in range(config.batch_size):\n initial_beam = CaptionData(sentence = [],\n memory = initial_memory[k],\n output = initial_output[k],\n score = 1.0)\n partial_caption_data.append(TopN(config.beam_size))\n partial_caption_data[-1].push(initial_beam)\n complete_caption_data.append(TopN(config.beam_size))\n\n # Run beam search\n for idx in range(config.max_caption_length):\n partial_caption_data_lists = []\n for k in range(config.batch_size):\n data = partial_caption_data[k].extract()\n partial_caption_data_lists.append(data)\n partial_caption_data[k].reset()\n\n num_steps = 1 if idx == 0 else config.beam_size\n for b in range(num_steps):\n if idx == 0:\n last_word = np.zeros((config.batch_size), np.int32)\n else:\n last_word = np.array([pcl[b].sentence[-1]\n for pcl in partial_caption_data_lists],\n np.int32)\n\n last_memory = np.array([pcl[b].memory\n for pcl in partial_caption_data_lists],\n np.float32)\n last_output = np.array([pcl[b].output\n for pcl in partial_caption_data_lists],\n np.float32)\n\n memory, output, scores = sess.run(\n [self.memory, 
self.output, self.probs],\n feed_dict = {self.contexts: contexts,\n self.last_word: last_word,\n self.last_memory: last_memory,\n self.last_output: last_output,\n self.key: key})\n \n # attack sign\n if config.attack_sign > 0:\n different = int(config.num_lstm_units * config.attack_sign)\n output[:,config.num_lstm_units-different:] = output[:,config.num_lstm_units-different:] * -1\n \n if idx == 0 and b == 0:\n signkey_accuracy = np.sum((np.sign(output) == signkey).astype(float)) \\\n / (config.batch_size * config.num_lstm_units)\n \n # Find the beam_size most probable next words\n for k in range(config.batch_size):\n caption_data = partial_caption_data_lists[k][b]\n words_and_scores = list(enumerate(scores[k]))\n words_and_scores.sort(key=lambda x: -x[1])\n words_and_scores = words_and_scores[0:config.beam_size+1]\n\n # Append each of these words to the current partial caption\n for w, s in words_and_scores:\n sentence = caption_data.sentence + [w]\n score = caption_data.score * s\n beam = CaptionData(sentence,\n memory[k],\n output[k],\n score)\n if vocabulary.words[w] == '.':\n complete_caption_data[k].push(beam)\n else:\n partial_caption_data[k].push(beam)\n\n results = []\n for k in range(config.batch_size):\n if complete_caption_data[k].size() == 0:\n complete_caption_data[k] = partial_caption_data[k]\n results.append(complete_caption_data[k].extract(sort=True))\n\n return results, signkey_accuracy\n\n def save(self, sess):\n config = self.config\n \"\"\" Save the model. \"\"\"\n print(\" Saving the model\")\n if config.save_mode == 'numpy':\n before_time = dt.now()\n config = self.config\n data = {v.name: v.eval() for v in tf.global_variables()}\n save_path = os.path.join(config.save_dir, str(self.global_step.eval()))\n after_time = dt.now()\n\n np.save(save_path, data)\n\n search_time = dt.now()\n print(\"Load Variable: {}\".format((after_time - before_time).total_seconds()))\n print(\"Save: {}\".format((search_time - after_time).total_seconds()))\n else:\n before_time = dt.now()\n save_path = os.path.join(config.save_dir, 'model')\n self.saver.save(sess, save_path, global_step=self.global_step.eval())\n after_time = dt.now()\n print(\"Saver: {}\".format((after_time - before_time).total_seconds()))\n\n info_file = open(os.path.join(config.save_dir, \"config.pickle\"), \"wb\")\n config_ = copy.copy(config)\n config_.global_step = self.global_step.eval()\n pickle.dump(config_, info_file)\n info_file.close()\n print(\"Model saved.\")\n\n def load(self, sess, model_file=None):\n \"\"\" Load the model. 
\"\"\"\n config = self.config\n\n if config.load_mode == 'numpy':\n if model_file is not None:\n save_path = model_file\n else:\n info_path = os.path.join(config.save_dir, \"config.pickle\")\n info_file = open(info_path, \"rb\")\n config = pickle.load(info_file)\n global_step = config.global_step\n info_file.close()\n save_path = os.path.join(config.save_dir,\n str(global_step)+\".npy\")\n\n print(\"Loading the model from %s...\" %save_path)\n data_dict = np.load(save_path).item()\n count = 0\n for v in tqdm(tf.global_variables()):\n if v.name in data_dict.keys():\n try:\n sess.run(v.assign(data_dict[v.name]))\n count += 1\n except Exception as e:\n print(e)\n print(v.name)\n print(\"%d tensors loaded.\" %count)\n else:\n #self.saver.restore(sess, model_file)\n self.optimistic_restore(sess, model_file)\n print(\"Model restored.\")\n\n def optimistic_restore(self, session, save_file):\n reader = tf.train.NewCheckpointReader(save_file)\n saved_shapes = reader.get_variable_to_shape_map()\n var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables() if var.name.split(':')[0] in saved_shapes])\n restore_vars = []\n name2var = dict(zip(map(lambda x:x.name.split(':')[0], tf.global_variables()), tf.global_variables()))\n with tf.variable_scope('', reuse=True):\n for var_name, saved_var_name in var_names:\n curr_var = name2var[saved_var_name]\n var_shape = curr_var.get_shape().as_list()\n if var_shape == saved_shapes[saved_var_name]:\n restore_vars.append(curr_var)\n print(\"number of parameters: {}\".format(len(restore_vars)))\n saver = tf.train.Saver(restore_vars)\n saver.restore(session, save_file)\n\n def load_cnn(self, session, data_path, ignore_missing=True):\n \"\"\" Load a pretrained CNN model. \"\"\"\n print(\"Loading the CNN from %s...\" %data_path)\n\n if self.config.cnn in ['vgg16', 'resnet50']:\n data_dict = np.load(data_path).item()\n count = 0\n for op_name in tqdm(data_dict):\n with tf.variable_scope(op_name, reuse = True):\n for param_name, data in data_dict[op_name].iteritems():\n try:\n var = tf.get_variable(param_name)\n session.run(var.assign(data))\n count += 1\n except ValueError:\n pass\n print(\"%d tensors loaded.\" %count)\n\n elif self.config.cnn in ['resnet101', 'inceptionv4']:\n # Restore tf checkpoint variables from disk.\n self.saver_cnn.restore(session, data_path)\n print(\"Model restored.\")\n\n else:\n print('Incorrect CNN is selected')\n exit(-1)\n" ]
[ [ "matplotlib.pyplot.imshow", "tensorflow.get_variable", "matplotlib.pyplot.imread", "tensorflow.global_variables", "pandas.DataFrame", "tensorflow.get_default_graph", "tensorflow.Variable", "numpy.save", "matplotlib.pyplot.axis", "tensorflow.train.Saver", "numpy.load", "numpy.zeros", "tensorflow.contrib.model_pruning.python.pruning.Pruning", "matplotlib.pyplot.title", "tensorflow.global_variables_initializer", "tensorflow.train.NewCheckpointReader", "tensorflow.contrib.model_pruning.python.pruning.get_pruning_hparams", "numpy.array", "tensorflow.summary.FileWriter", "numpy.sign", "tensorflow.variable_scope", "tensorflow.contrib.model_pruning.get_weight_sparsity" ] ]
MartinoMensio/allennlp
[ "9a6dce3997d17185650a5377c9a7a5af0f1ac241" ]
[ "allennlp/data/fields/text_field.py" ]
[ "\"\"\"\nA ``TextField`` represents a string of text, the kind that you might want to represent with\nstandard word vectors, or pass through an LSTM.\n\"\"\"\nfrom typing import Dict, List, Optional\nimport textwrap\n\nfrom overrides import overrides\nfrom spacy.tokens import Token as SpacyToken\nimport torch\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.data.fields.sequence_field import SequenceField\nfrom allennlp.data.tokenizers.token import Token\nfrom allennlp.data.token_indexers.token_indexer import TokenIndexer, TokenType\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.nn import util\n\nTokenList = List[TokenType] # pylint: disable=invalid-name\n\n\nclass TextField(SequenceField[Dict[str, torch.Tensor]]):\n \"\"\"\n This ``Field`` represents a list of string tokens. Before constructing this object, you need\n to tokenize raw strings using a :class:`~allennlp.data.tokenizers.tokenizer.Tokenizer`.\n\n Because string tokens can be represented as indexed arrays in a number of ways, we also take a\n dictionary of :class:`~allennlp.data.token_indexers.token_indexer.TokenIndexer`\n objects that will be used to convert the tokens into indices.\n Each ``TokenIndexer`` could represent each token as a single ID, or a list of character IDs, or\n something else.\n\n This field will get converted into a dictionary of arrays, one for each ``TokenIndexer``. A\n ``SingleIdTokenIndexer`` produces an array of shape (num_tokens,), while a\n ``TokenCharactersIndexer`` produces an array of shape (num_tokens, num_characters).\n \"\"\"\n def __init__(self, tokens: List[Token], token_indexers: Dict[str, TokenIndexer]) -> None:\n self.tokens = tokens\n self._token_indexers = token_indexers\n self._indexed_tokens: Optional[Dict[str, TokenList]] = None\n self._indexer_name_to_indexed_token: Optional[Dict[str, List[str]]] = None\n\n if not all([isinstance(x, (Token, SpacyToken)) for x in tokens]):\n raise ConfigurationError(\"TextFields must be passed Tokens. \"\n \"Found: {} with types {}.\".format(tokens, [type(x) for x in tokens]))\n\n @overrides\n def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):\n for indexer in self._token_indexers.values():\n for token in self.tokens:\n indexer.count_vocab_items(token, counter)\n\n @overrides\n def index(self, vocab: Vocabulary):\n token_arrays: Dict[str, TokenList] = {}\n indexer_name_to_indexed_token: Dict[str, List[str]] = {}\n for indexer_name, indexer in self._token_indexers.items():\n token_indices = indexer.tokens_to_indices(self.tokens, vocab, indexer_name)\n token_arrays.update(token_indices)\n indexer_name_to_indexed_token[indexer_name] = list(token_indices.keys())\n self._indexed_tokens = token_arrays\n self._indexer_name_to_indexed_token = indexer_name_to_indexed_token\n\n @overrides\n def get_padding_lengths(self) -> Dict[str, int]:\n \"\"\"\n The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by\n (potentially) several ``TokenIndexers``. This method gets the max length (over tokens)\n associated with each of these arrays.\n \"\"\"\n # Our basic outline: we will iterate over `TokenIndexers`, and aggregate lengths over tokens\n # for each indexer separately. 
Then we will combine the results for each indexer into a single\n # dictionary, resolving any (unlikely) key conflicts by taking a max.\n lengths = []\n if self._indexed_tokens is None:\n raise ConfigurationError(\"You must call .index(vocabulary) on a \"\n \"field before determining padding lengths.\")\n\n # Each indexer can return a different sequence length, and for indexers that return\n # multiple arrays each can have a different length. We'll keep track of them here.\n for indexer_name, indexer in self._token_indexers.items():\n indexer_lengths = {}\n\n for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]:\n # This is a list of dicts, one for each token in the field.\n token_lengths = [indexer.get_padding_lengths(token)\n for token in self._indexed_tokens[indexed_tokens_key]]\n if not token_lengths:\n # This is a padding edge case and occurs when we want to pad a ListField of\n # TextFields. In order to pad the list field, we need to be able to have an\n # _empty_ TextField, but if this is the case, token_lengths will be an empty\n # list, so we add the default empty padding dictionary to the list instead.\n token_lengths = [{}]\n # Iterate over the keys and find the maximum token length.\n # It's fine to iterate over the keys of the first token since all tokens have the same keys.\n for key in token_lengths[0]:\n indexer_lengths[key] = max(x[key] if key in x else 0 for x in token_lengths)\n lengths.append(indexer_lengths)\n\n indexer_sequence_lengths = {key: len(val) for key, val in self._indexed_tokens.items()}\n # Get the padding lengths for sequence lengths.\n if len(set(indexer_sequence_lengths.values())) == 1:\n # This is the default case where all indexers return the same length.\n # Keep the existing 'num_tokens' key for backward compatibility with existing config files.\n padding_lengths = {'num_tokens': list(indexer_sequence_lengths.values())[0]}\n else:\n # The indexers return different lengths.\n padding_lengths = indexer_sequence_lengths\n\n # Get all keys which have been used for padding for each indexer and take the max if there are duplicates.\n padding_keys = {key for d in lengths for key in d.keys()}\n for padding_key in padding_keys:\n padding_lengths[padding_key] = max(x[padding_key] if padding_key in x else 0 for x in lengths)\n return padding_lengths\n\n @overrides\n def sequence_length(self) -> int:\n return len(self.tokens)\n\n @overrides\n def as_tensor(self,\n padding_lengths: Dict[str, int],\n cuda_device: int = -1) -> Dict[str, torch.Tensor]:\n tensors = {}\n num_tokens = padding_lengths.get('num_tokens')\n for indexer_name, indexer in self._token_indexers.items():\n if num_tokens is None:\n # The indexers return different lengths.\n # Get the desired_num_tokens for this indexer.\n desired_num_tokens = {\n indexed_tokens_key: padding_lengths[indexed_tokens_key]\n for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]\n }\n else:\n desired_num_tokens = {indexer_name: num_tokens}\n\n indices_to_pad = {indexed_tokens_key: self._indexed_tokens[indexed_tokens_key]\n for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]}\n padded_array = indexer.pad_token_sequence(indices_to_pad,\n desired_num_tokens, padding_lengths)\n # We use the key of the indexer to recognise what the tensor corresponds to within the\n # field (i.e. 
the result of word indexing, or the result of character indexing, for\n # example).\n # TODO(mattg): we might someday have a TokenIndexer that needs to use something other\n # than a LongTensor here, and it's not clear how to signal that. Maybe we'll need to\n # add a class method to TokenIndexer to tell us the type? But we can worry about that\n # when there's a compelling use case for it.\n indexer_tensors = {key: torch.LongTensor(array) for key, array in padded_array.items()}\n if cuda_device > -1:\n for key in indexer_tensors.keys():\n indexer_tensors[key] = indexer_tensors[key].cuda(cuda_device)\n tensors.update(indexer_tensors)\n return tensors\n\n @overrides\n def empty_field(self):\n # pylint: disable=protected-access\n text_field = TextField([], self._token_indexers)\n text_field._indexed_tokens = {}\n text_field._indexer_name_to_indexed_token = {}\n for indexer_name, indexer in self._token_indexers.items():\n array_keys = indexer.get_keys(indexer_name)\n for key in array_keys:\n text_field._indexed_tokens[key] = []\n text_field._indexer_name_to_indexed_token[indexer_name] = array_keys\n return text_field\n\n @overrides\n def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:\n # pylint: disable=no-self-use\n # This is creating a dict of {token_indexer_key: batch_tensor} for each token indexer used\n # to index this field.\n return util.batch_tensor_dicts(tensor_list)\n\n def __str__(self) -> str:\n indexers = {name: indexer.__class__.__name__ for name, indexer in self._token_indexers.items()}\n\n # Double tab to indent under the header.\n formatted_text = \"\".join([\"\\t\\t\" + text + \"\\n\"\n for text in textwrap.wrap(repr(self.tokens), 100)])\n return f\"TextField of length {self.sequence_length()} with \" \\\n f\"text: \\n {formatted_text} \\t\\tand TokenIndexers : {indexers}\"\n" ]
[ [ "torch.LongTensor" ] ]
jturner65/Getup-DartEnv
[ "a952a98d4c8a39510f70db5eaf1f2386350a828a" ]
[ "gym/envs/dart/reacher.py" ]
[ "# This environment is created by Karen Liu ([email protected])\r\n\r\nimport numpy as np\r\nfrom gym import utils\r\nfrom gym.envs.dart import dart_env\r\n\r\nclass DartReacherEnv(dart_env.DartEnv, utils.EzPickle):\r\n def __init__(self):\r\n self.target = np.array([0.8, -0.6, 0.6])\r\n self.action_scale = np.array([10, 10, 10, 10, 10])\r\n self.control_bounds = np.array([[1.0, 1.0, 1.0, 1.0, 1.0],[-1.0, -1.0, -1.0, -1.0, -1.0]])\r\n dart_env.DartEnv.__init__(self, 'reacher.skel', 4, 21, self.control_bounds)\r\n utils.EzPickle.__init__(self)\r\n\r\n def _step(self, a):\r\n clamped_control = np.array(a)\r\n for i in range(len(clamped_control)):\r\n if clamped_control[i] > self.control_bounds[0][i]:\r\n clamped_control[i] = self.control_bounds[0][i]\r\n if clamped_control[i] < self.control_bounds[1][i]:\r\n clamped_control[i] = self.control_bounds[1][i]\r\n tau = np.multiply(clamped_control, self.action_scale)\r\n\r\n fingertip = np.array([0.0, -0.25, 0.0])\r\n vec = self.robot_skeleton.bodynodes[2].to_world(fingertip) - self.target\r\n reward_dist = - np.linalg.norm(vec)\r\n reward_ctrl = - np.square(tau).sum() * 0.001\r\n alive_bonus = 0\r\n reward = reward_dist + reward_ctrl + alive_bonus\r\n \r\n self.do_simulation(tau, self.frame_skip)\r\n ob = self._get_obs()\r\n\r\n s = self.state_vector()\r\n\r\n done = not (np.isfinite(s).all() and (-reward_dist > 0.1))\r\n\r\n\r\n return ob, reward, done, {}\r\n\r\n def _get_obs(self):\r\n theta = self.robot_skeleton.q\r\n fingertip = np.array([0.0, -0.25, 0.0])\r\n vec = self.robot_skeleton.bodynodes[2].to_world(fingertip) - self.target\r\n return np.concatenate([np.cos(theta), np.sin(theta), self.target, self.robot_skeleton.dq, vec]).ravel()\r\n\r\n def reset_model(self):\r\n self.dart_world.reset()\r\n qpos = self.robot_skeleton.q + self.np_random.uniform(low=-.01, high=.01, size=self.robot_skeleton.ndofs)\r\n qvel = self.robot_skeleton.dq + self.np_random.uniform(low=-.01, high=.01, size=self.robot_skeleton.ndofs)\r\n self.set_state(qpos, qvel)\r\n while True:\r\n self.target = self.np_random.uniform(low=-1, high=1, size=3)\r\n if np.linalg.norm(self.target) < 1.5: break\r\n\r\n\r\n self.dart_world.skeletons[0].q=[0, 0, 0, self.target[0], self.target[1], self.target[2]]\r\n\r\n return self._get_obs()\r\n\r\n\r\n def viewer_setup(self):\r\n self._get_viewer().scene.tb.trans[2] = -3.5\r\n self._get_viewer().scene.tb._set_theta(0)\r\n self.track_skeleton_id = 0\r\n" ]
[ [ "numpy.square", "numpy.isfinite", "numpy.multiply", "numpy.linalg.norm", "numpy.cos", "numpy.sin", "numpy.array" ] ]
singaln/Roformer_Simlarity
[ "9e813c3a6a9517aa0c07738821e6b4a8e9b0aff3" ]
[ "utils.py" ]
[ "# coding=utf-8\n# @Time:2021/6/29:45\n# @author: SinGaln\n\n\"\"\"utils文件\"\"\"\nimport os\nimport torch\nimport random\nimport logging\nimport numpy as np\nfrom model import BertModelOutputs\nfrom transformers import BertConfig, BertTokenizer\nfrom sklearn.metrics import precision_score, recall_score, f1_score\n\nMODEL_CLASSES = {\n \"bert\":(BertConfig, BertModelOutputs, BertTokenizer)\n}\n\nMODEL_PATH_MAP = {\n \"bert\":\"./chinese_bert_wwm\"\n}\n\n# 获取label(完全匹配, 部分匹配, 不匹配)\ndef get_labels(args):\n return [label.strip() for label in\n open(os.path.join(args.data_dir, args.task, args.label_file), \"r\", encoding=\"utf-8\")]\n\n# 加载tokenizer\ndef load_tokenizer(args):\n return MODEL_CLASSES[args.model_type][2].from_pretrained(args.pretrained_model_path)\n\n# 设置logger\ndef init_logger():\n logging.basicConfig(format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%Y/%m/%d %H:%M:%S\",\n level=logging.INFO)\n\n# 设置种子\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(args.seed)\n\n# 计算precision_score, recall_score, f1_score\ndef get_metrics(pred_label, true_label):\n assert len(pred_label) == len(true_label)\n return {\n \"precision_score\":precision_score(true_label, pred_label, average=\"macro\"),\n \"recall_score\": recall_score(true_label, pred_label, average=\"macro\"),\n \"f1\": f1_score(true_label, pred_label, average=\"macro\")\n }\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "sklearn.metrics.precision_score", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "sklearn.metrics.f1_score", "sklearn.metrics.recall_score" ] ]
jiwoongim/IMsML
[ "2f5647794b433c1cb63f14ba1bcbf4919ca48b87" ]
[ "code/model_kit/conv_layer.py" ]
[ "import math\nimport tensorflow as tf\n\nfrom utils.utils import base_name\nfrom utils.nn_utils import init_weights, activation_fn, _get_variable\n\nclass Conv_Layer(object):\n def __init__(self, Xdim, kern_sz, filter_sz, atype, scope_name, \\\n stride=[2,2], padding='SAME', dformat='NHWC'):\n\n ''' \n M - Dimention of the output\n N - Number of data\n C - Number of channels\n W - Width\n H - Height\n dformat - NCHW | NHWC \n scope_name - Name of the layer '''\n\n self.Xdim = Xdim\n self.atype = atype\n self.padding = padding\n self.scope_name = scope_name\n self.kern_sz = kern_sz\n self.filter_sz = filter_sz\n\n if data_format == 'NCHW':\n self.stride = [1, 1, stride[0], stride[1]]\n self.kernel_shape = [ filter_sz[0], \\\n filter_sz[1], \\\n self.Xdim[1], \\\n kern_sz ]\n elif data_format == 'NHWC':\n self.stride = [1, stride[0], stride[1], 1]\n self.kernel_shape = [ filter_sz[0], \\\n filter_sz[1], \\\n self.Xdim[-1], \\\n kern_sz ]\n\n self._initialize_params()\n\n\n def __call__(self, X):\n\n if type(xs) != list: xs = [xs]\n assert len(xs) == len(self.Ws), \\\n \"Expected %d input vectors, got %d\" % (len(self.Ws), len(xs))\n\n with tf.variable_scope(self.scope): \n \n return self.fp(X)\n\n \n def _initialize_params(self, wtype='xavier'):\n '''Initialize parameters in the layer'''\n\n with tf.variable_scope(self.scope_name) as sc:\n\n self.Wx = tf.get_variable(\"Wx\", shape=self.kernel_shape, \\\n initializer=init_weights(wtype))\n\n self.hbias = tf.get_variable(\"hbias\", shape=[self.M], \\\n initializer=init_weights('zeros'))\n\n self.params = [self.Wx, self.hbias]\n\n\n def fp(self):\n\n conv = tf.nn.conv2d(x, self.Wx, self.stride, self.padding, data_format=data_format)\n logit = tf.nn.bias_add(conv, self.hbias, self.dformat)\n\n return activation_fn(logit, self.atype)\n\n\n def clone(self, scope_name=None):\n '''Duplicating the layer object'''\n\n if scope_name is None: scope_name = self.scope_name + \"_clone\"\n\n with tf.variable_scope(scope_name) as sc:\n for v in self.params:\n tf.get_variable(base_name(v), v.get_shape(),\n initializer=lambda x,dtype=tf.float32: \\\n v.initialized_value())\n sc.reuse_variables()\n return Conv_Layer(self.D, self.M, self.atype, scope_name=sc)\n\n\ndef conv2d(x, c, wc=0.00005):\n ksize = c['ksize']\n stride = c['stride']\n filters_out = c['conv_filters_out']\n\n filters_in = x.get_shape()[-1]\n shape = [ksize, 1, filters_in, filters_out]\n initializer = tf.truncated_normal_initializer(stddev=0.1)\n\n collections = [tf.GraphKeys.VARIABLES, 'resnet_variables']\n\n weights = _get_variable('weights',\n shape=shape,\n dtype='float',\n initializer=initializer,\n variable_name='resnet_variables',\n weight_decay=wc)\n\n return tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')\n\n\n" ]
[ [ "tensorflow.nn.bias_add", "tensorflow.variable_scope", "tensorflow.truncated_normal_initializer", "tensorflow.nn.conv2d" ] ]
CYENS/Continual_Learning_on_the_Edge_with_TensorFlow-Lite
[ "26d4551a0542cd779f699ee5e5ca674a6c316f59" ]
[ "Offline Experiments/lat_utils.py" ]
[ "# IMPORTANT NOTE - THIS CODE WAS NOT WRITTEN BY THE AUTHOR OF THE PAPER SUBJECT TO DOUBLE-BLIND REVIEW\n# CODE USED FROM PAPER: https://arxiv.org/abs/1912.01100\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n################################################################################\n# Copyright (c) 2020. Vincenzo Lomonaco, Gabriele Graffieti, Lorenzo #\n# Pellegrini, Davide Maltoni. All rights reserved. #\n# See the accompanying LICENSE file for terms. #\n# #\n# Date: 01-04-2020 #\n# Authors: Vincenzo Lomonaco, Gabriele Graffieti, Lorenzo Pellegrini, Davide #\n# Maltoni. #\n# E-mail: [email protected] #\n# Website: vincenzolomonaco.com #\n################################################################################\n\n\"\"\"\nGeneral useful functions for machine learning with Pytorch.\n\"\"\"\n\n# Python 2-3 compatible\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport torch\nfrom models.batch_renorm import BatchRenorm2D\n\ndef shuffle_in_unison(dataset, seed=None, in_place=False):\n \"\"\"\n Shuffle two (or more) list in unison. It's important to shuffle the images\n and the labels maintaining their correspondence.\n\n Args:\n dataset (dict): list of shuffle with the same order.\n seed (int): set of fixed Cifar parameters.\n in_place (bool): if we want to shuffle the same data or we want\n to return a new shuffled dataset.\n Returns:\n list: train and test sets composed of images and labels, if in_place\n is set to False.\n \"\"\"\n\n if seed:\n np.random.seed(seed)\n rng_state = np.random.get_state()\n new_dataset = []\n for x in dataset:\n if in_place:\n np.random.shuffle(x)\n else:\n new_dataset.append(np.random.permutation(x))\n np.random.set_state(rng_state)\n\n if not in_place:\n return new_dataset\n\n\ndef shuffle_in_unison_pytorch(dataset, seed=None):\n \"\"\"\n Shuffle two (or more) list of torch tensors in unison. It's important to\n shuffle the images and the labels maintaining their correspondence.\n \"\"\"\n\n shuffled_dataset = []\n perm = torch.randperm(dataset[0].size(0))\n if seed:\n torch.manual_seed(seed)\n for x in dataset:\n shuffled_dataset.append(x[perm])\n\n return shuffled_dataset\n\n\ndef pad_data(dataset, mb_size):\n \"\"\"\n Padding all the matrices contained in dataset to suit the mini-batch\n size. 
We assume they have the same shape.\n\n Args:\n dataset (str): sets to pad to reach a multile of mb_size.\n mb_size (int): mini-batch size.\n Returns:\n list: padded data sets\n int: number of iterations needed to cover the entire training set\n with mb_size mini-batches.\n \"\"\"\n\n num_set = len(dataset)\n x = dataset[0]\n # computing test_iters\n n_missing = x.shape[0] % mb_size\n if n_missing > 0:\n surplus = 1\n else:\n surplus = 0\n it = x.shape[0] // mb_size + surplus\n\n # padding data to fix batch dimentions\n if n_missing > 0:\n n_to_add = mb_size - n_missing\n for i, data in enumerate(dataset):\n dataset[i] = np.concatenate((data[:n_to_add], data))\n if num_set == 1:\n dataset = dataset[0]\n\n return dataset, it\n\n\ndef get_accuracy(model, criterion, batch_size, test_x, test_y, use_cuda=True,\n mask=None, preproc=None):\n \"\"\"\n Test accuracy given a model and the test data.\n\n Args:\n model (nn.Module): the pytorch model to test.\n criterion (func): loss function.\n batch_size (int): mini-batch size.\n test_x (tensor): test data.\n test_y (tensor): test labels.\n use_cuda (bool): if we want to use gpu or cpu.\n mask (bool): if we want to maks out some classes from the results.\n Returns:\n ave_loss (float): average loss across the test set.\n acc (float): average accuracy.\n accs (list): average accuracy for class.\n \"\"\"\n\n model.eval()\n\n correct_cnt, ave_loss = 0, 0\n model = maybe_cuda(model, use_cuda=use_cuda)\n\n num_class = int(np.max(test_y) + 1)\n hits_per_class = [0] * num_class\n pattern_per_class = [0] * num_class\n test_it = test_y.shape[0] // batch_size + 1\n\n test_x = torch.from_numpy(test_x).type(torch.FloatTensor)\n test_y = torch.from_numpy(test_y).type(torch.LongTensor)\n\n if preproc:\n test_x = preproc(test_x)\n\n for i in range(test_it):\n # indexing\n start = i * batch_size\n end = (i + 1) * batch_size\n\n x = maybe_cuda(test_x[start:end], use_cuda=use_cuda)\n y = maybe_cuda(test_y[start:end], use_cuda=use_cuda)\n\n logits = model(x)\n\n if mask is not None:\n # we put an high negative number so that after softmax that prob\n # will be zero and not contribute to the loss\n idx = (torch.FloatTensor(mask).cuda() == 0).nonzero()\n idx = idx.view(idx.size(0))\n logits[:, idx] = -10e10\n\n loss = criterion(logits, y)\n _, pred_label = torch.max(logits.data, 1)\n correct_cnt += (pred_label == y.data).sum()\n ave_loss += loss.item()\n\n for label in y.data:\n pattern_per_class[int(label)] += 1\n\n for i, pred in enumerate(pred_label):\n if pred == y.data[i]:\n hits_per_class[int(pred)] += 1\n\n accs = np.asarray(hits_per_class) / \\\n np.asarray(pattern_per_class).astype(float)\n\n acc = correct_cnt.item() * 1.0 / test_y.size(0)\n\n ave_loss /= test_y.size(0)\n\n return ave_loss, acc, accs\n\n\ndef preprocess_imgs(img_batch, scale=True, norm=True, channel_first=True):\n \"\"\"\n Here we get a batch of PIL imgs and we return them normalized as for\n the pytorch pre-trained models.\n\n Args:\n img_batch (tensor): batch of images.\n scale (bool): if we want to scale the images between 0 an 1.\n channel_first (bool): if the channel dimension is before of after\n the other dimensions (width and height).\n norm (bool): if we want to normalize them.\n Returns:\n tensor: pre-processed batch.\n\n \"\"\"\n\n if scale:\n # convert to float in [0, 1]\n img_batch = img_batch / 255\n\n if norm:\n # normalize\n img_batch[:, :, :, 0] = ((img_batch[:, :, :, 0] - 0.485) / 0.229)\n img_batch[:, :, :, 1] = ((img_batch[:, :, :, 1] - 0.456) / 0.224)\n img_batch[:, :, :, 2] 
= ((img_batch[:, :, :, 2] - 0.406) / 0.225)\n\n if channel_first:\n # Swap channel dimension to fit the caffe format (c, w, h)\n img_batch = np.transpose(img_batch, (0, 3, 1, 2))\n\n return img_batch\n\n\ndef maybe_cuda(what, use_cuda=True, **kw):\n \"\"\"\n Moves `what` to CUDA and returns it, if `use_cuda` and it's available.\n\n Args:\n what (object): any object to move to eventually gpu\n use_cuda (bool): if we want to use gpu or cpu.\n Returns\n object: the same object but eventually moved to gpu.\n \"\"\"\n\n if use_cuda is not False and torch.cuda.is_available():\n what = what.cuda()\n return what\n\n\ndef replace_bn_with_brn(\n m, name=\"\", momentum=0.1, r_d_max_inc_step=0.0001, r_max=1.0,\n d_max=0.0, max_r_max=3.0, max_d_max=5.0):\n for child_name, child in m.named_children():\n if isinstance(child, torch.nn.BatchNorm2d):\n setattr(m, child_name, BatchRenorm2D(\n child.num_features,\n gamma=child.weight,\n beta=child.bias,\n running_mean=child.running_mean,\n running_var=child.running_var,\n eps=child.eps,\n momentum=momentum,\n r_d_max_inc_step=r_d_max_inc_step,\n r_max=r_max,\n d_max=d_max,\n max_r_max=max_r_max,\n max_d_max=max_d_max\n ))\n else:\n replace_bn_with_brn(child, child_name, momentum, r_d_max_inc_step, r_max, d_max,\n max_r_max, max_d_max)\n\n\ndef change_brn_pars(\n m, name=\"\", momentum=0.1, r_d_max_inc_step=0.0001, r_max=1.0,\n d_max=0.0):\n for target_name, target_attr in m.named_children():\n if isinstance(target_attr, BatchRenorm2D):\n target_attr.momentum = torch.tensor(momentum, requires_grad=False)\n target_attr.r_max = torch.tensor(r_max, requires_grad=False)\n target_attr.d_max = torch.tensor(d_max, requires_grad=False)\n target_attr.r_d_max_inc_step = r_d_max_inc_step\n\n else:\n change_brn_pars(target_attr, target_name, momentum, r_d_max_inc_step, r_max, d_max)\n\n\ndef consolidate_weights(model, cur_clas):\n \"\"\" Mean-shift for the target layer weights\"\"\"\n\n with torch.no_grad():\n globavg = np.average(model.output.weight.detach()\n .cpu().numpy()[cur_clas])\n for c in cur_clas:\n w = model.output.weight.detach().cpu().numpy()[c]\n\n if c in cur_clas:\n new_w = w - globavg\n if c in model.saved_weights.keys():\n wpast_j = np.sqrt(model.past_j[c] / model.cur_j[c])\n model.saved_weights[c] = (model.saved_weights[c] * wpast_j\n + new_w) / (wpast_j + 1)\n else:\n model.saved_weights[c] = new_w\n\n\ndef set_consolidate_weights(model):\n \"\"\" set trained weights \"\"\"\n\n with torch.no_grad():\n for c, w in model.saved_weights.items():\n model.output.weight[c].copy_(\n torch.from_numpy(model.saved_weights[c])\n )\n\n\ndef reset_weights(model, cur_clas):\n \"\"\" reset weights\"\"\"\n\n with torch.no_grad():\n model.output.weight.fill_(0.0)\n for c, w in model.saved_weights.items():\n if c in cur_clas:\n model.output.weight[c].copy_(\n torch.from_numpy(model.saved_weights[c])\n )\n\n\ndef examples_per_class(train_y):\n count = {i:0 for i in range(50)}\n for y in train_y:\n count[int(y)] +=1\n\n return count\n\n\ndef set_brn_to_train(m, name=\"\"):\n for target_name, target_attr in m.named_children():\n if isinstance(target_attr, BatchRenorm2D):\n target_attr.train()\n else:\n set_brn_to_train(target_attr, target_name)\n\n\ndef set_brn_to_eval(m, name=\"\"):\n for target_name, target_attr in m.named_children():\n if isinstance(target_attr, BatchRenorm2D):\n target_attr.eval()\n else:\n set_brn_to_eval(target_attr, target_name)\n\n\ndef set_bn_to(m, name=\"\", phase=\"train\"):\n for target_name, target_attr in m.named_children():\n if 
isinstance(target_attr, torch.nn.BatchNorm2d):\n if phase == \"train\":\n target_attr.train()\n else:\n target_attr.eval()\n else:\n set_bn_to(target_attr, target_name, phase)\n\n\ndef freeze_up_to(model, freeze_below_layer, only_conv=False):\n for name, param in model.named_parameters():\n # tells whether we want to use gradients for a given parameter\n if only_conv:\n if \"conv\" in name:\n param.requires_grad = False\n print(\"Freezing parameter \" + name)\n else:\n param.requires_grad = False\n print(\"Freezing parameter \" + name)\n\n if name == freeze_below_layer:\n break\n\n\ndef create_syn_data(model):\n size = 0\n print('Creating Syn data for Optimal params and their Fisher info')\n\n for name, param in model.named_parameters():\n if \"bn\" not in name and \"output\" not in name:\n print(name, param.flatten().size(0))\n size += param.flatten().size(0)\n\n # The first array returned is a 2D array: the first component contains\n # the params at loss minimum, the second the parameter importance\n # The second array is a dictionary with the synData\n synData = {}\n synData['old_theta'] = torch.zeros(size, dtype=torch.float32)\n synData['new_theta'] = torch.zeros(size, dtype=torch.float32)\n synData['grad'] = torch.zeros(size, dtype=torch.float32)\n synData['trajectory'] = torch.zeros(size, dtype=torch.float32)\n synData['cum_trajectory'] = torch.zeros(size, dtype=torch.float32)\n\n return torch.zeros((2, size), dtype=torch.float32), synData\n\n\ndef extract_weights(model, target):\n\n with torch.no_grad():\n weights_vector= None\n for name, param in model.named_parameters():\n if \"bn\" not in name and \"output\" not in name:\n # print(name, param.flatten())\n if weights_vector is None:\n weights_vector = param.flatten()\n else:\n weights_vector = torch.cat(\n (weights_vector, param.flatten()), 0)\n\n target[...] = weights_vector.cpu()\n\n\ndef extract_grad(model, target):\n # Store the gradients into target\n with torch.no_grad():\n grad_vector= None\n for name, param in model.named_parameters():\n if \"bn\" not in name and \"output\" not in name:\n # print(name, param.flatten())\n if grad_vector is None:\n grad_vector = param.grad.flatten()\n else:\n grad_vector = torch.cat(\n (grad_vector, param.grad.flatten()), 0)\n\n target[...] 
= grad_vector.cpu()\n\n\ndef init_batch(net, ewcData, synData):\n # Keep initial weights\n extract_weights(net, ewcData[0])\n synData['trajectory'] = 0\n\n\ndef pre_update(net, synData):\n extract_weights(net, synData['old_theta'])\n\n\ndef post_update(net, synData):\n extract_weights(net, synData['new_theta'])\n extract_grad(net, synData['grad'])\n\n synData['trajectory'] += synData['grad'] * (\n synData['new_theta'] - synData['old_theta'])\n\n\ndef update_ewc_data(net, ewcData, synData, clip_to, c=0.0015):\n extract_weights(net, synData['new_theta'])\n eps = 0.0000001 # 0.001 in few task - 0.1 used in a more complex setup\n\n synData['cum_trajectory'] += c * synData['trajectory'] / (\n np.square(synData['new_theta'] - ewcData[0]) + eps)\n\n ewcData[1] = torch.empty_like(synData['cum_trajectory'])\\\n .copy_(-synData['cum_trajectory'])\n\n ewcData[1] = torch.clamp(ewcData[1], max=clip_to)\n # (except CWR)\n ewcData[0] = synData['new_theta'].clone().detach()\n\n\ndef compute_ewc_loss(model, ewcData, lambd=0):\n\n weights_vector = None\n for name, param in model.named_parameters():\n if \"bn\" not in name and \"output\" not in name:\n if weights_vector is None:\n weights_vector = param.flatten()\n else:\n weights_vector = torch.cat(\n (weights_vector, param.flatten()), 0)\n\n ewcData = maybe_cuda(ewcData, use_cuda=True)\n loss = (lambd / 2) * torch.dot(ewcData[1], (weights_vector - ewcData[0])**2)\n return loss\n\n\nif __name__ == \"__main__\":\n\n from models.mobilenet import MyMobilenetV1\n model = MyMobilenetV1(pretrained=True)\n replace_bn_with_brn(model, \"net\")\n\n ewcData, synData = create_syn_data(model)\n extract_weights(model, ewcData[0])\n\n\n" ]
[ [ "torch.max", "numpy.sqrt", "torch.zeros", "numpy.asarray", "numpy.concatenate", "numpy.max", "torch.no_grad", "torch.FloatTensor", "torch.cuda.is_available", "numpy.square", "torch.from_numpy", "torch.tensor", "numpy.random.set_state", "torch.dot", "torch.empty_like", "numpy.transpose", "numpy.random.get_state", "numpy.random.seed", "torch.manual_seed", "numpy.random.shuffle", "numpy.random.permutation", "torch.clamp" ] ]
sidharthkumar10500/motion_correction
[ "56fda2df3c464b25d8374f06e80849c5b1d92717" ]
[ "datagen.py" ]
[ "import numpy as np\nimport sigpy as sp\nimport torch\nfrom torch.utils.data import Dataset\nimport scipy\nfrom scipy import ndimage\n\"\"\"\nDataloader for the motion corrupt images \n\"\"\"\nclass MotionCorrupt(Dataset):\n def __init__(self, sample_list, num_slices,\n center_slice):\n self.sample_list = sample_list #list of all of those pytorch files\n self.num_slices = num_slices\n self.center_slice = center_slice\n\n def __len__(self):\n return len(self.sample_list) * self.num_slices\n\n def __getitem__(self, idx):\n\n # Convert to numerical\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n # Separate slice and sample\n sample_idx = idx // self.num_slices\n slice_idx = self.center_slice + np.mod(idx, self.num_slices) - self.num_slices // 2\n\n # Load MRI image\n gt_img = torch.load(self.sample_list[sample_idx])['gt_imgs'][...,slice_idx]\n motion_img = torch.load(self.sample_list[sample_idx])['moco_imgs'][...,slice_idx]\n # normalization by max value\n scale1 = np.max(abs(gt_img))\n scale2 = np.max(abs(motion_img))\n gt_img = gt_img/scale1 \n motion_img = motion_img/scale2\n sample = {'idx': idx,\n 'img_motion_corrupt': motion_img.astype(np.complex64),\n 'img_gt': gt_img.astype(np.complex64),\n 'data_range': 1.0\n }\n return sample" ]
[ [ "numpy.mod", "torch.is_tensor", "torch.load" ] ]
abugler/SMLFinalProject
[ "d89bbe959ca35ecee53660ac3b88fedb1d1df4d9" ]
[ "scripts/analyze.py" ]
[ "from runners.experiment_utils import load_experiment, save_experiment\nfrom src import logging\nfrom runners.utils import load_yaml, flatten\nfrom . import cmd, document_parser\nimport glob\nimport pandas as pd\nimport os\nimport copy\nimport numpy as np\nfrom argparse import ArgumentParser\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom gspread import WorksheetNotFound\n\ndef init_gsheet(credentials_path):\n \"\"\"\n Initializes the Google Sheets client given a path to credentials.\n \n Args:\n credentials_path (str): path to your Google credentials that are used to\n authorize the Google Sheets access.\n \n Returns:\n :class:`gspread.Client`: Google Sheets Client initialized with credentials.\n \"\"\"\n scope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n credentials_path, scope\n )\n gc = gspread.authorize(credentials)\n return gc\n\ndef upload_to_gsheet(results, config, exp=None, upload_source_metrics=False):\n \"\"\"\n Uploads the analysis to the Google Sheet, if possible.\n \n Args:\n results (:class:`pandas.DataFrame`): DataFrame containing all the results - output\n by :py:func:`scripts.analyze.analyze`.\n config (dict): Dictionary containing the entire experiment configuration.\n exp (:class:`comet_ml.Experiment`): Experiment given by comet.ml (optional).\n upload_source_metrics (bool): Uploads metrics for each source if True. Defaults to False.\n Can have interactions with the API limit on Google Sheets. If there are two many \n sources, then it will hit the limit and the script will break.\n \"\"\"\n credentials_path = os.getenv('PATH_TO_GOOGLE_CREDENTIALS', None)\n if not credentials_path:\n logging.info('PATH_TO_GOOGLE_CREDENTIALS not set, cannot proceed.')\n return None\n\n gc = init_gsheet(credentials_path)\n\n config = copy.deepcopy(config)\n sheet_name = config['info'].pop('spreadsheet_name', None)\n worksheet_name = config['info'].pop('worksheet_name', None)\n if not sheet_name or not worksheet_name:\n logging.info('Sheet name not specified, not uploading results to Google sheets')\n return None\n logging.info(f'Opening {sheet_name} with {worksheet_name}')\n sheet = gc.open(sheet_name)\n\n try:\n summary_worksheet = sheet.worksheet(worksheet_name)\n except WorksheetNotFound:\n logging.info(f'Worksheet not found, creating new sheet w/ name {worksheet_name}')\n template_worksheet = sheet.worksheet('Template')\n summary_worksheet = template_worksheet.duplicate(new_sheet_name=worksheet_name)\n\n datasets = np.unique(results['dataset'])\n metrics = ['SDR', 'SIR', 'SAR']\n notes = config['info'].pop('notes', 'No notes')\n\n def trunc(values, decs=0):\n return np.trunc(values*10**decs)/(10**decs)\n\n existing_rows = summary_worksheet.get_all_values()\n\n for dataset in datasets:\n logging.info(\n f\"Uploading results for {dataset} for {config['info']['experiment_key']} \"\n f\"@ {worksheet_name} in {summary_worksheet}\"\n )\n _results = results[results['dataset'] == dataset]\n dataset_paths = {\n key: config['datasets'][key]['folder'] \n for key in config['datasets']\n }\n experiment_key = config['info']['experiment_key']\n experiment_url = 'No link'\n if hasattr(exp, '_get_experiment_url'):\n experiment_url = exp._get_experiment_url()\n row_to_insert = [\n f'=HYPERLINK(\"{experiment_url}\", \"{experiment_key}\")',\n notes, \n dataset_paths.pop('train', 'No training'),\n dataset_paths.pop('val', 'No validation.'),\n dataset,\n 
np.unique(_results['file_name']).shape[0],\n ]\n\n row_exists = False\n row_index = 3\n for j, row in enumerate(existing_rows):\n compared_indices = [2, 3, 4]\n row = [row[0]] + [row[i] for i in compared_indices]\n inserted_row = (\n [config['info']['experiment_key']] + \n [str(row_to_insert[i]) for i in compared_indices] \n )\n if (row == inserted_row):\n logging.info(\"Row already exists\")\n row_exists = True\n row_index = j + 1\n break\n \n if not row_exists:\n summary_worksheet.insert_row(\n row_to_insert, index=3, value_input_option='USER_ENTERED'\n )\n overall_metrics = (\n [np.unique(_results['file_name']).shape[0]] + \n [trunc(x, decs=2) for x in _results.mean()[metrics]]\n )\n overall_index = summary_worksheet.find('Overall').col - 1\n for i, value in enumerate(overall_metrics):\n summary_worksheet.update_cell(row_index, overall_index + i, value)\n\n if upload_source_metrics:\n try:\n source_names = np.unique(_results['source_name']).tolist()\n for source_name in source_names:\n source_metrics = []\n try:\n source_name_cell = summary_worksheet.find(source_name)\n except Exception as e:\n source_name_cell = summary_worksheet.find('Source')\n source_name_cell.value = source_name\n summary_worksheet.update_cells([source_name_cell])\n for i, metric in enumerate(metrics):\n value = trunc(\n _results[_results['source_name'] == source_name].mean()[metric], \n decs=2\n )\n summary_worksheet.update_cell(\n row_index, source_name_cell.col + i, value\n )\n except:\n logging.info(\"Failure in uploading. Likely too many unique sources and we hit an API limit.\")\n pass\n\ndef analyze(path_to_yml_file, use_gsheet=False, upload_source_metrics=False):\n \"\"\"\n Analyzes the metrics for all the files that were evaluated in the experiment.\n \n Args:\n path_to_yml_file (str): Path to the yml file that defines the experiment. The\n corresponding results folder for the experiment is what will be analyzed and put\n into a Pandas dataframe.\n use_gsheet (bool, optional): Whether or not to upload to the Google Sheet. \n Defaults to False.\n upload_source_metrics (bool): Uploads metrics for each source if True. Defaults to False.\n Can have interactions with the API limit on Google Sheets. If there are two many \n sources, then it will hit the limit and the script will break.\n \n Returns:\n tuple: 3-element tuple containing\n\n - results (:class:`pandas.DataFrame`): DataFrame containing all of the results \n for every file evaluated in the experiment. The DataFrame also has every\n key in the experiment configuration in flattened format.\n \n For example, model_config_recurrent_stack_args_embedding_size is a column in the DataFrame.\n\n - config (*dict*): A dictionary containing the configuration of the experiment. 
\n\n - exp (:class:`comet_ml.Experiment`): An instantiated experiment if comet.ml is needed, otherwise it is None.\n \"\"\"\n config, exp, path_to_yml_file = load_experiment(path_to_yml_file)\n \n paths = glob.glob(\n os.path.join(config['info']['output_folder'], 'results', '**.yml'),\n recursive=True\n )\n\n results = []\n\n for _path in paths:\n data = load_yaml(_path, [])\n for _data in data:\n keys = sorted(list(_data.keys()))\n keys.remove('permutation')\n for key in keys:\n flattened = {\n 'experiment_key': config['info']['experiment_key'],\n 'notes': config['info']['notes'],\n 'file_name': _path,\n 'dataset': config['datasets']['test']['folder'],\n 'source_name': key.split('/')[-1],\n }\n\n flattened.update(flatten(config))\n\n for metric in _data[key]:\n flattened[metric] = np.mean(_data[key][metric])\n\n results.append(flattened)\n \n results = pd.DataFrame(results)\n\n logging.info(results.mean())\n logging.info(config['info']['experiment_key'])\n\n if use_gsheet:\n upload_to_gsheet(results, config, exp, upload_source_metrics)\n\n return results, config, exp\n\n@document_parser('analyze', 'scripts.analyze.analyze')\ndef build_parser():\n parser = ArgumentParser()\n parser.add_argument(\n \"-p\",\n \"--path_to_yml_file\",\n type=str,\n required=True,\n help=\"\"\"Path to the configuration for the experiment that is getting analyzed. The\n corresponding results folder for the experiment is what will be analyzed and put\n into a Pandas dataframe.\n \"\"\"\n )\n parser.add_argument(\n \"--use_gsheet\",\n action=\"store_true\",\n help=\"\"\"Results can be synced to a Google sheet after analysis if this is true.\n Defaults to false.\n \"\"\"\n )\n parser.add_argument(\n \"--upload_source_metrics\",\n action=\"store_true\",\n help=\"\"\"Uploads metrics for each source if True. Defaults to False.\n Can have interactions with the API limit on Google Sheets. If there are two many \n sources, then it will hit the limit and the script will break.\n \"\"\"\n )\n return parser\n\nif __name__ == '__main__':\n cmd(analyze, build_parser) " ]
[ [ "numpy.trunc", "numpy.mean", "pandas.DataFrame", "numpy.unique" ] ]
neuromodulation/py_neuromodulation
[ "1e8505d4324c9d2f37e5d56629a2ee418ea0b12b" ]
[ "py_neuromodulation/nm_coherence.py" ]
[ "from scipy import signal\nimport numpy as np\n\nclass NM_Coherence:\n \n def __init__(self, fs, window, fbands, fband_names, ch_1_name, ch_2_name, ch_1_idx, ch_2_idx, coh, icoh) -> None:\n self.fs = fs\n self.window = window\n self.Pxx = None\n self.Pyy = None\n self.Pxy = None\n self.f = None\n self.coh = coh\n self.icoh = icoh\n self.coh_val = None\n self.icoh_val = None\n self.ch_1 = ch_1_name\n self.ch_2 = ch_2_name\n self.ch_1_idx = ch_1_idx\n self.ch_2_idx = ch_2_idx\n self.fbands = fbands # list of lists, e.g. [[10, 15], [15, 20]]\n self.fband_names = fband_names\n pass\n\n def get_coh(self, features_, x, y):\n self.f, self.Pxx = signal.welch(x, self.fs, self.window)\n self.Pyy = signal.welch(y, self.fs, self.window)[1]\n self.Pxy = signal.csd(x, y, self.fs, self.window)[1]\n \n if self.coh is True:\n self.coh_val = np.abs(self.Pxy**2)/(self.Pxx*self.Pyy)\n if self.icoh is True:\n self.icoh_val = np.array(self.Pxy/(self.Pxx*self.Pyy)).imag\n \n for idx, fband in enumerate(self.fbands):\n if self.coh is True:\n feature_calc = np.mean(self.coh_val[np.bitwise_and(self.f>fband[0],\n self.f<fband[1])])\n feature_name = '_'.join([\"coh\", self.ch_1, \"to\", self.ch_2, self.fband_names[idx]])\n features_[feature_name] = feature_calc\n if self.icoh is True:\n feature_calc = np.mean(self.icoh_val[np.bitwise_and(self.f>fband[0],\n self.f<fband[1])])\n feature_name = '_'.join([\"icoh\", self.ch_1, \"to\", self.ch_2, self.fband_names[idx]])\n features_[feature_name] = feature_calc\n return features_\n" ]
[ [ "scipy.signal.csd", "numpy.abs", "numpy.bitwise_and", "numpy.array", "scipy.signal.welch" ] ]
zv/pandas
[ "2fad5d71053e31cae87f72b199a5699c51851110" ]
[ "pandas/tests/io/test_sql.py" ]
[ "\"\"\"SQL io tests\n\nThe SQL tests are broken down in different classes:\n\n- `PandasSQLTest`: base class with common methods for all test classes\n- Tests for the public API (only tests with sqlite3)\n - `_TestSQLApi` base class\n - `TestSQLApi`: test the public API with sqlalchemy engine\n - `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI\n connection\n- Tests for the different SQL flavors (flavor specific type conversions)\n - Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with\n common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy\n Connection object. The different tested flavors (sqlite3, MySQL,\n PostgreSQL) derive from the base class\n - Tests for the fallback mode (`TestSQLiteFallback`)\n\n\"\"\"\n\nimport csv\nfrom datetime import (\n date,\n datetime,\n time,\n)\nfrom io import StringIO\nimport sqlite3\nimport warnings\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import (\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n)\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n concat,\n date_range,\n isna,\n to_datetime,\n to_timedelta,\n)\nimport pandas._testing as tm\n\nimport pandas.io.sql as sql\nfrom pandas.io.sql import (\n SQLAlchemyEngine,\n _gt14,\n get_engine,\n read_sql_query,\n read_sql_table,\n)\n\ntry:\n import sqlalchemy\n from sqlalchemy import inspect\n from sqlalchemy.ext import declarative\n from sqlalchemy.orm import session as sa_session\n import sqlalchemy.schema\n import sqlalchemy.sql.sqltypes as sqltypes\n\n SQLALCHEMY_INSTALLED = True\nexcept ImportError:\n SQLALCHEMY_INSTALLED = False\n\nSQL_STRINGS = {\n \"create_iris\": {\n \"sqlite\": \"\"\"CREATE TABLE iris (\n \"SepalLength\" REAL,\n \"SepalWidth\" REAL,\n \"PetalLength\" REAL,\n \"PetalWidth\" REAL,\n \"Name\" TEXT\n )\"\"\",\n \"mysql\": \"\"\"CREATE TABLE iris (\n `SepalLength` DOUBLE,\n `SepalWidth` DOUBLE,\n `PetalLength` DOUBLE,\n `PetalWidth` DOUBLE,\n `Name` VARCHAR(200)\n )\"\"\",\n \"postgresql\": \"\"\"CREATE TABLE iris (\n \"SepalLength\" DOUBLE PRECISION,\n \"SepalWidth\" DOUBLE PRECISION,\n \"PetalLength\" DOUBLE PRECISION,\n \"PetalWidth\" DOUBLE PRECISION,\n \"Name\" VARCHAR(200)\n )\"\"\",\n },\n \"insert_iris\": {\n \"sqlite\": \"\"\"INSERT INTO iris VALUES(?, ?, ?, ?, ?)\"\"\",\n \"mysql\": \"\"\"INSERT INTO iris VALUES(%s, %s, %s, %s, \"%s\");\"\"\",\n \"postgresql\": \"\"\"INSERT INTO iris VALUES(%s, %s, %s, %s, %s);\"\"\",\n },\n \"create_test_types\": {\n \"sqlite\": \"\"\"CREATE TABLE types_test_data (\n \"TextCol\" TEXT,\n \"DateCol\" TEXT,\n \"IntDateCol\" INTEGER,\n \"IntDateOnlyCol\" INTEGER,\n \"FloatCol\" REAL,\n \"IntCol\" INTEGER,\n \"BoolCol\" INTEGER,\n \"IntColWithNull\" INTEGER,\n \"BoolColWithNull\" INTEGER\n )\"\"\",\n \"mysql\": \"\"\"CREATE TABLE types_test_data (\n `TextCol` TEXT,\n `DateCol` DATETIME,\n `IntDateCol` INTEGER,\n `IntDateOnlyCol` INTEGER,\n `FloatCol` DOUBLE,\n `IntCol` INTEGER,\n `BoolCol` BOOLEAN,\n `IntColWithNull` INTEGER,\n `BoolColWithNull` BOOLEAN\n )\"\"\",\n \"postgresql\": \"\"\"CREATE TABLE types_test_data (\n \"TextCol\" TEXT,\n \"DateCol\" TIMESTAMP,\n \"DateColWithTz\" TIMESTAMP WITH TIME ZONE,\n \"IntDateCol\" INTEGER,\n \"IntDateOnlyCol\" INTEGER,\n \"FloatCol\" DOUBLE PRECISION,\n \"IntCol\" INTEGER,\n \"BoolCol\" BOOLEAN,\n \"IntColWithNull\" INTEGER,\n \"BoolColWithNull\" BOOLEAN\n )\"\"\",\n },\n \"insert_test_types\": {\n \"sqlite\": {\n \"query\": \"\"\"\n INSERT INTO types_test_data\n 
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\",\n \"fields\": (\n \"TextCol\",\n \"DateCol\",\n \"IntDateCol\",\n \"IntDateOnlyCol\",\n \"FloatCol\",\n \"IntCol\",\n \"BoolCol\",\n \"IntColWithNull\",\n \"BoolColWithNull\",\n ),\n },\n \"mysql\": {\n \"query\": \"\"\"\n INSERT INTO types_test_data\n VALUES(\"%s\", %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\",\n \"fields\": (\n \"TextCol\",\n \"DateCol\",\n \"IntDateCol\",\n \"IntDateOnlyCol\",\n \"FloatCol\",\n \"IntCol\",\n \"BoolCol\",\n \"IntColWithNull\",\n \"BoolColWithNull\",\n ),\n },\n \"postgresql\": {\n \"query\": \"\"\"\n INSERT INTO types_test_data\n VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\",\n \"fields\": (\n \"TextCol\",\n \"DateCol\",\n \"DateColWithTz\",\n \"IntDateCol\",\n \"IntDateOnlyCol\",\n \"FloatCol\",\n \"IntCol\",\n \"BoolCol\",\n \"IntColWithNull\",\n \"BoolColWithNull\",\n ),\n },\n },\n \"read_parameters\": {\n \"sqlite\": \"SELECT * FROM iris WHERE Name=? AND SepalLength=?\",\n \"mysql\": 'SELECT * FROM iris WHERE `Name`=\"%s\" AND `SepalLength`=%s',\n \"postgresql\": 'SELECT * FROM iris WHERE \"Name\"=%s AND \"SepalLength\"=%s',\n },\n \"read_named_parameters\": {\n \"sqlite\": \"\"\"\n SELECT * FROM iris WHERE Name=:name AND SepalLength=:length\n \"\"\",\n \"mysql\": \"\"\"\n SELECT * FROM iris WHERE\n `Name`=\"%(name)s\" AND `SepalLength`=%(length)s\n \"\"\",\n \"postgresql\": \"\"\"\n SELECT * FROM iris WHERE\n \"Name\"=%(name)s AND \"SepalLength\"=%(length)s\n \"\"\",\n },\n \"read_no_parameters_with_percent\": {\n \"sqlite\": \"SELECT * FROM iris WHERE Name LIKE '%'\",\n \"mysql\": \"SELECT * FROM iris WHERE `Name` LIKE '%'\",\n \"postgresql\": \"SELECT * FROM iris WHERE \\\"Name\\\" LIKE '%'\",\n },\n \"create_view\": {\n \"sqlite\": \"\"\"\n CREATE VIEW iris_view AS\n SELECT * FROM iris\n \"\"\"\n },\n}\n\n\[email protected]\ndef test_frame1():\n columns = [\"index\", \"A\", \"B\", \"C\", \"D\"]\n data = [\n (\n \"2000-01-03 00:00:00\",\n 0.980268513777,\n 3.68573087906,\n -0.364216805298,\n -1.15973806169,\n ),\n (\n \"2000-01-04 00:00:00\",\n 1.04791624281,\n -0.0412318367011,\n -0.16181208307,\n 0.212549316967,\n ),\n (\n \"2000-01-05 00:00:00\",\n 0.498580885705,\n 0.731167677815,\n -0.537677223318,\n 1.34627041952,\n ),\n (\n \"2000-01-06 00:00:00\",\n 1.12020151869,\n 1.56762092543,\n 0.00364077397681,\n 0.67525259227,\n ),\n ]\n return DataFrame(data, columns=columns)\n\n\[email protected]\ndef test_frame3():\n columns = [\"index\", \"A\", \"B\"]\n data = [\n (\"2000-01-03 00:00:00\", 2 ** 31 - 1, -1.987670),\n (\"2000-01-04 00:00:00\", -29, -0.0412318367011),\n (\"2000-01-05 00:00:00\", 20000, 0.731167677815),\n (\"2000-01-06 00:00:00\", -290867, 1.56762092543),\n ]\n return DataFrame(data, columns=columns)\n\n\nclass MixInBase:\n def teardown_method(self, method):\n # if setup fails, there may not be a connection to close.\n if hasattr(self, \"conn\"):\n for tbl in self._get_all_tables():\n self.drop_table(tbl)\n self._close_conn()\n\n\nclass MySQLMixIn(MixInBase):\n def drop_table(self, table_name):\n cur = self.conn.cursor()\n cur.execute(f\"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}\")\n self.conn.commit()\n\n def _get_all_tables(self):\n cur = self.conn.cursor()\n cur.execute(\"SHOW TABLES\")\n return [table[0] for table in cur.fetchall()]\n\n def _close_conn(self):\n from pymysql.err import Error\n\n try:\n self.conn.close()\n except Error:\n pass\n\n\nclass SQLiteMixIn(MixInBase):\n def drop_table(self, table_name):\n self.conn.execute(\n f\"DROP TABLE IF EXISTS 
{sql._get_valid_sqlite_name(table_name)}\"\n )\n self.conn.commit()\n\n def _get_all_tables(self):\n c = self.conn.execute(\"SELECT name FROM sqlite_master WHERE type='table'\")\n return [table[0] for table in c.fetchall()]\n\n def _close_conn(self):\n self.conn.close()\n\n\nclass SQLAlchemyMixIn(MixInBase):\n def drop_table(self, table_name):\n sql.SQLDatabase(self.conn).drop_table(table_name)\n\n def _get_all_tables(self):\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n table_list = meta.tables.keys()\n return table_list\n\n def _close_conn(self):\n # https://docs.sqlalchemy.org/en/13/core/connections.html#engine-disposal\n self.conn.dispose()\n\n\nclass PandasSQLTest:\n \"\"\"\n Base class with common private methods for SQLAlchemy and fallback cases.\n\n \"\"\"\n\n def _get_exec(self):\n if hasattr(self.conn, \"execute\"):\n return self.conn\n else:\n return self.conn.cursor()\n\n @pytest.fixture(params=[(\"io\", \"data\", \"csv\", \"iris.csv\")])\n def load_iris_data(self, datapath, request):\n\n iris_csv_file = datapath(*request.param)\n\n if not hasattr(self, \"conn\"):\n self.setup_connect()\n\n self.drop_table(\"iris\")\n self._get_exec().execute(SQL_STRINGS[\"create_iris\"][self.flavor])\n\n with open(iris_csv_file, newline=None) as iris_csv:\n r = csv.reader(iris_csv)\n next(r) # skip header row\n ins = SQL_STRINGS[\"insert_iris\"][self.flavor]\n\n for row in r:\n self._get_exec().execute(ins, row)\n\n def _load_iris_view(self):\n self.drop_table(\"iris_view\")\n self._get_exec().execute(SQL_STRINGS[\"create_view\"][self.flavor])\n\n def _check_iris_loaded_frame(self, iris_frame):\n pytype = iris_frame.dtypes[0].type\n row = iris_frame.iloc[0]\n\n assert issubclass(pytype, np.floating)\n tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, \"Iris-setosa\"])\n\n def _load_types_test_data(self, data):\n def _filter_to_flavor(flavor, df):\n flavor_dtypes = {\n \"sqlite\": {\n \"TextCol\": \"str\",\n \"DateCol\": \"str\",\n \"IntDateCol\": \"int64\",\n \"IntDateOnlyCol\": \"int64\",\n \"FloatCol\": \"float\",\n \"IntCol\": \"int64\",\n \"BoolCol\": \"int64\",\n \"IntColWithNull\": \"float\",\n \"BoolColWithNull\": \"float\",\n },\n \"mysql\": {\n \"TextCol\": \"str\",\n \"DateCol\": \"str\",\n \"IntDateCol\": \"int64\",\n \"IntDateOnlyCol\": \"int64\",\n \"FloatCol\": \"float\",\n \"IntCol\": \"int64\",\n \"BoolCol\": \"bool\",\n \"IntColWithNull\": \"float\",\n \"BoolColWithNull\": \"float\",\n },\n \"postgresql\": {\n \"TextCol\": \"str\",\n \"DateCol\": \"str\",\n \"DateColWithTz\": \"str\",\n \"IntDateCol\": \"int64\",\n \"IntDateOnlyCol\": \"int64\",\n \"FloatCol\": \"float\",\n \"IntCol\": \"int64\",\n \"BoolCol\": \"bool\",\n \"IntColWithNull\": \"float\",\n \"BoolColWithNull\": \"float\",\n },\n }\n\n dtypes = flavor_dtypes[flavor]\n return df[dtypes.keys()].astype(dtypes)\n\n df = DataFrame(data)\n self.types_test = {\n flavor: _filter_to_flavor(flavor, df)\n for flavor in (\"sqlite\", \"mysql\", \"postgresql\")\n }\n\n def _load_raw_sql(self):\n self.drop_table(\"types_test_data\")\n self._get_exec().execute(SQL_STRINGS[\"create_test_types\"][self.flavor])\n ins = SQL_STRINGS[\"insert_test_types\"][self.flavor]\n data = [\n {\n \"TextCol\": \"first\",\n \"DateCol\": \"2000-01-03 00:00:00\",\n \"DateColWithTz\": \"2000-01-01 00:00:00-08:00\",\n \"IntDateCol\": 535852800,\n \"IntDateOnlyCol\": 20101010,\n \"FloatCol\": 10.10,\n \"IntCol\": 1,\n \"BoolCol\": False,\n \"IntColWithNull\": 1,\n \"BoolColWithNull\": False,\n },\n {\n \"TextCol\": \"first\",\n 
\"DateCol\": \"2000-01-04 00:00:00\",\n \"DateColWithTz\": \"2000-06-01 00:00:00-07:00\",\n \"IntDateCol\": 1356998400,\n \"IntDateOnlyCol\": 20101212,\n \"FloatCol\": 10.10,\n \"IntCol\": 1,\n \"BoolCol\": False,\n \"IntColWithNull\": None,\n \"BoolColWithNull\": None,\n },\n ]\n\n for d in data:\n self._get_exec().execute(\n ins[\"query\"], [d[field] for field in ins[\"fields\"]]\n )\n\n self._load_types_test_data(data)\n\n def _count_rows(self, table_name):\n result = (\n self._get_exec()\n .execute(f\"SELECT count(*) AS count_1 FROM {table_name}\")\n .fetchone()\n )\n return result[0]\n\n def _read_sql_iris(self):\n iris_frame = self.pandasSQL.read_query(\"SELECT * FROM iris\")\n self._check_iris_loaded_frame(iris_frame)\n\n def _read_sql_iris_parameter(self):\n query = SQL_STRINGS[\"read_parameters\"][self.flavor]\n params = [\"Iris-setosa\", 5.1]\n iris_frame = self.pandasSQL.read_query(query, params=params)\n self._check_iris_loaded_frame(iris_frame)\n\n def _read_sql_iris_named_parameter(self):\n query = SQL_STRINGS[\"read_named_parameters\"][self.flavor]\n params = {\"name\": \"Iris-setosa\", \"length\": 5.1}\n iris_frame = self.pandasSQL.read_query(query, params=params)\n self._check_iris_loaded_frame(iris_frame)\n\n def _read_sql_iris_no_parameter_with_percent(self):\n query = SQL_STRINGS[\"read_no_parameters_with_percent\"][self.flavor]\n iris_frame = self.pandasSQL.read_query(query, params=None)\n self._check_iris_loaded_frame(iris_frame)\n\n def _to_sql(self, test_frame1, method=None):\n self.drop_table(\"test_frame1\")\n\n self.pandasSQL.to_sql(test_frame1, \"test_frame1\", method=method)\n assert self.pandasSQL.has_table(\"test_frame1\")\n\n num_entries = len(test_frame1)\n num_rows = self._count_rows(\"test_frame1\")\n assert num_rows == num_entries\n\n # Nuke table\n self.drop_table(\"test_frame1\")\n\n def _to_sql_empty(self, test_frame1):\n self.drop_table(\"test_frame1\")\n self.pandasSQL.to_sql(test_frame1.iloc[:0], \"test_frame1\")\n\n def _to_sql_fail(self, test_frame1):\n self.drop_table(\"test_frame1\")\n\n self.pandasSQL.to_sql(test_frame1, \"test_frame1\", if_exists=\"fail\")\n assert self.pandasSQL.has_table(\"test_frame1\")\n\n msg = \"Table 'test_frame1' already exists\"\n with pytest.raises(ValueError, match=msg):\n self.pandasSQL.to_sql(test_frame1, \"test_frame1\", if_exists=\"fail\")\n\n self.drop_table(\"test_frame1\")\n\n def _to_sql_replace(self, test_frame1):\n self.drop_table(\"test_frame1\")\n\n self.pandasSQL.to_sql(test_frame1, \"test_frame1\", if_exists=\"fail\")\n # Add to table again\n self.pandasSQL.to_sql(test_frame1, \"test_frame1\", if_exists=\"replace\")\n assert self.pandasSQL.has_table(\"test_frame1\")\n\n num_entries = len(test_frame1)\n num_rows = self._count_rows(\"test_frame1\")\n\n assert num_rows == num_entries\n self.drop_table(\"test_frame1\")\n\n def _to_sql_append(self, test_frame1):\n # Nuke table just in case\n self.drop_table(\"test_frame1\")\n\n self.pandasSQL.to_sql(test_frame1, \"test_frame1\", if_exists=\"fail\")\n\n # Add to table again\n self.pandasSQL.to_sql(test_frame1, \"test_frame1\", if_exists=\"append\")\n assert self.pandasSQL.has_table(\"test_frame1\")\n\n num_entries = 2 * len(test_frame1)\n num_rows = self._count_rows(\"test_frame1\")\n\n assert num_rows == num_entries\n self.drop_table(\"test_frame1\")\n\n def _to_sql_method_callable(self, test_frame1):\n check = [] # used to double check function below is really being used\n\n def sample(pd_table, conn, keys, data_iter):\n check.append(1)\n data = 
[dict(zip(keys, row)) for row in data_iter]\n conn.execute(pd_table.table.insert(), data)\n\n self.drop_table(\"test_frame1\")\n\n self.pandasSQL.to_sql(test_frame1, \"test_frame1\", method=sample)\n assert self.pandasSQL.has_table(\"test_frame1\")\n\n assert check == [1]\n num_entries = len(test_frame1)\n num_rows = self._count_rows(\"test_frame1\")\n assert num_rows == num_entries\n # Nuke table\n self.drop_table(\"test_frame1\")\n\n def _to_sql_with_sql_engine(self, test_frame1, engine=\"auto\", **engine_kwargs):\n \"\"\"`to_sql` with the `engine` param\"\"\"\n # mostly copied from this class's `_to_sql()` method\n self.drop_table(\"test_frame1\")\n\n self.pandasSQL.to_sql(\n test_frame1, \"test_frame1\", engine=engine, **engine_kwargs\n )\n assert self.pandasSQL.has_table(\"test_frame1\")\n\n num_entries = len(test_frame1)\n num_rows = self._count_rows(\"test_frame1\")\n assert num_rows == num_entries\n\n # Nuke table\n self.drop_table(\"test_frame1\")\n\n def _roundtrip(self, test_frame1):\n self.drop_table(\"test_frame_roundtrip\")\n self.pandasSQL.to_sql(test_frame1, \"test_frame_roundtrip\")\n result = self.pandasSQL.read_query(\"SELECT * FROM test_frame_roundtrip\")\n\n result.set_index(\"level_0\", inplace=True)\n # result.index.astype(int)\n\n result.index.name = None\n\n tm.assert_frame_equal(result, test_frame1)\n\n def _execute_sql(self):\n # drop_sql = \"DROP TABLE IF EXISTS test\" # should already be done\n iris_results = self.pandasSQL.execute(\"SELECT * FROM iris\")\n row = iris_results.fetchone()\n tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, \"Iris-setosa\"])\n\n def _to_sql_save_index(self):\n df = DataFrame.from_records(\n [(1, 2.1, \"line1\"), (2, 1.5, \"line2\")], columns=[\"A\", \"B\", \"C\"], index=[\"A\"]\n )\n self.pandasSQL.to_sql(df, \"test_to_sql_saves_index\")\n ix_cols = self._get_index_columns(\"test_to_sql_saves_index\")\n assert ix_cols == [[\"A\"]]\n\n def _transaction_test(self):\n with self.pandasSQL.run_transaction() as trans:\n trans.execute(\"CREATE TABLE test_trans (A INT, B TEXT)\")\n\n class DummyException(Exception):\n pass\n\n # Make sure when transaction is rolled back, no rows get inserted\n ins_sql = \"INSERT INTO test_trans (A,B) VALUES (1, 'blah')\"\n try:\n with self.pandasSQL.run_transaction() as trans:\n trans.execute(ins_sql)\n raise DummyException(\"error\")\n except DummyException:\n # ignore raised exception\n pass\n res = self.pandasSQL.read_query(\"SELECT * FROM test_trans\")\n assert len(res) == 0\n\n # Make sure when transaction is committed, rows do get inserted\n with self.pandasSQL.run_transaction() as trans:\n trans.execute(ins_sql)\n res2 = self.pandasSQL.read_query(\"SELECT * FROM test_trans\")\n assert len(res2) == 1\n\n\n# -----------------------------------------------------------------------------\n# -- Testing the public API\n\n\nclass _TestSQLApi(PandasSQLTest):\n \"\"\"\n Base class to test the public API.\n\n From this two classes are derived to run these tests for both the\n sqlalchemy mode (`TestSQLApi`) and the fallback mode\n (`TestSQLiteFallbackApi`). These tests are run with sqlite3. 
Specific\n tests for the different sql flavours are included in `_TestSQLAlchemy`.\n\n Notes:\n flavor can always be passed even in SQLAlchemy mode,\n should be correctly ignored.\n\n we don't use drop_table because that isn't part of the public api\n\n \"\"\"\n\n flavor = \"sqlite\"\n mode: str\n\n def setup_connect(self):\n self.conn = self.connect()\n\n @pytest.fixture(autouse=True)\n def setup_method(self, load_iris_data):\n self.load_test_data_and_sql()\n\n def load_test_data_and_sql(self):\n self._load_iris_view()\n self._load_raw_sql()\n\n def test_read_sql_iris(self):\n iris_frame = sql.read_sql_query(\"SELECT * FROM iris\", self.conn)\n self._check_iris_loaded_frame(iris_frame)\n\n def test_read_sql_view(self):\n iris_frame = sql.read_sql_query(\"SELECT * FROM iris_view\", self.conn)\n self._check_iris_loaded_frame(iris_frame)\n\n def test_read_sql_with_chunksize_no_result(self):\n query = \"SELECT * FROM iris_view WHERE SepalLength < 0.0\"\n with_batch = sql.read_sql_query(query, self.conn, chunksize=5)\n without_batch = sql.read_sql_query(query, self.conn)\n tm.assert_frame_equal(concat(with_batch), without_batch)\n\n def test_to_sql(self, test_frame1):\n sql.to_sql(test_frame1, \"test_frame1\", self.conn)\n assert sql.has_table(\"test_frame1\", self.conn)\n\n def test_to_sql_fail(self, test_frame1):\n sql.to_sql(test_frame1, \"test_frame2\", self.conn, if_exists=\"fail\")\n assert sql.has_table(\"test_frame2\", self.conn)\n\n msg = \"Table 'test_frame2' already exists\"\n with pytest.raises(ValueError, match=msg):\n sql.to_sql(test_frame1, \"test_frame2\", self.conn, if_exists=\"fail\")\n\n def test_to_sql_replace(self, test_frame1):\n sql.to_sql(test_frame1, \"test_frame3\", self.conn, if_exists=\"fail\")\n # Add to table again\n sql.to_sql(test_frame1, \"test_frame3\", self.conn, if_exists=\"replace\")\n assert sql.has_table(\"test_frame3\", self.conn)\n\n num_entries = len(test_frame1)\n num_rows = self._count_rows(\"test_frame3\")\n\n assert num_rows == num_entries\n\n def test_to_sql_append(self, test_frame1):\n sql.to_sql(test_frame1, \"test_frame4\", self.conn, if_exists=\"fail\")\n\n # Add to table again\n sql.to_sql(test_frame1, \"test_frame4\", self.conn, if_exists=\"append\")\n assert sql.has_table(\"test_frame4\", self.conn)\n\n num_entries = 2 * len(test_frame1)\n num_rows = self._count_rows(\"test_frame4\")\n\n assert num_rows == num_entries\n\n def test_to_sql_type_mapping(self, test_frame3):\n sql.to_sql(test_frame3, \"test_frame5\", self.conn, index=False)\n result = sql.read_sql(\"SELECT * FROM test_frame5\", self.conn)\n\n tm.assert_frame_equal(test_frame3, result)\n\n def test_to_sql_series(self):\n s = Series(np.arange(5, dtype=\"int64\"), name=\"series\")\n sql.to_sql(s, \"test_series\", self.conn, index=False)\n s2 = sql.read_sql_query(\"SELECT * FROM test_series\", self.conn)\n tm.assert_frame_equal(s.to_frame(), s2)\n\n def test_roundtrip(self, test_frame1):\n sql.to_sql(test_frame1, \"test_frame_roundtrip\", con=self.conn)\n result = sql.read_sql_query(\"SELECT * FROM test_frame_roundtrip\", con=self.conn)\n\n # HACK!\n result.index = test_frame1.index\n result.set_index(\"level_0\", inplace=True)\n result.index.astype(int)\n result.index.name = None\n tm.assert_frame_equal(result, test_frame1)\n\n def test_roundtrip_chunksize(self, test_frame1):\n sql.to_sql(\n test_frame1,\n \"test_frame_roundtrip\",\n con=self.conn,\n index=False,\n chunksize=2,\n )\n result = sql.read_sql_query(\"SELECT * FROM test_frame_roundtrip\", con=self.conn)\n 
tm.assert_frame_equal(result, test_frame1)\n\n def test_execute_sql(self):\n # drop_sql = \"DROP TABLE IF EXISTS test\" # should already be done\n iris_results = sql.execute(\"SELECT * FROM iris\", con=self.conn)\n row = iris_results.fetchone()\n tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, \"Iris-setosa\"])\n\n def test_date_parsing(self):\n # Test date parsing in read_sql\n # No Parsing\n df = sql.read_sql_query(\"SELECT * FROM types_test_data\", self.conn)\n assert not issubclass(df.DateCol.dtype.type, np.datetime64)\n\n df = sql.read_sql_query(\n \"SELECT * FROM types_test_data\", self.conn, parse_dates=[\"DateCol\"]\n )\n assert issubclass(df.DateCol.dtype.type, np.datetime64)\n assert df.DateCol.tolist() == [\n Timestamp(2000, 1, 3, 0, 0, 0),\n Timestamp(2000, 1, 4, 0, 0, 0),\n ]\n\n df = sql.read_sql_query(\n \"SELECT * FROM types_test_data\",\n self.conn,\n parse_dates={\"DateCol\": \"%Y-%m-%d %H:%M:%S\"},\n )\n assert issubclass(df.DateCol.dtype.type, np.datetime64)\n assert df.DateCol.tolist() == [\n Timestamp(2000, 1, 3, 0, 0, 0),\n Timestamp(2000, 1, 4, 0, 0, 0),\n ]\n\n df = sql.read_sql_query(\n \"SELECT * FROM types_test_data\", self.conn, parse_dates=[\"IntDateCol\"]\n )\n assert issubclass(df.IntDateCol.dtype.type, np.datetime64)\n assert df.IntDateCol.tolist() == [\n Timestamp(1986, 12, 25, 0, 0, 0),\n Timestamp(2013, 1, 1, 0, 0, 0),\n ]\n\n df = sql.read_sql_query(\n \"SELECT * FROM types_test_data\", self.conn, parse_dates={\"IntDateCol\": \"s\"}\n )\n assert issubclass(df.IntDateCol.dtype.type, np.datetime64)\n assert df.IntDateCol.tolist() == [\n Timestamp(1986, 12, 25, 0, 0, 0),\n Timestamp(2013, 1, 1, 0, 0, 0),\n ]\n\n df = sql.read_sql_query(\n \"SELECT * FROM types_test_data\",\n self.conn,\n parse_dates={\"IntDateOnlyCol\": \"%Y%m%d\"},\n )\n assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)\n assert df.IntDateOnlyCol.tolist() == [\n Timestamp(\"2010-10-10\"),\n Timestamp(\"2010-12-12\"),\n ]\n\n @pytest.mark.parametrize(\"error\", [\"ignore\", \"raise\", \"coerce\"])\n @pytest.mark.parametrize(\n \"read_sql, text, mode\",\n [\n (sql.read_sql, \"SELECT * FROM types_test_data\", (\"sqlalchemy\", \"fallback\")),\n (sql.read_sql, \"types_test_data\", (\"sqlalchemy\")),\n (\n sql.read_sql_query,\n \"SELECT * FROM types_test_data\",\n (\"sqlalchemy\", \"fallback\"),\n ),\n (sql.read_sql_table, \"types_test_data\", (\"sqlalchemy\")),\n ],\n )\n def test_custom_dateparsing_error(self, read_sql, text, mode, error):\n if self.mode in mode:\n expected = self.types_test[self.flavor].astype(\n {\"DateCol\": \"datetime64[ns]\"}\n )\n\n result = read_sql(\n text,\n con=self.conn,\n parse_dates={\n \"DateCol\": {\"errors\": error},\n },\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_date_and_index(self):\n # Test case where same column appears in parse_date and index_col\n\n df = sql.read_sql_query(\n \"SELECT * FROM types_test_data\",\n self.conn,\n index_col=\"DateCol\",\n parse_dates=[\"DateCol\", \"IntDateCol\"],\n )\n\n assert issubclass(df.index.dtype.type, np.datetime64)\n assert issubclass(df.IntDateCol.dtype.type, np.datetime64)\n\n def test_timedelta(self):\n\n # see #6921\n df = to_timedelta(Series([\"00:00:01\", \"00:00:03\"], name=\"foo\")).to_frame()\n with tm.assert_produces_warning(UserWarning):\n df.to_sql(\"test_timedelta\", self.conn)\n result = sql.read_sql_query(\"SELECT * FROM test_timedelta\", self.conn)\n tm.assert_series_equal(result[\"foo\"], df[\"foo\"].view(\"int64\"))\n\n def test_complex_raises(self):\n df = 
DataFrame({\"a\": [1 + 1j, 2j]})\n msg = \"Complex datatypes not supported\"\n with pytest.raises(ValueError, match=msg):\n df.to_sql(\"test_complex\", self.conn)\n\n @pytest.mark.parametrize(\n \"index_name,index_label,expected\",\n [\n # no index name, defaults to 'index'\n (None, None, \"index\"),\n # specifying index_label\n (None, \"other_label\", \"other_label\"),\n # using the index name\n (\"index_name\", None, \"index_name\"),\n # has index name, but specifying index_label\n (\"index_name\", \"other_label\", \"other_label\"),\n # index name is integer\n (0, None, \"0\"),\n # index name is None but index label is integer\n (None, 0, \"0\"),\n ],\n )\n def test_to_sql_index_label(self, index_name, index_label, expected):\n temp_frame = DataFrame({\"col1\": range(4)})\n temp_frame.index.name = index_name\n query = \"SELECT * FROM test_index_label\"\n sql.to_sql(temp_frame, \"test_index_label\", self.conn, index_label=index_label)\n frame = sql.read_sql_query(query, self.conn)\n assert frame.columns[0] == expected\n\n def test_to_sql_index_label_multiindex(self):\n temp_frame = DataFrame(\n {\"col1\": range(4)},\n index=MultiIndex.from_product([(\"A0\", \"A1\"), (\"B0\", \"B1\")]),\n )\n\n # no index name, defaults to 'level_0' and 'level_1'\n sql.to_sql(temp_frame, \"test_index_label\", self.conn)\n frame = sql.read_sql_query(\"SELECT * FROM test_index_label\", self.conn)\n assert frame.columns[0] == \"level_0\"\n assert frame.columns[1] == \"level_1\"\n\n # specifying index_label\n sql.to_sql(\n temp_frame,\n \"test_index_label\",\n self.conn,\n if_exists=\"replace\",\n index_label=[\"A\", \"B\"],\n )\n frame = sql.read_sql_query(\"SELECT * FROM test_index_label\", self.conn)\n assert frame.columns[:2].tolist() == [\"A\", \"B\"]\n\n # using the index name\n temp_frame.index.names = [\"A\", \"B\"]\n sql.to_sql(temp_frame, \"test_index_label\", self.conn, if_exists=\"replace\")\n frame = sql.read_sql_query(\"SELECT * FROM test_index_label\", self.conn)\n assert frame.columns[:2].tolist() == [\"A\", \"B\"]\n\n # has index name, but specifying index_label\n sql.to_sql(\n temp_frame,\n \"test_index_label\",\n self.conn,\n if_exists=\"replace\",\n index_label=[\"C\", \"D\"],\n )\n frame = sql.read_sql_query(\"SELECT * FROM test_index_label\", self.conn)\n assert frame.columns[:2].tolist() == [\"C\", \"D\"]\n\n msg = \"Length of 'index_label' should match number of levels, which is 2\"\n with pytest.raises(ValueError, match=msg):\n sql.to_sql(\n temp_frame,\n \"test_index_label\",\n self.conn,\n if_exists=\"replace\",\n index_label=\"C\",\n )\n\n def test_multiindex_roundtrip(self):\n df = DataFrame.from_records(\n [(1, 2.1, \"line1\"), (2, 1.5, \"line2\")],\n columns=[\"A\", \"B\", \"C\"],\n index=[\"A\", \"B\"],\n )\n\n df.to_sql(\"test_multiindex_roundtrip\", self.conn)\n result = sql.read_sql_query(\n \"SELECT * FROM test_multiindex_roundtrip\", self.conn, index_col=[\"A\", \"B\"]\n )\n tm.assert_frame_equal(df, result, check_index_type=True)\n\n @pytest.mark.parametrize(\n \"dtype\",\n [\n None,\n int,\n float,\n {\"A\": int, \"B\": float},\n ],\n )\n def test_dtype_argument(self, dtype):\n # GH10285 Add dtype argument to read_sql_query\n df = DataFrame([[1.2, 3.4], [5.6, 7.8]], columns=[\"A\", \"B\"])\n df.to_sql(\"test_dtype_argument\", self.conn)\n\n expected = df.astype(dtype)\n result = sql.read_sql_query(\n \"SELECT A, B FROM test_dtype_argument\", con=self.conn, dtype=dtype\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_integer_col_names(self):\n df = 
DataFrame([[1, 2], [3, 4]], columns=[0, 1])\n sql.to_sql(df, \"test_frame_integer_col_names\", self.conn, if_exists=\"replace\")\n\n def test_get_schema(self, test_frame1):\n create_sql = sql.get_schema(test_frame1, \"test\", con=self.conn)\n assert \"CREATE\" in create_sql\n\n def test_get_schema_with_schema(self, test_frame1):\n # GH28486\n create_sql = sql.get_schema(test_frame1, \"test\", con=self.conn, schema=\"pypi\")\n assert \"CREATE TABLE pypi.\" in create_sql\n\n def test_get_schema_dtypes(self):\n float_frame = DataFrame({\"a\": [1.1, 1.2], \"b\": [2.1, 2.2]})\n dtype = sqlalchemy.Integer if self.mode == \"sqlalchemy\" else \"INTEGER\"\n create_sql = sql.get_schema(\n float_frame, \"test\", con=self.conn, dtype={\"b\": dtype}\n )\n assert \"CREATE\" in create_sql\n assert \"INTEGER\" in create_sql\n\n def test_get_schema_keys(self, test_frame1):\n frame = DataFrame({\"Col1\": [1.1, 1.2], \"Col2\": [2.1, 2.2]})\n create_sql = sql.get_schema(frame, \"test\", con=self.conn, keys=\"Col1\")\n constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY (\"Col1\")'\n assert constraint_sentence in create_sql\n\n # multiple columns as key (GH10385)\n create_sql = sql.get_schema(test_frame1, \"test\", con=self.conn, keys=[\"A\", \"B\"])\n constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY (\"A\", \"B\")'\n assert constraint_sentence in create_sql\n\n def test_chunksize_read(self):\n df = DataFrame(np.random.randn(22, 5), columns=list(\"abcde\"))\n df.to_sql(\"test_chunksize\", self.conn, index=False)\n\n # reading the query in one time\n res1 = sql.read_sql_query(\"select * from test_chunksize\", self.conn)\n\n # reading the query in chunks with read_sql_query\n res2 = DataFrame()\n i = 0\n sizes = [5, 5, 5, 5, 2]\n\n for chunk in sql.read_sql_query(\n \"select * from test_chunksize\", self.conn, chunksize=5\n ):\n res2 = concat([res2, chunk], ignore_index=True)\n assert len(chunk) == sizes[i]\n i += 1\n\n tm.assert_frame_equal(res1, res2)\n\n # reading the query in chunks with read_sql_query\n if self.mode == \"sqlalchemy\":\n res3 = DataFrame()\n i = 0\n sizes = [5, 5, 5, 5, 2]\n\n for chunk in sql.read_sql_table(\"test_chunksize\", self.conn, chunksize=5):\n res3 = concat([res3, chunk], ignore_index=True)\n assert len(chunk) == sizes[i]\n i += 1\n\n tm.assert_frame_equal(res1, res3)\n\n def test_categorical(self):\n # GH8624\n # test that categorical gets written correctly as dense column\n df = DataFrame(\n {\n \"person_id\": [1, 2, 3],\n \"person_name\": [\"John P. Doe\", \"Jane Dove\", \"John P. 
Doe\"],\n }\n )\n df2 = df.copy()\n df2[\"person_name\"] = df2[\"person_name\"].astype(\"category\")\n\n df2.to_sql(\"test_categorical\", self.conn, index=False)\n res = sql.read_sql_query(\"SELECT * FROM test_categorical\", self.conn)\n\n tm.assert_frame_equal(res, df)\n\n def test_unicode_column_name(self):\n # GH 11431\n df = DataFrame([[1, 2], [3, 4]], columns=[\"\\xe9\", \"b\"])\n df.to_sql(\"test_unicode\", self.conn, index=False)\n\n def test_escaped_table_name(self):\n # GH 13206\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [0.2, np.nan, 5.6]})\n df.to_sql(\"d1187b08-4943-4c8d-a7f6\", self.conn, index=False)\n\n res = sql.read_sql_query(\"SELECT * FROM `d1187b08-4943-4c8d-a7f6`\", self.conn)\n\n tm.assert_frame_equal(res, df)\n\n\[email protected]\[email protected](not SQLALCHEMY_INSTALLED, reason=\"SQLAlchemy not installed\")\nclass TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):\n \"\"\"\n Test the public API as it would be used directly\n\n Tests for `read_sql_table` are included here, as this is specific for the\n sqlalchemy mode.\n\n \"\"\"\n\n flavor = \"sqlite\"\n mode = \"sqlalchemy\"\n\n def connect(self):\n return sqlalchemy.create_engine(\"sqlite:///:memory:\")\n\n def test_read_table_columns(self, test_frame1):\n # test columns argument in read_table\n sql.to_sql(test_frame1, \"test_frame\", self.conn)\n\n cols = [\"A\", \"B\"]\n result = sql.read_sql_table(\"test_frame\", self.conn, columns=cols)\n assert result.columns.tolist() == cols\n\n def test_read_table_index_col(self, test_frame1):\n # test columns argument in read_table\n sql.to_sql(test_frame1, \"test_frame\", self.conn)\n\n result = sql.read_sql_table(\"test_frame\", self.conn, index_col=\"index\")\n assert result.index.names == [\"index\"]\n\n result = sql.read_sql_table(\"test_frame\", self.conn, index_col=[\"A\", \"B\"])\n assert result.index.names == [\"A\", \"B\"]\n\n result = sql.read_sql_table(\n \"test_frame\", self.conn, index_col=[\"A\", \"B\"], columns=[\"C\", \"D\"]\n )\n assert result.index.names == [\"A\", \"B\"]\n assert result.columns.tolist() == [\"C\", \"D\"]\n\n def test_read_sql_delegate(self):\n iris_frame1 = sql.read_sql_query(\"SELECT * FROM iris\", self.conn)\n iris_frame2 = sql.read_sql(\"SELECT * FROM iris\", self.conn)\n tm.assert_frame_equal(iris_frame1, iris_frame2)\n\n iris_frame1 = sql.read_sql_table(\"iris\", self.conn)\n iris_frame2 = sql.read_sql(\"iris\", self.conn)\n tm.assert_frame_equal(iris_frame1, iris_frame2)\n\n def test_not_reflect_all_tables(self):\n # create invalid table\n qry = \"\"\"CREATE TABLE invalid (x INTEGER, y UNKNOWN);\"\"\"\n self.conn.execute(qry)\n qry = \"\"\"CREATE TABLE other_table (x INTEGER, y INTEGER);\"\"\"\n self.conn.execute(qry)\n\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # Trigger a warning.\n sql.read_sql_table(\"other_table\", self.conn)\n sql.read_sql_query(\"SELECT * FROM other_table\", self.conn)\n # Verify some things\n assert len(w) == 0\n\n def test_warning_case_insensitive_table_name(self, test_frame1):\n # see gh-7815\n #\n # We can't test that this warning is triggered, a the database\n # configuration would have to be altered. 
But here we test that\n # the warning is certainly NOT triggered in a normal case.\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # This should not trigger a Warning\n test_frame1.to_sql(\"CaseSensitive\", self.conn)\n # Verify some things\n assert len(w) == 0\n\n def _get_index_columns(self, tbl_name):\n from sqlalchemy.engine import reflection\n\n insp = reflection.Inspector.from_engine(self.conn)\n ixs = insp.get_indexes(\"test_index_saved\")\n ixs = [i[\"column_names\"] for i in ixs]\n return ixs\n\n def test_sqlalchemy_type_mapping(self):\n\n # Test Timestamp objects (no datetime64 because of timezone) (GH9085)\n df = DataFrame(\n {\"time\": to_datetime([\"201412120154\", \"201412110254\"], utc=True)}\n )\n db = sql.SQLDatabase(self.conn)\n table = sql.SQLTable(\"test_type\", db, frame=df)\n # GH 9086: TIMESTAMP is the suggested type for datetimes with timezones\n assert isinstance(table.table.c[\"time\"].type, sqltypes.TIMESTAMP)\n\n @pytest.mark.parametrize(\n \"integer, expected\",\n [\n (\"int8\", \"SMALLINT\"),\n (\"Int8\", \"SMALLINT\"),\n (\"uint8\", \"SMALLINT\"),\n (\"UInt8\", \"SMALLINT\"),\n (\"int16\", \"SMALLINT\"),\n (\"Int16\", \"SMALLINT\"),\n (\"uint16\", \"INTEGER\"),\n (\"UInt16\", \"INTEGER\"),\n (\"int32\", \"INTEGER\"),\n (\"Int32\", \"INTEGER\"),\n (\"uint32\", \"BIGINT\"),\n (\"UInt32\", \"BIGINT\"),\n (\"int64\", \"BIGINT\"),\n (\"Int64\", \"BIGINT\"),\n (int, \"BIGINT\" if np.dtype(int).name == \"int64\" else \"INTEGER\"),\n ],\n )\n def test_sqlalchemy_integer_mapping(self, integer, expected):\n # GH35076 Map pandas integer to optimal SQLAlchemy integer type\n df = DataFrame([0, 1], columns=[\"a\"], dtype=integer)\n db = sql.SQLDatabase(self.conn)\n table = sql.SQLTable(\"test_type\", db, frame=df)\n\n result = str(table.table.c.a.type)\n assert result == expected\n\n @pytest.mark.parametrize(\"integer\", [\"uint64\", \"UInt64\"])\n def test_sqlalchemy_integer_overload_mapping(self, integer):\n # GH35076 Map pandas integer to optimal SQLAlchemy integer type\n df = DataFrame([0, 1], columns=[\"a\"], dtype=integer)\n db = sql.SQLDatabase(self.conn)\n with pytest.raises(\n ValueError, match=\"Unsigned 64 bit integer datatype is not supported\"\n ):\n sql.SQLTable(\"test_type\", db, frame=df)\n\n def test_database_uri_string(self, test_frame1):\n # Test read_sql and .to_sql method with a database URI (GH10654)\n # db_uri = 'sqlite:///:memory:' # raises\n # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near\n # \"iris\": syntax error [SQL: 'iris']\n with tm.ensure_clean() as name:\n db_uri = \"sqlite:///\" + name\n table = \"iris\"\n test_frame1.to_sql(table, db_uri, if_exists=\"replace\", index=False)\n test_frame2 = sql.read_sql(table, db_uri)\n test_frame3 = sql.read_sql_table(table, db_uri)\n query = \"SELECT * FROM iris\"\n test_frame4 = sql.read_sql_query(query, db_uri)\n tm.assert_frame_equal(test_frame1, test_frame2)\n tm.assert_frame_equal(test_frame1, test_frame3)\n tm.assert_frame_equal(test_frame1, test_frame4)\n\n # using driver that will not be installed on Travis to trigger error\n # in sqlalchemy.create_engine -> test passing of this error to user\n try:\n # the rest of this test depends on pg8000's being absent\n import pg8000 # noqa\n\n pytest.skip(\"pg8000 is installed\")\n except ImportError:\n pass\n\n db_uri = \"postgresql+pg8000://user:pass@host/dbname\"\n with pytest.raises(ImportError, match=\"pg8000\"):\n sql.read_sql(\"select * from 
table\", db_uri)\n\n def _make_iris_table_metadata(self):\n sa = sqlalchemy\n metadata = sa.MetaData()\n iris = sa.Table(\n \"iris\",\n metadata,\n sa.Column(\"SepalLength\", sa.REAL),\n sa.Column(\"SepalWidth\", sa.REAL),\n sa.Column(\"PetalLength\", sa.REAL),\n sa.Column(\"PetalWidth\", sa.REAL),\n sa.Column(\"Name\", sa.TEXT),\n )\n\n return iris\n\n def test_query_by_text_obj(self):\n # WIP : GH10846\n name_text = sqlalchemy.text(\"select * from iris where name=:name\")\n iris_df = sql.read_sql(name_text, self.conn, params={\"name\": \"Iris-versicolor\"})\n all_names = set(iris_df[\"Name\"])\n assert all_names == {\"Iris-versicolor\"}\n\n def test_query_by_select_obj(self):\n # WIP : GH10846\n iris = self._make_iris_table_metadata()\n\n name_select = sqlalchemy.select([iris]).where(\n iris.c.Name == sqlalchemy.bindparam(\"name\")\n )\n iris_df = sql.read_sql(name_select, self.conn, params={\"name\": \"Iris-setosa\"})\n all_names = set(iris_df[\"Name\"])\n assert all_names == {\"Iris-setosa\"}\n\n def test_column_with_percentage(self):\n # GH 37157\n df = DataFrame({\"A\": [0, 1, 2], \"%_variation\": [3, 4, 5]})\n df.to_sql(\"test_column_percentage\", self.conn, index=False)\n\n res = sql.read_sql_table(\"test_column_percentage\", self.conn)\n\n tm.assert_frame_equal(res, df)\n\n\nclass _EngineToConnMixin:\n \"\"\"\n A mixin that causes setup_connect to create a conn rather than an engine.\n \"\"\"\n\n @pytest.fixture(autouse=True)\n def setup_method(self, load_iris_data):\n super().load_test_data_and_sql()\n engine = self.conn\n conn = engine.connect()\n self.__tx = conn.begin()\n self.pandasSQL = sql.SQLDatabase(conn)\n self.__engine = engine\n self.conn = conn\n\n yield\n\n self.__tx.rollback()\n self.conn.close()\n self.conn = self.__engine\n self.pandasSQL = sql.SQLDatabase(self.__engine)\n\n\[email protected]\nclass TestSQLApiConn(_EngineToConnMixin, TestSQLApi):\n pass\n\n\[email protected]\nclass TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):\n \"\"\"\n Test the public sqlite connection fallback API\n\n \"\"\"\n\n flavor = \"sqlite\"\n mode = \"fallback\"\n\n def connect(self, database=\":memory:\"):\n return sqlite3.connect(database)\n\n def test_sql_open_close(self, test_frame3):\n # Test if the IO in the database still work if the connection closed\n # between the writing and reading (as in many real situations).\n\n with tm.ensure_clean() as name:\n\n conn = self.connect(name)\n sql.to_sql(test_frame3, \"test_frame3_legacy\", conn, index=False)\n conn.close()\n\n conn = self.connect(name)\n result = sql.read_sql_query(\"SELECT * FROM test_frame3_legacy;\", conn)\n conn.close()\n\n tm.assert_frame_equal(test_frame3, result)\n\n @pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason=\"SQLAlchemy is installed\")\n def test_con_string_import_error(self):\n conn = \"mysql://root@localhost/pandas\"\n with pytest.raises(ImportError, match=\"SQLAlchemy\"):\n sql.read_sql(\"SELECT * FROM iris\", conn)\n\n def test_read_sql_delegate(self):\n iris_frame1 = sql.read_sql_query(\"SELECT * FROM iris\", self.conn)\n iris_frame2 = sql.read_sql(\"SELECT * FROM iris\", self.conn)\n tm.assert_frame_equal(iris_frame1, iris_frame2)\n\n msg = \"Execution failed on sql 'iris': near \\\"iris\\\": syntax error\"\n with pytest.raises(sql.DatabaseError, match=msg):\n sql.read_sql(\"iris\", self.conn)\n\n def test_safe_names_warning(self):\n # GH 6798\n df = DataFrame([[1, 2], [3, 4]], columns=[\"a\", \"b \"]) # has a space\n # warns on create table with spaces in names\n with 
tm.assert_produces_warning():\n sql.to_sql(df, \"test_frame3_legacy\", self.conn, index=False)\n\n def test_get_schema2(self, test_frame1):\n # without providing a connection object (available for backwards comp)\n create_sql = sql.get_schema(test_frame1, \"test\")\n assert \"CREATE\" in create_sql\n\n def _get_sqlite_column_type(self, schema, column):\n\n for col in schema.split(\"\\n\"):\n if col.split()[0].strip('\"\"') == column:\n return col.split()[1]\n raise ValueError(f\"Column {column} not found\")\n\n def test_sqlite_type_mapping(self):\n\n # Test Timestamp objects (no datetime64 because of timezone) (GH9085)\n df = DataFrame(\n {\"time\": to_datetime([\"201412120154\", \"201412110254\"], utc=True)}\n )\n db = sql.SQLiteDatabase(self.conn)\n table = sql.SQLiteTable(\"test_type\", db, frame=df)\n schema = table.sql_schema()\n assert self._get_sqlite_column_type(schema, \"time\") == \"TIMESTAMP\"\n\n\n# -----------------------------------------------------------------------------\n# -- Database flavor specific tests\n\n\nclass _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):\n \"\"\"\n Base class for testing the sqlalchemy backend.\n\n Subclasses for specific database types are created below. Tests that\n deviate for each flavor are overwritten there.\n\n \"\"\"\n\n flavor: str\n\n @pytest.fixture(autouse=True, scope=\"class\")\n def setup_class(cls):\n cls.setup_import()\n cls.setup_driver()\n conn = cls.conn = cls.connect()\n conn.connect()\n\n def load_test_data_and_sql(self):\n self._load_raw_sql()\n\n @pytest.fixture(autouse=True)\n def setup_method(self, load_iris_data):\n self.load_test_data_and_sql()\n\n @classmethod\n def setup_import(cls):\n # Skip this test if SQLAlchemy not available\n if not SQLALCHEMY_INSTALLED:\n pytest.skip(\"SQLAlchemy not installed\")\n\n @classmethod\n def setup_driver(cls):\n raise NotImplementedError()\n\n @classmethod\n def connect(cls):\n raise NotImplementedError()\n\n def setup_connect(self):\n try:\n self.conn = self.connect()\n self.pandasSQL = sql.SQLDatabase(self.conn)\n # to test if connection can be made:\n self.conn.connect()\n except sqlalchemy.exc.OperationalError:\n pytest.skip(f\"Can't connect to {self.flavor} server\")\n\n def test_read_sql(self):\n self._read_sql_iris()\n\n def test_read_sql_parameter(self):\n self._read_sql_iris_parameter()\n\n def test_read_sql_named_parameter(self):\n self._read_sql_iris_named_parameter()\n\n def test_to_sql(self, test_frame1):\n self._to_sql(test_frame1)\n\n def test_to_sql_empty(self, test_frame1):\n self._to_sql_empty(test_frame1)\n\n def test_to_sql_fail(self, test_frame1):\n self._to_sql_fail(test_frame1)\n\n def test_to_sql_replace(self, test_frame1):\n self._to_sql_replace(test_frame1)\n\n def test_to_sql_append(self, test_frame1):\n self._to_sql_append(test_frame1)\n\n def test_to_sql_method_multi(self, test_frame1):\n self._to_sql(test_frame1, method=\"multi\")\n\n def test_to_sql_method_callable(self, test_frame1):\n self._to_sql_method_callable(test_frame1)\n\n def test_create_table(self):\n temp_conn = self.connect()\n temp_frame = DataFrame(\n {\"one\": [1.0, 2.0, 3.0, 4.0], \"two\": [4.0, 3.0, 2.0, 1.0]}\n )\n\n pandasSQL = sql.SQLDatabase(temp_conn)\n pandasSQL.to_sql(temp_frame, \"temp_frame\")\n\n if _gt14():\n insp = inspect(temp_conn)\n assert insp.has_table(\"temp_frame\")\n else:\n assert temp_conn.has_table(\"temp_frame\")\n\n def test_drop_table(self):\n temp_conn = self.connect()\n\n temp_frame = DataFrame(\n {\"one\": [1.0, 2.0, 3.0, 4.0], \"two\": [4.0, 3.0, 2.0, 
1.0]}\n )\n\n pandasSQL = sql.SQLDatabase(temp_conn)\n pandasSQL.to_sql(temp_frame, \"temp_frame\")\n\n if _gt14():\n insp = inspect(temp_conn)\n assert insp.has_table(\"temp_frame\")\n else:\n assert temp_conn.has_table(\"temp_frame\")\n\n pandasSQL.drop_table(\"temp_frame\")\n\n if _gt14():\n assert not insp.has_table(\"temp_frame\")\n else:\n assert not temp_conn.has_table(\"temp_frame\")\n\n def test_roundtrip(self, test_frame1):\n self._roundtrip(test_frame1)\n\n def test_execute_sql(self):\n self._execute_sql()\n\n def test_read_table(self):\n iris_frame = sql.read_sql_table(\"iris\", con=self.conn)\n self._check_iris_loaded_frame(iris_frame)\n\n def test_read_table_columns(self):\n iris_frame = sql.read_sql_table(\n \"iris\", con=self.conn, columns=[\"SepalLength\", \"SepalLength\"]\n )\n tm.equalContents(iris_frame.columns.values, [\"SepalLength\", \"SepalLength\"])\n\n def test_read_table_absent_raises(self):\n msg = \"Table this_doesnt_exist not found\"\n with pytest.raises(ValueError, match=msg):\n sql.read_sql_table(\"this_doesnt_exist\", con=self.conn)\n\n def test_default_type_conversion(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n assert issubclass(df.FloatCol.dtype.type, np.floating)\n assert issubclass(df.IntCol.dtype.type, np.integer)\n assert issubclass(df.BoolCol.dtype.type, np.bool_)\n\n # Int column with NA values stays as float\n assert issubclass(df.IntColWithNull.dtype.type, np.floating)\n # Bool column with NA values becomes object\n assert issubclass(df.BoolColWithNull.dtype.type, object)\n\n def test_bigint(self):\n # int64 should be converted to BigInteger, GH7433\n df = DataFrame(data={\"i64\": [2 ** 62]})\n df.to_sql(\"test_bigint\", self.conn, index=False)\n result = sql.read_sql_table(\"test_bigint\", self.conn)\n\n tm.assert_frame_equal(df, result)\n\n def test_default_date_load(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n # IMPORTANT - sqlite has no native date type, so shouldn't parse, but\n # MySQL SHOULD be converted.\n assert issubclass(df.DateCol.dtype.type, np.datetime64)\n\n def test_datetime_with_timezone(self):\n # edge case that converts postgresql datetime with time zone types\n # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok\n # but should be more natural, so coerce to datetime64[ns] for now\n\n def check(col):\n # check that a column is either datetime64[ns]\n # or datetime64[ns, UTC]\n if is_datetime64_dtype(col.dtype):\n\n # \"2000-01-01 00:00:00-08:00\" should convert to\n # \"2000-01-01 08:00:00\"\n assert col[0] == Timestamp(\"2000-01-01 08:00:00\")\n\n # \"2000-06-01 00:00:00-07:00\" should convert to\n # \"2000-06-01 07:00:00\"\n assert col[1] == Timestamp(\"2000-06-01 07:00:00\")\n\n elif is_datetime64tz_dtype(col.dtype):\n assert str(col.dt.tz) == \"UTC\"\n\n # \"2000-01-01 00:00:00-08:00\" should convert to\n # \"2000-01-01 08:00:00\"\n # \"2000-06-01 00:00:00-07:00\" should convert to\n # \"2000-06-01 07:00:00\"\n # GH 6415\n expected_data = [\n Timestamp(\"2000-01-01 08:00:00\", tz=\"UTC\"),\n Timestamp(\"2000-06-01 07:00:00\", tz=\"UTC\"),\n ]\n expected = Series(expected_data, name=col.name)\n tm.assert_series_equal(col, expected)\n\n else:\n raise AssertionError(\n f\"DateCol loaded with incorrect type -> {col.dtype}\"\n )\n\n # GH11216\n df = read_sql_query(\"select * from types_test_data\", self.conn)\n if not hasattr(df, \"DateColWithTz\"):\n pytest.skip(\"no column with datetime with time zone\")\n\n # this is parsed on Travis (linux), but not on macosx for 
some reason\n # even with the same versions of psycopg2 & sqlalchemy, possibly a\n # Postgresql server version difference\n col = df.DateColWithTz\n assert is_datetime64tz_dtype(col.dtype)\n\n df = read_sql_query(\n \"select * from types_test_data\", self.conn, parse_dates=[\"DateColWithTz\"]\n )\n if not hasattr(df, \"DateColWithTz\"):\n pytest.skip(\"no column with datetime with time zone\")\n col = df.DateColWithTz\n assert is_datetime64tz_dtype(col.dtype)\n assert str(col.dt.tz) == \"UTC\"\n check(df.DateColWithTz)\n\n df = concat(\n list(\n read_sql_query(\"select * from types_test_data\", self.conn, chunksize=1)\n ),\n ignore_index=True,\n )\n col = df.DateColWithTz\n assert is_datetime64tz_dtype(col.dtype)\n assert str(col.dt.tz) == \"UTC\"\n expected = sql.read_sql_table(\"types_test_data\", self.conn)\n col = expected.DateColWithTz\n assert is_datetime64tz_dtype(col.dtype)\n tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)\n\n # xref #7139\n # this might or might not be converted depending on the postgres driver\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n check(df.DateColWithTz)\n\n def test_datetime_with_timezone_roundtrip(self):\n # GH 9086\n # Write datetimetz data to a db and read it back\n # For dbs that support timestamps with timezones, should get back UTC\n # otherwise naive data should be returned\n expected = DataFrame(\n {\"A\": date_range(\"2013-01-01 09:00:00\", periods=3, tz=\"US/Pacific\")}\n )\n expected.to_sql(\"test_datetime_tz\", self.conn, index=False)\n\n if self.flavor == \"postgresql\":\n # SQLAlchemy \"timezones\" (i.e. offsets) are coerced to UTC\n expected[\"A\"] = expected[\"A\"].dt.tz_convert(\"UTC\")\n else:\n # Otherwise, timestamps are returned as local, naive\n expected[\"A\"] = expected[\"A\"].dt.tz_localize(None)\n\n result = sql.read_sql_table(\"test_datetime_tz\", self.conn)\n tm.assert_frame_equal(result, expected)\n\n result = sql.read_sql_query(\"SELECT * FROM test_datetime_tz\", self.conn)\n if self.flavor == \"sqlite\":\n # read_sql_query does not return datetime type like read_sql_table\n assert isinstance(result.loc[0, \"A\"], str)\n result[\"A\"] = to_datetime(result[\"A\"])\n tm.assert_frame_equal(result, expected)\n\n def test_out_of_bounds_datetime(self):\n # GH 26761\n data = DataFrame({\"date\": datetime(9999, 1, 1)}, index=[0])\n data.to_sql(\"test_datetime_obb\", self.conn, index=False)\n result = sql.read_sql_table(\"test_datetime_obb\", self.conn)\n expected = DataFrame([pd.NaT], columns=[\"date\"])\n tm.assert_frame_equal(result, expected)\n\n def test_naive_datetimeindex_roundtrip(self):\n # GH 23510\n # Ensure that a naive DatetimeIndex isn't converted to UTC\n dates = date_range(\"2018-01-01\", periods=5, freq=\"6H\")._with_freq(None)\n expected = DataFrame({\"nums\": range(5)}, index=dates)\n expected.to_sql(\"foo_table\", self.conn, index_label=\"info_date\")\n result = sql.read_sql_table(\"foo_table\", self.conn, index_col=\"info_date\")\n # result index with gain a name from a set_index operation; expected\n tm.assert_frame_equal(result, expected, check_names=False)\n\n def test_date_parsing(self):\n # No Parsing\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n expected_type = object if self.flavor == \"sqlite\" else np.datetime64\n assert issubclass(df.DateCol.dtype.type, expected_type)\n\n df = sql.read_sql_table(\"types_test_data\", self.conn, parse_dates=[\"DateCol\"])\n assert issubclass(df.DateCol.dtype.type, np.datetime64)\n\n df = sql.read_sql_table(\n 
\"types_test_data\", self.conn, parse_dates={\"DateCol\": \"%Y-%m-%d %H:%M:%S\"}\n )\n assert issubclass(df.DateCol.dtype.type, np.datetime64)\n\n df = sql.read_sql_table(\n \"types_test_data\",\n self.conn,\n parse_dates={\"DateCol\": {\"format\": \"%Y-%m-%d %H:%M:%S\"}},\n )\n assert issubclass(df.DateCol.dtype.type, np.datetime64)\n\n df = sql.read_sql_table(\n \"types_test_data\", self.conn, parse_dates=[\"IntDateCol\"]\n )\n assert issubclass(df.IntDateCol.dtype.type, np.datetime64)\n\n df = sql.read_sql_table(\n \"types_test_data\", self.conn, parse_dates={\"IntDateCol\": \"s\"}\n )\n assert issubclass(df.IntDateCol.dtype.type, np.datetime64)\n\n df = sql.read_sql_table(\n \"types_test_data\", self.conn, parse_dates={\"IntDateCol\": {\"unit\": \"s\"}}\n )\n assert issubclass(df.IntDateCol.dtype.type, np.datetime64)\n\n def test_datetime(self):\n df = DataFrame(\n {\"A\": date_range(\"2013-01-01 09:00:00\", periods=3), \"B\": np.arange(3.0)}\n )\n df.to_sql(\"test_datetime\", self.conn)\n\n # with read_table -> type information from schema used\n result = sql.read_sql_table(\"test_datetime\", self.conn)\n result = result.drop(\"index\", axis=1)\n tm.assert_frame_equal(result, df)\n\n # with read_sql -> no type information -> sqlite has no native\n result = sql.read_sql_query(\"SELECT * FROM test_datetime\", self.conn)\n result = result.drop(\"index\", axis=1)\n if self.flavor == \"sqlite\":\n assert isinstance(result.loc[0, \"A\"], str)\n result[\"A\"] = to_datetime(result[\"A\"])\n tm.assert_frame_equal(result, df)\n else:\n tm.assert_frame_equal(result, df)\n\n def test_datetime_NaT(self):\n df = DataFrame(\n {\"A\": date_range(\"2013-01-01 09:00:00\", periods=3), \"B\": np.arange(3.0)}\n )\n df.loc[1, \"A\"] = np.nan\n df.to_sql(\"test_datetime\", self.conn, index=False)\n\n # with read_table -> type information from schema used\n result = sql.read_sql_table(\"test_datetime\", self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql -> no type information -> sqlite has no native\n result = sql.read_sql_query(\"SELECT * FROM test_datetime\", self.conn)\n if self.flavor == \"sqlite\":\n assert isinstance(result.loc[0, \"A\"], str)\n result[\"A\"] = to_datetime(result[\"A\"], errors=\"coerce\")\n tm.assert_frame_equal(result, df)\n else:\n tm.assert_frame_equal(result, df)\n\n def test_datetime_date(self):\n # test support for datetime.date\n df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=[\"a\"])\n df.to_sql(\"test_date\", self.conn, index=False)\n res = read_sql_table(\"test_date\", self.conn)\n result = res[\"a\"]\n expected = to_datetime(df[\"a\"])\n # comes back as datetime64\n tm.assert_series_equal(result, expected)\n\n def test_datetime_time(self):\n # test support for datetime.time\n df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=[\"a\"])\n df.to_sql(\"test_time\", self.conn, index=False)\n res = read_sql_table(\"test_time\", self.conn)\n tm.assert_frame_equal(res, df)\n\n # GH8341\n # first, use the fallback to have the sqlite adapter put in place\n sqlite_conn = TestSQLiteFallback.connect()\n sql.to_sql(df, \"test_time2\", sqlite_conn, index=False)\n res = sql.read_sql_query(\"SELECT * FROM test_time2\", sqlite_conn)\n ref = df.applymap(lambda _: _.strftime(\"%H:%M:%S.%f\"))\n tm.assert_frame_equal(ref, res) # check if adapter is in place\n # then test if sqlalchemy is unaffected by the sqlite adapter\n sql.to_sql(df, \"test_time3\", self.conn, index=False)\n if self.flavor == \"sqlite\":\n res = sql.read_sql_query(\"SELECT * FROM 
test_time3\", self.conn)\n ref = df.applymap(lambda _: _.strftime(\"%H:%M:%S.%f\"))\n tm.assert_frame_equal(ref, res)\n res = sql.read_sql_table(\"test_time3\", self.conn)\n tm.assert_frame_equal(df, res)\n\n def test_mixed_dtype_insert(self):\n # see GH6509\n s1 = Series(2 ** 25 + 1, dtype=np.int32)\n s2 = Series(0.0, dtype=np.float32)\n df = DataFrame({\"s1\": s1, \"s2\": s2})\n\n # write and read again\n df.to_sql(\"test_read_write\", self.conn, index=False)\n df2 = sql.read_sql_table(\"test_read_write\", self.conn)\n\n tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)\n\n def test_nan_numeric(self):\n # NaNs in numeric float column\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [0.2, np.nan, 5.6]})\n df.to_sql(\"test_nan\", self.conn, index=False)\n\n # with read_table\n result = sql.read_sql_table(\"test_nan\", self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql\n result = sql.read_sql_query(\"SELECT * FROM test_nan\", self.conn)\n tm.assert_frame_equal(result, df)\n\n def test_nan_fullcolumn(self):\n # full NaN column (numeric float column)\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [np.nan, np.nan, np.nan]})\n df.to_sql(\"test_nan\", self.conn, index=False)\n\n # with read_table\n result = sql.read_sql_table(\"test_nan\", self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql -> not type info from table -> stays None\n df[\"B\"] = df[\"B\"].astype(\"object\")\n df[\"B\"] = None\n result = sql.read_sql_query(\"SELECT * FROM test_nan\", self.conn)\n tm.assert_frame_equal(result, df)\n\n def test_nan_string(self):\n # NaNs in string column\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [\"a\", \"b\", np.nan]})\n df.to_sql(\"test_nan\", self.conn, index=False)\n\n # NaNs are coming back as None\n df.loc[2, \"B\"] = None\n\n # with read_table\n result = sql.read_sql_table(\"test_nan\", self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql\n result = sql.read_sql_query(\"SELECT * FROM test_nan\", self.conn)\n tm.assert_frame_equal(result, df)\n\n def _get_index_columns(self, tbl_name):\n from sqlalchemy import inspect\n\n insp = inspect(self.conn)\n\n ixs = insp.get_indexes(tbl_name)\n ixs = [i[\"column_names\"] for i in ixs]\n return ixs\n\n def test_to_sql_save_index(self):\n self._to_sql_save_index()\n\n def test_transactions(self):\n self._transaction_test()\n\n def test_get_schema_create_table(self, test_frame3):\n # Use a dataframe without a bool column, since MySQL converts bool to\n # TINYINT (which read_sql_table returns as an int and causes a dtype\n # mismatch)\n\n tbl = \"test_get_schema_create_table\"\n create_sql = sql.get_schema(test_frame3, tbl, con=self.conn)\n blank_test_df = test_frame3.iloc[:0]\n\n self.drop_table(tbl)\n self.conn.execute(create_sql)\n returned_df = sql.read_sql_table(tbl, self.conn)\n tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)\n self.drop_table(tbl)\n\n def test_dtype(self):\n cols = [\"A\", \"B\"]\n data = [(0.8, True), (0.9, None)]\n df = DataFrame(data, columns=cols)\n df.to_sql(\"dtype_test\", self.conn)\n df.to_sql(\"dtype_test2\", self.conn, dtype={\"B\": sqlalchemy.TEXT})\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n sqltype = meta.tables[\"dtype_test2\"].columns[\"B\"].type\n assert isinstance(sqltype, sqlalchemy.TEXT)\n msg = \"The type of B is not a SQLAlchemy type\"\n with pytest.raises(ValueError, match=msg):\n df.to_sql(\"error\", self.conn, dtype={\"B\": str})\n\n # GH9083\n df.to_sql(\"dtype_test3\", self.conn, dtype={\"B\": 
sqlalchemy.String(10)})\n meta.reflect()\n sqltype = meta.tables[\"dtype_test3\"].columns[\"B\"].type\n assert isinstance(sqltype, sqlalchemy.String)\n assert sqltype.length == 10\n\n # single dtype\n df.to_sql(\"single_dtype_test\", self.conn, dtype=sqlalchemy.TEXT)\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n sqltypea = meta.tables[\"single_dtype_test\"].columns[\"A\"].type\n sqltypeb = meta.tables[\"single_dtype_test\"].columns[\"B\"].type\n assert isinstance(sqltypea, sqlalchemy.TEXT)\n assert isinstance(sqltypeb, sqlalchemy.TEXT)\n\n def test_notna_dtype(self):\n cols = {\n \"Bool\": Series([True, None]),\n \"Date\": Series([datetime(2012, 5, 1), None]),\n \"Int\": Series([1, None], dtype=\"object\"),\n \"Float\": Series([1.1, None]),\n }\n df = DataFrame(cols)\n\n tbl = \"notna_dtype_test\"\n df.to_sql(tbl, self.conn)\n returned_df = sql.read_sql_table(tbl, self.conn) # noqa\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n if self.flavor == \"mysql\":\n my_type = sqltypes.Integer\n else:\n my_type = sqltypes.Boolean\n\n col_dict = meta.tables[tbl].columns\n\n assert isinstance(col_dict[\"Bool\"].type, my_type)\n assert isinstance(col_dict[\"Date\"].type, sqltypes.DateTime)\n assert isinstance(col_dict[\"Int\"].type, sqltypes.Integer)\n assert isinstance(col_dict[\"Float\"].type, sqltypes.Float)\n\n def test_double_precision(self):\n V = 1.23456789101112131415\n\n df = DataFrame(\n {\n \"f32\": Series([V], dtype=\"float32\"),\n \"f64\": Series([V], dtype=\"float64\"),\n \"f64_as_f32\": Series([V], dtype=\"float64\"),\n \"i32\": Series([5], dtype=\"int32\"),\n \"i64\": Series([5], dtype=\"int64\"),\n }\n )\n\n df.to_sql(\n \"test_dtypes\",\n self.conn,\n index=False,\n if_exists=\"replace\",\n dtype={\"f64_as_f32\": sqlalchemy.Float(precision=23)},\n )\n res = sql.read_sql_table(\"test_dtypes\", self.conn)\n\n # check precision of float64\n assert np.round(df[\"f64\"].iloc[0], 14) == np.round(res[\"f64\"].iloc[0], 14)\n\n # check sql types\n meta = sqlalchemy.schema.MetaData(bind=self.conn)\n meta.reflect()\n col_dict = meta.tables[\"test_dtypes\"].columns\n assert str(col_dict[\"f32\"].type) == str(col_dict[\"f64_as_f32\"].type)\n assert isinstance(col_dict[\"f32\"].type, sqltypes.Float)\n assert isinstance(col_dict[\"f64\"].type, sqltypes.Float)\n assert isinstance(col_dict[\"i32\"].type, sqltypes.Integer)\n assert isinstance(col_dict[\"i64\"].type, sqltypes.BigInteger)\n\n def test_connectable_issue_example(self):\n # This tests the example raised in issue\n # https://github.com/pandas-dev/pandas/issues/10104\n\n def foo(connection):\n query = \"SELECT test_foo_data FROM test_foo_data\"\n return sql.read_sql_query(query, con=connection)\n\n def bar(connection, data):\n data.to_sql(name=\"test_foo_data\", con=connection, if_exists=\"append\")\n\n def main(connectable):\n with connectable.connect() as conn:\n with conn.begin():\n if _gt14():\n # https://github.com/sqlalchemy/sqlalchemy/commit/\n # 00b5c10846e800304caa86549ab9da373b42fa5d#r48323973\n foo_data = foo(conn)\n bar(conn, foo_data)\n else:\n foo_data = conn.run_callable(foo)\n conn.run_callable(bar, foo_data)\n\n DataFrame({\"test_foo_data\": [0, 1, 2]}).to_sql(\"test_foo_data\", self.conn)\n main(self.conn)\n\n @pytest.mark.parametrize(\n \"input\",\n [{\"foo\": [np.inf]}, {\"foo\": [-np.inf]}, {\"foo\": [-np.inf], \"infe0\": [\"bar\"]}],\n )\n def test_to_sql_with_negative_npinf(self, input, request):\n # GH 34431\n\n df = DataFrame(input)\n\n if self.flavor == \"mysql\":\n # 
GH 36465\n # The input {\"foo\": [-np.inf], \"infe0\": [\"bar\"]} does not raise any error\n # for pymysql version >= 0.10\n # TODO: remove this version check after GH 36465 is fixed\n import pymysql\n\n if pymysql.VERSION[0:3] >= (0, 10, 0) and \"infe0\" in df.columns:\n mark = pytest.mark.xfail(reason=\"GH 36465\")\n request.node.add_marker(mark)\n\n msg = \"inf cannot be used with MySQL\"\n with pytest.raises(ValueError, match=msg):\n df.to_sql(\"foobar\", self.conn, index=False)\n else:\n df.to_sql(\"foobar\", self.conn, index=False)\n res = sql.read_sql_table(\"foobar\", self.conn)\n tm.assert_equal(df, res)\n\n def test_temporary_table(self):\n test_data = \"Hello, World!\"\n expected = DataFrame({\"spam\": [test_data]})\n Base = declarative.declarative_base()\n\n class Temporary(Base):\n __tablename__ = \"temp_test\"\n __table_args__ = {\"prefixes\": [\"TEMPORARY\"]}\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)\n\n Session = sa_session.sessionmaker(bind=self.conn)\n session = Session()\n with session.transaction:\n conn = session.connection()\n Temporary.__table__.create(conn)\n session.add(Temporary(spam=test_data))\n session.flush()\n df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)\n\n tm.assert_frame_equal(df, expected)\n\n # -- SQL Engine tests (in the base class for now)\n def test_invalid_engine(self, test_frame1):\n msg = \"engine must be one of 'auto', 'sqlalchemy'\"\n with pytest.raises(ValueError, match=msg):\n self._to_sql_with_sql_engine(test_frame1, \"bad_engine\")\n\n def test_options_sqlalchemy(self, test_frame1):\n # use the set option\n with pd.option_context(\"io.sql.engine\", \"sqlalchemy\"):\n self._to_sql_with_sql_engine(test_frame1)\n\n def test_options_auto(self, test_frame1):\n # use the set option\n with pd.option_context(\"io.sql.engine\", \"auto\"):\n self._to_sql_with_sql_engine(test_frame1)\n\n def test_options_get_engine(self):\n assert isinstance(get_engine(\"sqlalchemy\"), SQLAlchemyEngine)\n\n with pd.option_context(\"io.sql.engine\", \"sqlalchemy\"):\n assert isinstance(get_engine(\"auto\"), SQLAlchemyEngine)\n assert isinstance(get_engine(\"sqlalchemy\"), SQLAlchemyEngine)\n\n with pd.option_context(\"io.sql.engine\", \"auto\"):\n assert isinstance(get_engine(\"auto\"), SQLAlchemyEngine)\n assert isinstance(get_engine(\"sqlalchemy\"), SQLAlchemyEngine)\n\n def test_get_engine_auto_error_message(self):\n # Expect different error messages from get_engine(engine=\"auto\")\n # if engines aren't installed vs. 
are installed but bad version\n pass\n # TODO fill this in when we add more engines\n\n\nclass _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):\n def test_transactions(self):\n pytest.skip(\"Nested transactions rollbacks don't work with Pandas\")\n\n\nclass _TestSQLiteAlchemy:\n \"\"\"\n Test the sqlalchemy backend against an in-memory sqlite database.\n\n \"\"\"\n\n flavor = \"sqlite\"\n\n @classmethod\n def connect(cls):\n return sqlalchemy.create_engine(\"sqlite:///:memory:\")\n\n @classmethod\n def setup_driver(cls):\n # sqlite3 is built-in\n cls.driver = None\n\n def test_default_type_conversion(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n assert issubclass(df.FloatCol.dtype.type, np.floating)\n assert issubclass(df.IntCol.dtype.type, np.integer)\n\n # sqlite has no boolean type, so integer type is returned\n assert issubclass(df.BoolCol.dtype.type, np.integer)\n\n # Int column with NA values stays as float\n assert issubclass(df.IntColWithNull.dtype.type, np.floating)\n\n # Non-native Bool column with NA values stays as float\n assert issubclass(df.BoolColWithNull.dtype.type, np.floating)\n\n def test_default_date_load(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n # IMPORTANT - sqlite has no native date type, so shouldn't parse, but\n assert not issubclass(df.DateCol.dtype.type, np.datetime64)\n\n def test_bigint_warning(self):\n # test no warning for BIGINT (to support int64) is raised (GH7433)\n df = DataFrame({\"a\": [1, 2]}, dtype=\"int64\")\n df.to_sql(\"test_bigintwarning\", self.conn, index=False)\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n sql.read_sql_table(\"test_bigintwarning\", self.conn)\n assert len(w) == 0\n\n\nclass _TestMySQLAlchemy:\n \"\"\"\n Test the sqlalchemy backend against an MySQL database.\n\n \"\"\"\n\n flavor = \"mysql\"\n port = 3306\n\n @classmethod\n def connect(cls):\n return sqlalchemy.create_engine(\n f\"mysql+{cls.driver}://root@localhost:{cls.port}/pandas\",\n connect_args=cls.connect_args,\n )\n\n @classmethod\n def setup_driver(cls):\n pymysql = pytest.importorskip(\"pymysql\")\n cls.driver = \"pymysql\"\n cls.connect_args = {\"client_flag\": pymysql.constants.CLIENT.MULTI_STATEMENTS}\n\n def test_default_type_conversion(self):\n df = sql.read_sql_table(\"types_test_data\", self.conn)\n\n assert issubclass(df.FloatCol.dtype.type, np.floating)\n assert issubclass(df.IntCol.dtype.type, np.integer)\n\n # MySQL has no real BOOL type (it's an alias for TINYINT)\n assert issubclass(df.BoolCol.dtype.type, np.integer)\n\n # Int column with NA values stays as float\n assert issubclass(df.IntColWithNull.dtype.type, np.floating)\n\n # Bool column with NA = int column with NA values => becomes float\n assert issubclass(df.BoolColWithNull.dtype.type, np.floating)\n\n def test_read_procedure(self):\n import pymysql\n\n # see GH7324. 
Although it is more an api test, it is added to the\n # mysql tests as sqlite does not have stored procedures\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [0.1, 0.2, 0.3]})\n df.to_sql(\"test_procedure\", self.conn, index=False)\n\n proc = \"\"\"DROP PROCEDURE IF EXISTS get_testdb;\n\n CREATE PROCEDURE get_testdb ()\n\n BEGIN\n SELECT * FROM test_procedure;\n END\"\"\"\n\n connection = self.conn.connect()\n trans = connection.begin()\n try:\n r1 = connection.execute(proc) # noqa\n trans.commit()\n except pymysql.Error:\n trans.rollback()\n raise\n\n res1 = sql.read_sql_query(\"CALL get_testdb();\", self.conn)\n tm.assert_frame_equal(df, res1)\n\n # test delegation to read_sql_query\n res2 = sql.read_sql(\"CALL get_testdb();\", self.conn)\n tm.assert_frame_equal(df, res2)\n\n\nclass _TestPostgreSQLAlchemy:\n \"\"\"\n Test the sqlalchemy backend against an PostgreSQL database.\n\n \"\"\"\n\n flavor = \"postgresql\"\n port = 5432\n\n @classmethod\n def connect(cls):\n return sqlalchemy.create_engine(\n f\"postgresql+{cls.driver}://postgres:postgres@localhost:{cls.port}/pandas\"\n )\n\n @classmethod\n def setup_driver(cls):\n pytest.importorskip(\"psycopg2\")\n cls.driver = \"psycopg2\"\n\n def test_schema_support(self):\n # only test this for postgresql (schema's not supported in\n # mysql/sqlite)\n df = DataFrame({\"col1\": [1, 2], \"col2\": [0.1, 0.2], \"col3\": [\"a\", \"n\"]})\n\n # create a schema\n self.conn.execute(\"DROP SCHEMA IF EXISTS other CASCADE;\")\n self.conn.execute(\"CREATE SCHEMA other;\")\n\n # write dataframe to different schema's\n df.to_sql(\"test_schema_public\", self.conn, index=False)\n df.to_sql(\n \"test_schema_public_explicit\", self.conn, index=False, schema=\"public\"\n )\n df.to_sql(\"test_schema_other\", self.conn, index=False, schema=\"other\")\n\n # read dataframes back in\n res1 = sql.read_sql_table(\"test_schema_public\", self.conn)\n tm.assert_frame_equal(df, res1)\n res2 = sql.read_sql_table(\"test_schema_public_explicit\", self.conn)\n tm.assert_frame_equal(df, res2)\n res3 = sql.read_sql_table(\n \"test_schema_public_explicit\", self.conn, schema=\"public\"\n )\n tm.assert_frame_equal(df, res3)\n res4 = sql.read_sql_table(\"test_schema_other\", self.conn, schema=\"other\")\n tm.assert_frame_equal(df, res4)\n msg = \"Table test_schema_other not found\"\n with pytest.raises(ValueError, match=msg):\n sql.read_sql_table(\"test_schema_other\", self.conn, schema=\"public\")\n\n # different if_exists options\n\n # create a schema\n self.conn.execute(\"DROP SCHEMA IF EXISTS other CASCADE;\")\n self.conn.execute(\"CREATE SCHEMA other;\")\n\n # write dataframe with different if_exists options\n df.to_sql(\"test_schema_other\", self.conn, schema=\"other\", index=False)\n df.to_sql(\n \"test_schema_other\",\n self.conn,\n schema=\"other\",\n index=False,\n if_exists=\"replace\",\n )\n df.to_sql(\n \"test_schema_other\",\n self.conn,\n schema=\"other\",\n index=False,\n if_exists=\"append\",\n )\n res = sql.read_sql_table(\"test_schema_other\", self.conn, schema=\"other\")\n tm.assert_frame_equal(concat([df, df], ignore_index=True), res)\n\n # specifying schema in user-provided meta\n\n # The schema won't be applied on another Connection\n # because of transactional schemas\n if isinstance(self.conn, sqlalchemy.engine.Engine):\n engine2 = self.connect()\n pdsql = sql.SQLDatabase(engine2, schema=\"other\")\n pdsql.to_sql(df, \"test_schema_other2\", index=False)\n pdsql.to_sql(df, \"test_schema_other2\", index=False, if_exists=\"replace\")\n pdsql.to_sql(df, 
\"test_schema_other2\", index=False, if_exists=\"append\")\n res1 = sql.read_sql_table(\"test_schema_other2\", self.conn, schema=\"other\")\n res2 = pdsql.read_table(\"test_schema_other2\")\n tm.assert_frame_equal(res1, res2)\n\n def test_copy_from_callable_insertion_method(self):\n # GH 8953\n # Example in io.rst found under _io.sql.method\n # not available in sqlite, mysql\n def psql_insert_copy(table, conn, keys, data_iter):\n # gets a DBAPI connection that can provide a cursor\n dbapi_conn = conn.connection\n with dbapi_conn.cursor() as cur:\n s_buf = StringIO()\n writer = csv.writer(s_buf)\n writer.writerows(data_iter)\n s_buf.seek(0)\n\n columns = \", \".join([f'\"{k}\"' for k in keys])\n if table.schema:\n table_name = f\"{table.schema}.{table.name}\"\n else:\n table_name = table.name\n\n sql_query = f\"COPY {table_name} ({columns}) FROM STDIN WITH CSV\"\n cur.copy_expert(sql=sql_query, file=s_buf)\n\n expected = DataFrame({\"col1\": [1, 2], \"col2\": [0.1, 0.2], \"col3\": [\"a\", \"n\"]})\n expected.to_sql(\n \"test_copy_insert\", self.conn, index=False, method=psql_insert_copy\n )\n result = sql.read_sql_table(\"test_copy_insert\", self.conn)\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\[email protected]\nclass TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):\n pass\n\n\[email protected]\[email protected]\nclass TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):\n pass\n\n\[email protected]\[email protected]\nclass TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):\n pass\n\n\[email protected]\[email protected]\nclass TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):\n pass\n\n\[email protected]\nclass TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):\n pass\n\n\[email protected]\nclass TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):\n pass\n\n\n# -----------------------------------------------------------------------------\n# -- Test Sqlite / MySQL fallback\n\n\[email protected]\nclass TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):\n \"\"\"\n Test the fallback mode against an in-memory sqlite database.\n\n \"\"\"\n\n flavor = \"sqlite\"\n\n @classmethod\n def connect(cls):\n return sqlite3.connect(\":memory:\")\n\n def setup_connect(self):\n self.conn = self.connect()\n\n @pytest.fixture(autouse=True)\n def setup_method(self, load_iris_data):\n self.pandasSQL = sql.SQLiteDatabase(self.conn)\n\n def test_read_sql(self):\n self._read_sql_iris()\n\n def test_read_sql_parameter(self):\n self._read_sql_iris_parameter()\n\n def test_read_sql_named_parameter(self):\n self._read_sql_iris_named_parameter()\n\n def test_to_sql(self, test_frame1):\n self._to_sql(test_frame1)\n\n def test_to_sql_empty(self, test_frame1):\n self._to_sql_empty(test_frame1)\n\n def test_to_sql_fail(self, test_frame1):\n self._to_sql_fail(test_frame1)\n\n def test_to_sql_replace(self, test_frame1):\n self._to_sql_replace(test_frame1)\n\n def test_to_sql_append(self, test_frame1):\n self._to_sql_append(test_frame1)\n\n def test_to_sql_method_multi(self, test_frame1):\n # GH 29921\n self._to_sql(test_frame1, method=\"multi\")\n\n def test_create_and_drop_table(self):\n temp_frame = DataFrame(\n {\"one\": [1.0, 2.0, 3.0, 4.0], \"two\": [4.0, 3.0, 2.0, 1.0]}\n )\n\n self.pandasSQL.to_sql(temp_frame, \"drop_test_frame\")\n\n assert self.pandasSQL.has_table(\"drop_test_frame\")\n\n self.pandasSQL.drop_table(\"drop_test_frame\")\n\n assert not self.pandasSQL.has_table(\"drop_test_frame\")\n\n def test_roundtrip(self, 
test_frame1):\n self._roundtrip(test_frame1)\n\n def test_execute_sql(self):\n self._execute_sql()\n\n def test_datetime_date(self):\n # test support for datetime.date\n df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=[\"a\"])\n df.to_sql(\"test_date\", self.conn, index=False)\n res = read_sql_query(\"SELECT * FROM test_date\", self.conn)\n if self.flavor == \"sqlite\":\n # comes back as strings\n tm.assert_frame_equal(res, df.astype(str))\n elif self.flavor == \"mysql\":\n tm.assert_frame_equal(res, df)\n\n def test_datetime_time(self):\n # test support for datetime.time, GH #8341\n df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=[\"a\"])\n df.to_sql(\"test_time\", self.conn, index=False)\n res = read_sql_query(\"SELECT * FROM test_time\", self.conn)\n if self.flavor == \"sqlite\":\n # comes back as strings\n expected = df.applymap(lambda _: _.strftime(\"%H:%M:%S.%f\"))\n tm.assert_frame_equal(res, expected)\n\n def _get_index_columns(self, tbl_name):\n ixs = sql.read_sql_query(\n \"SELECT * FROM sqlite_master WHERE type = 'index' \"\n + f\"AND tbl_name = '{tbl_name}'\",\n self.conn,\n )\n ix_cols = []\n for ix_name in ixs.name:\n ix_info = sql.read_sql_query(f\"PRAGMA index_info({ix_name})\", self.conn)\n ix_cols.append(ix_info.name.tolist())\n return ix_cols\n\n def test_to_sql_save_index(self):\n self._to_sql_save_index()\n\n def test_transactions(self):\n self._transaction_test()\n\n def _get_sqlite_column_type(self, table, column):\n recs = self.conn.execute(f\"PRAGMA table_info({table})\")\n for cid, name, ctype, not_null, default, pk in recs:\n if name == column:\n return ctype\n raise ValueError(f\"Table {table}, column {column} not found\")\n\n def test_dtype(self):\n if self.flavor == \"mysql\":\n pytest.skip(\"Not applicable to MySQL legacy\")\n cols = [\"A\", \"B\"]\n data = [(0.8, True), (0.9, None)]\n df = DataFrame(data, columns=cols)\n df.to_sql(\"dtype_test\", self.conn)\n df.to_sql(\"dtype_test2\", self.conn, dtype={\"B\": \"STRING\"})\n\n # sqlite stores Boolean values as INTEGER\n assert self._get_sqlite_column_type(\"dtype_test\", \"B\") == \"INTEGER\"\n\n assert self._get_sqlite_column_type(\"dtype_test2\", \"B\") == \"STRING\"\n msg = r\"B \\(<class 'bool'>\\) not a string\"\n with pytest.raises(ValueError, match=msg):\n df.to_sql(\"error\", self.conn, dtype={\"B\": bool})\n\n # single dtype\n df.to_sql(\"single_dtype_test\", self.conn, dtype=\"STRING\")\n assert self._get_sqlite_column_type(\"single_dtype_test\", \"A\") == \"STRING\"\n assert self._get_sqlite_column_type(\"single_dtype_test\", \"B\") == \"STRING\"\n\n def test_notna_dtype(self):\n if self.flavor == \"mysql\":\n pytest.skip(\"Not applicable to MySQL legacy\")\n\n cols = {\n \"Bool\": Series([True, None]),\n \"Date\": Series([datetime(2012, 5, 1), None]),\n \"Int\": Series([1, None], dtype=\"object\"),\n \"Float\": Series([1.1, None]),\n }\n df = DataFrame(cols)\n\n tbl = \"notna_dtype_test\"\n df.to_sql(tbl, self.conn)\n\n assert self._get_sqlite_column_type(tbl, \"Bool\") == \"INTEGER\"\n assert self._get_sqlite_column_type(tbl, \"Date\") == \"TIMESTAMP\"\n assert self._get_sqlite_column_type(tbl, \"Int\") == \"INTEGER\"\n assert self._get_sqlite_column_type(tbl, \"Float\") == \"REAL\"\n\n def test_illegal_names(self):\n # For sqlite, these should work fine\n df = DataFrame([[1, 2], [3, 4]], columns=[\"a\", \"b\"])\n\n msg = \"Empty table or column name specified\"\n with pytest.raises(ValueError, match=msg):\n df.to_sql(\"\", self.conn)\n\n for ndx, weird_name in enumerate(\n 
[\n \"test_weird_name]\",\n \"test_weird_name[\",\n \"test_weird_name`\",\n 'test_weird_name\"',\n \"test_weird_name'\",\n \"_b.test_weird_name_01-30\",\n '\"_b.test_weird_name_01-30\"',\n \"99beginswithnumber\",\n \"12345\",\n \"\\xe9\",\n ]\n ):\n df.to_sql(weird_name, self.conn)\n sql.table_exists(weird_name, self.conn)\n\n df2 = DataFrame([[1, 2], [3, 4]], columns=[\"a\", weird_name])\n c_tbl = f\"test_weird_col_name{ndx:d}\"\n df2.to_sql(c_tbl, self.conn)\n sql.table_exists(c_tbl, self.conn)\n\n\n# -----------------------------------------------------------------------------\n# -- Old tests from 0.13.1 (before refactor using sqlalchemy)\n\n\n_formatters = {\n datetime: \"'{}'\".format,\n str: \"'{}'\".format,\n np.str_: \"'{}'\".format,\n bytes: \"'{}'\".format,\n float: \"{:.8f}\".format,\n int: \"{:d}\".format,\n type(None): lambda x: \"NULL\",\n np.float64: \"{:.10f}\".format,\n bool: \"'{!s}'\".format,\n}\n\n\ndef format_query(sql, *args):\n processed_args = []\n for arg in args:\n if isinstance(arg, float) and isna(arg):\n arg = None\n\n formatter = _formatters[type(arg)]\n processed_args.append(formatter(arg))\n\n return sql % tuple(processed_args)\n\n\ndef tquery(query, con=None):\n \"\"\"Replace removed sql.tquery function\"\"\"\n res = sql.execute(query, con=con).fetchall()\n return None if res is None else list(res)\n\n\nclass TestXSQLite:\n def setup_method(self):\n self.conn = sqlite3.connect(\":memory:\")\n\n def teardown_method(self):\n self.conn.close()\n\n def drop_table(self, table_name):\n cur = self.conn.cursor()\n cur.execute(f\"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}\")\n self.conn.commit()\n\n def test_basic(self):\n frame = tm.makeTimeDataFrame()\n sql.to_sql(frame, name=\"test_table\", con=self.conn, index=False)\n result = sql.read_sql(\"select * from test_table\", self.conn)\n\n # HACK! 
Change this once indexes are handled properly.\n result.index = frame.index\n\n expected = frame\n tm.assert_frame_equal(result, frame)\n\n frame[\"txt\"] = [\"a\"] * len(frame)\n frame2 = frame.copy()\n new_idx = Index(np.arange(len(frame2))) + 10\n frame2[\"Idx\"] = new_idx.copy()\n sql.to_sql(frame2, name=\"test_table2\", con=self.conn, index=False)\n result = sql.read_sql(\"select * from test_table2\", self.conn, index_col=\"Idx\")\n expected = frame.copy()\n expected.index = new_idx\n expected.index.name = \"Idx\"\n tm.assert_frame_equal(expected, result)\n\n def test_write_row_by_row(self):\n frame = tm.makeTimeDataFrame()\n frame.iloc[0, 0] = np.nan\n create_sql = sql.get_schema(frame, \"test\")\n cur = self.conn.cursor()\n cur.execute(create_sql)\n\n ins = \"INSERT INTO test VALUES (%s, %s, %s, %s)\"\n for _, row in frame.iterrows():\n fmt_sql = format_query(ins, *row)\n tquery(fmt_sql, con=self.conn)\n\n self.conn.commit()\n\n result = sql.read_sql(\"select * from test\", con=self.conn)\n result.index = frame.index\n tm.assert_frame_equal(result, frame, rtol=1e-3)\n\n def test_execute(self):\n frame = tm.makeTimeDataFrame()\n create_sql = sql.get_schema(frame, \"test\")\n cur = self.conn.cursor()\n cur.execute(create_sql)\n ins = \"INSERT INTO test VALUES (?, ?, ?, ?)\"\n\n row = frame.iloc[0]\n sql.execute(ins, self.conn, params=tuple(row))\n self.conn.commit()\n\n result = sql.read_sql(\"select * from test\", self.conn)\n result.index = frame.index[:1]\n tm.assert_frame_equal(result, frame[:1])\n\n def test_schema(self):\n frame = tm.makeTimeDataFrame()\n create_sql = sql.get_schema(frame, \"test\")\n lines = create_sql.splitlines()\n for line in lines:\n tokens = line.split(\" \")\n if len(tokens) == 2 and tokens[0] == \"A\":\n assert tokens[1] == \"DATETIME\"\n\n create_sql = sql.get_schema(frame, \"test\", keys=[\"A\", \"B\"])\n lines = create_sql.splitlines()\n assert 'PRIMARY KEY (\"A\", \"B\")' in create_sql\n cur = self.conn.cursor()\n cur.execute(create_sql)\n\n def test_execute_fail(self):\n create_sql = \"\"\"\n CREATE TABLE test\n (\n a TEXT,\n b TEXT,\n c REAL,\n PRIMARY KEY (a, b)\n );\n \"\"\"\n cur = self.conn.cursor()\n cur.execute(create_sql)\n\n sql.execute('INSERT INTO test VALUES(\"foo\", \"bar\", 1.234)', self.conn)\n sql.execute('INSERT INTO test VALUES(\"foo\", \"baz\", 2.567)', self.conn)\n\n with pytest.raises(sql.DatabaseError, match=\"Execution failed on sql\"):\n sql.execute('INSERT INTO test VALUES(\"foo\", \"bar\", 7)', self.conn)\n\n def test_execute_closed_connection(self):\n create_sql = \"\"\"\n CREATE TABLE test\n (\n a TEXT,\n b TEXT,\n c REAL,\n PRIMARY KEY (a, b)\n );\n \"\"\"\n cur = self.conn.cursor()\n cur.execute(create_sql)\n\n sql.execute('INSERT INTO test VALUES(\"foo\", \"bar\", 1.234)', self.conn)\n self.conn.close()\n\n with tm.external_error_raised(sqlite3.ProgrammingError):\n tquery(\"select * from test\", con=self.conn)\n\n def test_keyword_as_column_names(self):\n df = DataFrame({\"From\": np.ones(5)})\n sql.to_sql(df, con=self.conn, name=\"testkeywords\", index=False)\n\n def test_onecolumn_of_integer(self):\n # GH 3628\n # a column_of_integers dataframe should transfer well to sql\n\n mono_df = DataFrame([1, 2], columns=[\"c0\"])\n sql.to_sql(mono_df, con=self.conn, name=\"mono_df\", index=False)\n # computing the sum via sql\n con_x = self.conn\n the_sum = sum(my_c0[0] for my_c0 in con_x.execute(\"select * from mono_df\"))\n # it should not fail, and gives 3 ( Issue #3628 )\n assert the_sum == 3\n\n result = 
sql.read_sql(\"select * from mono_df\", con_x)\n tm.assert_frame_equal(result, mono_df)\n\n def test_if_exists(self):\n df_if_exists_1 = DataFrame({\"col1\": [1, 2], \"col2\": [\"A\", \"B\"]})\n df_if_exists_2 = DataFrame({\"col1\": [3, 4, 5], \"col2\": [\"C\", \"D\", \"E\"]})\n table_name = \"table_if_exists\"\n sql_select = f\"SELECT * FROM {table_name}\"\n\n msg = \"'notvalidvalue' is not valid for if_exists\"\n with pytest.raises(ValueError, match=msg):\n sql.to_sql(\n frame=df_if_exists_1,\n con=self.conn,\n name=table_name,\n if_exists=\"notvalidvalue\",\n )\n self.drop_table(table_name)\n\n # test if_exists='fail'\n sql.to_sql(\n frame=df_if_exists_1, con=self.conn, name=table_name, if_exists=\"fail\"\n )\n msg = \"Table 'table_if_exists' already exists\"\n with pytest.raises(ValueError, match=msg):\n sql.to_sql(\n frame=df_if_exists_1, con=self.conn, name=table_name, if_exists=\"fail\"\n )\n # test if_exists='replace'\n sql.to_sql(\n frame=df_if_exists_1,\n con=self.conn,\n name=table_name,\n if_exists=\"replace\",\n index=False,\n )\n assert tquery(sql_select, con=self.conn) == [(1, \"A\"), (2, \"B\")]\n sql.to_sql(\n frame=df_if_exists_2,\n con=self.conn,\n name=table_name,\n if_exists=\"replace\",\n index=False,\n )\n assert tquery(sql_select, con=self.conn) == [(3, \"C\"), (4, \"D\"), (5, \"E\")]\n self.drop_table(table_name)\n\n # test if_exists='append'\n sql.to_sql(\n frame=df_if_exists_1,\n con=self.conn,\n name=table_name,\n if_exists=\"fail\",\n index=False,\n )\n assert tquery(sql_select, con=self.conn) == [(1, \"A\"), (2, \"B\")]\n sql.to_sql(\n frame=df_if_exists_2,\n con=self.conn,\n name=table_name,\n if_exists=\"append\",\n index=False,\n )\n assert tquery(sql_select, con=self.conn) == [\n (1, \"A\"),\n (2, \"B\"),\n (3, \"C\"),\n (4, \"D\"),\n (5, \"E\"),\n ]\n self.drop_table(table_name)\n" ]
[ [ "pandas._testing.equalContents", "pandas.to_datetime", "pandas.Series", "pandas.io.sql.read_sql_table", "pandas.io.sql.read_sql", "pandas.DataFrame", "pandas.core.dtypes.common.is_datetime64tz_dtype", "numpy.round", "numpy.dtype", "numpy.random.randn", "pandas.io.sql.SQLiteDatabase", "pandas.DataFrame.from_records", "pandas.core.dtypes.common.is_datetime64_dtype", "pandas.isna", "pandas._testing.assert_frame_equal", "pandas.io.sql.get_schema", "pandas.io.sql.SQLTable", "numpy.arange", "pandas.io.sql.read_sql_query", "pandas.io.sql.execute", "pandas.io.sql.SQLiteTable", "pandas.io.sql._get_valid_mysql_name", "pandas._testing.assert_series_equal", "pandas.concat", "pandas._testing.assert_produces_warning", "pandas.option_context", "pandas.io.sql.get_engine", "pandas.MultiIndex.from_product", "pandas.date_range", "pandas.io.sql._get_valid_sqlite_name", "pandas._testing.external_error_raised", "pandas.io.sql.has_table", "pandas._testing.assert_equal", "pandas.io.sql.SQLDatabase", "pandas._testing.ensure_clean", "numpy.ones", "pandas.io.sql._gt14", "pandas._testing.makeTimeDataFrame", "pandas.io.sql.table_exists", "pandas.Timestamp", "pandas.io.sql.to_sql" ] ]
scheckmedia/dl-framework
[ "8fea39e166fda0ff8fa51696831bf5cb42f3ed10" ]
[ "dlf/losses/ntxent_loss.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\nfrom dlf.core.registry import register_loss\n\n\n@register_loss('NTXentLoss', 'ntxentloss', 'ntx_ent_loss', 'ntexnt_loss')\nclass NTXentLoss(tf.keras.losses.Loss):\n \"\"\"Implementation of NTXentLoss like used in SimCLR\n\n # Arguments\n batch_size: int. Used batch size\n temperature: float. Temperature to scale features\n\n # YAML Configuration\n ```yaml\n loss:\n NTXentLoss:\n batch_size: 16\n temperature: 0.5\n ```\n\n # References\n - [SimCLR](https://arxiv.org/pdf/2002.05709.pdf)\n - [Tensorflow implementation](https://github.com/google-research/simclr/blob/f3ca72f7efc085ad4abdb65f7a63459d9cfda78f/objective.py)\n \"\"\"\n\n def __init__(self, batch_size, temperature=0.5):\n\n super().__init__(name='ntexnt_loss')\n self.temperature = temperature\n self.batch_size = batch_size\n self.LARGE_NUM = 1e9\n self.masks = tf.one_hot(tf.range(batch_size),\n batch_size, dtype=np.float32)\n\n self.criterion = tf.keras.losses.CategoricalCrossentropy(\n from_logits=True, reduction=tf.keras.losses.Reduction.SUM)\n self.labels = tf.one_hot(\n tf.range(self.batch_size), self.batch_size * 2)\n\n def __call__(self, zis, zjs):\n\n logits_aa = tf.matmul(zis, zis, transpose_b=True) / self.temperature\n logits_aa = logits_aa - self.masks * self.LARGE_NUM\n logits_bb = tf.matmul(zjs, zjs, transpose_b=True) / self.temperature\n logits_bb = logits_bb - self.masks * self.LARGE_NUM\n\n logits_ab = tf.matmul(zis, zjs, transpose_b=True) / self.temperature\n logits_ba = tf.matmul(zjs, zis, transpose_b=True) / self.temperature\n\n loss_a = self.criterion(y_pred=tf.concat(\n [logits_ab, logits_aa], 1), y_true=self.labels)\n loss_b = self.criterion(y_pred=tf.concat(\n [logits_ba, logits_bb], 1), y_true=self.labels)\n loss = loss_a + loss_b\n\n return loss, (logits_ab, logits_ba)\n" ]
[ [ "tensorflow.matmul", "tensorflow.keras.losses.CategoricalCrossentropy", "tensorflow.concat", "tensorflow.range" ] ]
concerttttt/RCNNs-on-Win64
[ "603f46efcf9fb74eb0c0975e021ce2c7db184bb7" ]
[ "RCNN-Python/utils/color_space.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy\nimport skimage.io\nimport skimage.color\n\ndef convert_color(I, name):\n if len(I.shape) != 3:\n I = skimage.color.gray2rgb(I)\n\n converters = {'rgb' : lambda I: I,\n 'lab' : to_Lab,\n 'rgi' : to_rgI,\n 'hsv' : to_HSV,\n 'nrgb' : to_nRGB,\n 'hue' : to_Hue}\n\n return converters[name](I)\n\ndef to_grey(I):\n grey_img = (255 * skimage.color.rgb2grey(I)).astype(numpy.uint8)\n return numpy.dstack([grey_img, grey_img, grey_img])\n\ndef to_Lab(I):\n lab = skimage.color.rgb2lab(I)\n l = 255 * lab[:, :, 0] / 100 # L component ranges from 0 to 100\n a = 127 + lab[:, :, 1] # a component ranges from -127 to 127\n b = 127 + lab[:, :, 2] # b component ranges from -127 to 127\n return numpy.dstack([l, a, b]).astype(numpy.uint8)\n\ndef to_rgI(I):\n rgi = I.copy()\n rgi[:, :, 2] = to_grey(I)[:, :, 0]\n return rgi\n\ndef to_HSV(I):\n return (255 * skimage.color.rgb2hsv(I)).astype(numpy.uint8)\n\ndef to_nRGB(I):\n _I = I / 255.0\n norm_I = numpy.sqrt(_I[:, :, 0] ** 2 + _I[:, :, 1] ** 2 + _I[:, :, 2] ** 2)\n norm_r = (_I[:, :, 0] / norm_I * 255).astype(numpy.uint8)\n norm_g = (_I[:, :, 1] / norm_I * 255).astype(numpy.uint8)\n norm_b = (_I[:, :, 2] / norm_I * 255).astype(numpy.uint8)\n return numpy.dstack([norm_r, norm_g, norm_b])\n\ndef to_Hue(I):\n I_h = to_HSV(I)[:, :, 0]\n return numpy.dstack([I_h, I_h, I_h])\n\n" ]
[ [ "numpy.sqrt", "numpy.dstack" ] ]
shelper/pypeline
[ "70e800a8b40592e9cde9491c77860143a732e6d3" ]
[ "pypeline/impl/oct/spectrum.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\npypeline.spectrum\n~~~~~~~~~~~~~~~~~\n\nthis module defines the spectrum or interferogram in OCT\nit should be the subclass of BaseData\n\"\"\"\n\nfrom .basedata import BaseData\nfrom scipy import signal\n\n\nclass Spectrum(BaseData):\n \"\"\"\n spectrum data based off the BaseData class with additional attributes and memeber functions\n Only supports Gaussian shape in k-space spectrum at this moment\n\n Attributes:\n wc: center wavelength in unit nm\n range: wavelength range from end to end\n \"\"\"\n def set_range(self, wc, range):\n self.wc, self.range = wc, range\n\n @property\n def wavelengths(self):\n \"\"\"\n return the wavelengths at begein, center and end for the spectrum\n\n Returns:\n [w0, wc, we]: the 3 wavelength at begin, center and end respectively\n \"\"\"\n return self.wc - self.range/2, self.wc, self.wc + self.range/2\n\n @property\n def wavenumbers(self):\n \"\"\"\n return the wavelengths at begein, center and end for the spectrum\n\n Returns:\n [k0, kc, ke]: the 3 wavenumber at begin, center and end respectively\n \"\"\"\n k0, ke = 1E7/(w0 + range/2), 1E7/(w0 - range/2)\n kc = (k0 + ke) / 2\n return k0, kc, ke\n\n def set_profile(self, fwhm):\n \"\"\"\n set the envelope shapre of the spectrum in wavenumber domain\n\n Args:\n fwhm (float): the full width half maxium of the spectrum\n\n Returns:\n None\n \"\"\"\n\n self.profile = signal.gaussian(self.shape[0], self.fwhm / 2.235)\n self.data *= self.shape\n\n\n def linearize(self, ref, base='wavenumber', method='cubic'):\n pass\n\n\n\n\n\n\n\n" ]
[ [ "scipy.signal.gaussian" ] ]
zburning/gluon-nlp
[ "101ce13bad3c26c802a4ff8ef47954fd2d0555d2" ]
[ "scripts/bert/pretraining_utils.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Utilities for pre-training.\"\"\"\nimport time\nimport os\nimport logging\nimport random\nimport multiprocessing\n\nimport numpy as np\nimport mxnet as mx\nimport gluonnlp as nlp\n\nfrom data.pretrain import BERTSamplerFn, BERTDataLoaderFn\nfrom data.dataloader import SimpleDatasetFn, DatasetLoader\nfrom create_pretraining_data import create_training_instances\n\n\n__all__ = ['get_model_loss', 'get_pretrain_data_npz', 'get_dummy_dataloader',\n 'save_parameters', 'save_states', 'evaluate', 'split_and_load',\n 'get_pretrain_data_text', 'generate_dev_set', 'profile']\n\ndef get_model_loss(ctx, model, pretrained, dataset_name, vocab, dtype,\n ckpt_dir=None, start_step=None):\n \"\"\"Get model for pre-training.\n\n Parameters\n ----------\n ctx : Context or list of Context\n Contexts to initialize model\n model : str\n The name of the model, 'bert_12_768_12' or 'bert_24_1024_16'.\n pretrained : bool\n Whether to use pre-trained model weights as initialization.\n dataset_name : str\n The name of the dataset, which is used to retrieve the corresponding vocabulary file\n when the vocab argument is not provided. Options include 'book_corpus_wiki_en_uncased',\n 'book_corpus_wiki_en_cased', 'wiki_multilingual_uncased', 'wiki_multilingual_cased',\n 'wiki_cn_cased'.\n vocab : BERTVocab or None\n The vocabulary for the model. 
If not provided, The vocabulary will be constructed\n based on dataset_name.\n dtype : float\n Data type of the model for training.\n ckpt_dir : str\n The path to the checkpoint directory.\n start_step : int or None\n If provided, it loads the model from the corresponding checkpoint from the ckpt_dir.\n\n Returns\n -------\n BERTForPretrain : the model for pre-training.\n BERTVocab : the vocabulary.\n \"\"\"\n # model\n model, vocabulary = nlp.model.get_model(model, dataset_name=dataset_name, vocab=vocab,\n pretrained=pretrained, ctx=ctx)\n\n if not pretrained:\n model.initialize(init=mx.init.Normal(0.02), ctx=ctx)\n model.cast(dtype)\n\n if ckpt_dir and start_step:\n param_path = os.path.join(ckpt_dir, '%07d.params'%start_step)\n nlp.utils.load_parameters(model, param_path, ctx=ctx)\n logging.info('Loading step %d checkpoints from %s.', start_step, param_path)\n\n model.hybridize(static_alloc=True, static_shape=True)\n\n # losses\n nsp_loss = mx.gluon.loss.SoftmaxCELoss()\n mlm_loss = mx.gluon.loss.SoftmaxCELoss()\n nsp_loss.hybridize(static_alloc=True, static_shape=True)\n mlm_loss.hybridize(static_alloc=True, static_shape=True)\n\n model = BERTForPretrain(model, nsp_loss, mlm_loss, len(vocabulary))\n return model, vocabulary\n\nclass BERTPretrainDataset(mx.gluon.data.ArrayDataset):\n \"\"\"Dataset for BERT pre-training.\n\n Each record contains the following numpy ndarrays: input_ids, masked_lm_ids,\n masked_lm_positions, masked_lm_weights, next_sentence_labels, segment_ids, valid_lengths.\n\n Parameters\n ----------\n filename : str\n Path to the input text file.\n tokenizer : BERTTokenizer\n The BERTTokenizer\n max_seq_length : int\n The hard limit of maximum sequence length of sentence pairs\n short_seq_prob : float\n The probability of sampling sequences shorter than the max_seq_length.\n masked_lm_prob : float\n The probability of replacing texts with masks/random words/original words.\n max_predictions_per_seq : int\n The hard limit of the number of predictions for masked words\n whole_word_mask : bool\n Whether to use whole word masking.\n vocab : BERTVocab\n The BERTVocab\n num_workers : int\n The number of worker processes for dataset contruction.\n worker_pool : multiprocessing.Pool\n The worker process pool. 
Must be provided if num_workers > 1.\n \"\"\"\n def __init__(self, filename, tokenizer, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, whole_word_mask,\n vocab, num_workers=1, worker_pool=None):\n logging.debug('start to load file %s ...', filename)\n dupe_factor = 1\n instances = create_training_instances(([filename], tokenizer, max_seq_length,\n short_seq_prob, masked_lm_prob,\n max_predictions_per_seq,\n whole_word_mask, vocab,\n dupe_factor, num_workers,\n worker_pool, None))\n super(BERTPretrainDataset, self).__init__(*instances)\n\ndef get_pretrain_data_text(data, batch_size, num_ctxes, shuffle,\n num_buckets, vocab, tokenizer, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, whole_word_mask,\n num_parts=1, part_idx=0, num_workers=1):\n \"\"\"Get a data iterator from raw text documents.\n\n Parameters\n ----------\n batch_size : int\n The batch size per GPU.\n num_ctxes : int\n The number of GPUs.\n shuffle : bool\n Whether to shuffle the data.\n num_buckets : int\n The number of buckets for the FixedBucketSampler for training.\n vocab : BERTVocab\n The vocabulary.\n tokenizer : BERTTokenizer or BERTSPTokenizer\n The tokenizer.\n max_seq_length : int\n The hard limit of maximum sequence length of sentence pairs.\n short_seq_prob : float\n The probability of sampling sequences shorter than the max_seq_length.\n masked_lm_prob : float\n The probability of replacing texts with masks/random words/original words.\n max_predictions_per_seq : int\n The hard limit of the number of predictions for masked words\n whole_word_mask : bool\n Whether to use whole word masking.\n num_parts : int\n The number of partitions for the dataset.\n part_idx : int\n The index of the partition to read.\n num_workers : int\n The number of worker processes for dataset contruction.\n \"\"\"\n num_files = len(nlp.utils.glob(data))\n logging.info('%d files are found.', num_files)\n assert num_files >= num_parts, \\\n 'The number of training text files must be no less than the number of ' \\\n 'workers/partitions (%d). 
Only %d files at %s are found.'%(num_parts, num_files, data)\n dataset_params = {'tokenizer': tokenizer, 'max_seq_length': max_seq_length,\n 'short_seq_prob': short_seq_prob, 'masked_lm_prob': masked_lm_prob,\n 'max_predictions_per_seq': max_predictions_per_seq, 'vocab':vocab,\n 'whole_word_mask': whole_word_mask}\n dataset_fn = SimpleDatasetFn(BERTPretrainDataset, dataset_params)\n sampler_fn = BERTSamplerFn(batch_size, shuffle, num_ctxes, num_buckets)\n dataloader_fn = BERTDataLoaderFn(num_ctxes, vocab)\n\n split_sampler = nlp.data.SplitSampler(num_files, num_parts=num_parts, part_index=part_idx)\n dataloader = DatasetLoader(data, split_sampler, dataset_fn, sampler_fn, dataloader_fn,\n num_dataset_workers=num_workers)\n return dataloader\n\n\ndef get_pretrain_data_npz(data, batch_size, num_ctxes, shuffle, num_buckets,\n vocab, num_parts=1, part_idx=0, num_workers=1):\n \"\"\"Get a data iterator from pre-processed npz files.\n\n Parameters\n ----------\n batch_size : int\n The batch size per GPU.\n num_ctxes : int\n The number of GPUs.\n shuffle : bool\n Whether to shuffle the data.\n num_buckets : int\n The number of buckets for the FixedBucketSampler for training.\n vocab : BERTVocab\n The vocabulary.\n num_parts : int\n The number of partitions for the dataset.\n part_idx : int\n The index of the partition to read.\n num_workers : int\n The number of worker processes for dataset contruction.\n \"\"\"\n num_files = len(nlp.utils.glob(data))\n logging.info('%d files are found.', num_files)\n assert num_files >= num_parts, \\\n 'The number of training text files must be no less than the number of ' \\\n 'workers/partitions (%d). Only %d files at %s are found.'%(num_parts, num_files, data)\n #split_sampler = nlp.data.SplitSampler(num_files, num_parts=num_parts, part_index=part_idx)\n dataset_params = {'allow_pickle' : True}\n dataset_fn = SimpleDatasetFn(nlp.data.NumpyDataset, dataset_params)\n sampler_fn = BERTSamplerFn(batch_size, shuffle, num_ctxes, num_buckets)\n dataloader_fn = BERTDataLoaderFn(num_ctxes, vocab)\n\n split_sampler = nlp.data.SplitSampler(num_files, num_parts=num_parts, part_index=part_idx)\n dataloader = DatasetLoader(data, split_sampler, dataset_fn, sampler_fn, dataloader_fn,\n num_dataset_workers=num_workers)\n return dataloader\n\n\ndef get_dummy_dataloader(batch_size, seq_len, max_predict):\n \"\"\"Return a dummy data loader which returns a fixed data batch of target shape\"\"\"\n class DummyIter():\n def __init__(self, batch):\n self._batch = batch\n\n def __iter__(self):\n while True:\n yield self._batch\n data_batch = ((mx.nd.zeros((batch_size, seq_len)),\n mx.nd.zeros((batch_size, max_predict)),\n mx.nd.zeros((batch_size, max_predict)),\n mx.nd.zeros((batch_size, max_predict)),\n mx.nd.ones((batch_size,)) * seq_len,\n mx.nd.zeros((batch_size, seq_len)),\n mx.nd.ones((batch_size,)) * seq_len))\n return DummyIter(data_batch)\n\n\ndef save_parameters(step_num, model, ckpt_dir):\n \"\"\"Save the model parameter, marked by step_num.\"\"\"\n param_path = os.path.join(ckpt_dir, '%07d.params'%step_num)\n logging.info('[step %d] Saving model params to %s.', step_num, param_path)\n nlp.utils.save_parameters(model, param_path)\n\ndef save_states(step_num, trainer, ckpt_dir, local_rank=0):\n \"\"\"Save the trainer states, marked by step_num.\"\"\"\n trainer_path = os.path.join(ckpt_dir, '%07d.states.%02d'%(step_num, local_rank))\n logging.info('[step %d] Saving trainer states to %s.', step_num, trainer_path)\n nlp.utils.save_states(trainer, trainer_path)\n\ndef 
log_noacc(begin_time, running_num_tks, running_mlm_loss, running_nsp_loss, step_num,\n trainer, log_interval):\n \"\"\"Log training progress.\"\"\"\n end_time = time.time()\n duration = end_time - begin_time\n throughput = running_num_tks / duration / 1000.0\n running_mlm_loss = running_mlm_loss / log_interval\n running_nsp_loss = running_nsp_loss / log_interval\n lr = trainer.learning_rate if trainer else 0\n # pylint: disable=line-too-long\n logging.info('[step {}]\\tmlm_loss={:7.5f}\\tnsp_loss={:5.2f}\\tthroughput={:.1f}K tks/s\\tlr={:.7f} time={:.2f}, latency={:.1f} ms/batch'\n .format(step_num, running_mlm_loss.asscalar(), running_nsp_loss.asscalar(),\n throughput.asscalar(), lr, duration, duration*1000/log_interval))\n # pylint: enable=line-too-long\n\ndef log(begin_time, running_num_tks, running_mlm_loss, running_nsp_loss, step_num,\n mlm_metric, nsp_metric, trainer, log_interval):\n \"\"\"Log training progress.\"\"\"\n end_time = time.time()\n duration = end_time - begin_time\n throughput = running_num_tks / duration / 1000.0\n running_mlm_loss = running_mlm_loss / log_interval\n running_nsp_loss = running_nsp_loss / log_interval\n lr = trainer.learning_rate if trainer else 0\n # pylint: disable=line-too-long\n logging.info('[step {}]\\tmlm_loss={:7.5f}\\tmlm_acc={:4.2f}\\tnsp_loss={:5.2f}\\tnsp_acc={:5.2f}\\tthroughput={:.1f}K tks/s\\tlr={:.7f} time={:.2f}, latency={:.1f} ms/batch'\n .format(step_num, running_mlm_loss.asscalar(), mlm_metric.get()[1] * 100, running_nsp_loss.asscalar(),\n nsp_metric.get()[1] * 100, throughput.asscalar(), lr, duration, duration*1000/log_interval))\n # pylint: enable=line-too-long\n\ndef split_and_load(arrs, ctx):\n \"\"\"split and load arrays to a list of contexts\"\"\"\n assert isinstance(arrs, (list, tuple))\n # split and load\n loaded_arrs = [mx.gluon.utils.split_and_load(arr, ctx, even_split=False) for arr in arrs]\n return zip(*loaded_arrs)\n\nclass BERTForPretrain(mx.gluon.Block):\n \"\"\"Model for pre-training MLM and NSP with BERT.\n\n Parameters\n ----------\n bert: BERTModel\n Bidirectional encoder with transformer.\n mlm_loss : Loss or None\n nsp_loss : Loss or None\n vocab_size : int\n prefix : str or None\n See document of `mx.gluon.Block`.\n params : ParameterDict or None\n See document of `mx.gluon.Block`.\n \"\"\"\n\n def __init__(self, bert, mlm_loss, nsp_loss, vocab_size, prefix=None, params=None):\n super(BERTForPretrain, self).__init__(prefix=prefix, params=params)\n self.bert = bert\n self.mlm_loss = mlm_loss\n self.nsp_loss = nsp_loss\n self._vocab_size = vocab_size\n\n def forward(self, input_id, masked_id, masked_position, masked_weight,\n next_sentence_label=None, segment_id=None, valid_length=None):\n # pylint: disable=arguments-differ\n \"\"\"Predict with BERT for MLM and NSP. \"\"\"\n num_masks = masked_weight.sum() + 1e-8\n valid_length = valid_length.reshape(-1)\n masked_id = masked_id.reshape(-1)\n _, _, classified, decoded = self.bert(input_id, segment_id, valid_length, masked_position)\n decoded = decoded.reshape((-1, self._vocab_size))\n ls1 = self.mlm_loss(decoded.astype('float32', copy=False),\n masked_id, masked_weight.reshape((-1, 1)))\n ls2 = self.nsp_loss(classified.astype('float32', copy=False), next_sentence_label)\n ls1 = ls1.sum() / num_masks\n ls2 = ls2.mean()\n return classified, decoded, ls1, ls2\n\ndef evaluate(data_eval, model, ctx, log_interval, dtype):\n \"\"\"Evaluation function.\"\"\"\n logging.info('Running evaluation ... 
')\n mlm_metric = nlp.metric.MaskedAccuracy()\n nsp_metric = nlp.metric.MaskedAccuracy()\n mlm_metric.reset()\n nsp_metric.reset()\n\n eval_begin_time = time.time()\n begin_time = time.time()\n step_num = 0\n running_mlm_loss = running_nsp_loss = 0\n total_mlm_loss = total_nsp_loss = 0\n running_num_tks = 0\n for _, data_batch in enumerate(data_eval):\n step_num += 1\n\n data_list = split_and_load(data_batch, ctx)\n ns_label_list, ns_pred_list = [], []\n mask_label_list, mask_pred_list, mask_weight_list = [], [], []\n for data in data_list:\n (input_id, masked_id, masked_position, masked_weight, \\\n next_sentence_label, segment_id, valid_length) = data\n valid_length = valid_length.astype(dtype, copy=False)\n out = model(input_id, masked_id, masked_position, masked_weight, \\\n next_sentence_label, segment_id, valid_length)\n classified, decoded, ls1, ls2 = out\n masked_id = masked_id.reshape(-1)\n ns_label_list.append(next_sentence_label)\n ns_pred_list.append(classified)\n mask_label_list.append(masked_id)\n mask_pred_list.append(decoded)\n mask_weight_list.append(masked_weight)\n\n valid_length = valid_length.astype('float32', copy=False)\n running_mlm_loss += ls1.as_in_context(mx.cpu())\n running_nsp_loss += ls2.as_in_context(mx.cpu())\n running_num_tks += valid_length.sum().as_in_context(mx.cpu())\n nsp_metric.update(ns_label_list, ns_pred_list)\n mlm_metric.update(mask_label_list, mask_pred_list, mask_weight_list)\n\n # logging\n if (step_num + 1) % (log_interval) == 0:\n total_mlm_loss += running_mlm_loss\n total_nsp_loss += running_nsp_loss\n log(begin_time, running_num_tks, running_mlm_loss, running_nsp_loss,\n step_num, mlm_metric, nsp_metric, None, log_interval)\n begin_time = time.time()\n running_mlm_loss = running_nsp_loss = running_num_tks = 0\n mlm_metric.reset_local()\n nsp_metric.reset_local()\n\n mx.nd.waitall()\n eval_end_time = time.time()\n # accumulate losses from last few batches, too\n if running_mlm_loss != 0:\n total_mlm_loss += running_mlm_loss\n total_nsp_loss += running_nsp_loss\n total_mlm_loss /= step_num\n total_nsp_loss /= step_num\n logging.info('Eval mlm_loss={:.3f}\\tmlm_acc={:.1f}\\tnsp_loss={:.3f}\\tnsp_acc={:.1f}\\t'\n .format(total_mlm_loss.asscalar(), mlm_metric.get_global()[1] * 100,\n total_nsp_loss.asscalar(), nsp_metric.get_global()[1] * 100))\n logging.info('Eval cost={:.1f}s'.format(eval_end_time - eval_begin_time))\n\n\ndef generate_dev_set(tokenizer, vocab, cache_file, args):\n \"\"\"Generate validation set.\"\"\"\n # set random seed to generate dev data deterministically\n np.random.seed(0)\n random.seed(0)\n mx.random.seed(0)\n worker_pool = multiprocessing.Pool()\n eval_files = nlp.utils.glob(args.data_eval)\n num_files = len(eval_files)\n assert num_files > 0, 'Number of eval files must be greater than 0.' 
\\\n 'Only found %d files at %s'%(num_files, args.data_eval)\n logging.info('Generating validation set from %d files on rank 0.', len(eval_files))\n create_training_instances((eval_files, tokenizer, args.max_seq_length,\n args.short_seq_prob, args.masked_lm_prob,\n args.max_predictions_per_seq,\n args.whole_word_mask, vocab,\n 1, args.num_data_workers,\n worker_pool, cache_file))\n logging.info('Done generating validation set on rank 0.')\n\ndef profile(curr_step, start_step, end_step, profile_name='profile.json',\n early_exit=True):\n \"\"\"profile the program between [start_step, end_step).\"\"\"\n if curr_step == start_step:\n mx.nd.waitall()\n mx.profiler.set_config(profile_memory=False, profile_symbolic=True,\n profile_imperative=True, filename=profile_name,\n aggregate_stats=True)\n mx.profiler.set_state('run')\n elif curr_step == end_step:\n mx.nd.waitall()\n mx.profiler.set_state('stop')\n logging.info(mx.profiler.dumps())\n mx.profiler.dump()\n if early_exit:\n exit()\n" ]
[ [ "numpy.random.seed" ] ]
patrickmelix/Python4ChemistryTools
[ "a2891045edb44f2ff408a26855f6edb45328447f" ]
[ "move_atom.py" ]
[ "#!/usr/bin/env python3\n#\n# moves a chosen atom from a ase-compatible input around and saves all the coordinate files as xyz\n#\n# by Patrick Melix\n# 2017/03/17\n#\n# Usage:\n# a) Call this script, answer the questions, done.\n# b) Call this script with follogin arguments: <deltaX> <deltaY> <deltaZ> <nX> <nY> <nZ> <atomNumber> <input.xyz>\n# c) Load the module and set the four global variables and call .main()\n#\n\nimport sys, os\nfrom ase import Atoms, io\nimport numpy as np\n\n#global variables\nglobal delta\nglobal nSteps\nglobal atomNumber\nglobal xyzFile\ndelta = []\nnSteps = []\natomNumber = 0\nxyzFile = ''\n\n\ndef main():\n print('Hello!')\n\n #global Vars\n global delta\n global nSteps\n global atomNumber\n global xyzFile\n\n #input from variable\n if len(delta) is not 0:\n print('You provided input through global variables, trying to use them')\n\n #input from argument\n elif len(sys.argv) is 9:\n print('Provided input through arguments, trying to use them')\n delta, nSteps, atomNumber, xyzFile = getArgumentsInput(sys.argv)\n\n #read input from stdin\n elif len(sys.argv) is 1:\n delta, nSteps, atomNumber, xyzFile = getArgsSTDInput()\n\n #ERROR\n else:\n print('Input could not be gathered, error!')\n sys.exit(1)\n\n #read xyz\n atoms = io.read(xyzFile)\n\n #get atom of interest\n atomIdx = int(atomNumber)-1\n atomOfInterest = atoms[atomIdx]\n\n #loop over x,y,z\n xyz = ['x', 'y', 'z']\n for direct in range(3):\n i = 0\n vec = np.array([0, 0, 0], dtype='float')\n vec[direct] = float(delta[direct])\n newAtomsPlus = atoms.copy()\n newAtomsMinus = atoms.copy()\n\n #add delta\n while i < int(nSteps[direct]):\n i += 1\n newAtomsPlus[atomIdx].position += vec\n newAtomsMinus[atomIdx].position -= vec\n filenamePlus = xyz[direct] + '+' + str(i) + '.xyz'\n filenameMinus = xyz[direct] + '-' + str(i) + '.xyz'\n\n #write files\n io.write(filenamePlus, newAtomsPlus)\n io.write(filenameMinus, newAtomsMinus)\n\n print(\"Finished!\")\n\n\n\n\n#########################\n# Functions\n#########################\n\ndef getArgumentsInput(args):\n delta = args[1:4]\n nSteps = args[4:7]\n atomNumber = args[7]\n xyzFile = args[8]\n return delta, nSteps, atomNumber, xyzFile\n\ndef getArgsSTDInput():\n delta = input('Give me three delta values for x, y and z: ')\n nSteps = input('Give me three numbers for the number of steps in each direction for each x, y and z: ')\n atomNumber = input('Which atom should be moved (number in the xyz: ')\n xyzFile = input('Give me the name of the xyz file: ')\n return delta, nSteps, atomNumber, xyzFile\n\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array" ] ]
caiostringari/deepwaves
[ "2ed647f24e5965c0b5a2c52f3a7640e0df27c207" ]
[ "tracking/track.py" ]
[ "\"\"\"\r\nOptical flow.\r\n\r\nOriginal SORT code is from https://github.com/abewley/sort/blob/master/sort.py#L239\r\n\r\nPROGRAM : SORT.py\r\nPOURPOSE : Track detected waves using SORT.\r\nAUTHOR : Caio Eadi Stringari\r\nEMAIL : [email protected]\r\nV2.0 : 30/09/2020 [Caio Stringari]\r\n\"\"\"\r\n\r\nimport re\r\nimport os\r\nimport argparse\r\n\r\nfrom tqdm import tqdm\r\n\r\nfrom glob import glob\r\nfrom natsort import natsorted\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom filterpy.kalman import KalmanFilter\r\n\r\nfrom skimage.io import imread\r\n\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\n\r\n\r\ndef linear_assignment(cost_matrix):\r\n try:\r\n import lap\r\n _, x, y = lap.lapjv(cost_matrix, extend_cost=True)\r\n return np.array([[y[i], i] for i in x if i >= 0])\r\n except ImportError:\r\n from scipy.optimize import linear_sum_assignment\r\n x, y = linear_sum_assignment(cost_matrix)\r\n return np.array(list(zip(x, y)))\r\n\r\n\r\ndef iou_batch(bb_test, bb_gt):\r\n \"\"\"\r\n From SORT: Computes IUO between two bboxes in the form [l,t,w,h]\r\n \"\"\"\r\n bb_gt = np.expand_dims(bb_gt, 0)\r\n bb_test = np.expand_dims(bb_test, 1)\r\n\r\n xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0])\r\n yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])\r\n xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])\r\n yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])\r\n w = np.maximum(0., xx2 - xx1)\r\n h = np.maximum(0., yy2 - yy1)\r\n wh = w * h\r\n o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])\r\n + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)\r\n return(o)\r\n\r\n\r\ndef convert_bbox_to_z(bbox):\r\n \"\"\"\r\n Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form\r\n [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is\r\n the aspect ratio\r\n \"\"\"\r\n w = bbox[2] - bbox[0]\r\n h = bbox[3] - bbox[1]\r\n x = bbox[0] + w / 2.\r\n y = bbox[1] + h / 2.\r\n s = w * h # scale is just area\r\n r = w / float(h)\r\n return np.array([x, y, s, r]).reshape((4, 1))\r\n\r\n\r\ndef convert_x_to_bbox(x, score=None):\r\n \"\"\"\r\n Takes a bounding box in the centre form [x,y,s,r] and returns it in the form\r\n [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right\r\n \"\"\"\r\n w = np.sqrt(x[2] * x[3])\r\n h = x[2] / w\r\n if not score:\r\n return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2.]).reshape((1, 4))\r\n else:\r\n return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score]).reshape((1, 5))\r\n\r\n\r\nclass KalmanBoxTracker(object):\r\n \"\"\"\r\n This class represents the internal state of individual tracked objects observed as bbox.\r\n \"\"\"\r\n count = 0\r\n\r\n def __init__(self, bbox):\r\n \"\"\"\r\n Initialises a tracker using initial bounding box.\r\n \"\"\"\r\n # define constant velocity model\r\n self.kf = KalmanFilter(dim_x=7, dim_z=4)\r\n self.kf.F = np.array([[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 1], [\r\n 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]])\r\n self.kf.H = np.array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [\r\n 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]])\r\n\r\n self.kf.R[2:, 2:] *= 10.\r\n self.kf.P[4:, 4:] *= 1000. 
# give high uncertainty to the unobservable initial velocities\r\n self.kf.P *= 10.\r\n self.kf.Q[-1, -1] *= 0.01\r\n self.kf.Q[4:, 4:] *= 0.01\r\n\r\n self.kf.x[:4] = convert_bbox_to_z(bbox)\r\n self.time_since_update = 0\r\n self.id = KalmanBoxTracker.count\r\n KalmanBoxTracker.count += 1\r\n self.history = []\r\n self.hits = 0\r\n self.hit_streak = 0\r\n self.age = 0\r\n\r\n def update(self, bbox):\r\n \"\"\"\r\n Updates the state vector with observed bbox.\r\n \"\"\"\r\n self.time_since_update = 0\r\n self.history = []\r\n self.hits += 1\r\n self.hit_streak += 1\r\n self.kf.update(convert_bbox_to_z(bbox))\r\n\r\n def predict(self):\r\n \"\"\"\r\n Advances the state vector and returns the predicted bounding box estimate.\r\n \"\"\"\r\n if((self.kf.x[6] + self.kf.x[2]) <= 0):\r\n self.kf.x[6] *= 0.0\r\n self.kf.predict()\r\n self.age += 1\r\n if(self.time_since_update > 0):\r\n self.hit_streak = 0\r\n self.time_since_update += 1\r\n self.history.append(convert_x_to_bbox(self.kf.x))\r\n return self.history[-1]\r\n\r\n def get_state(self):\r\n \"\"\"\r\n Returns the current bounding box estimate.\r\n \"\"\"\r\n return convert_x_to_bbox(self.kf.x)\r\n\r\n\r\ndef associate_detections_to_trackers(detections, trackers, iou_threshold=0.3):\r\n \"\"\"\r\n Assigns detections to tracked object (both represented as bounding boxes)\r\n\r\n Returns 3 lists of matches, unmatched_detections and unmatched_trackers\r\n \"\"\"\r\n if(len(trackers) == 0):\r\n return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)\r\n\r\n iou_matrix = iou_batch(detections, trackers)\r\n\r\n if min(iou_matrix.shape) > 0:\r\n a = (iou_matrix > iou_threshold).astype(np.int32)\r\n if a.sum(1).max() == 1 and a.sum(0).max() == 1:\r\n matched_indices = np.stack(np.where(a), axis=1)\r\n else:\r\n matched_indices = linear_assignment(-iou_matrix)\r\n else:\r\n matched_indices = np.empty(shape=(0, 2))\r\n\r\n unmatched_detections = []\r\n for d, det in enumerate(detections):\r\n if(d not in matched_indices[:, 0]):\r\n unmatched_detections.append(d)\r\n unmatched_trackers = []\r\n for t, trk in enumerate(trackers):\r\n if(t not in matched_indices[:, 1]):\r\n unmatched_trackers.append(t)\r\n\r\n # filter out matched with low IOU\r\n matches = []\r\n for m in matched_indices:\r\n if(iou_matrix[m[0], m[1]] < iou_threshold):\r\n unmatched_detections.append(m[0])\r\n unmatched_trackers.append(m[1])\r\n else:\r\n matches.append(m.reshape(1, 2))\r\n if(len(matches) == 0):\r\n matches = np.empty((0, 2), dtype=int)\r\n else:\r\n matches = np.concatenate(matches, axis=0)\r\n\r\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)\r\n\r\n\r\nclass Sort(object):\r\n def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):\r\n \"\"\"\r\n Sets key parameters for SORT\r\n \"\"\"\r\n self.max_age = max_age\r\n self.min_hits = min_hits\r\n self.iou_threshold = iou_threshold\r\n self.trackers = []\r\n self.frame_count = 0\r\n\r\n def update(self, dets=np.empty((0, 5))):\r\n \"\"\"\r\n Params:\r\n dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]\r\n Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).\r\n Returns the a similar array, where the last column is the object ID.\r\n\r\n NOTE: The number of objects returned may differ from the number of detections provided.\r\n \"\"\"\r\n self.frame_count += 1\r\n # get predicted locations from existing trackers.\r\n 
trks = np.zeros((len(self.trackers), 5))\r\n to_del = []\r\n ret = []\r\n for t, trk in enumerate(trks):\r\n pos = self.trackers[t].predict()[0]\r\n trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]\r\n if np.any(np.isnan(pos)):\r\n to_del.append(t)\r\n trks = np.ma.compress_rows(np.ma.masked_invalid(trks))\r\n for t in reversed(to_del):\r\n self.trackers.pop(t)\r\n matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(\r\n dets, trks, self.iou_threshold)\r\n\r\n # update matched trackers with assigned detections\r\n for m in matched:\r\n self.trackers[m[1]].update(dets[m[0], :])\r\n\r\n # create and initialise new trackers for unmatched detections\r\n for i in unmatched_dets:\r\n trk = KalmanBoxTracker(dets[i, :])\r\n self.trackers.append(trk)\r\n i = len(self.trackers)\r\n for trk in reversed(self.trackers):\r\n d = trk.get_state()[0]\r\n if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):\r\n # +1 as MOT benchmark requires positive\r\n ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))\r\n i -= 1\r\n # remove dead tracklet\r\n if(trk.time_since_update > self.max_age):\r\n self.trackers.pop(i)\r\n if(len(ret) > 0):\r\n return np.concatenate(ret)\r\n return np.empty((0, 5))\r\n\r\n\r\ndef ellipse_to_bbox(df):\r\n\r\n minc = []\r\n minr = []\r\n dx = []\r\n dy = []\r\n for r, row in df.iterrows():\r\n\r\n ux = row[\"ir\"] * np.cos(np.deg2rad(row[\"theta_ij\"]))\r\n uy = row[\"ir\"] * np.sin(np.deg2rad(row[\"theta_ij\"]))\r\n vx = row[\"jr\"] * np.cos(np.deg2rad(row[\"theta_ij\"]) + np.pi/2)\r\n vy = row[\"jr\"] * np.sin(np.deg2rad(row[\"theta_ij\"]) + np.pi/2)\r\n\r\n bbox_halfwidth = np.sqrt(ux*ux + vx*vx)\r\n bbox_halfheight = np.sqrt(uy*uy + vy*vy)\r\n\r\n bbox_ul_corner = (row[\"ic\"] - bbox_halfwidth,\r\n row[\"jc\"] - bbox_halfheight)\r\n\r\n minc.append(bbox_ul_corner[0])\r\n minr.append(bbox_ul_corner[1])\r\n dx.append(bbox_halfwidth*2)\r\n dy.append(bbox_halfheight*2)\r\n\r\n return np.vstack([minc, minr, dx, dy]).T\r\n\r\n\r\ndef main():\r\n \"\"\"Call the main program.\"\"\"\r\n # read detections\r\n df = pd.read_csv(args.detections[0])\r\n df.drop_duplicates()\r\n\r\n # select only class 1\r\n try:\r\n df = df.loc[df[\"class\"] == 1]\r\n except Exception:\r\n pass\r\n\r\n # tracking from bounding boxes\r\n if not args.from_ellipses:\r\n targets = [\"minc\", \"minr\", \"dx\", \"dy\"]\r\n for t in targets:\r\n if t not in df.keys():\r\n raise ValueError(\r\n \"Key \\\"{}\\\" must be present in the data.\".format(t))\r\n # tacking from ellipses\r\n else:\r\n targets = [\"ic\", \"jc\", \"ir\", \"jr\", \"theta_ij\"]\r\n for t in targets:\r\n if t not in df.keys():\r\n raise ValueError(\r\n \"Key \\\"{}\\\" must be present in the data.\".format(t))\r\n\r\n # instanciate the tracker\r\n mot_tracker = Sort(max_age=args.max_age,\r\n min_hits=args.min_hits,\r\n iou_threshold=args.iou_threshold)\r\n\r\n pbar = tqdm(total=df.groupby(\"frame\").ngroups)\r\n with open(args.output[0], 'w') as f:\r\n\r\n # wirte header\r\n f.write(\"frame,track,minc,minr,dx,dy,ic,jc,ir,jr,theta_ij\\n\")\r\n\r\n # loop over unique frames\r\n for frame, gdf in df.groupby(\"frame\"):\r\n\r\n # detections for a given frame\r\n if not args.from_ellipses:\r\n # dataframe has a bounding box\r\n dets = gdf[[\"minc\", \"minr\", \"dx\", \"dy\"]].values\r\n else:\r\n dets = ellipse_to_bbox(gdf)\r\n\r\n # convert to [x1,y1,w,h] to [x1,y1,x2,y2]\r\n dets[:, 2:4] += dets[:, 0:2]\r\n\r\n # ellipse parameters, if any\r\n ell_pars = []\r\n for key 
in [\"ic\", \"jc\", \"ir\", \"jr\", \"theta_ij\"]:\r\n try:\r\n par = gdf[key].values\r\n except Exception:\r\n par = [np.nan]*len(gdf)\r\n ell_pars.append(par)\r\n\r\n # track\r\n trackers = mot_tracker.update(dets)\r\n\r\n k = 0\r\n for d in trackers:\r\n txt = \"{},{},{},{},{},{},{},{},{},{},{}\".format(frame,\r\n d[4],\r\n d[0], d[1],\r\n d[2] - d[0],\r\n d[3] - d[1],\r\n ell_pars[0][k], ell_pars[1][k],\r\n ell_pars[2][k], ell_pars[3][k],\r\n ell_pars[4][k])\r\n f.write(txt + \"\\n\")\r\n k += 1\r\n\r\n pbar.update()\r\n\r\n \r\nif __name__ == '__main__':\r\n\r\n parser = argparse.ArgumentParser(description='SORT')\r\n\r\n # I/O\r\n parser.add_argument(\"--input\", \"-i\",\r\n nargs=1,\r\n action=\"store\",\r\n dest=\"detections\",\r\n required=True,\r\n help=\"Input file with detections.\"\r\n \"Use extract_detections.py to get a valid file.\",)\r\n parser.add_argument(\"--output\", \"-o\",\r\n nargs=1,\r\n action=\"store\",\r\n dest=\"output\",\r\n required=True,\r\n help=\"Input file name (.csv).\",)\r\n\r\n # SORT parameters\r\n parser.add_argument(\"--max-age\",\r\n help=\"Maximum number of frames to keep alive a track\"\r\n \"without associated detections.\",\r\n type=int,\r\n dest=\"max_age\",\r\n default=1)\r\n parser.add_argument(\"--min-hits\",\r\n help=\"Minimum number of associated detections before\"\r\n \"track is initialised.\",\r\n type=int,\r\n default=3,\r\n dest=\"min_hits\")\r\n parser.add_argument(\"--iou-threshold\", \"-iou\",\r\n help=\"Minimum IOU for match.\",\r\n type=float,\r\n default=0.25,\r\n dest=\"iou_threshold\")\r\n\r\n parser.add_argument(\"--from-ellipses\",\r\n action=\"store_true\",\r\n dest=\"from_ellipses\",\r\n help=\"Will assume that the detections are ellipses.\")\r\n\r\n args = parser.parse_args()\r\n\r\n main()\r\n" ]
[ [ "pandas.read_csv", "numpy.expand_dims", "numpy.sqrt", "numpy.maximum", "numpy.minimum", "numpy.isnan", "numpy.vstack", "numpy.concatenate", "numpy.deg2rad", "numpy.ma.masked_invalid", "scipy.optimize.linear_sum_assignment", "numpy.array", "numpy.where", "numpy.empty" ] ]
kon-rad/tensorflow-chatbot
[ "9fb3b7dd40248748e13545e4f5f898d6208b3a64" ]
[ "execute.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport random\nimport sys\nimport time\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nimport data_utils\nimport seq2seq_model\n\n# python2 and python3 support\ntry:\n reload\nexcept NameError:\n # py3k has unicode by default\n pass\nelse:\n reload(sys).setdefaultencoding('utf-8')\n \ntry:\n from ConfigParser import SafeConfigParser\nexcept:\n from configparser import SafeConfigParser # In Python 3, ConfigParser has been renamed to configparser for PEP 8 compliance.\n \ngConfig = {}\n\ndef get_config(config_file='seq2seq.ini'):\n parser = SafeConfigParser()\n parser.read(config_file)\n # get the ints, floats and strings\n _conf_ints = [ (key, int(value)) for key,value in parser.items('ints') ]\n _conf_floats = [ (key, float(value)) for key,value in parser.items('floats') ]\n _conf_strings = [ (key, str(value)) for key,value in parser.items('strings') ]\n return dict(_conf_ints + _conf_floats + _conf_strings)\n\n# We use a number of buckets and pad to the closest one for efficiency.\n# See seq2seq_model.Seq2SeqModel for details of how they work.\n_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]\n\n\ndef read_data(source_path, target_path, max_size=None):\n \"\"\"Read data from source and target files and put into buckets.\n\n Args:\n source_path: path to the files with token-ids for the source language.\n target_path: path to the file with token-ids for the target language;\n it must be aligned with the source file: n-th line contains the desired\n output for n-th line from the source_path.\n max_size: maximum number of lines to read, all other will be ignored;\n if 0 or None, data files will be read completely (no limit).\n\n Returns:\n data_set: a list of length len(_buckets); data_set[n] contains a list of\n (source, target) pairs read from the provided data files that fit\n into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and\n len(target) < _buckets[n][1]; source and target are lists of token-ids.\n \"\"\"\n data_set = [[] for _ in _buckets]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n counter = 0\n while source and target and (not max_size or counter < max_size):\n counter += 1\n if counter % 100000 == 0:\n print(\" reading data line %d\" % counter)\n sys.stdout.flush()\n source_ids = [int(x) for x in source.split()]\n target_ids = [int(x) for x in target.split()]\n target_ids.append(data_utils.EOS_ID)\n for bucket_id, (source_size, target_size) in enumerate(_buckets):\n if len(source_ids) < source_size and len(target_ids) 
< target_size:\n data_set[bucket_id].append([source_ids, target_ids])\n break\n source, target = source_file.readline(), target_file.readline()\n return data_set\n\n\ndef create_model(session, forward_only):\n\n \"\"\"Create model and initialize or load parameters\"\"\"\n model = seq2seq_model.Seq2SeqModel( gConfig['enc_vocab_size'], gConfig['dec_vocab_size'], _buckets, gConfig['layer_size'], gConfig['num_layers'], gConfig['max_gradient_norm'], gConfig['batch_size'], gConfig['learning_rate'], gConfig['learning_rate_decay_factor'], forward_only=forward_only)\n\n if 'pretrained_model' in gConfig:\n model.saver.restore(session,gConfig['pretrained_model'])\n return model\n\n ckpt = tf.train.get_checkpoint_state(gConfig['working_directory'])\n # the checkpoint filename has changed in recent versions of tensorflow\n checkpoint_suffix = \"\"\n if tf.__version__ > \"0.12\":\n checkpoint_suffix = \".index\"\n if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + checkpoint_suffix):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n print(\"Created model with fresh parameters.\")\n session.run(tf.initialize_all_variables())\n return model\n\n\ndef train():\n # prepare dataset\n print(\"Preparing data in %s\" % gConfig['working_directory'])\n enc_train, dec_train, enc_dev, dec_dev, _, _ = data_utils.prepare_custom_data(gConfig['working_directory'],gConfig['train_enc'],gConfig['train_dec'],gConfig['test_enc'],gConfig['test_dec'],gConfig['enc_vocab_size'],gConfig['dec_vocab_size'])\n\n # Only allocate 2/3 of the gpu memory to allow for running gpu-based predictions while training:\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.666)\n config = tf.ConfigProto(gpu_options=gpu_options)\n config.gpu_options.allocator_type = 'BFC'\n\n with tf.Session(config=config) as sess:\n # Create model.\n print(\"Creating %d layers of %d units.\" % (gConfig['num_layers'], gConfig['layer_size']))\n model = create_model(sess, False)\n\n # Read data into buckets and compute their sizes.\n print (\"Reading development and training data (limit: %d).\"\n % gConfig['max_train_data_size'])\n dev_set = read_data(enc_dev, dec_dev)\n train_set = read_data(enc_train, dec_train, gConfig['max_train_data_size'])\n train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]\n train_total_size = float(sum(train_bucket_sizes))\n\n # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use\n # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to\n # the size if i-th training bucket, as used later.\n train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size\n for i in xrange(len(train_bucket_sizes))]\n\n # This is the training loop.\n step_time, loss = 0.0, 0.0\n current_step = 0\n previous_losses = []\n while True:\n # Choose a bucket according to data distribution. 
We pick a random number\n # in [0, 1] and use the corresponding interval in train_buckets_scale.\n random_number_01 = np.random.random_sample()\n bucket_id = min([i for i in xrange(len(train_buckets_scale))\n if train_buckets_scale[i] > random_number_01])\n\n # Get a batch and make a step.\n start_time = time.time()\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n train_set, bucket_id)\n _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, False)\n step_time += (time.time() - start_time) / gConfig['steps_per_checkpoint']\n loss += step_loss / gConfig['steps_per_checkpoint']\n current_step += 1\n\n # Once in a while, we save checkpoint, print statistics, and run evals.\n if current_step % gConfig['steps_per_checkpoint'] == 0:\n # Print statistics for the previous epoch.\n perplexity = math.exp(loss) if loss < 300 else float('inf')\n print (\"global step %d learning rate %.4f step-time %.2f perplexity \"\n \"%.2f\" % (model.global_step.eval(), model.learning_rate.eval(),\n step_time, perplexity))\n # Decrease learning rate if no improvement was seen over last 3 times.\n if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):\n sess.run(model.learning_rate_decay_op)\n previous_losses.append(loss)\n # Save checkpoint and zero timer and loss.\n checkpoint_path = os.path.join(gConfig['working_directory'], \"seq2seq.ckpt\")\n model.saver.save(sess, checkpoint_path, global_step=model.global_step)\n step_time, loss = 0.0, 0.0\n # Run evals on development set and print their perplexity.\n for bucket_id in xrange(len(_buckets)):\n if len(dev_set[bucket_id]) == 0:\n print(\" eval: empty bucket %d\" % (bucket_id))\n continue\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n dev_set, bucket_id)\n _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, True)\n eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')\n print(\" eval: bucket %d perplexity %.2f\" % (bucket_id, eval_ppx))\n sys.stdout.flush()\n\n\ndef decode():\n\n # Only allocate part of the gpu memory when predicting.\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)\n config = tf.ConfigProto(gpu_options=gpu_options)\n\n with tf.Session(config=config) as sess:\n # Create model and load parameters.\n model = create_model(sess, True)\n model.batch_size = 1 # We decode one sentence at a time.\n\n # Load vocabularies.\n enc_vocab_path = os.path.join(gConfig['working_directory'],\"vocab%d.enc\" % gConfig['enc_vocab_size'])\n dec_vocab_path = os.path.join(gConfig['working_directory'],\"vocab%d.dec\" % gConfig['dec_vocab_size'])\n\n enc_vocab, _ = data_utils.initialize_vocabulary(enc_vocab_path)\n _, rev_dec_vocab = data_utils.initialize_vocabulary(dec_vocab_path)\n\n # Decode from standard input.\n sys.stdout.write(\"> \")\n sys.stdout.flush()\n sentence = sys.stdin.readline()\n while sentence:\n # Get token-ids for the input sentence.\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), enc_vocab)\n # Which bucket does it belong to?\n bucket_id = min([b for b in xrange(len(_buckets))\n if _buckets[b][0] > len(token_ids)])\n # Get a 1-element batch to feed the sentence to the model.\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n {bucket_id: [(token_ids, [])]}, bucket_id)\n # Get output logits for the sentence.\n _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, True)\n # This is a greedy 
decoder - outputs are just argmaxes of output_logits.\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]\n # If there is an EOS symbol in outputs, cut them at that point.\n if data_utils.EOS_ID in outputs:\n outputs = outputs[:outputs.index(data_utils.EOS_ID)]\n # Print out French sentence corresponding to outputs.\n print(\" \".join([tf.compat.as_str(rev_dec_vocab[output]) for output in outputs]))\n print(\"> \", end=\"\")\n sys.stdout.flush()\n sentence = sys.stdin.readline()\n\n\ndef self_test():\n \"\"\"Test the translation model.\"\"\"\n with tf.Session() as sess:\n print(\"Self-test for neural translation model.\")\n # Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.\n model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,\n 5.0, 32, 0.3, 0.99, num_samples=8)\n sess.run(tf.initialize_all_variables())\n\n # Fake data set for both the (3, 3) and (6, 6) bucket.\n data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],\n [([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])\n for _ in xrange(5): # Train the fake model for 5 steps.\n bucket_id = random.choice([0, 1])\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n data_set, bucket_id)\n model.step(sess, encoder_inputs, decoder_inputs, target_weights,\n bucket_id, False)\n\n\ndef init_session(sess, conf='seq2seq.ini'):\n global gConfig\n gConfig = get_config(conf)\n \n # Create model and load parameters.\n model = create_model(sess, True)\n model.batch_size = 1 # We decode one sentence at a time.\n\n # Load vocabularies.\n enc_vocab_path = os.path.join(gConfig['working_directory'],\"vocab%d.enc\" % gConfig['enc_vocab_size'])\n dec_vocab_path = os.path.join(gConfig['working_directory'],\"vocab%d.dec\" % gConfig['dec_vocab_size'])\n\n enc_vocab, _ = data_utils.initialize_vocabulary(enc_vocab_path)\n _, rev_dec_vocab = data_utils.initialize_vocabulary(dec_vocab_path)\n\n return sess, model, enc_vocab, rev_dec_vocab\n\ndef decode_line(sess, model, enc_vocab, rev_dec_vocab, sentence):\n # Get token-ids for the input sentence.\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), enc_vocab)\n\n # Which bucket does it belong to?\n bucket_id = min([b for b in xrange(len(_buckets)) if _buckets[b][0] > len(token_ids)])\n\n # Get a 1-element batch to feed the sentence to the model.\n encoder_inputs, decoder_inputs, target_weights = model.get_batch({bucket_id: [(token_ids, [])]}, bucket_id)\n\n # Get output logits for the sentence.\n _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, True)\n\n # This is a greedy decoder - outputs are just argmaxes of output_logits.\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]\n\n # If there is an EOS symbol in outputs, cut them at that point.\n if data_utils.EOS_ID in outputs:\n outputs = outputs[:outputs.index(data_utils.EOS_ID)]\n\n return \" \".join([tf.compat.as_str(rev_dec_vocab[output]) for output in outputs])\n\nif __name__ == '__main__':\n if len(sys.argv) - 1:\n gConfig = get_config(sys.argv[1])\n else:\n # get configuration from seq2seq.ini\n gConfig = get_config()\n\n print('\\n>> Mode : %s\\n' %(gConfig['mode']))\n\n if gConfig['mode'] == 'train':\n # start training\n train()\n elif gConfig['mode'] == 'test':\n # interactive decode\n decode()\n else:\n # wrong way to execute \"serve\"\n # Use : >> python ui/app.py\n # uses seq2seq_serve.ini as conf file\n print('Serve Usage : >> python ui/app.py')\n print('# uses 
seq2seq_serve.ini as conf file')\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.gfile.Exists", "tensorflow.gfile.GFile", "numpy.random.random_sample", "tensorflow.ConfigProto", "tensorflow.compat.as_bytes", "tensorflow.initialize_all_variables", "numpy.argmax", "tensorflow.GPUOptions", "tensorflow.Session", "tensorflow.compat.as_str" ] ]
blaylockbk/HRRR_archive_download
[ "a9d6e0d5df3d3baa6fd2a9470ac20e838b52e650" ]
[ "herbie/archive.py" ]
[ "#!/usr/bin/env python3\n\n## Brian Blaylock\n## May 3, 2021\n\n\"\"\"\n===============================\nHerbie: Retrieve NWP Model Data\n===============================\n\nHerbie is your model output download assistant with a mind of his own!\nHerbie might look small on the outside, but he has a big heart on the\ninside and will get you to the\n`finish line <https://www.youtube.com/watch?v=4XWufUZ1mxQ&t=189s>`_.\nHappy racing! 🏎🏁\n\n`📔 Documentation <https://blaylockbk.github.io/Herbie/_build/html/>`_\n\nWith Herbie's API, you can search and download GRIB2 model output files\nfrom different archive sources for the High-Resolution Rapid Refresh\n(HRRR) HRRR-Alaska, Rapid Refresh (RAP), Global Forecast System (GFS),\nand others.\n\nHerbie looks for GRIB2 model output data from NOMADS, NOAA's Big Data\nProject partners (Amazon Web Services, Google Cloud Platform, and\nMicrosoft Azure), and the CHPC Pando archive at the University of Utah.\n\nHerbie supports subsetting of GRIB2 files by individual GRIB\nmessages (i.e. variable and level) when the index (.idx) file exist and\nhelp you open them with xarray/cfgrib.\n\nHerbie is extendable to support other models. Simply create a template\nfile in the ``herbie/models`` directory and make a pull-request.\n\nFor more details, see https://blaylockbk.github.io/Herbie/_build/html/user_guide/data_sources.html\n\n.. note:: Updates since the ``Herbie 0.0.5`` release\n\n - TODO: Rename 'searchString' to 'subset' (and rename subset function)\n - TODO: Create .idx file if wgrib2 is installed (linux only) when index file doesn't exist\n - TODO: add `idx_to_df()` and `df_to_idx()` methods.\n - TODO: clean up document examples. It's kind of scattered now.\n - TODO: Allow for searching of locally stored model data.\n\n\"\"\"\nimport hashlib\nimport os\nimport urllib.request\nimport warnings\nfrom datetime import datetime, timedelta\n\nimport cfgrib\nimport pandas as pd\nimport pygrib\nimport requests\nfrom pyproj import CRS\n\nimport herbie.models as models_template\n\n# NOTE: These config dict values are retrieved from __init__ and read\n# from the file ${HOME}/.config/herbie/config.toml\n# Path imported from __init__ because it has my custom `expand()` method\nfrom . 
import Path, config\n\ntry:\n # Load custom xarray accessors\n import herbie.accessors\nexcept:\n warnings.warn(\n \"herbie xarray accessors could not be imported.\"\n \"You are probably missing the Carpenter_Workshop.\"\n \"If you want to use these functions, try\"\n \"`pip install git+https://github.com/blaylockbk/Carpenter_Workshop.git`\"\n )\n pass\n\n\ndef _searchString_help():\n \"\"\"Help/Error Message for `searchString`\"\"\"\n msg = [\n \"\\nUse regular expression to search for lines in the .idx file\",\n \"Here are some examples you can use for `searchString`\",\n \" ============================= ===============================================\",\n \" ``searchString`` Messages that will be downloaded\",\n \" ============================= ===============================================\",\n \" ':TMP:2 m' Temperature at 2 m.\",\n \" ':TMP:' Temperature fields at all levels.\",\n \" ':UGRD:.* mb' U Wind at all pressure levels.\",\n \" ':500 mb:' All variables on the 500 mb level.\",\n \" ':APCP:' All accumulated precipitation fields.\",\n \" ':APCP:surface:0-[1-9]*' Accumulated precip since initialization time\",\n \" ':APCP:surface:[1-9]*-[1-9]*' Accumulated precip over last hour\",\n \" ':UGRD:10 m' U wind component at 10 meters.\",\n \" ':(U|V)GRD:(10|80) m' U and V wind component at 10 and 80 m.\",\n \" ':(U|V)GRD:' U and V wind component at all levels.\",\n \" ':.GRD:' (Same as above)\",\n \" ':(TMP|DPT):' Temperature and Dew Point for all levels .\",\n \" ':(TMP|DPT|RH):' TMP, DPT, and Relative Humidity for all levels.\",\n \" ':REFC:' Composite Reflectivity\",\n \" ':surface:' All variables at the surface.\",\n \" ============================= ===============================================\",\n \"\\nIf you need help with regular expression, search the web\",\n \" or look at this cheatsheet: https://www.petefreitag.com/cheatsheets/regex/.\",\n ]\n return \"\\n\".join(msg)\n\n\nclass Herbie:\n \"\"\"\n Locate GRIB2 file at one of the archive sources.\n\n Parameters\n ----------\n date : pandas-parsable datetime\n *Model initialization datetime*.\n If None, then must set ``valid_date``.\n valid_date : pandas-parsable datetime\n Model valid datetime. Must set when ``date`` is None.\n fxx : int\n Forecast lead time in hours. Available lead times depend on\n the model type and model version. Range is model and run\n dependant.\n model : {'hrrr', 'hrrrak', 'rap', 'gfs', 'gfs_wave', 'rrfs', etc.}\n Model name as defined in the models template folder. CASE INSENSITIVE\n Some examples:\n - ``'hrrr'`` HRRR contiguous United States model\n - ``'hrrrak'`` HRRR Alaska model (alias ``'alaska'``)\n - ``'rap'`` RAP model\n product : {'sfc', 'prs', 'nat', 'subh'}\n Output variable product file type. If not specified, will\n use first product in model template file. CASE SENSITIVE.\n For example, the HRRR model has these products:\n - ``'sfc'`` surface fields\n - ``'prs'`` pressure fields\n - ``'nat'`` native fields\n - ``'subh'`` subhourly fields\n member : None or int\n Some ensemble models (e.g. the future RRFS) will need to\n specify an ensemble member.\n priority : list or str\n List of model sources to get the data in the order of\n download priority. CASE INSENSITIVE. 
Some example data\n sources and the default priority order are listed below.\n - ``'aws'`` Amazon Web Services (Big Data Program)\n - ``'nomads'`` NOAA's NOMADS server\n - ``'google'`` Google Cloud Platform (Big Data Program)\n - ``'azure'`` Microsoft Azure (Big Data Program)\n - ``'pando'`` University of Utah Pando Archive (gateway 1)\n - ``'pando2'`` University of Utah Pando Archive (gateway 2)\n save_dir : str or pathlib.Path\n Location to save GRIB2 files locally. Default save directory\n is set in ``~/.config/herbie/config.cfg``.\n Overwrite : bool\n If True, look for GRIB2 files even if local copy exists.\n If False (default), use the local copy (still need to find\n the idx file).\n **kwargs\n Any other paremeter needed to satisfy the conditions in the\n model template file (e.g., nest=2, other_label='run2')\n \"\"\"\n\n def __init__(\n self,\n date=None,\n *,\n valid_date=None,\n model=config[\"default\"].get(\"model\"),\n fxx=config[\"default\"].get(\"fxx\"),\n product=config[\"default\"].get(\"product\"),\n member=config[\"default\"].get(\"member\", 1),\n priority=config[\"default\"].get(\"priority\"),\n save_dir=config[\"default\"].get(\"save_dir\"),\n overwrite=config[\"default\"].get(\"overwrite\", False),\n verbose=config[\"default\"].get(\"verbose\", True),\n **kwargs,\n ):\n \"\"\"\n Specify model output and find GRIB2 file at one of the sources.\n \"\"\"\n self.fxx = fxx\n\n if date is not None:\n # User supplied `date`, which is the model initialization datetime.\n self.date = pd.to_datetime(date)\n self.valid_date = self.date + timedelta(hours=self.fxx)\n else:\n assert valid_date is not None, \"`date` or `valid_date` is required.\"\n # User supplied `valid_date`, which is the model valid datetime.\n self.valid_date = pd.to_datetime(valid_date)\n self.date = self.valid_date - timedelta(hours=self.fxx)\n\n self.model = model.lower()\n self.member = member\n self.product = product\n\n self.priority = priority\n self.save_dir = Path(save_dir).expand()\n self.overwrite = overwrite\n\n # Some model templates may require kwargs not listed (e.g., \"nest\").\n for key, value in kwargs.items():\n # TODO: Check if the kwarg is a config default.\n setattr(self, key, value)\n\n # Get details from the template of the specified model.\n # This attaches the details from the `models.<model>.template`\n # class to this Herbie object.\n # This line is equivalent to `models_template.gfs.template(self)`.\n # We do it this way because the model name is a variable.\n # (see https://stackoverflow.com/a/7936588/2383070 for what I'm doing here)\n getattr(models_template, self.model).template(self)\n\n if product is None:\n # The user didn't specify a product, so lets use the first\n # product in the model template.\n self.product = list(self.PRODUCTS)[0]\n warnings.warn(f'`product` not specified. 
Will use [\"{self.product}\"].')\n # We need to rerun this so the sources have the new product value.\n getattr(models_template, self.model).template(self)\n\n self.product_description = self.PRODUCTS[self.product]\n\n # Default value is .idx, but some have weird suffix (.inv for NCEI files).\n self.IDX_SUFFIX = getattr(self, \"IDX_SUFFIX\", \".idx\")\n\n # Check the user input\n self._validate()\n\n # Ok, now we are ready to look for the GRIB2 file at each of the remote sources.\n # self.grib is the first existing GRIB2 file discovered.\n # self.idx is the first existing index file discovered.\n self.grib = None\n self.grib_source = None\n self.idx = None\n self.idx_source = None\n\n # But first, check if the GRIB2 file exists locally.\n local_copy = self.get_localFilePath()\n if local_copy.exists() and not overwrite:\n self.grib = local_copy\n self.grib_source = \"local\"\n # NOTE: We will still get the idx files from a remote\n # because they aren't stored locally, or are they?\n\n if list(self.SOURCES)[0] == \"local\":\n # TODO: Experimental special case, not very elegant yet.\n self.idx = Path(str(self.grib) + self.IDX_SUFFIX)\n return None\n\n # If priority list is set, we want to search SOURCES in that\n # priority order. If priority is None, then search all SOURCES\n # in the order given by the model template file.\n # NOTE: A source from the template will not be used if it is not\n # included in the priority list.\n if self.priority is not None:\n self.SOURCES = {\n key: self.SOURCES[key] for key in self.priority if key in self.SOURCES\n }\n\n # Ok, NOW we are ready to search for the remote GRIB2 files...\n for source in self.SOURCES:\n if \"pando\" in source:\n # Sometimes pando returns a bad handshake. Pinging\n # pando first can help prevent that.\n self._ping_pando()\n\n # Get the file URL for the source and determine if the\n # GRIB2 file and the index file exist. 
If found, store the\n # URL for the GRIB2 file and the .idx file.\n url = self.SOURCES[source]\n\n found_grib = False\n found_idx = False\n if self.grib is None and self._check_grib(url):\n found_grib = True\n self.grib = url\n self.grib_source = source\n if self.idx is None and self._check_idx(url):\n found_idx = True\n self.idx = url + self.IDX_SUFFIX\n self.idx_source = source\n\n if verbose:\n msg = (\n f\"Looked in [{source:^10s}] for {self.model.upper()} \"\n f\"{self.date:%H:%M UTC %d %b %Y} F{self.fxx:02d} \"\n f\"--> ({found_grib=}) ({found_idx=}) {' ':5s}\"\n )\n if verbose:\n print(msg, end=\"\\r\", flush=True)\n\n if all([self.grib is not None, self.idx is not None]):\n # Exit loop early if we found both GRIB2 and idx file.\n break\n\n # After searching each source, print some info about what we found...\n if verbose:\n if any([self.grib is not None, self.idx is not None]):\n print(\n f\"🏋🏻‍♂️ Found\",\n f\"\\033[32m{self.date:%Y-%b-%d %H:%M UTC} F{self.fxx:02d}\\033[m\",\n f\"[{self.model.upper()}] [product={self.product}]\",\n f\"GRIB2 file from \\033[38;5;202m{self.grib_source}\\033[m and\",\n f\"index file from \\033[38;5;202m{self.idx_source}\\033[m.\",\n f'{\" \":150s}',\n )\n else:\n print(\n f\"💔 Did not find a GRIB2 or Index File for\",\n f\"\\033[32m{self.date:%Y-%b-%d %H:%M UTC} F{self.fxx:02d}\\033[m\",\n f\"{self.model.upper()}\",\n f'{\" \":100s}',\n )\n\n def __repr__(self):\n \"\"\"Representation in Notebook\"\"\"\n msg = (\n f\"[{self.model.upper()}] model [{self.product}] product\",\n f\"run at \\033[32m{self.date:%Y-%b-%d %H:%M UTC}\",\n f\"F{self.fxx:02d}\\033[m\",\n )\n return \" \".join(msg)\n\n def __str__(self):\n \"\"\"When Herbie class object is printed, print all properties\"\"\"\n msg = []\n for i in dir(self):\n if isinstance(getattr(self, i), (int, str, dict)):\n if not i.startswith(\"__\"):\n msg.append(f\"self.{i}={getattr(self, i)}\")\n return \"\\n\".join(msg)\n\n def _validate(self):\n \"\"\"Validate the Herbie class input arguments\"\"\"\n\n # Accept model alias\n if self.model.lower() == \"alaska\":\n self.model = \"hrrrak\"\n\n _models = {m for m in dir(models_template) if not m.startswith(\"__\")}\n _products = set(self.PRODUCTS)\n\n assert self.date < datetime.utcnow(), \"🔮 `date` cannot be in the future.\"\n assert self.model in _models, f\"`model` must be one of {_models}\"\n assert self.product in _products, f\"`product` must be one of {_products}\"\n\n if isinstance(self.priority, str):\n self.priority = [self.priority]\n\n if self.priority is not None:\n self.priority = [i.lower() for i in self.priority]\n\n # Don't look for data from NOMADS if requested date is earlier\n # than 14 days ago. NOMADS doesn't keep data that old,\n # (I think this is true of all models).\n if \"nomads\" in self.priority:\n expired = datetime.utcnow() - timedelta(days=14)\n expired = pd.to_datetime(f\"{expired:%Y-%m-%d}\")\n if self.date < expired:\n self.priority.remove(\"nomads\")\n\n def _ping_pando(self):\n \"\"\"Pinging the Pando server before downloading can prevent a bad handshake.\"\"\"\n try:\n requests.head(\"https://pando-rgw01.chpc.utah.edu/\")\n except:\n print(\"🤝🏻⛔ Bad handshake with pando? 
Am I able to move on?\")\n pass\n\n def _check_grib(self, url):\n \"\"\"Check that the GRIB2 URL exist and is of useful length.\"\"\"\n head = requests.head(url)\n check_exists = head.ok\n if check_exists:\n check_content = int(head.raw.info()[\"Content-Length\"]) > 1_000_000\n return check_exists and check_content\n else:\n return False\n\n def _check_idx(self, url):\n \"\"\"Check if an index file exist for the GRIB2 URL.\"\"\"\n if not url.endswith(self.IDX_SUFFIX):\n url += self.IDX_SUFFIX\n return requests.head(url).ok\n\n @property\n def get_remoteFileName(self, source=None):\n \"\"\"Predict Remote File Name\"\"\"\n if source is None:\n source = list(self.SOURCES)[0]\n return self.SOURCES[source].split(\"/\")[-1]\n\n @property\n def get_localFileName(self):\n \"\"\"Predict Local File Name\"\"\"\n return self.LOCALFILE\n\n def get_localFilePath(self, searchString=None):\n \"\"\"Get path to local file\"\"\"\n if list(self.SOURCES)[0] == \"local\":\n # TODO: An experimental special case\n outFile = Path(self.SOURCES[\"local\"]).expand()\n else:\n outFile = (\n self.save_dir.expand()\n / self.model\n / f\"{self.date:%Y%m%d}\"\n / self.get_localFileName\n )\n\n if searchString is not None:\n # Reassign the index DataFrame with the requested searchString\n self.idx_df = self.read_idx(searchString)\n\n # Get a list of all GRIB message numbers. We will use this\n # in the output file name as a unique identifier.\n all_grib_msg = \"-\".join([f\"{i:g}\" for i in self.idx_df.index])\n\n # To prevent \"filename too long\" error, create a hash to\n # make unique filename.\n hash_label = hashlib.sha1(all_grib_msg.encode()).hexdigest()\n\n # Append the filename to distinguish it from the full file.\n outFile = outFile.with_suffix(f\".grib2.subset_{hash_label}\")\n\n return outFile\n\n def read_idx(self, searchString=None):\n \"\"\"\n Inspect the GRIB2 file contents by reading the index file.\n\n Parameters\n ----------\n searchString : str\n Filter dataframe by a searchString regular expression.\n Searches for strings in the index file lines, specifically\n the variable, level, and forecast_time columns.\n Execute ``_searchString_help()`` for examples of a good\n searchString.\n\n .. 
include:: ../user_guide/searchString.rst\n\n Returns\n -------\n A Pandas DataFrame of the index file.\n \"\"\"\n assert self.idx is not None, f\"No index file found for {self.grib}.\"\n\n # Sometimes idx end in ':', other times it doesn't (in some Pando files).\n # https://pando-rgw01.chpc.utah.edu/hrrr/sfc/20180101/hrrr.t00z.wrfsfcf00.grib2.idx\n # https://noaa-hrrr-bdp-pds.s3.amazonaws.com/hrrr.20210101/conus/hrrr.t00z.wrfsfcf00.grib2.idx\n # Sometimes idx has more than the standard messages\n # https://noaa-nbm-grib2-pds.s3.amazonaws.com/blend.20210711/13/core/blend.t13z.core.f001.co.grib2.idx\n\n # TODO: Experimental special case when self.idx is a pathlib.Path\n if not hasattr(self.idx, \"exists\"):\n # If the self.idx is not a pathlib.Path, then we assume it needs to be downloaded\n r = requests.get(self.idx)\n assert r.ok, f\"Index file does not exist: {self.idx}\"\n\n df = pd.read_csv(\n self.idx,\n sep=\":\",\n names=[\n \"grib_message\",\n \"start_byte\",\n \"reference_time\",\n \"variable\",\n \"level\",\n \"forecast_time\",\n \"?\",\n \"??\",\n \"???\",\n ],\n )\n\n # Format the DataFrame\n df[\"grib_message\"] = df[\"grib_message\"].astype(float)\n # ^ float because RAP idx files have some decimal grib message numbers\n df[\"reference_time\"] = pd.to_datetime(df.reference_time, format=\"d=%Y%m%d%H\")\n df[\"valid_time\"] = df[\"reference_time\"] + pd.to_timedelta(f\"{self.fxx}H\")\n df[\"start_byte\"] = df[\"start_byte\"].astype(int)\n df[\"end_byte\"] = df[\"start_byte\"].shift(-1, fill_value=\"\")\n df[\"range\"] = df.start_byte.astype(str) + \"-\" + df.end_byte.astype(str)\n df = df.set_index(\"grib_message\")\n df = df.reindex(\n columns=[\n \"start_byte\",\n \"end_byte\",\n \"range\",\n \"reference_time\",\n \"valid_time\",\n \"variable\",\n \"level\",\n \"forecast_time\",\n \"?\",\n \"??\",\n \"???\",\n ]\n )\n\n df = df.dropna(how=\"all\", axis=1)\n df = df.fillna(\"\")\n\n df.attrs = dict(\n url=self.idx,\n source=self.idx_source,\n description=\"Inventory index (.idx) file for the GRIB2 file.\",\n model=self.model,\n product=self.product,\n lead_time=self.fxx,\n datetime=self.date,\n )\n\n # Filter DataFrame by searchString\n if searchString not in [None, \":\"]:\n columns_to_search = df.loc[:, \"variable\":].apply(\n lambda x: \":\".join(x).rstrip(\":\"), axis=1\n )\n logic = columns_to_search.str.contains(searchString)\n if logic.sum() == 0:\n print(\n f\"No GRIB messages found. There might be something wrong with {searchString=}\"\n )\n print(_searchString_help(searchString))\n df = df.loc[logic]\n return df\n\n def download(\n self,\n searchString=None,\n *,\n source=None,\n save_dir=None,\n overwrite=None,\n verbose=True,\n errors=\"warn\",\n ):\n \"\"\"\n Download file from source.\n\n Subsetting by variable follows the same principles described here:\n https://www.cpc.ncep.noaa.gov/products/wesley/fast_downloading_grib.html\n\n Parameters\n ----------\n searchString : str\n If None, download the full file. Else, use regex to subset\n the file by specific variables and levels.\n .. include:: ../user_guide/searchString.rst\n source : {'nomads', 'aws', 'google', 'azure', 'pando', 'pando2'}\n If None, download GRIB2 file from self.grib2 which is\n the first location the GRIB2 file was found from the\n priority lists when this class was initialized. 
Else, you\n may specify the source to force downloading it from a\n different location.\n save_dir : str or pathlib.Path\n Location to save the model output files.\n If None, uses the default or path specified in __init__.\n Else, changes the path files are saved.\n overwrite : bool\n If True, overwrite existing files. Default will skip\n downloading if the full file exists. Not applicable when\n when searchString is not None because file subsets might\n be unique.\n errors : {'warn', 'raise'}\n When an error occurs, send a warning or raise a value error.\n \"\"\"\n\n def _reporthook(a, b, c):\n \"\"\"\n Print download progress in megabytes.\n\n Parameters\n ----------\n a : Chunk number\n b : Maximum chunk size\n c : Total size of the download\n \"\"\"\n chunk_progress = a * b / c * 100\n total_size_MB = c / 1000000.0\n print(\n f\"\\r🚛💨 Download Progress: {chunk_progress:.2f}% of {total_size_MB:.1f} MB\\r\",\n end=\"\",\n )\n\n def subset(searchString, outFile):\n \"\"\"Download a subset specified by the regex searchString\"\"\"\n grib_source = self.grib\n if hasattr(grib_source, \"as_posix\") and grib_source.exists():\n # The GRIB source is local. Curl the local file\n # See https://stackoverflow.com/a/21023161/2383070\n grib_source = f\"file://{str(self.grib)}\"\n if verbose:\n print(\n f'📇 Download subset: {self.__repr__()}{\" \":60s}\\n cURL from {grib_source}'\n )\n\n # Download subsets of the file by byte range with cURL.\n for i, (grbmsg, row) in enumerate(self.idx_df.iterrows()):\n if verbose:\n print(\n f\"{i+1:>4g}: GRIB_message={grbmsg:<3g} \\033[34m{':'.join(row.values[5:]).rstrip(':')}\\033[m\"\n )\n if i == 0:\n # If we are working on the first item, overwrite the existing file...\n curl = f\"curl -s --range {row.range} {grib_source} > {outFile}\"\n else:\n # ...all other messages are appended to the subset file.\n curl = f\"curl -s --range {row.range} {grib_source} >> {outFile}\"\n os.system(curl)\n\n self.local_grib_subset = outFile\n\n # If the file exists in the localPath and we don't want to\n # overwrite, then we don't need to download it.\n outFile = self.get_localFilePath(searchString=searchString)\n\n # This overrides the overwrite specified in __init__\n if overwrite is not None:\n self.overwrite = overwrite\n\n if outFile.exists() and not self.overwrite:\n if verbose:\n print(f\"🌉 Already have local copy --> {outFile}\")\n if searchString in [None, \":\"]:\n self.local_grib = outFile\n else:\n self.local_grib_subset = outFile\n return\n\n # Attach the index file to the object (how much overhead is this?)\n if self.idx is not None:\n self.idx_df = self.read_idx(searchString)\n\n # This overrides the save_dir specified in __init__\n if save_dir is not None:\n self.save_dir = Path(save_dir).expand()\n\n if not hasattr(Path(self.save_dir).expand(), \"exists\"):\n self.save_dir = Path(self.save_dir).expand()\n\n # Check that data exists\n if self.grib is None:\n msg = f\"🦨 GRIB2 file not found: {self.model=} {self.date=} {self.fxx=}\"\n if errors == \"warn\":\n warnings.warn(msg)\n return # Can't download anything without a GRIB file URL.\n elif errors == \"raise\":\n raise ValueError(msg)\n if self.idx is None and searchString is not None:\n msg = f\"🦨 Index file not found; cannot download subset: {self.model=} {self.date=} {self.fxx=}\"\n if errors == \"warn\":\n warnings.warn(\n msg + \" I will download the full file because I cannot subset.\"\n )\n elif errors == \"raise\":\n raise ValueError(msg)\n\n if source is not None:\n # Force download from a specified 
source and not from first in priority\n self.grib = self.SOURCES[source]\n\n # Create directory if it doesn't exist\n if not outFile.parent.is_dir():\n outFile.parent.mkdir(parents=True, exist_ok=True)\n print(f\"👨🏻‍🏭 Created directory: [{outFile.parent}]\")\n\n if searchString in [None, \":\"] or self.idx is None:\n # Download the full file from remote source\n urllib.request.urlretrieve(self.grib, outFile, _reporthook)\n if verbose:\n print(\n f\"✅ Success! Downloaded {self.model.upper()} from \\033[38;5;202m{self.grib_source:20s}\\033[m\\n\\tsrc: {self.grib}\\n\\tdst: {outFile}\"\n )\n self.local_grib = outFile\n else:\n # Download a subset of the file\n subset(searchString, outFile)\n\n def xarray(\n self, searchString, backend_kwargs={}, remove_grib=True, **download_kwargs\n ):\n \"\"\"\n Open GRIB2 data as xarray DataSet\n\n Parameters\n ----------\n searchString : str\n Variables to read into xarray Dataset\n remove_grib : bool\n If True, grib file will be removed ONLY IF it didn't exist\n before we downloaded it.\n \"\"\"\n\n download_kwargs = {**dict(overwrite=False), **download_kwargs}\n\n # Download file if local file does not exists\n local_file = self.get_localFilePath(searchString=searchString)\n\n # Only remove grib if it didn't exists before we download it\n remove_grib = not local_file.exists() and remove_grib\n\n if not local_file.exists() or download_kwargs[\"overwrite\"]:\n self.download(searchString=searchString, **download_kwargs)\n\n # Backend kwargs for cfgrib\n backend_kwargs.setdefault(\"indexpath\", \"\")\n backend_kwargs.setdefault(\n \"read_keys\", [\"parameterName\", \"parameterUnits\", \"stepRange\"]\n )\n backend_kwargs.setdefault(\"errors\", \"raise\")\n\n # Use cfgrib.open_datasets, just in case there are multiple \"hypercubes\"\n # for what we requested.\n Hxr = cfgrib.open_datasets(\n self.get_localFilePath(searchString=searchString),\n backend_kwargs=backend_kwargs,\n )\n\n # Get CF grid projection information with pygrib and pyproj because\n # this is something cfgrib doesn't do (https://github.com/ecmwf/cfgrib/issues/251)\n # NOTE: Assumes the projection is the same for all variables\n grib = pygrib.open(str(self.get_localFilePath(searchString=searchString)))\n msg = grib.message(1)\n cf_params = CRS(msg.projparams).to_cf()\n\n # Funny stuff with polar stereographic (https://github.com/pyproj4/pyproj/issues/856)\n # TODO: Is there a better way to handle this? 
What about south pole?\n if cf_params[\"grid_mapping_name\"] == \"polar_stereographic\":\n cf_params[\"latitude_of_projection_origin\"] = cf_params.get(\n \"latitude_of_projection_origin\", 90\n )\n\n # Here I'm looping over each dataset in the list returned by cfgrib\n for ds in Hxr:\n # Add some details\n # ----------------\n ds.attrs[\"model\"] = self.model\n ds.attrs[\"product\"] = self.product\n ds.attrs[\"description\"] = self.DESCRIPTION\n ds.attrs[\"remote_grib\"] = self.grib\n ds.attrs[\"local_grib\"] = self.get_localFilePath(searchString=searchString)\n\n # Attach CF grid mapping\n # ----------------------\n # http://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#appendix-grid-mappings\n ds[\"gribfile_projection\"] = None\n ds[\"gribfile_projection\"].attrs = cf_params\n ds[\"gribfile_projection\"].attrs[\n \"long_name\"\n ] = f\"{self.model.upper()} model grid projection\"\n\n # Assign this grid_mapping for all variables\n for var in list(ds):\n if var == \"gribfile_projection\":\n continue\n ds[var].attrs[\"grid_mapping\"] = \"gribfile_projection\"\n\n if remove_grib:\n # Only remove grib if it didn't exists before\n\n # Load the data to memory before removing the file\n Hxr = [ds.load() for ds in Hxr]\n # new = Hxr.copy()\n\n # Close the files so it can be removed (this issue seems\n # to be WindowsOS specific).\n # for ds in Hxr:\n # ds.close()\n\n # Removes file\n local_file.unlink()\n\n # Hxr = new\n\n if len(Hxr) == 1:\n return Hxr[0]\n else:\n return Hxr\n" ]
[ [ "pandas.to_timedelta", "pandas.read_csv", "pandas.to_datetime" ] ]
ksuarz/mongo-monary-driver
[ "491279737aef7e4d247b4cd9b3b7d87b82b21145" ]
[ "monary/monary.py" ]
[ "# Monary - Copyright 2011-2013 David J. C. Beach\n# Please see the included LICENSE.TXT and NOTICE.TXT for licensing information.\n\nimport os.path\nimport platform\nfrom urllib import urlencode\nfrom ctypes import *\n\ntry:\n # if we are using Python 2.7+\n from collections import OrderedDict\nexcept ImportError:\n # for Python 2.6 and earlier\n from .ordereddict import OrderedDict\n\nimport numpy\nimport bson\n\ncmonary = None\n\ndef _load_cmonary_lib():\n \"\"\"Loads the cmonary CDLL library (from the directory containing this module).\"\"\"\n global cmonary\n thismodule = __file__\n abspath = os.path.abspath(thismodule)\n moduledir = list(os.path.split(abspath))[:-1]\n if platform.system() == 'Windows':\n cmonary_fname = \"cmonary.dll\"\n else:\n cmonary_fname = \"libcmonary.so\"\n cmonaryfile = os.path.join(*(moduledir + [cmonary_fname]))\n cmonary = CDLL(cmonaryfile)\n\n_load_cmonary_lib()\n\nCTYPE_CODES = {\n \"P\": c_void_p, # pointer\n \"S\": c_char_p, # string\n \"I\": c_int, # int\n \"U\": c_uint, # unsigned int\n \"L\": c_long, # long\n \"0\": None, # None/void\n}\n\n# List of C function definitions from the cmonary library\nFUNCDEFS = [\n # format: \"func_name:arg_types:return_type\"\n \"monary_connect:S:P\",\n \"monary_disconnect:P:0\",\n \"monary_use_collection:PSS:P\",\n \"monary_destroy_collection:P:0\",\n \"monary_alloc_column_data:UU:P\",\n \"monary_free_column_data:P:I\",\n \"monary_set_column_item:PUSUUPP:I\",\n \"monary_query_count:PP:L\",\n \"monary_init_query:PUUPPI:P\",\n \"monary_load_query:P:I\",\n \"monary_close_query:P:0\",\n]\n\nMAX_COLUMNS = 1024\n\ndef _decorate_cmonary_functions():\n \"\"\"Decorates each of the cmonary functions with their argument and result types.\"\"\"\n for funcdef in FUNCDEFS:\n name, argtypes, restype = funcdef.split(\":\")\n func = getattr(cmonary, name)\n func.argtypes = [ CTYPE_CODES[c] for c in argtypes ]\n func.restype = CTYPE_CODES[restype]\n\n_decorate_cmonary_functions()\n\n# Table of type names and conversions between cmonary and numpy types\nMONARY_TYPES = {\n # \"common_name\": (cmonary_type_code, numpy_type_object)\n \"id\": (1, \"<V12\"),\n \"bool\": (2, numpy.bool),\n \"int8\": (3, numpy.int8),\n \"int16\": (4, numpy.int16),\n \"int32\": (5, numpy.int32),\n \"int64\": (6, numpy.int64),\n \"uint8\": (7, numpy.uint8),\n \"uint16\": (8, numpy.uint16),\n \"uint32\": (9, numpy.uint32),\n \"uint64\": (10, numpy.uint64),\n \"float32\": (11, numpy.float32),\n \"float64\": (12, numpy.float64),\n \"date\": (13, numpy.int64),\n \"timestamp\": (14, numpy.uint64),\n \"string\": (15, \"S\"), # The length argument here INCLUDES the null character\n \"binary\": (16, \"<V\"), # Little-endian raw data (void pointer)\n \"bson\": (17, \"<V\"),\n \"type\": (18, numpy.uint8),\n \"size\": (19, numpy.uint32),\n \"length\": (20, numpy.uint32),\n}\n\ndef get_monary_numpy_type(orig_typename):\n \"\"\"Given a common typename, find the corresponding cmonary type number,\n type argument, and numpy type object (or code).\n\n The input typename must be one of the keys found in the ``MONARY_TYPES``\n dictionary. These are common BSON type names such as ``id``, ``bool``,\n ``int32``, ``float64``, ``date``, or ``string``. 
If the type is ``string``,\n ``binary``, or ``bson``, its name must be followed by a ``:size`` suffix\n indicating the maximum number of bytes that will be used to store the\n representation.\n\n :param str orig_typename: a common type name with optional argument\n (for fields with a size)\n :returns: (type_num, type_arg, numpy_type)\n :rtype: tuple\n \"\"\"\n # process any type_arg that might be included\n if ':' in orig_typename:\n vals = orig_typename.split(':', 2)\n if len(vals) > 2:\n raise ValueError(\"too many parts in type: %r\" % orig_typename)\n type_name, arg = vals\n try:\n type_arg = int(arg)\n except ValueError:\n raise ValueError(\"unable to parse type argument in: %r\" % orig_typename)\n else:\n type_arg = 0\n type_name = orig_typename\n\n if type_name not in MONARY_TYPES:\n raise ValueError(\"unknown typename: %r\" % type_name)\n if type_name in (\"string\", \"binary\", \"bson\"):\n if type_arg == 0:\n raise ValueError(\"%r must have an explicit typearg with nonzero length \"\n \"(use 'string:20', for example)\" % type_name)\n type_num, numpy_type_code = MONARY_TYPES[type_name]\n numpy_type = \"%s%i\" % (numpy_type_code, type_arg)\n else:\n type_num, numpy_type = MONARY_TYPES[type_name]\n return type_num, type_arg, numpy_type\n\ndef make_bson(obj):\n \"\"\"Given a Python (JSON compatible) dictionary, returns a BSON string.\n\n (This hijacks the Python -> BSON conversion code from pymongo, which is needed for\n converting queries. Perhaps this dependency can be removed in a later version.)\n\n :param obj: object to be encoded as BSON (dict, string, or None)\n :returns: BSON encoded representation (byte string)\n :rtype: str\n \"\"\"\n if obj is None:\n obj = { }\n if not isinstance(obj, basestring):\n obj = bson.BSON.encode(obj)\n return obj\n\ndef get_ordering_dict(obj):\n \"\"\"Converts a field/direction specification to an OrderedDict, suitable\n for BSON encoding.\n \n :param obj: single field name or list of (field, direction) pairs\n :returns: mapping representing the field/direction list\n :rtype: OrderedDict\n \"\"\"\n if obj is None:\n return OrderedDict()\n elif isinstance(obj, basestring):\n return OrderedDict([(obj, 1)])\n elif isinstance(obj, list):\n return OrderedDict(obj)\n else:\n raise ValueError(\"invalid ordering: should be str or list of (column, direction) pairs\")\n\ndef get_plain_query(query):\n \"\"\"Composes a plain query from the given query object.\n \n :param dict query: query dictionary (or None)\n :returns: BSON encoded query (byte string)\n :rtype: str\n \"\"\"\n if query is None:\n query = { }\n return make_bson(query)\n\ndef get_full_query(query, sort=None, hint=None):\n \"\"\"Composes a full query from the given query object, and sort and hint clauses, if provided.\n \n :param dict query: query dictionary (or None)\n :param sort: (optional) single field name or list of (field, direction) pairs\n :param hint: (optional) single field name or list of (field, direction) pairs\n :returns: BSON encoded query (byte string)\n :rtype: str\n \"\"\"\n if query is None:\n query = { }\n\n if sort or hint:\n query = OrderedDict([(\"$query\", query)])\n if sort:\n try:\n query[\"$orderby\"] = get_ordering_dict(sort)\n except ValueError:\n raise ValueError(\"sort arg must be string or list of (field, direction) pairs\")\n if hint:\n try:\n query[\"$hint\"] = get_ordering_dict(hint)\n except ValueError:\n raise ValueError(\"hint arg must be string or list of (field, direction) pairs\")\n \n return make_bson(query)\n\nclass Monary(object):\n 
\"\"\"Represents a 'monary' connection to a particular MongoDB server.\"\"\"\n \n def __init__(self, host=\"localhost\", port=27017, username=None,\n password=None, database=None, options={}):\n \"\"\"Initialize this connection with the given host and port.\n \n :param host: either host name (or IP) to connect to, or full URI\n :param port: port number of running MongoDB service on host\n :param username: An optional username for authentication.\n :param password: An optional password for authentication.\n :param database: The database to authenticate to if the URI\n specifies a username and password. If this is not specified but\n credentials exist, this defaults to the \"admin\" database. See\n mongoc_uri(7).\n :param options: Connection-specific options as a dict.\n \"\"\"\n\n self._cmonary = cmonary\n self._connection = None\n self._collection_ns = ''\n self._collection = None\n if not self.connect(host, port, username, password, database, options):\n raise Warning(\"Connection failed.\")\n\n def connect(self, host=\"localhost\", port=27017, username=None,\n password=None, database=None, options={}):\n \"\"\"Connects to the given host and port.\n\n :param host: either host name (or IP) to connect to, or full URI\n :param port: port number of running MongoDB service on host\n :param username: An optional username for authentication.\n :param password: An optional password for authentication.\n :param database: The database to authenticate to if the URI\n specifies a username and password. If this is not specified but\n credentials exist, this defaults to the \"admin\" database. See\n mongoc_uri(7).\n :param options: Connection-specific options as a dict.\n\n :returns: True if successful; false otherwise.\n :rtype: bool\n \"\"\"\n\n if self._connection is not None:\n self.close()\n\n if \"mongodb://\" in host:\n uri = host\n else:\n # Build up the URI string.\n uri = [\"mongodb://\"]\n if username is not None:\n if password is None:\n uri.append(\"%s@\" % username)\n else:\n uri.append(\"%s:%s@\" % (username, password))\n elif password is not None:\n raise ValueError(\"You cannot have a password with no username.\")\n\n uri.append(\"%s:%d\" % (host, port))\n\n if database is not None:\n uri.append(\"/%s\" % database)\n if len(options) > 0:\n uri.append(\"?%s\" % urlencode(options))\n uri = \"\".join(uri)\n\n # Attempt the connection\n self._connection = cmonary.monary_connect(uri)\n return (self._connection is not None)\n\n def _make_column_data(self, fields, types, count):\n \"\"\"Builds the 'column data' structure used by the underlying cmonary code to\n populate the arrays. 
This code must allocate the array objects, and provide\n their corresponding storage pointers and sizes to cmonary.\n\n :param fields: list of field names\n :param types: list of Monary type names\n :param count: size of storage to be allocated\n \n :returns: (coldata, colarrays) where coldata is the cmonary\n column data storage structure, and colarrays is a list of\n numpy.ndarray instances\n :rtype: tuple\n \"\"\"\n\n if len(fields) != len(types):\n raise ValueError(\"number of fields and types do not match\")\n numcols = len(fields)\n if numcols > MAX_COLUMNS:\n raise ValueError(\"number of fields exceeds maximum of %d\" % MAX_COLUMNS)\n coldata = cmonary.monary_alloc_column_data(numcols, count)\n colarrays = [ ]\n for i, (field, typename) in enumerate(zip(fields, types)):\n\n cmonary_type, cmonary_type_arg, numpy_type = get_monary_numpy_type(typename)\n\n data = numpy.zeros([count], dtype=numpy_type)\n mask = numpy.ones([count], dtype=bool)\n storage = numpy.ma.masked_array(data, mask)\n colarrays.append(storage)\n\n data_p = data.ctypes.data_as(c_void_p)\n mask_p = mask.ctypes.data_as(c_void_p)\n cmonary.monary_set_column_item(coldata, i, field,\n cmonary_type, cmonary_type_arg,\n data_p, mask_p)\n\n return coldata, colarrays\n\n def _get_collection(self, db, collection):\n \"\"\"Returns the specified collection to query against.\n\n :param db: name of database\n :param collection: name of collection\n\n :returns: True if successful; false otherwise\n :rtype: bool\n \"\"\"\n if self._connection is not None:\n if self._collection_ns != db + '.' + collection:\n cmonary.monary_destroy_collection(self._collection)\n else:\n return True\n else:\n raise ValueError(\"failed to get collection %s.%s - not connected\" % (db, collection))\n\n self._collection = cmonary.monary_use_collection(self._connection,\n db,\n collection)\n success = (self._collection is not None)\n self._collection_ns = db + '.' 
+ collection if success else ''\n return success\n\n def count(self, db, coll, query=None):\n \"\"\"Count the number of records that will be returned by the given query.\n \n :param db: name of database\n :param coll: name of the collection to be queried\n :param query: (optional) dictionary of Mongo query parameters\n \n :returns: the number of records\n :rtype: int\n \"\"\"\n if not self._get_collection(db, coll):\n raise ValueError(\"couldn't connect to collection %s.%s\" % (db, coll))\n query = make_bson(query)\n count = cmonary.monary_query_count(self._collection, query)\n if count < 0:\n raise RuntimeError(\"MongoDB C driver db.collection.count returned a negative value :(\")\n return count\n\n def query(self, db, coll, query, fields, types,\n sort=None, hint=None,\n limit=0, offset=0,\n do_count=True, select_fields=False):\n \"\"\"Performs an array query.\n \n :param db: name of database\n :param coll: name of the collection to be queried\n :param query: dictionary of Mongo query parameters\n :param fields: list of fields to be extracted from each record\n :param types: corresponding list of field types\n :param sort: (optional) single field name or list of (field, direction) pairs\n :param hint: (optional) single field name or list of (field, direction) pairs\n :param limit: (optional) limit number of records (and size of arrays)\n :param offset: (optional) skip this many records before gathering results\n :param bool do_count: count items before allocating arrays\n (otherwise, array size is set to limit)\n :param bool select_fields: select exact fields from database\n (performance/bandwidth tradeoff)\n\n :returns: list of numpy.ndarray, corresponding to the requested fields and types\n :rtype: list\n \"\"\"\n\n plain_query = get_plain_query(query)\n full_query = get_full_query(query, sort, hint)\n \n if not do_count and limit > 0:\n count = limit\n else:\n # count() doesn't like $query/$orderby/$hint clauses, so we need to use a plain query\n count = self.count(db, coll, plain_query)\n\n if count > limit > 0:\n count = limit\n\n coldata = None\n try:\n coldata, colarrays = self._make_column_data(fields, types, count)\n cursor = None\n try:\n if not self._get_collection(db, coll):\n raise ValueError(\"unable to get the collection\")\n cursor = cmonary.monary_init_query(self._collection, offset, limit,\n full_query, coldata, select_fields)\n cmonary.monary_load_query(cursor)\n finally:\n if cursor is not None:\n cmonary.monary_close_query(cursor)\n finally:\n if coldata is not None:\n cmonary.monary_free_column_data(coldata)\n return colarrays\n\n def block_query(self, db, coll, query, fields, types,\n sort=None, hint=None,\n block_size=8192, limit=0, offset=0,\n select_fields=False):\n \"\"\"Performs a block query.\n\n :param db: name of database\n :param coll: name of the collection to be queried\n :param query: dictionary of Mongo query parameters\n :param fields: list of fields to be extracted from each record\n :param types: corresponding list of field types\n :param sort: (optional) single field name or list of (field, direction) pairs\n :param hint: (optional) single field name or list of (field, direction) pairs\n :param block_size: (optional) size in number of rows of each yeilded list \n :param limit: (optional) limit number of records (and size of arrays)\n :param offset: (optional) skip this many records before gathering results\n :param bool select_fields: select exact fields from database\n (performance/bandwidth tradeoff)\n\n :returns: list of numpy.ndarray, 
corresponding to the requested fields and types\n :rtype: list\n\n A block query is a query whose results are returned in\n blocks of a given size. Instead of returning a list of arrays, this generator\n yields portions of each array in multiple blocks, where each block may contain\n up to *block_size* elements.\n\n An example::\n \n cumulative_gain = 0.0\n for buy_price_block, sell_price_block in (\n monary.block_query(\"finance\", \"assets\", {\"sold\": True},\n [\"buy_price\", \"sell_price\"],\n [\"float64\", \"float64\"],\n block_size=1024)):\n gain = sell_price_block - buy_price_block # vector subtraction\n cumulative_gain += numpy.sum(gain)\n\n .. note:: Memory for each block is reused between iterations. If the\n caller wishes to retain the values from a given iteration, it\n should copy the data.\n \"\"\"\n\n if block_size < 1:\n block_size = 1\n\n full_query = get_full_query(query, sort, hint)\n\n coldata = None\n try:\n coldata, colarrays = self._make_column_data(fields, types, block_size)\n cursor = None\n try:\n if not self._get_collection(db, coll):\n raise ValueError(\"unable to get the collection\")\n cursor = cmonary.monary_init_query(self._collection, offset, limit,\n full_query, coldata, select_fields)\n while True:\n num_rows = cmonary.monary_load_query(cursor)\n if num_rows == block_size:\n yield colarrays\n elif num_rows > 0:\n yield [ arr[:num_rows] for arr in colarrays ]\n break\n else:\n break\n finally:\n if cursor is not None:\n cmonary.monary_close_query(cursor)\n finally:\n if coldata is not None:\n cmonary.monary_free_column_data(coldata)\n\n def close(self):\n \"\"\"Destroy the current collection, if any.\"\"\"\n self._collection_ns = \"\"\n if self._collection is not None:\n cmonary.monary_destroy_collection(self._collection)\n self._collection = None\n \"\"\"Closes the current connection, if any.\"\"\"\n if self._connection is not None:\n cmonary.monary_disconnect(self._connection)\n self._connection = None\n \n def __enter__(self):\n \"\"\"Monary connections meet the ContextManager protocol.\"\"\"\n return self\n \n def __exit__(self, *args):\n \"\"\"Monary connections meet the ContextManager protocol.\"\"\"\n self.close()\n \n def __del__(self):\n \"\"\"Closes the Monary connection and cleans up resources.\"\"\"\n self.close()\n self._cmonary = None\n" ]
[ [ "numpy.ma.masked_array", "numpy.zeros", "numpy.ones" ] ]
RobbenRoll/emgfit
[ "780482a61f4dc080590c0036e9b729c0429530df" ]
[ "emgfit/sample.py" ]
[ "################################################################################\r\n##### Python module for creating simulated time-of-flight mass spectra with\r\n##### Gaussian and hyper-exponentially-modified Gaussian lines shapes\r\n##### Author: Stefan Paul\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.stats import exponnorm, uniform, norm\r\n\r\n################################################################################\r\n##### Define functions for drawing random variates from Gaussian and hyper-EMG\r\n##### PDFs\r\nnorm_precision = 1e-09 # required precision for normalization of eta parameters\r\n\r\n\r\ndef Gaussian_rvs(mu, sigma , N_samples=1):\r\n \"\"\"Draw random samples from a Gaussian probability density function\r\n\r\n Parameters\r\n ----------\r\n mu : float\r\n Nominal position of simulated peak (mean of Gaussian).\r\n sigma : float\r\n Nominal standard deviation of the simulated Gaussian peak.\r\n N_samples : int, optional, default: 1\r\n Number of random events to sample.\r\n\r\n Returns\r\n -------\r\n :class:`numpy.ndarray` of floats\r\n Array with simulated events.\r\n\r\n \"\"\"\r\n rvs = norm.rvs(loc=mu, scale=sigma, size=N_samples)\r\n return rvs\r\n\r\n\r\ndef _h_m_i_rvs(mu, sigma, tau_m, N_i):\r\n \"\"\"Helper function for definition of h_m_emg_rvs \"\"\"\r\n rvs = mu - exponnorm.rvs(loc=0,scale=sigma,K=tau_m/sigma,size=N_i)\r\n return rvs\r\n\r\n\r\ndef h_m_emg_rvs(mu, sigma, *t_args,N_samples=1):\r\n \"\"\"Draw random samples from negative skewed hyper-EMG probabaility density\r\n\r\n Parameters\r\n ----------\r\n mu : float\r\n Nominal position of simulated peak (mean of underlying Gaussian).\r\n sigma : float\r\n Nominal standard deviation (of the underlying Gaussian) of the simulated\r\n hyper-EMG peak.\r\n theta : float\r\n Mixing weight of pos. & neg. skewed EMG distributions.\r\n t_args : list of lists of float\r\n List containing lists of the EMG tail parameters with the signature:\r\n [[eta_m1, eta_m2, ...], [tau_m1, tau_m2, ...]]\r\n N_samples : int, optional, default: 1\r\n Number of random events to sample.\r\n\r\n Returns\r\n -------\r\n :class:`numpy.ndarray` of floats\r\n Array with simulated events.\r\n\r\n \"\"\"\r\n if not isinstance(N_samples,int):\r\n raise TypeError(\"N_samples must be of type int\")\r\n li_eta_m = t_args[0]\r\n li_tau_m = t_args[1]\r\n t_order_m = len(li_eta_m) # order of negative tail exponentials\r\n assert abs(sum(li_eta_m) - 1) < norm_precision, \"eta_m's don't add up to 1.\"\r\n if len(li_tau_m) != t_order_m: # check if all arguments match tail order\r\n raise Exception(\"orders of eta_m and tau_m do not match!\")\r\n\r\n # randomly distribute ions between tails according to eta_m weights\r\n tail_nos = np.random.choice(range(t_order_m),size=N_samples,p = li_eta_m)\r\n rvs = np.array([])\r\n for i in range(t_order_m):\r\n N_i = np.count_nonzero(tail_nos == i)\r\n tau_m = li_tau_m[i]\r\n rvs_i = _h_m_i_rvs(mu,sigma,tau_m,N_i)\r\n rvs = np.append(rvs,rvs_i)\r\n return rvs\r\n\r\n\r\ndef _h_p_i_rvs(mu, sigma, tau_p, N_i):\r\n \"\"\"Helper function for definition of h_p_emg_rvs \"\"\"\r\n rvs = exponnorm.rvs(loc=mu,scale=sigma,K=tau_p/sigma,size=N_i)\r\n return rvs\r\n\r\n\r\ndef h_p_emg_rvs(mu, sigma, *t_args, N_samples=1):\r\n \"\"\"Draw random samples from pos. 
skewed hyper-EMG probability density\r\n\r\n Parameters\r\n ----------\r\n mu : float\r\n Nominal position of simulated peak (mean of underlying Gaussian).\r\n sigma : float\r\n Nominal standard deviation (of the underlying Gaussian) of the simulated\r\n hyper-EMG peak.\r\n t_args : list of lists of float\r\n List containing lists of the EMG tail parameters with the signature:\r\n [[eta_p1, eta_p2, ...], [tau_p1, tau_p2, ...]]\r\n N_samples : int, optional, default: 1\r\n Number of random events to sample.\r\n\r\n Returns\r\n -------\r\n :class:`numpy.ndarray` of floats\r\n Array with simulated events.\r\n\r\n \"\"\"\r\n if not isinstance(N_samples,int):\r\n raise TypeError(\"N_samples must be of type int\")\r\n li_eta_p = t_args[0]\r\n li_tau_p = t_args[1]\r\n t_order_p = len(li_eta_p) # order of negative tail exponentials\r\n assert abs(sum(li_eta_p) - 1) < norm_precision, \"eta_p's don't add up to 1.\"\r\n if len(li_tau_p) != t_order_p: # check if all arguments match tail order\r\n raise Exception(\"orders of eta_p and tau_p do not match!\")\r\n\r\n # randomly distribute ions between tails according to eta_p weights\r\n tail_nos = np.random.choice(range(t_order_p),size=N_samples,p = li_eta_p)\r\n rvs = np.array([])\r\n for i in range(t_order_p):\r\n N_i = np.count_nonzero(tail_nos == i)\r\n tau_p = li_tau_p[i]\r\n rvs_i = _h_p_i_rvs(mu,sigma,tau_p,N_i)\r\n rvs = np.append(rvs,rvs_i)\r\n return rvs\r\n\r\n\r\ndef h_emg_rvs(mu, sigma , theta, *t_args, N_samples=1):\r\n \"\"\"Draw random samples from a hyper-EMG probability density function\r\n\r\n Parameters\r\n ----------\r\n mu : float\r\n Nominal position of simulated peak (mean of underlying Gaussian).\r\n sigma : float\r\n Nominal standard deviation (of the underlying Gaussian) of the simulated\r\n hyper-EMG peak.\r\n theta : float\r\n Mixing weight of pos. & neg. 
skewed EMG distributions.\r\n t_args : list of lists of float\r\n List containing lists of the EMG tail parameters with the signature:\r\n [[eta_m1, eta_m2, ...], [tau_m1, tau_m2, ...], [eta_p1, eta_p2, ...],\r\n [tau_p1, tau_p2, ...]]\r\n N_samples : int, optional, default: 1\r\n Number of random events to sample.\r\n\r\n Returns\r\n -------\r\n :class:`numpy.ndarray` of floats\r\n Array with simulated events.\r\n\r\n \"\"\"\r\n if not isinstance(N_samples,int):\r\n raise TypeError(\"N_samples must be of type int\")\r\n li_eta_m = t_args[0]\r\n li_tau_m = t_args[1]\r\n li_eta_p = t_args[2]\r\n li_tau_p = t_args[3]\r\n if theta == 1:\r\n rvs = h_m_emg_rvs(mu, sigma, li_eta_m, li_tau_m, N_samples=N_samples)\r\n elif theta == 0:\r\n rvs = h_p_emg_rvs(mu, sigma, li_eta_p, li_tau_p, N_samples=N_samples)\r\n else:\r\n # randomly distribute ions between h_m_emg and h_p_emg according to\r\n # left-right-weight theta:\r\n neg = np.random.choice([1,0],size=N_samples,p = [theta,1-theta])\r\n N_m = int(np.sum(neg)) #int(np.round(theta*N_samples)) # np.rint(theta*N_samples,dtype=int)\r\n N_p = N_samples - N_m # np.rint((1-theta)*N_samples,dtype=int)\r\n rvs_m = h_m_emg_rvs(mu, sigma, li_eta_m, li_tau_m, N_samples=N_m)\r\n rvs_p = h_p_emg_rvs(mu, sigma, li_eta_p, li_tau_p, N_samples=N_p)\r\n rvs = np.append(rvs_m,rvs_p)\r\n return rvs\r\n\r\n\r\n################################################################################\r\n##### Define functions for creating simulated spectra\r\n\r\ndef simulate_events(shape_pars, mus, amps, bkg_c, N_events, x_min,\r\n x_max, out='hist', N_bins=None, bin_cens=None):\r\n \"\"\"Create simulated detector events drawn from a user-defined probability\r\n distribution function (PDF)\r\n\r\n Events can either be output as a list of single events (mass stamps) or as a\r\n histogram. In histogram output mode, uniform binning is easily realized by\r\n specifying the `N_bins` argument. More control over the binning can be\r\n achieved by parsing the desired bin centers to the `bin_cens` argument (e.g.\r\n for non-uniform binning).\r\n\r\n Parameters\r\n ----------\r\n shape_pars : dict\r\n Peak-shape parameters to use for sampling. The dictionary must follow\r\n the structure of the :attr:`~spectrum.shape_cal_pars` attribute of the\r\n :class:`~emgfit.spectrum.spectrum` class.\r\n mus : float or list of float\r\n Nominal peak positions of peaks in simulated spectrum.\r\n amps : float or list of float [(counts in peak)*(bin width in u)]\r\n Nominal amplitudes of peaks in simulated spectrum.\r\n bkg_c : float [counts per bin], optional, default: 0.0\r\n Nominal amplitude of uniform background in simulated spectrum.\r\n x_min : float\r\n Beginning of sampling x-range.\r\n x_max : float\r\n End of sampling x-range.\r\n N_events : int, optional, default: 1000\r\n Total number of events to simulate (signal and background events).\r\n out : str, optional\r\n Output format of sampled data. Options:\r\n\r\n - ``'hist'`` for binned mass spectrum (default). The centres of the mass\r\n bins must be specified with the `bin_cens` argument.\r\n - ``'list'`` for unbinned list of single ion and background events.\r\n\r\n N_bins : int, optional\r\n Number of uniform bins to use in ``'hist'`` output mode. The **outer**\r\n edges of the first and last bin are fixed to the start and end of the\r\n sampling range respectively (i.e. `x_min` and `x_max`). 
In between, bins\r\n are distributed with a fixed spacing of (`x_max`-`x_min`)/`N_bins`.\r\n bin_cens : :class:`numpy.ndarray`\r\n Centres of bins to use in ``'hist'`` output mode. This argument\r\n allows the realization of non-uniform binning. Bin edges are centred\r\n between neighboring bins. Note: Bins outside the sampling range defined\r\n with `x_min` and `x_max` will be empty.\r\n\r\n Returns\r\n -------\r\n :class:`numpy.ndarray` or :class:`pandas.Dataframe`\r\n If out='hist' a dataframe with a histogram of the format\r\n [bin centre, counts in bin] is returned. If out='list' an unbinned\r\n array with the x-values of single ion or background events is returned.\r\n\r\n Notes\r\n -----\r\n Random events are created via custom hyper-EMG extensions of Scipy's\r\n :meth:`scipy.stats.exponnorm.rvs` method.\r\n\r\n Currently, all simulated peaks have identical width and shape (no re-scaling\r\n of mass-dependent shape parameters to a peak's mass centroid).\r\n\r\n Routine requires tail arguments in shape_cal_pars dict to be ordered\r\n (eta_m1, eta_m2, ...) etc..\r\n\r\n **Mind the different units for peak amplitudes `amps`\r\n (<counts in peak> * <bin width in x-axis units>) and the background level\r\n `bkg_c` (counts per bin).** When spectrum data is simulated counts are\r\n distributed between the different peaks and the background with probability\r\n weights `amps` / <bin width in u> and `bkg_c` * <number of bins>,\r\n respectively. As a consequence, simply changing `N_events` (while keeping\r\n all other arguments constant), will cause `amps` and `bkg_c` to deviate from\r\n their nominal units.\r\n\r\n \"\"\"\r\n mus = np.atleast_1d(mus)\r\n amps = np.atleast_1d(amps)\r\n assert len(mus) == len(amps), \"Lengths of `mus` and `amps` arrays must match.\"\r\n if (mus < x_min).any() or (mus > x_max).any():\r\n import warnings\r\n msg = str(\"At least one peak centroid in `mus` is outside the sampling range.\")\r\n warnings.warn(msg, UserWarning)\r\n\r\n sample_range = x_max - x_min\r\n\r\n # Get bin parameters\r\n if N_bins is not None and bin_cens is not None:\r\n msg = \"Either specify the `N_bins` OR the `bin_cens` argument.\"\r\n raise Exception(msg)\r\n elif bin_cens is not None: # user-defined bins\r\n N_bins = len(bin_cens)\r\n bin_edges = np.empty((N_bins+1,))\r\n spacings = bin_cens[1:] - bin_cens[:-1]\r\n inner_edges = bin_cens[:-1] + spacings/2\r\n bin_edges[1:-1] = inner_edges # set inner bin edges\r\n # Get outer edges\r\n width_start = bin_cens[1] - bin_cens[0]\r\n bin_edges[0] = bin_cens[0] - width_start/2 # set first bin edge\r\n width_end = bin_cens[-1] - bin_cens[-2]\r\n bin_edges[-1] = bin_cens[-1] + width_end/2 # set last bin edge\r\n bin_width = (bin_edges[-1] - bin_edges[0])/N_bins # AVERAGE bin width\r\n elif N_bins is not None: # automatic uniform binning\r\n bin_edges = np.linspace(x_min, x_max, num=N_bins+1, endpoint=True)\r\n bin_width = sample_range/N_bins\r\n bin_cens = bin_edges[:-1] + bin_width/2\r\n else:\r\n raise Exception(\"`N_bins` or `bin_cens` argument must be specified!\")\r\n\r\n # Prepare shape parameters\r\n sigma = shape_pars['sigma']\r\n li_eta_m = []\r\n li_tau_m = []\r\n li_eta_p = []\r\n li_tau_p = []\r\n for key, val in shape_pars.items():\r\n if key.startswith('eta_m'):\r\n li_eta_m.append(val)\r\n if key.startswith('tau_m'):\r\n li_tau_m.append(val)\r\n if key.startswith('eta_p'):\r\n li_eta_p.append(val)\r\n if key.startswith('tau_p'):\r\n li_tau_p.append(val)\r\n if len(li_eta_m) == 0 and len(li_eta_p) == 0: # Gaussian\r\n theta = 
-1 # flag for below\r\n elif len(li_eta_m) == 0 and len(li_tau_m) == 1: # emg10\r\n li_eta_m = [1]\r\n theta = 1\r\n elif len(li_eta_p) == 0 and len(li_tau_p) == 1: # emg01\r\n li_eta_p = [1]\r\n theta = 0\r\n else: # emg11 or higher tail order\r\n theta = shape_pars['theta']\r\n\r\n # Distribute counts over different peaks and background (bkgd)\r\n # randomly distribute ions using amps and c_bkg as prob. weights\r\n N_peaks = len(amps)\r\n counts = np.append(amps/bin_width,bkg_c*N_bins) # cts in each peak & bkgd\r\n weights = counts/np.sum(counts) # normalized probability weights\r\n\r\n peak_dist = np.random.choice(range(N_peaks+1), size=N_events, p=weights)\r\n N_bkg = np.count_nonzero(peak_dist == N_peaks) # calc. number of bkgd counts\r\n\r\n events = np.array([])\r\n # Loop over peaks and create random samples from each peak\r\n for i in range(N_peaks):\r\n N_i = np.count_nonzero(peak_dist == i) # get no. of ions in peak\r\n mu = mus[i]\r\n if theta == -1: # Gaussian\r\n events_i = Gaussian_rvs(mu, sigma, N_samples=N_i)\r\n else: # hyper-EMG\r\n events_i = h_emg_rvs(mu, sigma, theta, li_eta_m, li_tau_m,\r\n li_eta_p, li_tau_p, N_samples=N_i)\r\n\r\n events = np.append(events, events_i)\r\n\r\n # Create background events\r\n bkg = uniform.rvs(size=N_bkg, loc=x_min, scale=sample_range)\r\n events = np.append(events, bkg)\r\n\r\n if out == 'list': # return unbinned list of events\r\n return events\r\n elif out == 'hist': # return histogram\r\n y = np.histogram(events, bins=bin_edges)[0]\r\n df = pd.DataFrame(data=y, index=bin_cens, columns = ['Counts'])\r\n df.index.rename('m/z [u]', inplace=True)\r\n return df\r\n\r\n\r\ndef simulate_spectrum(spec, x_cen=None, x_range=None, mus=None, amps=None,\r\n bkg_c=None, N_events=None, copy_spec=False):\r\n \"\"\"Create a simulated spectrum using the attributes of a reference spectrum\r\n\r\n The peak shape of the sampling probability density function (PDF)\r\n follows the shape calibration of the reference spectrum (`spec`). By\r\n default, all other parameters of the sampling PDF are identical to the\r\n best-fit parameters of the reference spectrum. If desired, the positions,\r\n amplitudes and number of peaks in the sampling PDF as well as the background\r\n level can be changed with the `mus`, `amps` and `bkg_c` arguments.\r\n\r\n Parameters\r\n ----------\r\n spec : :class:`~emgfit.spectrum.spectrum`\r\n Reference spectrum object whose best-fit parameters will be used to\r\n sample from.\r\n mus : float or list of float, optional\r\n Nominal peak centres of peaks in simulated spectrum. Defaults to the\r\n mus of the reference spectrum fit.\r\n amps : float or list of float [(counts in peak)*(bin width in u)], optional\r\n Nominal amplitudes of peaks in simulated spectrum. Defaults to the\r\n amplitudes of the reference spectrum fit.\r\n bkg_c : float [counts per bin], optional\r\n Nominal amplitude of uniform background in simulated spectrum. Defaults\r\n to the c_bkg obtained in the fit of the first peak in the reference\r\n spectrum.\r\n x_cen : float, optional\r\n Center of simulated x-range. Defaults to `x_cen` of `spec`.\r\n x_range : float, optional\r\n Covered x-range of simulated spectrum. Defaults to `x_range` of\r\n `spectrum`.\r\n N_events : int, optional\r\n Number of ion events to simulate (including background events). 
Defaults\r\n to total number of events in `spec`.\r\n copy_spec : bool, optional, default: False\r\n If `False` (default), this function returns a fresh\r\n :class:`~emgfit.spectrum.spectrum` object created from the simulated\r\n mass data. If `True`, this function returns an exact copy of `spec` with\r\n only the :attr`data` attribute replaced by the new simulated mass data.\r\n\r\n Returns\r\n -------\r\n :class:`~emgfit.spectrum.spectrum`\r\n If `copy_spec = False` (default) a fresh spectrum object holding the\r\n simulated mass data is returned. If `copy_spec = True`, a copy of the\r\n reference spectrum `spec` is returned with only the :attr:`data`\r\n attribute replaced by the new simulated mass data.\r\n\r\n Notes\r\n -----\r\n Random events are created via custom Hyper-EMG extensions of Scipy's\r\n :meth:`scipy.stats.exponnorm.rvs` method.\r\n\r\n Currently, all simulated peaks have identical width and shape (no re-scaling\r\n of mass-dependent shape parameters to a peak's mass centroid).\r\n\r\n The returned spectrum follows the binning of the reference spectrum.\r\n\r\n Mind the different units for peak amplitudes `amps`\r\n (<counts in peak> * <bin width in x-axis units>) and the background level\r\n `bkg_c` (counts per bin). When spectrum data is simulated counts are\r\n distributed between the different peaks and the background with probability\r\n weights `amps` / <bin width in x-axis units> and `bkg_c` * <number of bins>,\r\n respectively. As a consequence, simply changing `N_events` (while keeping\r\n all other arguments constant), will cause `amps` and `bkg_c` to deviate from\r\n their nominal units.\r\n\r\n \"\"\"\r\n if spec.fit_results is [] or None:\r\n raise Exception(\"No fit results found in reference spectrum `spec`.\")\r\n if x_cen is None and x_range is None:\r\n x_min = spec.data.index.values[0]\r\n x_max = spec.data.index.values[-1]\r\n indeces = range(len(spec.peaks)) # get peak indeces in sampling range\r\n else:\r\n x_min = x_cen - x_range\r\n x_max = x_cen + x_range\r\n # Get peak indeces in sampling range:\r\n peaks = spec.peaks\r\n indeces = [i for i in range(len(peaks)) if x_min <= peaks[i].x_pos <= x_max]\r\n if mus is None:\r\n if len(indeces) == 0:\r\n import warnings\r\n msg = str(\"No peaks in sampling range.\")\r\n warnings.warn(msg, UserWarning)\r\n mus = []\r\n for i in indeces:\r\n result = spec.fit_results[i]\r\n pref = 'p{0}_'.format(i)\r\n mus.append(result.best_values[pref+'mu'])\r\n if amps is None:\r\n amps = []\r\n for i in indeces:\r\n result = spec.fit_results[i]\r\n pref = 'p{0}_'.format(i)\r\n amps.append(result.best_values[pref+'amp'])\r\n if bkg_c is None:\r\n assert len(indeces) != 0, \"Zero background and no peaks in sampling range.\"\r\n result = spec.fit_results[indeces[0]]\r\n bkg_c = result.best_values['bkg_c']\r\n if N_events is None:\r\n N_events = int(np.sum(spec.data['Counts'])) # total number of counts in spectrum\r\n\r\n # Create histogram with Monte Carlo events\r\n x = spec.data[x_min:x_max].index.values\r\n df = simulate_events(spec.shape_cal_pars, mus, amps, bkg_c, N_events, x_min,\r\n x_max, out='hist', N_bins=None, bin_cens=x)\r\n\r\n # Copy original spectrum and overwrite data\r\n # This copies all results such as peak assignments, PS calibration,\r\n # fit_results etc.\r\n if copy_spec:\r\n from copy import deepcopy\r\n new_spec = deepcopy(spec)\r\n new_spec.data = df\r\n else: # Define a fresh spectrum with sampled data\r\n from emgfit import spectrum\r\n new_spec = spectrum.spectrum(df=df)\r\n\r\n return 
new_spec\r\n" ]
[ [ "numpy.linspace", "numpy.random.choice", "pandas.DataFrame", "numpy.atleast_1d", "scipy.stats.exponnorm.rvs", "numpy.append", "scipy.stats.norm.rvs", "scipy.stats.uniform.rvs", "numpy.count_nonzero", "numpy.array", "numpy.histogram", "numpy.sum", "numpy.empty" ] ]
ahmadhafidh/analysis-of-sentiment-ranking-and-rating-apps
[ "0d9b71eef573bff27575ff0d5be211b51cefd31b" ]
[ "sklearn_try/try_new_datasentimen.py" ]
[ "from sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.pipeline import Pipeline\r\nimport pickle\r\nimport mysql.connector\r\nimport json\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n passwd=\"\",\r\n database=\"project\"\r\n)\r\n\r\n## TOKENIZING\r\ncount_vect = CountVectorizer()\r\nfilobjek=open(\"sklearn_try/train_data\",'rb')\r\ntrain_data=pickle.load(filobjek)\r\nX_train_counts = count_vect.fit_transform(train_data)\r\n# print(train_data)\r\n\r\n## TF TRANSFORMER\r\nfilobjek=open(\"sklearn_try/train_count\",'rb')\r\ntrain_count=pickle.load(filobjek)\r\ntf_transformer = TfidfTransformer().fit(train_count)\r\n# print(train_count)\r\n\r\ntest_data=list()\r\nmycursor = mydb.cursor()\r\nmycursor.execute(\"SELECT * FROM data_crawling_baru\")\r\nmyresult = mycursor.fetchall()\r\nfor id_crawl,konten,id_tes in myresult:\r\n test_data.append(konten)\r\n\r\nfilobjek=open(\"sklearn_try/model_train\",'rb')\r\nclff=pickle.load(filobjek)\r\nX_new_counts = count_vect.transform(test_data)\r\nX_new_tf = tf_transformer.transform(X_new_counts)\r\npredicted = clff.predict(X_new_tf)\r\n\r\nsentimen=list()\r\nfor doc, category in zip(test_data, predicted):\r\n # print('%r => %s' % (doc, category))\r\n sentimen.append(category)\r\n\r\npos=0\r\nneg=0\r\nfor status in sentimen:\r\n if status==\"positif\":\r\n pos=pos+1\r\n elif status==\"negatif\":\r\n neg=neg+1\r\n\r\n\r\nhasil_sentimen={\"positif\":pos,\"negatif\":neg}\r\njson_sentimen=json.dumps(hasil_sentimen)\r\nprint(json_sentimen)" ]
[ [ "sklearn.feature_extraction.text.CountVectorizer", "sklearn.feature_extraction.text.TfidfTransformer" ] ]
ademyanchuk/pytorch_segmentation
[ "b615d584631c09b1223670bdb409f90199e1b4ae" ]
[ "models/unet.py" ]
[ "from base import BaseModel\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom itertools import chain\nfrom utils.helpers import initialize_weights, set_trainable\nfrom models import resnet\n\n\ndef x2conv(in_channels, out_channels, inner_channels=None):\n inner_channels = out_channels // 2 if inner_channels is None else inner_channels\n down_conv = nn.Sequential(\n nn.Conv2d(in_channels, inner_channels, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(inner_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(inner_channels, out_channels, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n )\n return down_conv\n\n\nclass encoder(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(encoder, self).__init__()\n self.down_conv = x2conv(in_channels, out_channels)\n self.pool = nn.MaxPool2d(kernel_size=2, ceil_mode=True)\n\n def forward(self, x):\n x = self.down_conv(x)\n x = self.pool(x)\n return x\n\n\nclass decoder(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(decoder, self).__init__()\n self.up = nn.ConvTranspose2d(\n in_channels, in_channels // 2, kernel_size=2, stride=2\n )\n self.up_conv = x2conv(in_channels, out_channels)\n\n def forward(self, x_copy, x, interpolate=True):\n x = self.up(x)\n\n if (x.size(2) != x_copy.size(2)) or (x.size(3) != x_copy.size(3)):\n if interpolate:\n # Iterpolating instead of padding\n x = F.interpolate(\n x,\n size=(x_copy.size(2), x_copy.size(3)),\n mode=\"bilinear\",\n align_corners=True,\n )\n else:\n # Padding in case the incomping volumes are of different sizes\n diffY = x_copy.size()[2] - x.size()[2]\n diffX = x_copy.size()[3] - x.size()[3]\n x = F.pad(\n x, (diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2)\n )\n\n # Concatenate\n x = torch.cat([x_copy, x], dim=1)\n x = self.up_conv(x)\n return x\n\n\nclass UNet(BaseModel):\n def __init__(self, num_classes, in_channels=3, freeze_bn=False, **kwargs):\n super(UNet, self).__init__()\n\n self.start_conv = x2conv(in_channels, 64)\n self.down1 = encoder(64, 128)\n self.down2 = encoder(128, 256)\n self.down3 = encoder(256, 512)\n self.down4 = encoder(512, 1024)\n\n self.middle_conv = x2conv(1024, 1024)\n\n self.up1 = decoder(1024, 512)\n self.up2 = decoder(512, 256)\n self.up3 = decoder(256, 128)\n self.up4 = decoder(128, 64)\n self.final_conv = nn.Conv2d(64, num_classes, kernel_size=1)\n self._initialize_weights()\n\n if freeze_bn:\n self.freeze_bn()\n\n def _initialize_weights(self):\n for module in self.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n nn.init.kaiming_normal_(module.weight)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.BatchNorm2d):\n module.weight.data.fill_(1)\n module.bias.data.zero_()\n\n def forward(self, x):\n x1 = self.start_conv(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x = self.middle_conv(self.down4(x4))\n\n x = self.up1(x4, x)\n x = self.up2(x3, x)\n x = self.up3(x2, x)\n x = self.up4(x1, x)\n\n x = self.final_conv(x)\n return x\n\n def get_backbone_params(self):\n # There is no backbone for unet, all the parameters are trained from scratch\n return []\n\n def get_decoder_params(self):\n return self.parameters()\n\n def freeze_bn(self):\n for module in self.modules():\n if isinstance(module, nn.BatchNorm2d):\n module.eval()\n\n\n\"\"\"\n-> Unet with a resnet backbone\n\"\"\"\n\n\nclass UNetResnet(BaseModel):\n def __init__(\n self,\n num_classes,\n 
in_channels=3,\n backbone=\"resnet50\",\n pretrained=True,\n freeze_bn=False,\n freeze_backbone=False,\n **kwargs\n ):\n super(UNetResnet, self).__init__()\n model = getattr(resnet, backbone)(pretrained, norm_layer=nn.BatchNorm2d)\n\n self.initial = list(model.children())[:4]\n if in_channels != 3:\n self.initial[0] = nn.Conv2d(\n in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False\n )\n self.initial = nn.Sequential(*self.initial)\n\n # encoder\n self.layer1 = model.layer1\n self.layer2 = model.layer2\n self.layer3 = model.layer3\n self.layer4 = model.layer4\n\n # decoder\n self.conv1 = nn.Conv2d(2048, 192, kernel_size=3, stride=1, padding=1)\n self.upconv1 = nn.ConvTranspose2d(192, 128, 4, 2, 1, bias=False)\n\n self.conv2 = nn.Conv2d(1152, 128, kernel_size=3, stride=1, padding=1)\n self.upconv2 = nn.ConvTranspose2d(128, 96, 4, 2, 1, bias=False)\n\n self.conv3 = nn.Conv2d(608, 96, kernel_size=3, stride=1, padding=1)\n self.upconv3 = nn.ConvTranspose2d(96, 64, 4, 2, 1, bias=False)\n\n self.conv4 = nn.Conv2d(320, 64, kernel_size=3, stride=1, padding=1)\n self.upconv4 = nn.ConvTranspose2d(64, 48, 4, 2, 1, bias=False)\n\n self.conv5 = nn.Conv2d(48, 48, kernel_size=3, stride=1, padding=1)\n self.upconv5 = nn.ConvTranspose2d(48, 32, 4, 2, 1, bias=False)\n\n self.conv6 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)\n self.conv7 = nn.Conv2d(32, num_classes, kernel_size=1, bias=False)\n\n initialize_weights(self)\n\n if freeze_bn:\n self.freeze_bn()\n if freeze_backbone:\n set_trainable(\n [self.initial, self.layer1, self.layer2, self.layer3, self.layer4],\n False,\n )\n\n def forward(self, x):\n H, W = x.size(2), x.size(3)\n x1 = self.layer1(self.initial(x))\n x2 = self.layer2(x1)\n x3 = self.layer3(x2)\n x4 = self.layer4(x3)\n\n x = self.upconv1(self.conv1(x4))\n x = F.interpolate(\n x, size=(x3.size(2), x3.size(3)), mode=\"bilinear\", align_corners=True\n )\n x = torch.cat([x, x3], dim=1)\n x = self.upconv2(self.conv2(x))\n\n x = F.interpolate(\n x, size=(x2.size(2), x2.size(3)), mode=\"bilinear\", align_corners=True\n )\n x = torch.cat([x, x2], dim=1)\n x = self.upconv3(self.conv3(x))\n\n x = F.interpolate(\n x, size=(x1.size(2), x1.size(3)), mode=\"bilinear\", align_corners=True\n )\n x = torch.cat([x, x1], dim=1)\n\n x = self.upconv4(self.conv4(x))\n\n x = self.upconv5(self.conv5(x))\n\n # if the input is not divisible by the output stride\n if x.size(2) != H or x.size(3) != W:\n x = F.interpolate(x, size=(H, W), mode=\"bilinear\", align_corners=True)\n\n x = self.conv7(self.conv6(x))\n return x\n\n def get_backbone_params(self):\n return chain(\n self.initial.parameters(),\n self.layer1.parameters(),\n self.layer2.parameters(),\n self.layer3.parameters(),\n self.layer4.parameters(),\n )\n\n def get_decoder_params(self):\n return chain(\n self.conv1.parameters(),\n self.upconv1.parameters(),\n self.conv2.parameters(),\n self.upconv2.parameters(),\n self.conv3.parameters(),\n self.upconv3.parameters(),\n self.conv4.parameters(),\n self.upconv4.parameters(),\n self.conv5.parameters(),\n self.upconv5.parameters(),\n self.conv6.parameters(),\n self.conv7.parameters(),\n )\n\n def freeze_bn(self):\n for module in self.modules():\n if isinstance(module, nn.BatchNorm2d):\n module.eval()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.ConvTranspose2d", "torch.cat", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.functional.pad", "torch.nn.init.kaiming_normal_" ] ]
inaccel/TractSeg
[ "cc9feefd71ba9fcfacc4d3a7656f1a77bab9a287" ]
[ "tractseg/data/data_loader_inference.py" ]
[ "\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os.path import join\nfrom builtins import object\nimport numpy as np\n\nfrom tractseg.libs.system_config import SystemConfig as C\nfrom tractseg.libs import exp_utils\nfrom tractseg.libs import data_utils\nfrom tractseg.libs import peak_utils\nfrom tractseg.data.DLDABG_standalone import ZeroMeanUnitVarianceTransform as ZeroMeanUnitVarianceTransform_Standalone\nfrom tractseg.data.DLDABG_standalone import SingleThreadedAugmenter\nfrom tractseg.data.DLDABG_standalone import Compose\nfrom tractseg.data.DLDABG_standalone import NumpyToTensor\n\nnp.random.seed(1337)\n\n\nclass BatchGenerator2D_data_ordered_standalone(object):\n \"\"\"\n Creates batch of 2D slices from one subject.\n\n Does not depend on DKFZ/BatchGenerators package. Therefore good for inference on windows\n where DKFZ/Batchgenerators do not work (because of MultiThreading problems)\n \"\"\"\n def __init__(self, data, batch_size):\n self.Config = None\n self.batch_size = batch_size\n self.global_idx = 0\n self._data = data\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return self.generate_train_batch()\n\n def generate_train_batch(self):\n data = self._data[0]\n seg = self._data[1]\n\n if self.Config.SLICE_DIRECTION == \"x\":\n end = data.shape[0]\n elif self.Config.SLICE_DIRECTION == \"y\":\n end = data.shape[1]\n elif self.Config.SLICE_DIRECTION == \"z\":\n end = data.shape[2]\n\n # Stop iterating if we reached end of data\n if self.global_idx >= end:\n self.global_idx = 0\n raise StopIteration\n\n new_global_idx = self.global_idx + self.batch_size\n\n # If we reach end, make last batch smaller, so it fits exactly for rest\n if new_global_idx >= end:\n new_global_idx = end # not end-1, because this goes into range, and there automatically -1\n\n slice_idxs = list(range(self.global_idx, new_global_idx))\n slice_direction = data_utils.slice_dir_to_int(self.Config.SLICE_DIRECTION)\n\n if self.Config.NR_SLICES > 1:\n x, y = data_utils.sample_Xslices(data, seg, slice_idxs, slice_direction=slice_direction,\n labels_type=self.Config.LABELS_TYPE, slice_window=self.Config.NR_SLICES)\n else:\n x, y = data_utils.sample_slices(data, seg, slice_idxs,\n slice_direction=slice_direction,\n labels_type=self.Config.LABELS_TYPE)\n\n data_dict = {\"data\": x, # (batch_size, channels, x, y, [z])\n \"seg\": y} # (batch_size, channels, x, y, [z])\n self.global_idx = new_global_idx\n return data_dict\n\n\nclass BatchGenerator3D_data_ordered_standalone(object):\n def __init__(self, data, batch_size=1):\n self.Config = None\n if batch_size != 1:\n raise ValueError(\"only batch_size=1 allowed\")\n self.batch_size = batch_size\n self.global_idx = 0\n self._data = data\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return self.generate_train_batch()\n\n def generate_train_batch(self):\n data = self._data[0] # (x, y, z, channels)\n seg = self._data[1]\n\n # Stop iterating if we reached end of data\n if self.global_idx >= 1:\n self.global_idx = 0\n raise StopIteration\n self.global_idx += self.batch_size\n\n x = data.transpose(3, 0, 1, 2)[np.newaxis,...] 
# channels have to be first, add batch_size of 1\n y = seg.transpose(3, 0, 1, 2)[np.newaxis,...]\n\n data_dict = {\"data\": np.array(x), # (batch_size, channels, x, y, [z])\n \"seg\": np.array(y)} # (batch_size, channels, x, y, [z])\n return data_dict\n\n\nclass DataLoaderInference():\n \"\"\"\n Data loader for only one subject and returning slices in ordered way.\n \"\"\"\n def __init__(self, Config, data=None, subject=None):\n \"\"\"\n Set either data or subject, not both.\n\n Args:\n Config: Config class\n data: 4D numpy array with subject data\n subject: ID for a subject from the training data (string)\n \"\"\"\n self.Config = Config\n self.data = data\n self.subject = subject\n\n def _augment_data(self, batch_generator, type=None):\n tfs = []\n\n if self.Config.NORMALIZE_DATA:\n tfs.append(ZeroMeanUnitVarianceTransform_Standalone(per_channel=self.Config.NORMALIZE_PER_CHANNEL))\n\n tfs.append(NumpyToTensor(keys=[\"data\", \"seg\"], cast_to=\"float\"))\n\n batch_gen = SingleThreadedAugmenter(batch_generator, Compose(tfs))\n return batch_gen\n\n def get_batch_generator(self, batch_size=1):\n\n if self.data is not None:\n exp_utils.print_verbose(self.Config.VERBOSE, \"Loading data from PREDICT_IMG input file\")\n data = np.nan_to_num(self.data)\n # Use dummy mask in case we only want to predict on some data (where we do not have ground truth))\n seg = np.zeros((self.Config.INPUT_DIM[0], self.Config.INPUT_DIM[0],\n self.Config.INPUT_DIM[0], self.Config.NR_OF_CLASSES)).astype(self.Config.LABELS_TYPE)\n elif self.subject is not None:\n if self.Config.TYPE == \"combined\":\n # Load from npy file for Fusion\n data = np.load(join(C.DATA_PATH, self.Config.DATASET_FOLDER, self.subject,\n self.Config.FEATURES_FILENAME + \".npy\"), mmap_mode=\"r\")\n seg = np.load(join(C.DATA_PATH, self.Config.DATASET_FOLDER, self.subject,\n self.Config.LABELS_FILENAME + \".npy\"), mmap_mode=\"r\")\n data = np.nan_to_num(data)\n seg = np.nan_to_num(seg)\n data = np.reshape(data, (data.shape[0], data.shape[1], data.shape[2], data.shape[3] * data.shape[4]))\n else:\n from tractseg.data.data_loader_training import load_training_data\n data, seg = load_training_data(self.Config, self.subject)\n\n # Convert peaks to tensors if tensor model\n if self.Config.NR_OF_GRADIENTS == 18 * self.Config.NR_SLICES:\n data = peak_utils.peaks_to_tensors(data)\n\n data, transformation = data_utils.pad_and_scale_img_to_square_img(data,\n target_size=self.Config.INPUT_DIM[0],\n nr_cpus=1)\n seg, transformation = data_utils.pad_and_scale_img_to_square_img(seg,\n target_size=self.Config.INPUT_DIM[0],\n nr_cpus=1)\n else:\n raise ValueError(\"Neither 'data' nor 'subject' set.\")\n\n if self.Config.DIM == \"2D\":\n batch_gen = BatchGenerator2D_data_ordered_standalone((data, seg), batch_size=batch_size)\n else:\n batch_gen = BatchGenerator3D_data_ordered_standalone((data, seg), batch_size=batch_size)\n batch_gen.Config = self.Config\n\n batch_gen = self._augment_data(batch_gen, type=type)\n return batch_gen\n\n" ]
[ [ "numpy.random.seed", "numpy.reshape", "numpy.nan_to_num", "numpy.array", "numpy.zeros" ] ]
dualword/faunus
[ "89f14398b960889cc74d0cceab96ef0e197088ae" ]
[ "examples/gromos_bend/gromos_bend_plot.py" ]
[ "#!/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nmc = np.loadtxt('gromos_bend_mc_rdf.dat', unpack=True, usecols=(0,1))\nld = np.loadtxt('gromos_bend_md_rdf.dat', unpack=True, usecols=(0,1))\n\nplt.plot(mc[0], mc[1], label='Monte Carlo', linewidth=10, alpha=0.3)\nplt.plot(ld[0], ld[1], label='Langevin', linewidth=2)\n\nif os.path.isfile('rdf.dat'):\n test = np.loadtxt('rdf.dat', unpack=True, usecols=(0,1))\n plt.plot(test[0], test[1], label='Test', linewidth=2)\n\nplt.legend(loc=0, frameon=False, fontsize=10)\nplt.xlabel('Distance, $r$ (Å)', fontsize=14)\nplt.ylabel('$g(r)$', fontsize=14)\nplt.savefig('gromos_bend_plot.png')\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.loadtxt", "matplotlib.pyplot.ylabel" ] ]
caley/sympy
[ "7dfa5ceadf8b1500119583b33c70b618b59ca7ac" ]
[ "sympy/stats/drv.py" ]
[ "from sympy import (Basic, sympify, symbols, Dummy, Lambda, summation,\n Piecewise, S, cacheit, Sum, exp, I, Ne, Eq, poly,\n series, factorial, And, lambdify, floor)\n\nfrom sympy.polys.polyerrors import PolynomialError\nfrom sympy.stats.crv import reduce_rational_inequalities_wrap\nfrom sympy.stats.rv import (NamedArgsMixin, SinglePSpace, SingleDomain,\n random_symbols, PSpace, ConditionalDomain, RandomDomain,\n ProductDomain, Distribution)\nfrom sympy.stats.symbolic_probability import Probability\nfrom sympy.sets.fancysets import Range, FiniteSet\nfrom sympy.sets.sets import Union\nfrom sympy.sets.contains import Contains\nfrom sympy.utilities import filldedent\nfrom sympy.core.sympify import _sympify\nfrom sympy.external import import_module\n\n\nclass DiscreteDistribution(Distribution):\n def __call__(self, *args):\n return self.pdf(*args)\n\n\nclass SampleDiscreteScipy:\n \"\"\"Returns the sample from scipy of the given distribution\"\"\"\n def __new__(cls, dist, size, seed=None):\n return cls._sample_scipy(dist, size, seed)\n\n @classmethod\n def _sample_scipy(cls, dist, size, seed):\n \"\"\"Sample from SciPy.\"\"\"\n\n from scipy import stats as scipy_stats\n scipy_rv_map = {\n 'GeometricDistribution': lambda dist, size: scipy_stats.geom.rvs(p=float(dist.p),\n size=size, random_state=seed),\n 'LogarithmicDistribution': lambda dist, size: scipy_stats.logser.rvs(p=float(dist.p),\n size=size, random_state=seed),\n 'NegativeBinomialDistribution': lambda dist, size: scipy_stats.nbinom.rvs(n=float(dist.r),\n p=float(dist.p), size=size, random_state=seed),\n 'PoissonDistribution': lambda dist, size: scipy_stats.poisson.rvs(mu=float(dist.lamda),\n size=size, random_state=seed),\n 'SkellamDistribution': lambda dist, size: scipy_stats.skellam.rvs(mu1=float(dist.mu1),\n mu2=float(dist.mu2), size=size, random_state=seed),\n 'YuleSimonDistribution': lambda dist, size: scipy_stats.yulesimon.rvs(alpha=float(dist.rho),\n size=size, random_state=seed),\n 'ZetaDistribution': lambda dist, size: scipy_stats.zipf.rvs(a=float(dist.s),\n size=size, random_state=seed)\n }\n\n dist_list = scipy_rv_map.keys()\n\n if dist.__class__.__name__ == 'DiscreteDistributionHandmade':\n from scipy.stats import rv_discrete\n z = Dummy('z')\n handmade_pmf = lambdify(z, dist.pdf(z), ['numpy', 'scipy'])\n class scipy_pmf(rv_discrete):\n def _pmf(self, x):\n return handmade_pmf(x)\n scipy_rv = scipy_pmf(a=float(dist.set._inf), b=float(dist.set._sup),\n name='scipy_pmf')\n return scipy_rv.rvs(size=size, random_state=seed)\n\n if dist.__class__.__name__ not in dist_list:\n return None\n\n return scipy_rv_map[dist.__class__.__name__](dist, size)\n\nclass SampleDiscreteNumpy:\n \"\"\"Returns the sample from numpy of the given distribution\"\"\"\n\n def __new__(cls, dist, size, seed=None):\n return cls._sample_numpy(dist, size, seed)\n\n @classmethod\n def _sample_numpy(cls, dist, size, seed):\n \"\"\"Sample from NumPy.\"\"\"\n\n import numpy\n if seed is None or isinstance(seed, int):\n rand_state = numpy.random.default_rng(seed=seed)\n else:\n rand_state = seed\n numpy_rv_map = {\n 'GeometricDistribution': lambda dist, size: rand_state.geometric(p=float(dist.p),\n size=size),\n 'PoissonDistribution': lambda dist, size: rand_state.poisson(lam=float(dist.lamda),\n size=size),\n 'ZetaDistribution': lambda dist, size: rand_state.zipf(a=float(dist.s),\n size=size)\n }\n\n dist_list = numpy_rv_map.keys()\n\n if dist.__class__.__name__ not in dist_list:\n return None\n\n return numpy_rv_map[dist.__class__.__name__](dist, 
size)\n\nclass SampleDiscretePymc:\n \"\"\"Returns the sample from pymc3 of the given distribution\"\"\"\n\n def __new__(cls, dist, size, seed=None):\n return cls._sample_pymc3(dist, size, seed)\n\n @classmethod\n def _sample_pymc3(cls, dist, size, seed):\n \"\"\"Sample from PyMC3.\"\"\"\n\n import pymc3\n pymc3_rv_map = {\n 'GeometricDistribution': lambda dist: pymc3.Geometric('X', p=float(dist.p)),\n 'PoissonDistribution': lambda dist: pymc3.Poisson('X', mu=float(dist.lamda)),\n 'NegativeBinomialDistribution': lambda dist: pymc3.NegativeBinomial('X',\n mu=float((dist.p*dist.r)/(1-dist.p)), alpha=float(dist.r))\n }\n\n dist_list = pymc3_rv_map.keys()\n\n if dist.__class__.__name__ not in dist_list:\n return None\n\n with pymc3.Model():\n pymc3_rv_map[dist.__class__.__name__](dist)\n return pymc3.sample(size, chains=1, progressbar=False, random_seed=seed)[:]['X']\n\n\n_get_sample_class_drv = {\n 'scipy': SampleDiscreteScipy,\n 'pymc3': SampleDiscretePymc,\n 'numpy': SampleDiscreteNumpy\n}\n\n\nclass SingleDiscreteDistribution(DiscreteDistribution, NamedArgsMixin):\n \"\"\" Discrete distribution of a single variable\n\n Serves as superclass for PoissonDistribution etc....\n\n Provides methods for pdf, cdf, and sampling\n\n See Also:\n sympy.stats.crv_types.*\n \"\"\"\n\n set = S.Integers\n\n def __new__(cls, *args):\n args = list(map(sympify, args))\n return Basic.__new__(cls, *args)\n\n @staticmethod\n def check(*args):\n pass\n\n def sample(self, size=(), library='scipy', seed=None):\n \"\"\" A random realization from the distribution\"\"\"\n\n libraries = ['scipy', 'numpy', 'pymc3']\n if library not in libraries:\n raise NotImplementedError(\"Sampling from %s is not supported yet.\"\n % str(library))\n if not import_module(library):\n raise ValueError(\"Failed to import %s\" % library)\n\n samps = _get_sample_class_drv[library](self, size, seed)\n\n if samps is not None:\n return samps\n raise NotImplementedError(\n \"Sampling for %s is not currently implemented from %s\"\n % (self.__class__.__name__, library)\n )\n\n\n @cacheit\n def compute_cdf(self, **kwargs):\n \"\"\" Compute the CDF from the PDF\n\n Returns a Lambda\n \"\"\"\n x = symbols('x', integer=True, cls=Dummy)\n z = symbols('z', real=True, cls=Dummy)\n left_bound = self.set.inf\n\n # CDF is integral of PDF from left bound to z\n pdf = self.pdf(x)\n cdf = summation(pdf, (x, left_bound, floor(z)), **kwargs)\n # CDF Ensure that CDF left of left_bound is zero\n cdf = Piecewise((cdf, z >= left_bound), (0, True))\n return Lambda(z, cdf)\n\n def _cdf(self, x):\n return None\n\n def cdf(self, x, **kwargs):\n \"\"\" Cumulative density function \"\"\"\n if not kwargs:\n cdf = self._cdf(x)\n if cdf is not None:\n return cdf\n return self.compute_cdf(**kwargs)(x)\n\n @cacheit\n def compute_characteristic_function(self, **kwargs):\n \"\"\" Compute the characteristic function from the PDF\n\n Returns a Lambda\n \"\"\"\n x, t = symbols('x, t', real=True, cls=Dummy)\n pdf = self.pdf(x)\n cf = summation(exp(I*t*x)*pdf, (x, self.set.inf, self.set.sup))\n return Lambda(t, cf)\n\n def _characteristic_function(self, t):\n return None\n\n def characteristic_function(self, t, **kwargs):\n \"\"\" Characteristic function \"\"\"\n if not kwargs:\n cf = self._characteristic_function(t)\n if cf is not None:\n return cf\n return self.compute_characteristic_function(**kwargs)(t)\n\n @cacheit\n def compute_moment_generating_function(self, **kwargs):\n t = Dummy('t', real=True)\n x = Dummy('x', integer=True)\n pdf = self.pdf(x)\n mgf = 
summation(exp(t*x)*pdf, (x, self.set.inf, self.set.sup))\n return Lambda(t, mgf)\n\n def _moment_generating_function(self, t):\n return None\n\n def moment_generating_function(self, t, **kwargs):\n if not kwargs:\n mgf = self._moment_generating_function(t)\n if mgf is not None:\n return mgf\n return self.compute_moment_generating_function(**kwargs)(t)\n\n @cacheit\n def compute_quantile(self, **kwargs):\n \"\"\" Compute the Quantile from the PDF\n\n Returns a Lambda\n \"\"\"\n x = Dummy('x', integer=True)\n p = Dummy('p', real=True)\n left_bound = self.set.inf\n pdf = self.pdf(x)\n cdf = summation(pdf, (x, left_bound, x), **kwargs)\n set = ((x, p <= cdf), )\n return Lambda(p, Piecewise(*set))\n\n def _quantile(self, x):\n return None\n\n def quantile(self, x, **kwargs):\n \"\"\" Cumulative density function \"\"\"\n if not kwargs:\n quantile = self._quantile(x)\n if quantile is not None:\n return quantile\n return self.compute_quantile(**kwargs)(x)\n\n def expectation(self, expr, var, evaluate=True, **kwargs):\n \"\"\" Expectation of expression over distribution \"\"\"\n # TODO: support discrete sets with non integer stepsizes\n\n if evaluate:\n try:\n p = poly(expr, var)\n\n t = Dummy('t', real=True)\n\n mgf = self.moment_generating_function(t)\n deg = p.degree()\n taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t)\n result = 0\n for k in range(deg+1):\n result += p.coeff_monomial(var ** k) * taylor.coeff_monomial(t ** k) * factorial(k)\n\n return result\n\n except PolynomialError:\n return summation(expr * self.pdf(var),\n (var, self.set.inf, self.set.sup), **kwargs)\n\n else:\n return Sum(expr * self.pdf(var),\n (var, self.set.inf, self.set.sup), **kwargs)\n\n def __call__(self, *args):\n return self.pdf(*args)\n\n\nclass DiscreteDomain(RandomDomain):\n \"\"\"\n A domain with discrete support with step size one.\n Represented using symbols and Range.\n \"\"\"\n is_Discrete = True\n\nclass SingleDiscreteDomain(DiscreteDomain, SingleDomain):\n def as_boolean(self):\n return Contains(self.symbol, self.set)\n\n\nclass ConditionalDiscreteDomain(DiscreteDomain, ConditionalDomain):\n \"\"\"\n Domain with discrete support of step size one, that is restricted by\n some condition.\n \"\"\"\n @property\n def set(self):\n rv = self.symbols\n if len(self.symbols) > 1:\n raise NotImplementedError(filldedent('''\n Multivariate conditional domains are not yet implemented.'''))\n rv = list(rv)[0]\n return reduce_rational_inequalities_wrap(self.condition,\n rv).intersect(self.fulldomain.set)\n\n\nclass DiscretePSpace(PSpace):\n is_real = True\n is_Discrete = True\n\n @property\n def pdf(self):\n return self.density(*self.symbols)\n\n def where(self, condition):\n rvs = random_symbols(condition)\n assert all(r.symbol in self.symbols for r in rvs)\n if len(rvs) > 1:\n raise NotImplementedError(filldedent('''Multivariate discrete\n random variables are not yet supported.'''))\n conditional_domain = reduce_rational_inequalities_wrap(condition,\n rvs[0])\n conditional_domain = conditional_domain.intersect(self.domain.set)\n return SingleDiscreteDomain(rvs[0].symbol, conditional_domain)\n\n def probability(self, condition):\n complement = isinstance(condition, Ne)\n if complement:\n condition = Eq(condition.args[0], condition.args[1])\n try:\n _domain = self.where(condition).set\n if condition == False or _domain is S.EmptySet:\n return S.Zero\n if condition == True or _domain == self.domain.set:\n return S.One\n prob = self.eval_prob(_domain)\n except NotImplementedError:\n from sympy.stats.rv import 
density\n expr = condition.lhs - condition.rhs\n dens = density(expr)\n if not isinstance(dens, DiscreteDistribution):\n from sympy.stats.drv_types import DiscreteDistributionHandmade\n dens = DiscreteDistributionHandmade(dens)\n z = Dummy('z', real=True)\n space = SingleDiscretePSpace(z, dens)\n prob = space.probability(condition.__class__(space.value, 0))\n if prob is None:\n prob = Probability(condition)\n return prob if not complement else S.One - prob\n\n def eval_prob(self, _domain):\n sym = list(self.symbols)[0]\n if isinstance(_domain, Range):\n n = symbols('n', integer=True)\n inf, sup, step = (r for r in _domain.args)\n summand = ((self.pdf).replace(\n sym, n*step))\n rv = summation(summand,\n (n, inf/step, (sup)/step - 1)).doit()\n return rv\n elif isinstance(_domain, FiniteSet):\n pdf = Lambda(sym, self.pdf)\n rv = sum(pdf(x) for x in _domain)\n return rv\n elif isinstance(_domain, Union):\n rv = sum(self.eval_prob(x) for x in _domain.args)\n return rv\n\n def conditional_space(self, condition):\n # XXX: Converting from set to tuple. The order matters to Lambda\n # though so we should be starting with a set...\n density = Lambda(tuple(self.symbols), self.pdf/self.probability(condition))\n condition = condition.xreplace({rv: rv.symbol for rv in self.values})\n domain = ConditionalDiscreteDomain(self.domain, condition)\n return DiscretePSpace(domain, density)\n\nclass ProductDiscreteDomain(ProductDomain, DiscreteDomain):\n def as_boolean(self):\n return And(*[domain.as_boolean for domain in self.domains])\n\nclass SingleDiscretePSpace(DiscretePSpace, SinglePSpace):\n \"\"\" Discrete probability space over a single univariate variable \"\"\"\n is_real = True\n\n @property\n def set(self):\n return self.distribution.set\n\n @property\n def domain(self):\n return SingleDiscreteDomain(self.symbol, self.set)\n\n def sample(self, size=(), library='scipy', seed=None):\n \"\"\"\n Internal sample method\n\n Returns dictionary mapping RandomSymbol to realization value.\n \"\"\"\n return {self.value: self.distribution.sample(size, library=library, seed=seed)}\n\n def compute_expectation(self, expr, rvs=None, evaluate=True, **kwargs):\n rvs = rvs or (self.value,)\n if self.value not in rvs:\n return expr\n\n expr = _sympify(expr)\n expr = expr.xreplace({rv: rv.symbol for rv in rvs})\n\n x = self.value.symbol\n try:\n return self.distribution.expectation(expr, x, evaluate=evaluate,\n **kwargs)\n except NotImplementedError:\n return Sum(expr * self.pdf, (x, self.set.inf, self.set.sup),\n **kwargs)\n\n def compute_cdf(self, expr, **kwargs):\n if expr == self.value:\n x = Dummy(\"x\", real=True)\n return Lambda(x, self.distribution.cdf(x, **kwargs))\n else:\n raise NotImplementedError()\n\n def compute_density(self, expr, **kwargs):\n if expr == self.value:\n return self.distribution\n raise NotImplementedError()\n\n def compute_characteristic_function(self, expr, **kwargs):\n if expr == self.value:\n t = Dummy(\"t\", real=True)\n return Lambda(t, self.distribution.characteristic_function(t, **kwargs))\n else:\n raise NotImplementedError()\n\n def compute_moment_generating_function(self, expr, **kwargs):\n if expr == self.value:\n t = Dummy(\"t\", real=True)\n return Lambda(t, self.distribution.moment_generating_function(t, **kwargs))\n else:\n raise NotImplementedError()\n\n def compute_quantile(self, expr, **kwargs):\n if expr == self.value:\n p = Dummy(\"p\", real=True)\n return Lambda(p, self.distribution.quantile(p, **kwargs))\n else:\n raise NotImplementedError()\n" ]
[ [ "numpy.random.default_rng" ] ]
reeya26/textscrub
[ "794357fe0c86dde30b611d86381e39de3537f7db" ]
[ "textscrub/clean.py" ]
[ "import unicodedata\nimport regex as re\nimport sys\n\nfrom string import punctuation\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cluster import dbscan\nimport Levenshtein\n\n\ndef remove_glyphs(text):\n \"\"\"\n Remove all the non-ascii, non-latin and non-printable characters\n from the raw text\n Args:\n text(str) -- raw text\n Returns:\n text(str) -- text clean from non ascii, non-latin and\n non-printable characters\n \"\"\"\n\n # remove non - ascii characters\n text = unicodedata.normalize(\"NFKD\", text)\n text = re.sub(r'[^\\x00-\\x7F]+', '', text)\n\n # remove accents\n text = unicodedata.normalize('NFD', text).encode('ascii', 'ignore').decode(\"utf-8\")\n\n # Get all unicode characters\n all_chars = (chr(i) for i in range(sys.maxunicode))\n # Get all non printable characters\n control_chars = ''.join(c for c in all_chars\n if unicodedata.category(c) == 'Cc')\n # Create regex of above characters\n control_char_re = re.compile('[%s]' % re.escape(control_chars))\n # remove non-printable characters\n text = control_char_re.sub('', text)\n\n # remove non-latin characters\n text = re.sub(r'[^\\p{Latin}]', '', text)\n\n return text\n\n\ndef remove_spaces(text):\n \"\"\"\n Remove all the tabs, spaces, and line breaks from the raw text\n Args:\n text(str) -- raw text\n Returns:\n text(str) -- text clean from tabs, and spaces\n \"\"\"\n # remove \\t, \\n, \\r\n text = text.replace(\"\\t\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n # remove 2 or more than 2 spaces\n text = re.sub('\\s{2,}', '', text)\n\n return text\n\n\ndef remove_html_tags(text):\n \"\"\"\n Remove all html tags from the raw text\n Args:\n text(str) -- raw text\n REturns:\n text(str) -- text clean from html tags\n \"\"\"\n\n # create regex for html tags\n html_re = re.compile(r'<.*?>')\n # remove html tags\n text = re.sub(html_re, '', text)\n\n return text\n\n\ndef remove_hyperlinks(text):\n \"\"\"\n Remove all hyperlinks and URLs from the raw text\n Args:\n text(str) -- raw text\n Returns:\n text(str) -- text clean from hyperlinks\n \"\"\"\n text = re.sub(r'https?://\\S+', '', text)\n\n return text\n\n\ndef remove_punctuation(text):\n \"\"\"\n Remove all punctuations from the raw text\n Args:\n text(str) -- raw text\n Returns:\n text(str) -- text clean from punctuation\n \"\"\"\n\n text = re.sub('[%s]' % re.escape(punctuation), '', text)\n text = remove_spaces(text)\n\n return text\n\n\ndef homogenize_column(obj, eps=1, min_samples=2):\n\n \"\"\"\n Remove all hyperlinks and URLs from the raw text\n Args:\n dataframe(str) -- almost similar text\n Returns:\n dataframe(str) -- text clean from multiple instances of same value\n \"\"\"\n\n def homog_lev_series(obj, eps=eps, min_samples=min_samples):\n name = obj.name\n\n original = obj.copy()\n obj = obj.drop_duplicates()\n data = obj.tolist()\n\n def lev_metric(x, y):\n i, j = int(x[0]), int(y[0])\n return Levenshtein.distance(data[i], data[j])\n\n X = np.arange(len(data)).reshape(-1, 1)\n labels = dbscan(X, metric=lev_metric, eps=eps,\n min_samples=min_samples)[1]\n\n x = pd.DataFrame({'A': obj.reset_index(drop=True),\n 'B': pd.Series(labels)})\n y = x.drop_duplicates('B')\n y = y[~(y.B == -1)]\n y.columns = ['C', 'B']\n x = x.merge(y, on='B', how='left')\n x['C'] = np.where(x.C.isnull(), x.A, x.C)\n\n results = pd.DataFrame({'A': original})\n results = results.merge(x[['A', 'C']], on='A', how='left')\n out = results.C.rename(name)\n\n return out\n\n if isinstance(obj, pd.DataFrame):\n for col in obj.columns:\n obj['{}'.format(col)] = 
homog_lev_series(obj['{}'.format(col)])\n else:\n obj = homog_lev_series(obj)\n\n return obj\n" ]
[ [ "pandas.Series", "pandas.DataFrame", "sklearn.cluster.dbscan" ] ]
Oliph/likertScalePlot
[ "8b980a813a56008089be61bd0932e2e83e9da854" ]
[ "likertscaleplot/likertScalePlot.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPlotting function to draw a likert scale.\n\nTo plot diverging horizontal barchart as needed for a likert scale\nA simple code that try to mimic what it is possible with the package HH in R.\nThis script is an adaptation of the answers found:\n\"\"\"\n\n__author__ = 'Olivier PHILIPPE'\n__licence__ = 'BSD 2-clause'\n\nimport math\nimport pandas as pd\nimport numpy as np\n\n# When using Ipython within vim\n# matplotlib.use('TkAgg')\n\n# When using within jupyter\n# get_ipython().magic('matplotlib inline') # Activat that line to use in Jupyter\n\n\nimport matplotlib.pyplot as plt\n# When using this script with ipython and vim\nplt.ion()\nplt.show()\n\n\ndef get_colors(df, colormap=plt.cm.RdBu, vmin=None, vmax=None, axis=1):\n \"\"\"\n Function to automatically gets a colormap for all the values passed in,\n Have the option to normalise the colormap.\n :params:\n values list(): list of int() or str() that have all the values that need a color to be map\n to. In case of a list() of str(), the try/except use the range(len()) to map a colour\n colormap cm(): type of colormap that need to be used. All can be found here:\n https://matplotlib.org/examples/color/colormaps_reference.html\n vmin, vmax int(): Number to normalise the return of the colourmap if needed a Normalised colourmap\n\n :return:\n colormap cm.colormap(): An array of RGBA values\n\n Original version found on stackerOverflow (w/o the try/except) but cannot find it back\n \"\"\"\n if axis == 0:\n values = df.index\n elif axis == 1:\n values = df.columns\n norm = plt.Normalize(vmin, vmax)\n try:\n return colormap(norm(values))\n except (AttributeError, TypeError): # May happen when gives a list of categorical values\n return colormap(norm(range(len(values))))\n\n\ndef wrap_label(label, max_size=30, splitter=' '):\n \"\"\"\n Function to automatically wrap labels if they are too long\n Split only if whitespace\n params:\n :labels str(): string that contains the labels\n :max_size int(): 20 by Default, the size of the string\n before being wrapped\n :return:\n :str() of wrapped labels according to the max size\n \"\"\"\n def split_at_whitespace(label, splitter):\n label_to_return = list()\n n = 0\n for letter in label:\n n +=1\n if n >= max_size:\n if letter == splitter:\n letter = '\\n'\n n = 0\n label_to_return.append(letter)\n return ''.join(label_to_return)\n\n return split_at_whitespace(label, splitter)\n\n\ndef create_bars(df, ax, y_pos, colors, left_gap):\n \"\"\"\n Loop through the columns and create an horizontal bar for each.\n First it creates all the left bars, for all the columns, then the\n one on the right. 
Each time, it add the distance from the previous bar.\n If 'left_invisible_bar' is passed, it will create a empty gap on the left\n before the first bar to centred the plot in the middle\n\n :params:\n df df(): The dataframe containing the information\n ax plt(): The subplot to draw on\n y_pos np.array(): an array of the number of bars (likert items)\n colors np.array(): an array containing the colors for the different answers\n left_gap np.array(): the empty left gap needed to\n centre the stacked bar\n\n :return:\n patch_handles list(): A list containing the drawn horizontal stacked bars\n \"\"\"\n patch_handles = []\n for i, c in enumerate(df.columns):\n d = np.array(df[c])\n new_bar = ax.barh(y_pos,\n d,\n color=colors[i],\n align='center',\n left=left_gap)\n patch_handles.append(new_bar)\n # accumulate the left-hand offsets\n left_gap += d\n return patch_handles\n\n\ndef compute_middle_sum(df, first_half, middle):\n try:\n return df[first_half].sum(axis=1) + df[middle] *.5\n except ValueError: # In case middle value is none\n return df[first_half].sum(axis=1)\n\n\ndef get_middle(inputlist):\n \"\"\"\n Return the first half of a list and the middle element\n In case the list can be splitted in two equal element,\n return only the first half\n :params:\n inputlist list(): list to split\n :returns:\n first_half list(): list of the first half element\n middle_elmenet int():\n \"\"\"\n middle = float(len(inputlist) /2)\n if len(inputlist) % 2 !=0:\n # If the list has a true middle element it needs\n # to be accessed by adding 0.5 to the index\n middle = int(middle + 0.5) - 1\n # In the case of a true middle is found, the first half\n # is all elements except the middle\n first_half = middle\n return inputlist[middle], inputlist[0:first_half]\n # In case of not true middle can be found (in case the\n # list has a lenght of an even number, it can only\n # return the first half. 
The middle value is None\n return None, inputlist[:int(middle)]\n\n\ndef get_total_mid_answers(df):\n \"\"\"\n Get the list of the columns\n \"\"\"\n middle, first_half = get_middle(df.columns)\n return compute_middle_sum(df, first_half, middle)\n\n\ndef compute_percentage(df, by_row=True, by_col=False):\n \"\"\"\n Transform every cell into a percentage\n \"\"\"\n def compute_perc(row, total=None):\n if total is None:\n total = np.sum(row)\n return [((x /total) *100) for x in row]\n\n if by_row is True and by_col is False:\n return [x for x in df.apply(compute_perc, axis=1)]\n\n elif by_col is True and by_row is False:\n return np.array(df.apply(compute_perc, axis=0))\n\n elif by_row is True and by_col is True:\n total = df.values.sum()\n return np.array(df.apply(compute_perc, total=total))\n\n\ndef normalise_per_row(df):\n df = df.div(df.sum(axis=1), axis=0)\n return df.multiply(100)\n\n\ndef add_labels(df, ax, bars, rotation=0, rounding=True):\n \"\"\"\n \"\"\"\n # Create percentage for each cells to have the right annotation\n percentages = compute_percentage(df)\n # go through all of the bar segments and annotate\n for j in range(len(bars)):\n for i, bar in enumerate(bars[j].get_children()):\n bl = bar.get_xy()\n x = 0.5 *bar.get_width() +bl[0]\n y = 0.5 *bar.get_height() +bl[1]\n # Avoid labels when percentage is under 5 (the bar is too small)\n if percentages[i][j] > 5:\n if rounding is True:\n ax.text(x, y, \"{}\".format(str(int(round(percentages[i][j])))), ha='center', rotation=rotation)\n else:\n ax.text(x, y, \"{}\".format(percentages[i][j]), ha='center', rotation=rotation)\n\n\ndef draw_middle_line(ax, normalise, longest_middle):\n \"\"\"\n \"\"\"\n # Draw a dashed line on the middle to visualise it\n if normalise:\n z = ax.axvline(100, linestyle='--', color='black', alpha=.5)\n else:\n z = ax.axvline(longest_middle, linestyle='--', color='black', alpha=.5)\n # Plot the line behind the barchart\n z.set_zorder(-1)\n\n\ndef drawing_x_labels(ax, normalise, complete_longest, longest_middle):\n \"\"\"\n \"\"\"\n # Create the values with the same length as the xlim\n if normalise:\n xvalues = range(0, 210, 10)\n xlabels = [str(math.floor(abs(x - 100))) for x in xvalues]\n else:\n xvalues = [math.floor(i - (longest_middle %5))\n for i in range(0, int(complete_longest),\n int(int(longest_middle)/ 5))]\n xlabels = [str(math.floor(abs(x - longest_middle))) for x in xvalues]\n # Set the tick positions\n ax.set_xticks(xvalues)\n # Set the tick labels\n ax.set_xticklabels(xlabels)\n\n\ndef likert_scale(df, ax=None, normalise=True, labels=True, middle_line=True, legend=True, rotation=0, title_plot=False, rounding=True, font_size=16):\n \"\"\"\n The idea is to create a fake bar on the left to centre the bar on the same point.\n :params:\n :return:\n \"\"\"\n # Replace the Nan value by 0 for plotting\n df = df.fillna(0)\n try:\n # Create the figure object\n # if figsize is None:\n # fig = plt.figure(figsize=(10, 8))\n # else:\n # fig = plt.figure(figsize=figsize)\n # # Create an axes object in the figure\n # ax = fig.add_subplot(111)\n # fig, ax = plt.subplots()\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1) # make a blank plotting area\n\n # Generate an array of colors based on different colormap. The default value\n # Use a divergent colormap.\n colors = get_colors(df)\n\n # Get the position of each bar for all the items\n y_pos = np.arange(len(df.index))\n\n if normalise:\n df = normalise_per_row(df)\n\n # Compute the middle of the possible answers. 
Assuming the answers are columns\n # Get the sum of the middles +.5 if middle value and without .5 if splitted in 2\n # equal divides\n middles = get_total_mid_answers(df)\n\n # Calculate the longest middle bar to set up the middle of the x-axis for the x-lables\n # and plot the middle line\n if normalise:\n longest_middle = 100\n else:\n longest_middle = middles.max()\n\n # Create the left bar to centre the barchart in the middle\n left_invisible_bar = np.array((middles - longest_middle).abs())\n\n # Calculate the longest bar with the left gap in it to plot the x_value at the end\n # Calculate the total of the longest bar to have the appropriate width +\n # the invisible bar in case it is used to center everything\n complete_longest = (df.sum(axis=1) + left_invisible_bar).max()\n\n # Create the horizontal bars\n bars = create_bars(df, ax, y_pos, colors, left_invisible_bar)\n\n # Set up the limit from 0 to the longest total barchart\n # Keeping this drawing before drawing_x_labels or it will failed to draw\n # all the labels on the right side\n ax.set_xlim([-0.5, complete_longest + 0.5])\n\n # Drawing x_labels\n drawing_x_labels(ax, normalise, complete_longest, longest_middle)\n ax.set_xlabel('Percentage')\n\n # Setting up the y-axis\n ax.set_yticks(y_pos)\n ax.set_yticklabels([wrap_label(labels) for labels in df.index], fontsize=14)\n\n # Add labels to each box\n if labels:\n add_labels(df, ax, bars, rotation, rounding=rounding)\n\n # Create a line on the middle\n if middle_line:\n draw_middle_line(ax, normalise, longest_middle)\n\n # Add legend\n if legend:\n ax.legend(bars, df.columns, fontsize=14)\n\n # Change the plot title\n if title_plot:\n plt.suptitle(title_plot, fontsize=font_size)\n return ax\n except Exception:\n raise\n\n\ndef count_unique_value(df, colnames, rename_columns=False, dropna=False, normalize=False):\n \"\"\"\n Count the values of different columns and transpose the count\n :params:\n :df pd.df(): dataframe containing the data\n :colnames list(): list of strings corresponding to the column header to select the right column\n :return:\n :result_df pd.df(): dataframe with the count of each answer for each columns\n \"\"\"\n # Subset the columns\n df_sub = df[colnames]\n\n if rename_columns is True:\n df_sub.columns = [s.split('[')[2][:-1] for s in colnames]\n\n # Calculate the counts for them\n df_sub = df_sub.apply(pd.Series.value_counts, dropna=dropna, normalize=normalize)\n # Transpose the column to row to be able to plot a stacked bar chart\n return df_sub.transpose()\n\n\ndef main():\n \"\"\"\n \"\"\"\n\n # df = pd.DataFrame(np.random.randint(0,100,size=(100, 3)), columns=list('XYZ'))\n dummy = pd.DataFrame([[1, 2, 3, 4, 5, 2], [5, 6, 7, 8, 5, 2], [10, 4, 2, 10, 5, 2]],\n columns=[\"SD\", \"D\", \"N\", \"A\", \"SA\", 'TEST'],\n index=[\"Key 1\", \"Key B\", \"Key III\"])\n #\n # dummy = pd.DataFrame([[1], [2], [3]],\n # columns=['TEST'],\n # index=['Key1', 'Key2', 'Key3'])\n\n likert_scale(dummy)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.show", "pandas.DataFrame", "matplotlib.pyplot.Normalize", "matplotlib.pyplot.suptitle", "numpy.array", "numpy.sum", "matplotlib.pyplot.ion", "matplotlib.pyplot.figure" ] ]
JonathanGildevall/Automatic-Emergency-Detection-in-Naval-VHF-Transmissions
[ "b691f29b3d3568666bcb0716ebd9efba465a26c2" ]
[ "startupkit/src/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py" ]
[ "# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Convert Wav2Vec2 checkpoint.\"\"\"\n\n\nimport argparse\nimport json\nimport os\n\nimport fairseq\nimport torch\nfrom fairseq.data import Dictionary\n\nfrom transformers import (\n Wav2Vec2Config,\n Wav2Vec2CTCTokenizer,\n Wav2Vec2FeatureExtractor,\n Wav2Vec2ForCTC,\n Wav2Vec2Model,\n Wav2Vec2Processor,\n logging,\n)\n\n\nlogging.set_verbosity_info()\nlogger = logging.get_logger(__name__)\n\nMAPPING = {\n \"post_extract_proj\": \"feature_projection.projection\",\n \"encoder.pos_conv.0\": \"encoder.pos_conv_embed.conv\",\n \"self_attn.k_proj\": \"encoder.layers.*.attention.k_proj\",\n \"self_attn.v_proj\": \"encoder.layers.*.attention.v_proj\",\n \"self_attn.q_proj\": \"encoder.layers.*.attention.q_proj\",\n \"self_attn.out_proj\": \"encoder.layers.*.attention.out_proj\",\n \"self_attn_layer_norm\": \"encoder.layers.*.layer_norm\",\n \"fc1\": \"encoder.layers.*.feed_forward.intermediate_dense\",\n \"fc2\": \"encoder.layers.*.feed_forward.output_dense\",\n \"final_layer_norm\": \"encoder.layers.*.final_layer_norm\",\n \"encoder.layer_norm\": \"encoder.layer_norm\",\n \"w2v_model.layer_norm\": \"feature_projection.layer_norm\",\n \"w2v_encoder.proj\": \"lm_head\",\n \"mask_emb\": \"masked_spec_embed\",\n}\n\n\ndef set_recursively(hf_pointer, key, value, full_name, weight_type):\n for attribute in key.split(\".\"):\n hf_pointer = getattr(hf_pointer, attribute)\n\n if weight_type is not None:\n hf_shape = getattr(hf_pointer, weight_type).shape\n else:\n hf_shape = hf_pointer.shape\n\n print(hf_shape)\n print(value.shape)\n assert (\n hf_shape == value.shape\n ), f\"Shape of hf {key + '.' + weight_type} is {hf_shape}, but should be {value.shape} for {full_name}\"\n if weight_type == \"weight\":\n hf_pointer.weight.data = value\n elif weight_type == \"weight_g\":\n hf_pointer.weight_g.data = value\n elif weight_type == \"weight_v\":\n hf_pointer.weight_v.data = value\n elif weight_type == \"bias\":\n hf_pointer.bias.data = value\n else:\n hf_pointer.data = value\n\n logger.info(f\"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.\")\n\n\ndef recursively_load_weights(fairseq_model, hf_model, is_finetuned):\n unused_weights = []\n fairseq_dict = fairseq_model.state_dict()\n\n feature_extractor = hf_model.wav2vec2.feature_extractor if is_finetuned else hf_model.feature_extractor\n\n for name, value in fairseq_dict.items():\n is_used = False\n if \"conv_layers\" in name:\n load_conv_layer(\n name,\n value,\n feature_extractor,\n unused_weights,\n hf_model.config.feat_extract_norm == \"group\",\n )\n is_used = True\n else:\n for key, mapped_key in MAPPING.items():\n mapped_key = \"wav2vec2.\" + mapped_key if (is_finetuned and mapped_key != \"lm_head\") else mapped_key\n\n if key in name or (key.split(\"w2v_model.\")[-1] == name.split(\".\")[0] and not is_finetuned):\n is_used = True\n if \"*\" in mapped_key:\n layer_index = name.split(key)[0].split(\".\")[-2]\n mapped_key = mapped_key.replace(\"*\", layer_index)\n if \"weight_g\" in name:\n weight_type = \"weight_g\"\n elif \"weight_v\" in name:\n weight_type = \"weight_v\"\n elif \"weight\" in name:\n weight_type = \"weight\"\n elif \"bias\" in name:\n weight_type = \"bias\"\n else:\n weight_type = None\n set_recursively(hf_model, mapped_key, value, name, weight_type)\n continue\n if not is_used:\n unused_weights.append(name)\n\n logger.warning(f\"Unused weights: {unused_weights}\")\n\n\ndef load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):\n name = full_name.split(\"conv_layers.\")[-1]\n items = name.split(\".\")\n layer_id = int(items[0])\n type_id = int(items[1])\n\n if type_id == 0:\n if \"bias\" in name:\n assert (\n value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape\n ), f\"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.\"\n feature_extractor.conv_layers[layer_id].conv.bias.data = value\n logger.info(f\"Feat extract conv layer {layer_id} was initialized from {full_name}.\")\n elif \"weight\" in name:\n assert (\n value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape\n ), f\"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.\"\n feature_extractor.conv_layers[layer_id].conv.weight.data = value\n logger.info(f\"Feat extract conv layer {layer_id} was initialized from {full_name}.\")\n elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):\n if \"bias\" in name:\n assert (\n value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape\n ), f\"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.\"\n feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value\n logger.info(f\"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.\")\n elif \"weight\" in name:\n assert (\n value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape\n ), f\"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.\"\n feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value\n logger.info(f\"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.\")\n else:\n unused_weights.append(full_name)\n\n\[email protected]_grad()\ndef convert_wav2vec2_checkpoint(\n checkpoint_path, pytorch_dump_folder_path, config_path=None, 
dict_path=None, is_finetuned=True\n):\n \"\"\"\n Copy/paste/tweak model's weights to transformers design.\n \"\"\"\n if config_path is not None:\n config = Wav2Vec2Config.from_pretrained(config_path)\n else:\n config = Wav2Vec2Config()\n\n if is_finetuned:\n if dict_path:\n target_dict = Dictionary.load(dict_path)\n\n config.bos_token_id = target_dict.bos_index\n config.eos_token_id = target_dict.eos_index\n config.pad_token_id = target_dict.pad_index\n config.vocab_size = len(target_dict.symbols)\n vocab_path = os.path.join(pytorch_dump_folder_path, \"vocab.json\")\n if not os.path.isdir(pytorch_dump_folder_path):\n logger.error(\"--pytorch_dump_folder_path ({}) should be a directory\".format(pytorch_dump_folder_path))\n return\n os.makedirs(pytorch_dump_folder_path, exist_ok=True)\n with open(vocab_path, \"w\", encoding=\"utf-8\") as vocab_handle:\n json.dump(target_dict.indices, vocab_handle)\n tokenizer = Wav2Vec2CTCTokenizer(\n vocab_path,\n unk_token=target_dict.unk_word,\n pad_token=target_dict.pad_word,\n bos_token=target_dict.bos_word,\n eos_token=target_dict.eos_word,\n word_delimiter_token=\"|\",\n do_lower_case=False,\n )\n return_attention_mask = True if config.feat_extract_norm == \"layer\" else False\n feature_extractor = Wav2Vec2FeatureExtractor(\n feature_size=1,\n sampling_rate=16000,\n padding_value=0,\n do_normalize=True,\n return_attention_mask=return_attention_mask,\n )\n processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)\n processor.save_pretrained(pytorch_dump_folder_path)\n\n hf_wav2vec = Wav2Vec2ForCTC(config)\n else:\n hf_wav2vec = Wav2Vec2Model(config)\n\n if is_finetuned:\n\n model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(\n [checkpoint_path], arg_overrides={\"data\": dict_path}\n )\n else:\n model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])\n\n model = model[0].eval()\n\n recursively_load_weights(model, hf_wav2vec, is_finetuned)\n\n hf_wav2vec.save_pretrained(pytorch_dump_folder_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--pytorch_dump_folder_path\", default=None, type=str, help=\"Path to the output PyTorch model.\")\n parser.add_argument(\"--checkpoint_path\", default=None, type=str, help=\"Path to fairseq checkpoint\")\n parser.add_argument(\"--dict_path\", default=None, type=str, help=\"Path to dict of fine-tuned model\")\n parser.add_argument(\"--config_path\", default=None, type=str, help=\"Path to hf config.json of model to convert\")\n parser.add_argument(\n \"--not_finetuned\", action=\"store_true\", help=\"Whether the model to convert is a fine-tuned model or not\"\n )\n args = parser.parse_args()\n convert_wav2vec2_checkpoint(\n args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned\n )\n" ]
[ [ "torch.no_grad" ] ]
eugenemfu/TTS_HW
[ "34b3a32da2904578ddbd86bfd9529798cc3a1e9f" ]
[ "wavelet_prosody_toolkit/test/diff_num.py" ]
[ "# travis test for comparing prominence and boundary values across versions.\n# allow for minor differences in values \nimport sys, glob\nimport numpy as np\nref_files = sorted(glob.glob(sys.argv[1]+\"/*.prom\"))\ntest_files = sorted(glob.glob(sys.argv[2]+\"/*.prom\"))\n\nfor i in range(len(ref_files)):\n ref = (open(ref_files[i], \"r\")).readlines()\n test = (open(test_files[i], \"r\")).readlines()\n\n val_ref = []\n val_test = []\n # compare prominence and boundary values with some tolerance\n for l in ref:\n val_ref.append(float(l.strip().split(\"\\t\")[-1]))\n val_ref.append(float(l.strip().split(\"\\t\")[-2]))\n for l in test:\n val_test.append(float(l.strip().split(\"\\t\")[-1]))\n val_test.append(float(l.strip().split(\"\\t\")[-2]))\n\n\n assert np.allclose(np.array(val_ref), np.array(val_test), atol=0.3), \\\n ref_files[i]+\" and \"+test_files[i]+ \" differ too much!\"\n" ]
[ [ "numpy.array" ] ]
wngfra/ros2_tactile
[ "feb0ea2703052cbd56965a2dd12b4068334f4ea9" ]
[ "finger_sense/finger_sense/commander.py" ]
[ "# Copyright (c) 2020 wngfra\n# Use of this source code is governed by the Apache-2.0 license, see LICENSE\nimport numpy as np\nimport os\nimport rclpy\nimport time\nfrom collections import deque\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import animation\nfrom matplotlib import pyplot as plt\nfrom sklearn.decomposition import IncrementalPCA\n\n\nfrom rclpy.executors import MultiThreadedExecutor\nfrom rclpy.node import Node\nfrom std_msgs.msg import String\n\nfrom franka_interfaces.msg import RobotState\nfrom franka_interfaces.srv import SlidingControl\nfrom tactile_interfaces.msg import TactileSignal\nfrom tactile_interfaces.srv import ChangeState\n\nfrom finger_sense.utility import resample_fft\n\n\n# train params\nMATERIAL = \"BeigeLinen\"\nDISTANCE = 0.15\nPARAMS = []\nfor i in range(1):\n for j in range(5):\n PARAMS.append((i*1.0+8.0, -j*0.005-0.01, -DISTANCE))\n PARAMS.append((i*1.0+8.0, j*0.005+0.01, DISTANCE))\nPARAMS.append((-1.0, 0.0, 0.0))\n\n\nclass Commander(Node):\n def __init__(self):\n super().__init__(\"commander\")\n\n self.declare_parameters(\n namespace=\"\",\n parameters=[\n (\"save_dir\", ''),\n (\"mode\", ''),\n ],\n )\n self.get_params()\n\n self.pub = self.create_publisher(String, 'commander_state', 10)\n self.timer = self.create_timer(0.01, self.timer_callback)\n self.sub_robot = self.create_subscription(\n RobotState, \"franka_state\", self.robot_state_callback, 100\n )\n self.sub_tactile = self.create_subscription(\n TactileSignal, \"tactile_signals\", self.tactile_callback, 10\n )\n self.sliding_control_cli = self.create_client(\n SlidingControl, \"sliding_control\")\n self.sliding_control_req = SlidingControl.Request()\n self.sensor_cli = self.create_client(\n ChangeState, \"tactile_publisher/change_state\"\n )\n self.sensor_req = ChangeState.Request()\n\n # control params\n self.direction = -1.0\n self.robot_state = np.zeros(19)\n self.record = False\n # mode dependent params\n if self.mode == \"train\":\n self.count = 0\n self.initialized = False\n maxlen = None\n if self.mode == \"test\":\n self.freq_buf = []\n self.freqXd= []\n self.transformer = IncrementalPCA(n_components=3, batch_size=32)\n maxlen = 128\n \n self.buffer_count = 0\n self.buffer = deque(maxlen=maxlen)\n\n self.send_sliding_control_request(\n 0.0, (0.0, 0.0, 0.0), (0.0, 0.0, 0.0))\n\n \"\"\" Prepare 3D visualization \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n def update(num, data, line):\n line.set_data(data[:2, :num])\n line.set_3d_properties(data[2, :num])\n\n self.sct, = ax.plot([], [], [], \"o\", markersize=2)\n\n ani = animation.FuncAnimation(fig, update, 24, fargs=(xs,ys,zs), interval=1)\n \n def update(self, xa, ya, za):\n self.sct.set_data(self.freqXd[:, 0], self.freqXd[:, 1])\n self.sct.set_3d_properties(self.freqXd[:, 2])\n\n def get_params(self):\n self.save_dir = str(self.get_parameter(\"save_dir\").value)\n self.mode = str(self.get_parameter(\"mode\").value)\n\n def robot_state_callback(self, msg):\n self.robot_state = np.hstack([msg.position, msg.external_wrench])\n\n def tactile_callback(self, msg):\n self.buffer.append(msg.data)\n self.buffer_count += 1\n\n def timer_callback(self):\n success = False\n control_type = -1\n try:\n response = self.sliding_control_future.result()\n success = response.success\n control_type = response.type\n except Exception:\n success = False\n control_type = -1\n\n nanoseconds = self.get_clock().now().nanoseconds\n\n if success:\n \"\"\" Training mode for data collection. 
\"\"\"\n if self.mode == \"train\" and self.count < len(PARAMS):\n # Save buffer\n if control_type == 3:\n basename = \"{}_{:.1f}N_{:.3f}mps_{}\".format(\n MATERIAL,\n PARAMS[self.count-1][0],\n PARAMS[self.count-1][1],\n nanoseconds)\n filename = os.path.join(self.save_dir, basename)\n np.save(filename, self.buffer)\n self.get_logger().info(\"Saved to file {}.npy\".format(filename))\n\n self.buffer.clear()\n force = PARAMS[self.count][0]\n dy = PARAMS[self.count][1]\n y = PARAMS[self.count][2]\n self.send_sliding_control_request(\n force, [0.0, y, 0.0], [0.0, dy, 0.0])\n \n self.count += 1\n\n if self.mode == \"test\" and len(self.buffer) >= self.buffer.maxlen:\n \"\"\" Test mode for active perception.\"\"\"\n y = resample_fft(self.buffer, Ns=32, axis=0, flatten=True)\n self.freq_buf.append(y)\n if len(self.freq_buf) > 32:\n self.freqXd = self.transformer.fit_transform(self.freq_buf)\n plt.pause(0.001)\n \n\n def send_sliding_control_request(self, force, distance, speed):\n \"\"\"\n Send parameter change request to control parameter server\n \"\"\"\n self.sliding_control_req.force = force\n self.sliding_control_req.distance = distance\n self.sliding_control_req.speed = speed\n self.sliding_control_future = self.sliding_control_cli.call_async(\n self.sliding_control_req\n )\n\n def send_sensor_request(self, transition):\n self.sensor_req.transition = transition\n self.sensor_future = self.sensor_cli.call_async(self.sensor_req)\n\n\ndef main(args=None):\n time.sleep(3)\n rclpy.init(args=args)\n node = Commander()\n executor = MultiThreadedExecutor(num_threads=3)\n executor.add_node(node)\n try:\n executor.spin()\n finally:\n executor.shutdown()\n node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.hstack", "sklearn.decomposition.IncrementalPCA", "numpy.save", "matplotlib.animation.FuncAnimation", "numpy.zeros", "matplotlib.pyplot.pause", "matplotlib.pyplot.figure" ] ]
erasromani/pytorch_wrapper
[ "8f790b7c8af5c70a093656bf806613f9e4a4e5c3" ]
[ "pytorch_wrapper/metrics.py" ]
[ "import gin\nimport torch\nimport dataclasses\nimport torch.nn.functional as F\n\nfrom typing import Any, Callable, List\n\ndef accuracy(input, target):\n input = F.softmax(input, dim=1)\n pred = input.max(1, keepdim=True)[1]\n accuracy = pred.eq(target.view_as(pred))\n return accuracy\n\[email protected]\[email protected]\nclass MetricConfig:\n names: List[Callable[[Any, Any], Any]]\n \n def get_metric_func(self, name):\n if name == \"accuracy\":\n return accuracy\n else:\n raise ValueError(\"invalid name {}\".format(name))\n \n def get_functions(self):\n functions = {}\n for name in self.names:\n functions[name] = self.get_metric_func(name)\n return functions" ]
[ [ "torch.nn.functional.softmax" ] ]
xiaolu1990/pandapower
[ "53af7f2d455dc4ff6a4fcaccc870b0076fa97bf9" ]
[ "pandapower/test/networks/test_cigre_networks.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and\n# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed\n# by a BSD-style license that can be found in the LICENSE file.\n\nimport pandas as pd\nimport pytest\n\nimport pandapower as pp\nimport pandapower.networks as pn\n\n\ndef test_cigre_hv():\n net = pn.create_cigre_network_hv() # length_km_6a_6b=0.1\n pp.runpp(net)\n\n all_vn_kv = pd.Series([22, 220, 380])\n assert net.bus.vn_kv.isin(all_vn_kv).all()\n all_length_km = pd.Series([100, 300, 600, 0.1])\n assert net.line.length_km.isin(all_length_km).all()\n assert len(net.bus) == 13\n assert len(net.line) == 9\n assert len(net.gen) == 3\n assert len(net.sgen) == 0\n assert len(net.shunt) == 3\n assert len(net.trafo) == 6\n assert len(net.load) == 5\n assert len(net.ext_grid) == 1\n assert net.converged\n\n net = pn.create_cigre_network_hv(length_km_6a_6b=80)\n assert net.line.length_km[8] == 80\n\n\ndef test_cigre_mv():\n net = pn.create_cigre_network_mv() # with_der=False\n pp.runpp(net)\n\n all_vn_kv = pd.Series([110, 20])\n assert net.bus.vn_kv.isin(all_vn_kv).all()\n assert len(net.bus) == 15\n assert len(net.line) == 15\n assert len(net.gen) == 0\n assert len(net.sgen) == 0\n assert len(net.shunt) == 0\n assert len(net.trafo) == 2\n assert len(net.load) == 18\n assert len(net.ext_grid) == 1\n assert len(net.switch) == 8\n assert net.converged\n\n net = pn.create_cigre_network_mv(with_der=\"pv_wind\")\n pp.runpp(net)\n\n all_vn_kv = pd.Series([110, 20])\n assert net.bus.vn_kv.isin(all_vn_kv).all()\n assert len(net.bus) == 15\n assert len(net.line) == 15\n assert len(net.gen) == 0\n assert len(net.sgen) == 9\n assert len(net.shunt) == 0\n assert len(net.trafo) == 2\n assert len(net.load) == 18\n assert len(net.ext_grid) == 1\n assert len(net.switch) == 8\n assert net.converged\n\n net = pn.create_cigre_network_mv(with_der=\"all\")\n pp.runpp(net)\n\n all_vn_kv = pd.Series([110, 20])\n assert net.bus.vn_kv.isin(all_vn_kv).all()\n assert len(net.bus) == 15\n assert len(net.line) == 15\n assert len(net.gen) == 0\n assert len(net.sgen) == 15\n assert len(net.shunt) == 0\n assert len(net.trafo) == 2\n assert len(net.load) == 18\n assert len(net.ext_grid) == 1\n assert len(net.switch) == 8\n assert net.converged\n\n\ndef test_cigre_lv():\n net = pn.create_cigre_network_lv()\n pp.runpp(net)\n\n all_vn_kv = pd.Series([20, 0.4])\n assert net.bus.vn_kv.isin(all_vn_kv).all()\n assert len(net.bus) == 44\n assert len(net.line) == 37\n assert len(net.gen) == 0\n assert len(net.sgen) == 0\n assert len(net.shunt) == 0\n assert len(net.trafo) == 3\n assert len(net.load) == 15\n assert len(net.ext_grid) == 1\n assert len(net.switch) == 3\n assert net.converged\n\nif __name__ == '__main__':\n pytest.main(['-x', \"test_cigre_networks.py\"])\n" ]
[ [ "pandas.Series" ] ]
vinhtq115/CRAFT-pytorch
[ "2be0399fde1ba9bbe7af73e29054fbb678a1d01b" ]
[ "craft_detector/craft.py" ]
[ "\"\"\" \nCopyright (c) 2019-present NAVER Corp.\nMIT License\n\"\"\"\n\n# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom craft_detector.basenet.vgg16_bn import vgg16_bn, init_weights\n\n\nclass double_conv(nn.Module):\n def __init__(self, in_ch, mid_ch, out_ch):\n super(double_conv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_ch + mid_ch, mid_ch, kernel_size=1),\n nn.BatchNorm2d(mid_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\n\nclass CRAFT(nn.Module):\n def __init__(self, pretrained=False, freeze=False):\n super(CRAFT, self).__init__()\n\n \"\"\" Base network \"\"\"\n self.basenet = vgg16_bn(pretrained, freeze)\n\n \"\"\" U network \"\"\"\n self.upconv1 = double_conv(1024, 512, 256)\n self.upconv2 = double_conv(512, 256, 128)\n self.upconv3 = double_conv(256, 128, 64)\n self.upconv4 = double_conv(128, 64, 32)\n\n num_class = 2\n self.conv_cls = nn.Sequential(\n nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True),\n nn.Conv2d(32, 16, kernel_size=3, padding=1), nn.ReLU(inplace=True),\n nn.Conv2d(16, 16, kernel_size=1), nn.ReLU(inplace=True),\n nn.Conv2d(16, num_class, kernel_size=1),\n )\n\n init_weights(self.upconv1.modules())\n init_weights(self.upconv2.modules())\n init_weights(self.upconv3.modules())\n init_weights(self.upconv4.modules())\n init_weights(self.conv_cls.modules())\n \n def forward(self, x):\n \"\"\" Base network \"\"\"\n sources = self.basenet(x)\n\n \"\"\" U network \"\"\"\n y = torch.cat([sources[0], sources[1]], dim=1)\n y = self.upconv1(y)\n\n y = F.interpolate(y, size=sources[2].size()[2:], mode='bilinear', align_corners=False)\n y = torch.cat([y, sources[2]], dim=1)\n y = self.upconv2(y)\n\n y = F.interpolate(y, size=sources[3].size()[2:], mode='bilinear', align_corners=False)\n y = torch.cat([y, sources[3]], dim=1)\n y = self.upconv3(y)\n\n y = F.interpolate(y, size=sources[4].size()[2:], mode='bilinear', align_corners=False)\n y = torch.cat([y, sources[4]], dim=1)\n feature = self.upconv4(y)\n\n y = self.conv_cls(feature)\n\n return y.permute(0,2,3,1), feature\n\nif __name__ == '__main__':\n model = CRAFT(pretrained=True).cuda()\n output, _ = model(torch.randn(1, 3, 768, 768).cuda())\n print(output.shape)" ]
[ [ "torch.cat", "torch.randn", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
hdoupe/OG-USA
[ "2e5c116bb8656ab190a59e431a8d57415fe26b08" ]
[ "ogusa/tax.py" ]
[ "'''\n------------------------------------------------------------------------\nFunctions for taxes in the steady state and along the transition path.\n------------------------------------------------------------------------\n'''\n\n# Packages\nimport numpy as np\nfrom ogusa import utils\n\n'''\n------------------------------------------------------------------------\n Functions\n------------------------------------------------------------------------\n'''\n\n\ndef replacement_rate_vals(nssmat, wss, factor_ss, j, p):\n '''\n Calculates replacement rate values for the social security system.\n\n Args:\n nssmat (Numpy array): initial guess at labor supply, size = SxJ\n new_w (scalar): steady state real wage rate\n factor_ss (scalar): scaling factor converting model units to\n dollars\n j (int): index of lifetime income group\n p (OG-USA Specifications object): model parameters\n\n Returns:\n theta (Numpy array): social security replacement rate value for\n lifetime income group j\n\n '''\n if j is not None:\n e = p.e[:, j]\n else:\n e = p.e\n # adjust number of calendar years AIME computed from int model periods\n equiv_periods = int(round((p.S / 80.0) * p.AIME_num_years)) - 1\n if e.ndim == 2:\n dim2 = e.shape[1]\n else:\n dim2 = 1\n earnings = (e * (wss * nssmat * factor_ss)).reshape(p.S, dim2)\n # get highest earning years for number of years AIME computed from\n highest_earn =\\\n (-1.0 * np.sort(-1.0 * earnings[:p.retire[-1], :],\n axis=0))[:equiv_periods]\n AIME = highest_earn.sum(0) / ((12.0 * (p.S / 80.0)) * equiv_periods)\n PIA = np.zeros(dim2)\n # Compute level of replacement using AIME brackets and PIA rates\n for j in range(dim2):\n if AIME[j] < p.AIME_bkt_1:\n PIA[j] = p.PIA_rate_bkt_1 * AIME[j]\n elif AIME[j] < p.AIME_bkt_2:\n PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +\n p.PIA_rate_bkt_2 * (AIME[j] - p.AIME_bkt_1))\n else:\n PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +\n p.PIA_rate_bkt_2 * (p.AIME_bkt_2 - p.AIME_bkt_1) +\n p.PIA_rate_bkt_3 * (AIME[j] - p.AIME_bkt_2))\n # Set the maximum monthly replacment rate from SS benefits tables\n PIA[PIA > p.PIA_maxpayment] = p.PIA_maxpayment\n if p.PIA_minpayment != 0.0:\n PIA[PIA < p.PIA_minpayment] = p.PIA_minpayment\n theta = (PIA * (12.0 * p.S / 80.0)) / (factor_ss * wss)\n return theta\n\n\ndef ETR_wealth(b, h_wealth, m_wealth, p_wealth):\n r'''\n Calculates the effective tax rate on wealth.\n\n .. math::\n T_{j,s,t}^{w} = \\frac{h^{w}p_{w}b_{j,s,t}}{h^{w}b_{j,s,t} + m^{w}}\n\n Args:\n b (Numpy array): savings\n h_wealth (scalar): parameter of wealth tax function\n p_wealth (scalar): parameter of wealth tax function\n m_wealth (scalar): parameter of wealth tax function\n\n Returns:\n tau_w (Numpy array): effective tax rate on wealth, size = SxJ\n\n '''\n tau_w = (p_wealth * h_wealth * b) / (h_wealth * b + m_wealth)\n return tau_w\n\n\ndef MTR_wealth(b, h_wealth, m_wealth, p_wealth):\n r'''\n Calculates the marginal tax rate on wealth from the wealth tax.\n\n .. 
math::\n \\frac{\\partial T_{j,s,t}^{w}}{\\partial b_{j,s,t}} = \\frac{h^{w}m^{w}p_{w}}{(b_{j,s,t}h^{w}m^{w})^{2}}\n\n Args:\n b (Numpy array): savings\n h_wealth (scalar): parameter of wealth tax function\n p_wealth (scalar): parameter of wealth tax function\n m_wealth (scalar): parameter of wealth tax function\n\n Returns:\n tau_prime (Numpy array): marginal tax rate on wealth, size = SxJ\n\n '''\n tau_prime = ((b * h_wealth * m_wealth * p_wealth) /\n ((b * h_wealth + m_wealth) ** 2) +\n ETR_wealth(b, h_wealth, m_wealth, p_wealth))\n return tau_prime\n\n\ndef ETR_income(r, w, b, n, factor, e, etr_params, p):\n '''\n Calculates effective personal income tax rate.\n\n Args:\n r (array_like): real interest rate\n w (array_like): real wage rate\n b (Numpy array): savings\n n (Numpy array): labor supply\n factor (scalar): scaling factor converting model units to\n dollars\n e (Numpy array): effective labor units\n etr_params (Numpy array): effective tax rate function parameters\n p (OG-USA Specifications object): model parameters\n\n Returns:\n tau (Numpy array): effective tax rate on total income\n\n '''\n X = (w * e * n) * factor\n Y = (r * b) * factor\n X2 = X ** 2\n Y2 = Y ** 2\n income = X + Y\n income2 = income ** 2\n\n if p.tax_func_type == 'GS':\n phi0 = np.squeeze(etr_params[..., 0])\n phi1 = np.squeeze(etr_params[..., 1])\n phi2 = np.squeeze(etr_params[..., 2])\n tau = ((phi0 * (income - ((income ** -phi1) + phi2) **\n (-1 / phi1))) / income)\n elif p.tax_func_type == 'DEP_totalinc':\n A = np.squeeze(etr_params[..., 0])\n B = np.squeeze(etr_params[..., 1])\n max_income = np.squeeze(etr_params[..., 4])\n min_income = np.squeeze(etr_params[..., 5])\n shift_income = np.squeeze(etr_params[..., 8])\n shift = np.squeeze(etr_params[..., 10])\n tau_income = (((max_income - min_income) *\n (A * income2 + B * income) /\n (A * income2 + B * income + 1)) + min_income)\n tau = tau_income + shift_income + shift\n else: # DEP or linear\n A = np.squeeze(etr_params[..., 0])\n B = np.squeeze(etr_params[..., 1])\n C = np.squeeze(etr_params[..., 2])\n D = np.squeeze(etr_params[..., 3])\n max_x = np.squeeze(etr_params[..., 4])\n min_x = np.squeeze(etr_params[..., 5])\n max_y = np.squeeze(etr_params[..., 6])\n min_y = np.squeeze(etr_params[..., 7])\n shift_x = np.squeeze(etr_params[..., 8])\n shift_y = np.squeeze(etr_params[..., 9])\n shift = np.squeeze(etr_params[..., 10])\n share = np.squeeze(etr_params[..., 11])\n\n tau_x = ((max_x - min_x) * (A * X2 + B * X) /\n (A * X2 + B * X + 1) + min_x)\n tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /\n (C * Y2 + D * Y + 1) + min_y)\n tau = (((tau_x + shift_x) ** share) *\n ((tau_y + shift_y) ** (1 - share))) + shift\n\n return tau\n\n\ndef MTR_income(r, w, b, n, factor, mtr_capital, e, etr_params,\n mtr_params, p):\n r'''\n Generates the marginal tax rate on labor income for households.\n\n Args:\n r (array_like): real interest rate\n w (array_like): real wage rate\n b (Numpy array): savings\n n (Numpy array): labor supply\n factor (scalar): scaling factor converting model units to\n dollars\n mtr_capital (bool): whether to compute the marginal tax rate on\n capital income or labor income\n e (Numpy array): effective labor units\n etr_params (Numpy array): effective tax rate function parameters\n p (OG-USA Specifications object): model parameters\n\n Returns:\n tau (Numpy array): marginal tax rate on income source\n\n '''\n X = (w * e * n) * factor\n Y = (r * b) * factor\n X2 = X ** 2\n Y2 = Y ** 2\n income = X + Y\n income2 = income ** 2\n\n if p.tax_func_type 
== 'GS':\n if p.analytical_mtrs:\n phi0 = np.squeeze(etr_params[..., 0])\n phi1 = np.squeeze(etr_params[..., 1])\n phi2 = np.squeeze(etr_params[..., 2])\n else:\n phi0 = np.squeeze(mtr_params[..., 0])\n phi1 = np.squeeze(mtr_params[..., 1])\n phi2 = np.squeeze(mtr_params[..., 2])\n tau = (phi0*(1 - (income ** (-phi1 - 1) *\n ((income ** -phi1) + phi2) **\n ((-1 - phi1) / phi1))))\n elif p.tax_func_type == 'DEP_totalinc':\n if p.analytical_mtrs:\n A = np.squeeze(etr_params[..., 0])\n B = np.squeeze(etr_params[..., 1])\n max_income = np.squeeze(etr_params[..., 4])\n min_income = np.squeeze(etr_params[..., 5])\n shift_income = np.squeeze(etr_params[..., 8])\n shift = np.squeeze(etr_params[..., 10])\n d_etr = ((max_income - min_income) * ((2 * A * income + B) /\n ((A * income2 + B * income + 1) ** 2)))\n etr = (((max_income - min_income) *\n ((A * income2 + B * income) /\n (A * income2 + B * income + 1)) + min_income) +\n shift_income + shift)\n tau = (d_etr * income) + (etr)\n else:\n A = np.squeeze(mtr_params[..., 0])\n B = np.squeeze(mtr_params[..., 1])\n max_income = np.squeeze(mtr_params[..., 4])\n min_income = np.squeeze(mtr_params[..., 5])\n shift_income = np.squeeze(mtr_params[..., 8])\n shift = np.squeeze(mtr_params[..., 10])\n tau_income = (((max_income - min_income) *\n (A * income2 + B * income) /\n (A * income2 + B * income + 1)) + min_income)\n tau = tau_income + shift_income + shift\n else: # DEP or linear\n if p.analytical_mtrs:\n A = np.squeeze(etr_params[..., 0])\n B = np.squeeze(etr_params[..., 1])\n C = np.squeeze(etr_params[..., 2])\n D = np.squeeze(etr_params[..., 3])\n max_x = np.squeeze(etr_params[..., 4])\n min_x = np.squeeze(etr_params[..., 5])\n max_y = np.squeeze(etr_params[..., 6])\n min_y = np.squeeze(etr_params[..., 7])\n shift_x = np.squeeze(etr_params[..., 8])\n shift_y = np.squeeze(etr_params[..., 9])\n shift = np.squeeze(etr_params[..., 10])\n share = np.squeeze(etr_params[..., 11])\n\n tau_x = ((max_x - min_x) * (A * X2 + B * X) /\n (A * X2 + B * X + 1) + min_x)\n tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /\n (C * Y2 + D * Y + 1) + min_y)\n etr = (((tau_x + shift_x) ** share) *\n ((tau_y + shift_y) ** (1 - share))) + shift\n if mtr_capital:\n d_etr = ((1-share) * ((tau_y + shift_y) ** (-share)) *\n (max_y - min_y) * ((2 * C * Y + D) /\n ((C * Y2 + D * Y + 1)\n ** 2)) *\n ((tau_x + shift_x) ** share))\n tau = d_etr * income + etr\n else:\n d_etr = (share * ((tau_x + shift_x) ** (share - 1)) *\n (max_x - min_x) * ((2 * A * X + B) /\n ((A * X2 + B * X + 1)\n ** 2)) *\n ((tau_y + shift_y) ** (1 - share)))\n tau = d_etr * income + etr\n else:\n A = np.squeeze(mtr_params[..., 0])\n B = np.squeeze(mtr_params[..., 1])\n C = np.squeeze(mtr_params[..., 2])\n D = np.squeeze(mtr_params[..., 3])\n max_x = np.squeeze(mtr_params[..., 4])\n min_x = np.squeeze(mtr_params[..., 5])\n max_y = np.squeeze(mtr_params[..., 6])\n min_y = np.squeeze(mtr_params[..., 7])\n shift_x = np.squeeze(mtr_params[..., 8])\n shift_y = np.squeeze(mtr_params[..., 9])\n shift = np.squeeze(mtr_params[..., 10])\n share = np.squeeze(mtr_params[..., 11])\n\n tau_x = ((max_x - min_x) * (A * X2 + B * X) /\n (A * X2 + B * X + 1) + min_x)\n tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /\n (C * Y2 + D * Y + 1) + min_y)\n tau = (((tau_x + shift_x) ** share) *\n ((tau_y + shift_y) ** (1 - share))) + shift\n\n return tau\n\n\ndef get_biz_tax(w, Y, L, K, p, method):\n r'''\n Finds total business income tax revenue.\n\n .. 
math::\n R_{t}^{b} = \\tau_{t}^{b}(Y_{t} - w_{t}L_{t}) - \\tau_{t}^{b}\\delta_{t}^{\\tau}K_{t}^{\\tau}\n Args:\n r (array_like): real interest rate\n Y (array_like): aggregate output\n L (array_like): aggregate labor demand\n K (array_like): aggregate capital demand\n\n Returns:\n business_revenue (array_like): aggregate business tax revenue\n\n '''\n if method == 'SS':\n delta_tau = p.delta_tau[-1]\n tau_b = p.tau_b[-1]\n else:\n delta_tau = p.delta_tau[:p.T]\n tau_b = p.tau_b[:p.T]\n business_revenue = tau_b * (Y - w * L) - tau_b * delta_tau * K\n return business_revenue\n\n\ndef net_taxes(r, w, b, n, bq, factor, tr, theta, t, j, shift, method,\n e, etr_params, p):\n '''\n Calculate net taxes paid for each household.\n\n Args:\n r (array_like): real interest rate\n w (array_like): real wage rate\n b (Numpy array): savings\n n (Numpy array): labor supply\n bq (Numpy array): bequests received\n factor (scalar): scaling factor converting model units to\n dollars\n tr (Numpy array): government transfers to the household\n theta (Numpy array): social security replacement rate value for\n lifetime income group j\n t (int): time period\n j (int): index of lifetime income group\n shift (bool): whether computing for periods 0--s or 1--(s+1),\n =True for 1--(s+1)\n method (str): adjusts calculation dimensions based on 'SS' or\n 'TPI'\n e (Numpy array): effective labor units\n etr_params (Numpy array): effective tax rate function parameters\n p (OG-USA Specifications object): model parameters\n\n Returns:\n net_tax (Numpy array): net taxes paid for each household\n\n '''\n T_I = income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p)\n pension = pension_amount(w, n, theta, t, j, shift, method, e, p)\n T_BQ = bequest_tax_liab(r, b, bq, t, j, method, p)\n T_W = wealth_tax_liab(r, b, t, j, method, p)\n\n net_tax = T_I - pension + T_BQ + T_W - tr\n\n return net_tax\n\n\ndef income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p):\n '''\n Calculate income and payroll tax liability for each household\n\n Args:\n r (array_like): real interest rate\n w (array_like): real wage rate\n b (Numpy array): savings\n n (Numpy array): labor supply\n factor (scalar): scaling factor converting model units to\n dollars\n t (int): time period\n j (int): index of lifetime income group\n method (str): adjusts calculation dimensions based on 'SS' or\n 'TPI'\n e (Numpy array): effective labor units\n etr_params (Numpy array): effective tax rate function parameters\n p (OG-USA Specifications object): model parameters\n\n Returns:\n T_I (Numpy array): total income and payroll taxes paid for each\n household\n\n '''\n if j is not None:\n if method == 'TPI':\n if b.ndim == 2:\n r = r.reshape(r.shape[0], 1)\n w = w.reshape(w.shape[0], 1)\n else:\n if method == 'TPI':\n r = utils.to_timepath_shape(r)\n w = utils.to_timepath_shape(w)\n\n income = r * b + w * e * n\n labor_income = w * e * n\n T_I = ETR_income(r, w, b, n, factor, e, etr_params, p) * income\n if method == 'SS':\n T_P = p.tau_payroll[-1] * labor_income\n elif method == 'TPI':\n length = w.shape[0]\n if len(b.shape) == 1:\n T_P = p.tau_payroll[t: t + length] * labor_income\n elif len(b.shape) == 2:\n T_P = (p.tau_payroll[t: t + length].reshape(length, 1) *\n labor_income)\n else:\n T_P = (p.tau_payroll[t:t + length].reshape(length, 1, 1) *\n labor_income)\n elif method == 'TPI_scalar':\n T_P = p.tau_payroll[0] * labor_income\n\n income_payroll_tax_liab = T_I + T_P\n\n return income_payroll_tax_liab\n\n\ndef pension_amount(w, n, theta, t, j, 
shift, method, e, p):\n '''\n Calculate public pension benefit amounts for each household.\n\n Args:\n w (array_like): real wage rate\n n (Numpy array): labor supply\n theta (Numpy array): social security replacement rate value for\n lifetime income group j\n t (int): time period\n j (int): index of lifetime income group\n shift (bool): whether computing for periods 0--s or 1--(s+1),\n =True for 1--(s+1)\n method (str): adjusts calculation dimensions based on 'SS' or\n 'TPI'\n e (Numpy array): effective labor units\n p (OG-USA Specifications object): model parameters\n\n Returns:\n pension (Numpy array): pension amount for each household\n\n '''\n if j is not None:\n if method == 'TPI':\n if n.ndim == 2:\n w = w.reshape(w.shape[0], 1)\n else:\n if method == 'TPI':\n w = utils.to_timepath_shape(w)\n\n pension = np.zeros_like(n)\n if method == 'SS':\n # Depending on if we are looking at b_s or b_s+1, the\n # entry for retirement will change (it shifts back one).\n # The shift boolean makes sure we start replacement rates\n # at the correct age.\n if shift is False:\n pension[p.retire[-1]:] = theta * w\n else:\n pension[p.retire[-1] - 1:] = theta * w\n elif method == 'TPI':\n length = w.shape[0]\n if not shift:\n # retireTPI is different from retire, because in TP income\n # we are counting backwards with different length lists.\n # This will always be the correct location of retirement,\n # depending on the shape of the lists.\n retireTPI = (p.retire[t: t + length] - p.S)\n else:\n retireTPI = (p.retire[t: t + length] - 1 - p.S)\n if len(n.shape) == 1:\n if not shift:\n retireTPI = p.retire[t] - p.S\n else:\n retireTPI = p.retire[t] - 1 - p.S\n pension[retireTPI:] = (\n theta[j] * p.replacement_rate_adjust[t] * w[retireTPI:])\n elif len(n.shape) == 2:\n for tt in range(pension.shape[0]):\n pension[tt, retireTPI[tt]:] = (\n theta * p.replacement_rate_adjust[t + tt] * w[tt])\n else:\n for tt in range(pension.shape[0]):\n pension[tt, retireTPI[tt]:, :] = (\n theta.reshape(1, p.J) *\n p.replacement_rate_adjust[t + tt] * w[tt])\n elif method == 'TPI_scalar':\n # The above methods won't work if scalars are used. 
This option\n # is only called by the SS_TPI_firstdoughnutring function in TPI.\n pension = theta * p.replacement_rate_adjust[0] * w\n\n return pension\n\n\ndef wealth_tax_liab(r, b, t, j, method, p):\n '''\n Calculate wealth tax liability for each household.\n\n Args:\n r (array_like): real interest rate\n b (Numpy array): savings\n t (int): time period\n j (int): index of lifetime income group\n method (str): adjusts calculation dimensions based on 'SS' or\n 'TPI'\n p (OG-USA Specifications object): model parameters\n\n Returns:\n T_W (Numpy array): wealth tax liability for each household\n\n '''\n if j is not None:\n if method == 'TPI':\n if b.ndim == 2:\n r = r.reshape(r.shape[0], 1)\n else:\n if method == 'TPI':\n r = utils.to_timepath_shape(r)\n\n if method == 'SS':\n T_W = (ETR_wealth(b, p.h_wealth[-1], p.m_wealth[-1],\n p.p_wealth[-1]) * b)\n elif method == 'TPI':\n length = r.shape[0]\n if len(b.shape) == 1:\n T_W = (ETR_wealth(b, p.h_wealth[t:t + length],\n p.m_wealth[t:t + length],\n p.p_wealth[t:t + length]) * b)\n elif len(b.shape) == 2:\n T_W = (ETR_wealth(b, p.h_wealth[t:t + length],\n p.m_wealth[t:t + length],\n p.p_wealth[t:t + length]) * b)\n else:\n T_W = (ETR_wealth(\n b, p.h_wealth[t:t + length].reshape(length, 1, 1),\n p.m_wealth[t:t + length].reshape(length, 1, 1),\n p.p_wealth[t:t + length].reshape(length, 1, 1)) * b)\n elif method == 'TPI_scalar':\n T_W = (ETR_wealth(b, p.h_wealth[0], p.m_wealth[0],\n p.p_wealth[0]) * b)\n\n return T_W\n\n\ndef bequest_tax_liab(r, b, bq, t, j, method, p):\n '''\n Calculate liability due from taxes on bequests for each household.\n\n Args:\n r (array_like): real interest rate\n b (Numpy array): savings\n bq (Numpy array): bequests received\n t (int): time period\n j (int): index of lifetime income group\n method (str): adjusts calculation dimensions based on 'SS' or\n 'TPI'\n p (OG-USA Specifications object): model parameters\n\n Returns:\n T_BQ (Numpy array): bequest tax liability for each household\n\n '''\n if j is not None:\n lambdas = p.lambdas[j]\n if method == 'TPI':\n if b.ndim == 2:\n r = r.reshape(r.shape[0], 1)\n else:\n lambdas = np.transpose(p.lambdas)\n if method == 'TPI':\n r = utils.to_timepath_shape(r)\n\n if method == 'SS':\n T_BQ = p.tau_bq[-1] * bq\n elif method == 'TPI':\n length = r.shape[0]\n if len(b.shape) == 1:\n T_BQ = p.tau_bq[t:t + length] * bq\n elif len(b.shape) == 2:\n T_BQ = p.tau_bq[t:t + length].reshape(length, 1) * bq / lambdas\n else:\n T_BQ = p.tau_bq[t:t + length].reshape(length, 1, 1) * bq\n elif method == 'TPI_scalar':\n # The above methods won't work if scalars are used. This option\n # is only called by the SS_TPI_firstdoughnutring function in TPI.\n T_BQ = p.tau_bq[0] * bq\n\n return T_BQ\n" ]
[ [ "numpy.squeeze", "numpy.sort", "numpy.zeros_like", "numpy.transpose", "numpy.zeros" ] ]
cwlroda/falldetection
[ "536816022208e00f61d7c13613d1447c4bc6bfb2" ]
[ "core_others/tpu-usbcamera-sync.py" ]
[ "import sys\nimport argparse\nimport numpy as np\nimport cv2\nimport time\n#from edgetpu.detection.engine import DetectionEngine\nfrom edgetpu.basic.basic_engine import BasicEngine\n\n\nkeypointsMapping = ['Nose', 'Neck', 'R-Sho', 'R-Elb', 'R-Wr', 'L-Sho', 'L-Elb', 'L-Wr', 'R-Hip', 'R-Knee', 'R-Ank', 'L-Hip', 'L-Knee', 'L-Ank', 'R-Eye', 'L-Eye', 'R-Ear', 'L-Ear']\nPOSE_PAIRS = [[1,2], [1,5], [2,3], [3,4], [5,6], [6,7], [1,8], [8,9], [9,10], [1,11], [11,12], [12,13], [1,0], [0,14], [14,16], [0,15], [15,17], [2,17], [5,16]]\nmapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22], [23,24], [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52], [55,56], [37,38], [45,46]]\ncolors = [[0,100,255], [0,100,255], [0,255,255], [0,100,255], [0,255,255], [0,100,255], [0,255,0], [255,200,100], [255,0,255], [0,255,0], [255,200,100], [255,0,255], [0,0,255], [255,0,0], [200,200,0], [255,0,0], [200,200,0], [0,0,0]]\n\n\ndef getKeypoints(probMap, threshold=0.1):\n\n mapSmooth = cv2.GaussianBlur(probMap, (3, 3), 0, 0)\n mapMask = np.uint8(mapSmooth>threshold)\n keypoints = []\n contours = None\n try:\n #OpenCV4.x\n contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n except:\n #OpenCV3.x\n _, contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n for cnt in contours:\n blobMask = np.zeros(mapMask.shape)\n blobMask = cv2.fillConvexPoly(blobMask, cnt, 1)\n maskedProbMap = mapSmooth * blobMask\n _, maxVal, _, maxLoc = cv2.minMaxLoc(maskedProbMap)\n keypoints.append(maxLoc + (probMap[maxLoc[1], maxLoc[0]],))\n\n return keypoints\n\n\ndef getValidPairs(outputs, w, h, detected_keypoints):\n valid_pairs = []\n invalid_pairs = []\n n_interp_samples = 10\n paf_score_th = 0.1\n conf_th = 0.7\n\n for k in range(len(mapIdx)):\n pafA = outputs[0, mapIdx[k][0], :, :]\n pafB = outputs[0, mapIdx[k][1], :, :]\n pafA = cv2.resize(pafA, (w, h))\n pafB = cv2.resize(pafB, (w, h))\n\n candA = detected_keypoints[POSE_PAIRS[k][0]]\n candB = detected_keypoints[POSE_PAIRS[k][1]]\n nA = len(candA)\n nB = len(candB)\n\n if( nA != 0 and nB != 0):\n valid_pair = np.zeros((0,3))\n for i in range(nA):\n max_j=-1\n maxScore = -1\n found = 0\n for j in range(nB):\n d_ij = np.subtract(candB[j][:2], candA[i][:2])\n norm = np.linalg.norm(d_ij)\n if norm:\n d_ij = d_ij / norm\n else:\n continue\n interp_coord = list(zip(np.linspace(candA[i][0], candB[j][0], num=n_interp_samples),\n np.linspace(candA[i][1], candB[j][1], num=n_interp_samples)))\n paf_interp = []\n for k in range(len(interp_coord)):\n paf_interp.append([pafA[int(round(interp_coord[k][1])), int(round(interp_coord[k][0]))],\n pafB[int(round(interp_coord[k][1])), int(round(interp_coord[k][0]))] ])\n paf_scores = np.dot(paf_interp, d_ij)\n avg_paf_score = sum(paf_scores)/len(paf_scores)\n\n if ( len(np.where(paf_scores > paf_score_th)[0]) / n_interp_samples ) > conf_th :\n if avg_paf_score > maxScore:\n max_j = j\n maxScore = avg_paf_score\n found = 1\n if found:\n valid_pair = np.append(valid_pair, [[candA[i][3], candB[max_j][3], maxScore]], axis=0)\n\n valid_pairs.append(valid_pair)\n else:\n invalid_pairs.append(k)\n valid_pairs.append([])\n return valid_pairs, invalid_pairs\n\n\ndef getPersonwiseKeypoints(valid_pairs, invalid_pairs, keypoints_list):\n personwiseKeypoints = -1 * np.ones((0, 19))\n\n for k in range(len(mapIdx)):\n if k not in invalid_pairs:\n partAs = valid_pairs[k][:,0]\n partBs = valid_pairs[k][:,1]\n indexA, indexB = np.array(POSE_PAIRS[k])\n\n for i in 
range(len(valid_pairs[k])):\n found = 0\n person_idx = -1\n for j in range(len(personwiseKeypoints)):\n if personwiseKeypoints[j][indexA] == partAs[i]:\n person_idx = j\n found = 1\n break\n\n if found:\n personwiseKeypoints[person_idx][indexB] = partBs[i]\n personwiseKeypoints[person_idx][-1] += keypoints_list[partBs[i].astype(int), 2] + valid_pairs[k][i][2]\n\n elif not found and k < 17:\n row = -1 * np.ones(19)\n row[indexA] = partAs[i]\n row[indexB] = partBs[i]\n row[-1] = sum(keypoints_list[valid_pairs[k][i,:2].astype(int), 2]) + valid_pairs[k][i][2]\n personwiseKeypoints = np.vstack([personwiseKeypoints, row])\n return personwiseKeypoints\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", default=\"models/train/test/tpu/mobilenet_v2_1.4_224/output_tflite_graph_edgetpu.tflite\", help=\"Path of the inference model.\")\n parser.add_argument(\"--usbcamno\", type=int, default=0, help=\"USB Camera number.\")\n parser.add_argument(\"--usbcamfps\", type=int, default=30, help=\"USB Camera FPS.\")\n args = parser.parse_args()\n\n camera_width = 320\n camera_height = 240\n\n fps = \"\"\n framecount = 0\n time1 = 0\n elapsedTime = 0\n\n h = 368\n w = 432\n\n new_w = int(camera_width * min(w/camera_width, h/camera_height))\n new_h = int(camera_height * min(w/camera_width, h/camera_height))\n\n threshold = 0.1\n nPoints = 18\n\n cap = cv2.VideoCapture(args.usbcamno)\n cap.set(cv2.CAP_PROP_FPS, args.usbcamfps)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)\n cv2.namedWindow(\"USB Camera\", cv2.WINDOW_AUTOSIZE)\n\n # Initialize engine.\n engine = BasicEngine(args.model)\n\n # Run inference.\n while True:\n t1 = time.perf_counter()\n\n ret, color_image = cap.read()\n if not ret:\n break\n\n resized_image = cv2.resize(color_image, (new_w, new_h), interpolation = cv2.INTER_CUBIC)\n prepimg = resized_image[:, :, ::-1].copy()\n canvas = canvas2 = np.full((h, w, 3), 128)\n canvas[(h - new_h)//2:(h - new_h)//2 + new_h,(w - new_w)//2:(w - new_w)//2 + new_w, :] = prepimg\n canvas2[(h - new_h)//2:(h - new_h)//2 + new_h,(w - new_w)//2:(w - new_w)//2 + new_w, :] = resized_image\n\n prepimg = np.uint8(canvas).flatten()\n #prepimg = canvas.flatten()\n\n #tinf = time.perf_counter()\n #ans = engine.DetectWithImage(prepimg, threshold=0.5, keep_aspect_ratio=True, relative_coord=False, top_k=10)\n\n #print(\"engine.required_input_array_size()=\", engine.required_input_array_size()) #476928\n #print(\"prepimg.flatten()=\", len(prepimg)) #476928\n ans = engine.RunInference(prepimg)\n #print(\"len(ans)=\", len(ans)) #2\n #print(\"ans[0]=\", ans[0]) #3.071000099182129\n print(\"ans[1]=\", ans[1]) #[0.04705882 0.09411765 0.04705882 ... 0.07058824 0.23529412 0. 
]\n print(\"len(ans[1])=\", len(ans[1])) #141588=1x46x54x57 or 1x57x46x54\n\n outputs = ans[1].reshape((1, 46, 54, 57)).transpose((0, 3, 1, 2)) #(1, 57, 46, 54)\n #outputs = outputs[np.newaxis, :, :, :]\n #outputs = ans[1].reshape((1, 57, 46, 54)) #(1, 57, 46, 54)\n\n #print(time.perf_counter() - tinf, \"sec\")\n #sys.exit(0)\n\n detected_keypoints = []\n keypoints_list = np.zeros((0, 3))\n keypoint_id = 0\n\n #print(\"outputs.shape()=\", outputs.shape)\n #print(\"outputs.shape(outputs[0, 0, :, :])=\", outputs[0, 0, :, :])\n\n for part in range(nPoints):\n probMap = outputs[0, part, :, :]\n probMap = cv2.resize(probMap, (w, h)) # (432, 368)\n keypoints = getKeypoints(probMap, threshold)\n keypoints_with_id = []\n\n for i in range(len(keypoints)):\n keypoints_with_id.append(keypoints[i] + (keypoint_id,))\n keypoints_list = np.vstack([keypoints_list, keypoints[i]])\n keypoint_id += 1\n\n detected_keypoints.append(keypoints_with_id)\n\n #print(\"len(detected_keypoints)=\", len(detected_keypoints))\n\n frameClone = np.uint8(canvas2.copy())\n for i in range(nPoints):\n #print(\"detected_keypoints[i]=\", detected_keypoints[i])\n for j in range(len(detected_keypoints[i])):\n\n cv2.circle(frameClone, detected_keypoints[i][j][0:2], 5, colors[i], -1, cv2.LINE_AA)\n\n valid_pairs, invalid_pairs = getValidPairs(outputs, w, h, detected_keypoints)\n #print(\"valid_pairs, invalid_pairs=\", valid_pairs, invalid_pairs)\n personwiseKeypoints = getPersonwiseKeypoints(valid_pairs, invalid_pairs, keypoints_list)\n\n print(\"personwiseKeypoints=\", personwiseKeypoints)\n\n for i in range(17):\n for n in range(len(personwiseKeypoints)):\n index = personwiseKeypoints[n][np.array(POSE_PAIRS[i])]\n if -1 in index:\n continue\n B = np.int32(keypoints_list[index.astype(int), 0])\n A = np.int32(keypoints_list[index.astype(int), 1])\n cv2.line(frameClone, (B[0], A[0]), (B[1], A[1]), colors[i], 3, cv2.LINE_AA)\n\n cv2.putText(frameClone, fps, (w-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)\n\n cv2.imshow(\"USB Camera\" , frameClone)\n\n if cv2.waitKey(1)&0xFF == ord('q'):\n break\n\n # FPS calculation\n framecount += 1\n if framecount >= 5:\n fps = \"(Playback) {:.1f} FPS\".format(time1/15)\n framecount = 0\n time1 = 0\n t2 = time.perf_counter()\n elapsedTime = t2-t1\n time1 += 1/elapsedTime\n\n cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.dot", "numpy.linspace", "numpy.uint8", "numpy.subtract", "numpy.linalg.norm", "numpy.full", "numpy.ones", "numpy.append", "numpy.array", "numpy.zeros", "numpy.where", "numpy.vstack" ] ]
seu-labview/ObjectDatasetTools
[ "491c4c1d760dd3fd95b4a91abb3b929f16a08633" ]
[ "registration.py" ]
[ "\"\"\"\nregistration.py\n---------------\n\nFunctions for registering (aligning) point clouds with ICP and feature registration.\n\n\"\"\"\n\nfrom open3d import *\nimport numpy as np\nimport cv2\n\ndef icp(source,target,voxel_size,max_correspondence_distance_coarse,max_correspondence_distance_fine,\n method = \"colored-icp\"):\n\n \"\"\"\n Perform pointcloud registration using iterative closest point.\n\n Parameters\n ----------\n source : An open3d.Pointcloud instance\n 6D pontcloud of a source segment\n target : An open3d.Pointcloud instance\n 6D pointcloud of a target segment\n method : string\n colored-icp, as in Park, Q.-Y. Zhou, and V. Koltun, Colored Point Cloud \n Registration Revisited, ICCV, 2017 (slower)\n point-to-plane, a coarse to fine implementation of point-to-plane icp (faster)\n max_correspondence_distance_coarse : float\n The max correspondence distance used for the course ICP during the process\n of coarse to fine registration (if point-to-plane)\n max_correspondence_distance_fine : float\n The max correspondence distance used for the fine ICP during the process \n of coarse to fine registration (if point-to-plane)\n\n Returns\n ----------\n transformation_icp: (4,4) float\n The homogeneous rigid transformation that transforms source to the target's\n frame\n information_icp:\n An information matrix returned by open3d.get_information_matrix_from_ \\\n point_clouds function\n \"\"\"\n\n\n assert method in [\"point-to-plane\",\"colored-icp\"],\"point-to-plane or colored-icp\"\n if method == \"point-to-plane\":\n icp_coarse = registration.registration_icp(source, target,\n max_correspondence_distance_coarse, np.identity(4),\n registration.TransformationEstimationPointToPlane())\n icp_fine = registration.registration_icp(source, target,\n max_correspondence_distance_fine, icp_coarse.transformation,\n registration.TransformationEstimationPointToPlane())\n\n transformation_icp = icp_fine.transformation\n\n\n if method == \"colored-icp\":\n result_icp = registration.registration_colored_icp(source,target,voxel_size, np.identity(4),\n registration.ICPConvergenceCriteria(relative_fitness = 1e-8,\n relative_rmse = 1e-8, max_iteration = 50))\n\n transformation_icp = result_icp.transformation\n\n \n information_icp = registration.get_information_matrix_from_point_clouds(\n source, target, max_correspondence_distance_fine,\n transformation_icp)\n \n return transformation_icp, information_icp\n\n\ndef feature_registration(source,target, MIN_MATCH_COUNT = 12):\n \"\"\"\n Obtain the rigid transformation from source to target\n first find correspondence of color images by performing fast registration\n using SIFT features on color images.\n The corresponding depth values of the matching keypoints is then used to\n obtain rigid transformation through a ransac process.\n \n\n Parameters\n ----------\n source : ((n,m) uint8, (n,m) float)\n The source color image and the corresponding 3d pointcloud combined in a list\n target : ((n,m) uint8, (n,m) float)\n The target color image and the corresponding 3d pointcloud combined in a list\n MIN_MATCH_COUNT : int \n The minimum number of good corresponding feature points for the algorithm to \n trust the pairwise registration result with feature matching only\n\n Returns\n ----------\n transform: (4,4) float or None\n The homogeneous rigid transformation that transforms source to the target's\n frame\n if None, registration result using feature matching only cannot be trusted\n either due to no enough good matching feature points are found, or 
the ransac\n process does not return a solution\n \n \"\"\"\n cad_src, depth_src = source\n cad_des, depth_des = target\n\n # Initiate SIFT detector\n sift = cv2.xfeatures2d.SIFT_create()\n\n # find the keypoints and descripto rs with SIFT\n kp1, des1 = sift.detectAndCompute(cad_src,None)\n kp2, des2 = sift.detectAndCompute(cad_des,None)\n\n # find good mathces\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(des1,des2, k=2)\n good = []\n for m,n in matches:\n if m.distance < 0.7*n.distance:\n good.append(m)\n\n # if number of good matching feature point is greater than the MIN_MATCH_COUNT\n\n if len(good)>MIN_MATCH_COUNT:\n src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\n matchesMask = mask.ravel().tolist()\n\n bad_match_index = np.where(np.array(matchesMask) == 0)\n src_index=np.vstack(src_pts).squeeze()\n src_index = np.delete(src_index, tuple(bad_match_index[0]), axis=0)\n src_index[:,[0, 1]] = src_index[:,[1, 0]]\n src_index = tuple(src_index.T.astype(np.int32))\n\n src_depths = depth_src[src_index]\n dst_index=np.vstack(dst_pts).squeeze()\n dst_index = np.delete(dst_index, tuple(bad_match_index[0]), axis=0)\n dst_index[:,[0, 1]] = dst_index[:,[1, 0]]\n dst_index = tuple(dst_index.T.astype(np.int32))\n dst_depths = depth_des[dst_index]\n\n\n dst_good=[]\n src_good=[]\n dst_depths=dst_depths[matchesMask>0][0]\n src_depths=src_depths[matchesMask>0][0]\n\n\n for i in xrange(len(dst_depths)):\n if np.sum(dst_depths[i])!=0 and np.sum(src_depths[i])!=0:\n dst_good.append(dst_depths[i].tolist())\n src_good.append(src_depths[i].tolist())\n\n \n # get rigid transforms between 2 set of feature points through ransac \n transform = match_ransac(np.asarray(src_good),np.asarray(dst_good))\n return transform\n\n else:\n return None\n\n\n\ndef match_ransac(p, p_prime, tol = 0.01):\n \"\"\"\n A ransac process that estimates the transform between two set of points\n p and p_prime.\n The transform is returned if the RMSE of the smallest 70% is smaller\n than the tol.\n \n Parameters\n ----------\n p : (n,3) float\n The source 3d pointcloud as a numpy.ndarray\n target : (n,3) float\n The target 3d pointcloud as a numpy.ndarray\n tol : float \n A transform is considered found if the smallest 70% RMSE error between the \n transformed p to p_prime is smaller than the tol\n\n Returns\n ----------\n transform: (4,4) float or None\n The homogeneous rigid transformation that transforms p to the p_prime's \n frame\n if None, the ransac does not find a sufficiently good solution\n \n \"\"\"\n\n leastError = None\n R = None\n t= None\n # the smallest 70% of the error is used to compute RMSE\n k= int(len(p)*0.7)\n assert len(p) == len(p_prime)\n R_temp,t_temp = rigid_transform_3D(p,p_prime)\n R_temp = np.array(R_temp)\n t_temp = (np.array(t_temp).T)[0]\n transformed = (np.dot(R_temp, p.T).T)+t_temp\n error = (transformed - p_prime)**2\n error = np.sum(error, axis=1)\n error = np.sqrt(error)\n\n RMSE = np.sum(error[np.argpartition(error, k)[:k]])/k\n if RMSE < tol:\n R = R_temp\n t = t_temp\n\n transform = [[R[0][0],R[0][1],R[0][2],t[0]],\n [R[1][0],R[1][1],R[1][2],t[1]],\n [R[2][0],R[2][1],R[2][2],t[2]],\n [0,0,0,1]]\n return transform\n\n return None\n\n \n\ndef rigid_transform_3D(A, B):\n \"\"\"\n Estimate a rigid transform between 2 set of points of equal length\n through singular value decomposition(svd), return a rotation and a \n transformation 
matrix\n\n Parameters\n ----------\n A : (n,3) float\n The source 3d pointcloud as a numpy.ndarray\n B : (n,3) float\n The target 3d pointcloud as a numpy.ndarray\n\n Returns\n ----------\n R: (3,3) float\n A rigid rotation matrix\n t: (3) float\n A translation vector\n \n \"\"\"\n\n assert len(A) == len(B)\n A= np.asmatrix(A)\n B= np.asmatrix(B)\n N = A.shape[0]; \n\n centroid_A = np.mean(A, axis=0)\n centroid_B = np.mean(B, axis=0)\n \n AA = A - np.tile(centroid_A, (N, 1))\n BB = B - np.tile(centroid_B, (N, 1))\n H = AA.T * BB\n U, S, Vt = np.linalg.svd(H)\n R = Vt.T * U.T\n\n # reflection case\n if np.linalg.det(R) < 0:\n Vt[2,:] *= -1\n R = Vt.T * U.T\n\n t = -R*centroid_A.T + centroid_B.T\n\n return (R, t)\n" ]
[ [ "numpy.dot", "numpy.linalg.svd", "numpy.sqrt", "numpy.asarray", "numpy.tile", "numpy.asmatrix", "numpy.linalg.det", "numpy.mean", "numpy.identity", "numpy.float32", "numpy.argpartition", "numpy.array", "numpy.sum", "numpy.vstack" ] ]
b-carter/SufficientInputSubsets
[ "ed9d64a37d6ed2ff5ae012461bcc7ed4684aa6dd" ]
[ "rationale_objects.py" ]
[ "import numpy as np\nimport os\nimport json\nimport types\n\nimport sis\nimport lime_helper\nfrom numpy_encoder import NumpyJSONEncoder\nfrom packages.IntegratedGradients.IntegratedGradients import integrated_gradients\n\n\n##########################################\n## Rationale keys for various methods (SIS and alternative methods) for dumps\n## of Example objects:\n\nSIS_RATIONALE_KEY = 'sis'\n\nIG_SUFF_RATIONALE_KEY = 'ig_sufficient'\nIG_FIXED_RATIONALE_KEY = 'ig_fixed_length'\nIG_TOP_RATIONALE_KEY = 'ig_top'\n\nLIME_SUFF_RATIONALE_KEY = 'lime_sufficient'\nLIME_FIXED_RATIONALE_KEY = 'lime_fixed_length'\n\nPERTURB_SUFF_RATIONALE_KEY = 'perturb_sufficient'\nPERTURB_FIXED_RATIONALE_KEY = 'perturb_fixed_length'\n\n##########################################\n\n\ndef make_threshold_f(threshold, is_pos):\n if not isinstance(is_pos, bool):\n raise TypeError('`is_pos` must be a boolean type')\n if is_pos:\n return lambda x: x >= threshold\n else:\n return lambda x: x <= threshold\n\n\nclass Rationale(object):\n def __init__(self, elms=[], history=None):\n self.elms = elms\n self.history = history\n\n def add(self, e):\n self.elms.append(e)\n\n def get_elms(self):\n return self.elms\n\n def get_length(self):\n return len(self.elms)\n\n def get_history(self):\n return self.history\n\n def __len__(self):\n return self.get_length()\n\n def __iter__(self):\n return iter(self.elms)\n\n def to_json_str(self):\n json_str = json.dumps(self.__dict__, cls=NumpyJSONEncoder)\n return json_str\n\n @staticmethod\n def from_json_str(json_str):\n data = json.loads(json_str)\n rationale = Rationale()\n for k, v in data.items():\n setattr(rationale, k, v)\n return rationale\n\n\nclass ExampleJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n elif isinstance(obj, Rationale):\n return obj.to_json_str()\n elif isinstance(obj, types.FunctionType): # cannot serialize functions\n return None\n else:\n return super(ExampleJSONEncoder, self).default(obj)\n\n\n# Define class for maintaining example (with input and index in dataset)\nclass Example(object):\n x = None # input\n i = None # index in dataset\n\n def __init__(self, x=None, i=None):\n self.x = x\n self.i = i\n\n\ndef compute_mean_embedding(embeddings):\n if embeddings is None:\n raise TypeError('`embeddings` cannot be None')\n return np.mean(embeddings, axis=0)\n\n\nclass BeerReview(Example):\n def __init__(self, x=None, i=None, embeddings=None, pad_char=0):\n super(BeerReview, self).__init__(x=x, i=i)\n self.embeddings = embeddings\n self.annot_idxs = []\n self.rationales = {}\n self.pad_char = pad_char\n self.num_pad = np.count_nonzero(x == pad_char)\n self.original_prediction = None\n self.prediction_rationale_only = None\n self.prediction_nonrationale_only = None\n self.prediction_annotation_only = None\n self.prediction_nonannotation_only = None\n self.threshold = None # \"interesting\" threshold\n self.is_pos = None\n self.threshold_f = None\n\n def get_pad_embedding(self):\n return self.embeddings[self.pad_char]\n\n def set_annotation_idxs(self, annot_idxs):\n self.annot_idxs = annot_idxs\n\n def get_annotation_idxs(self):\n return self.annot_idxs\n\n def has_annotation(self):\n return len(self.get_annotation_idxs()) > 0\n\n def get_embeddings(self):\n return embeddings\n\n def set_embeddings(self, embeddings):\n self.embeddings = embeddings\n\n def set_threshold_f(self, 
threshold_f):\n self.threshold_f = threshold_f\n\n def get_rationales(self, method):\n if method not in self.rationales:\n return []\n return self.rationales[method]\n\n def get_num_tokens(self):\n return self.x.shape[0] - self.num_pad\n\n def add_rationale(self, rationale, method):\n if method not in self.rationales:\n self.rationales[method] = []\n self.rationales[method].append(rationale)\n\n def get_replacement_embedding(self, replacement_embedding='mean'):\n if isinstance(replacement_embedding, str) and \\\n replacement_embedding == 'mean':\n replacement_embedding = compute_mean_embedding(self.embeddings)\n return replacement_embedding\n\n def get_embedded_sequence(self, embeddings=None):\n if embeddings is None:\n embeddings = self.embeddings\n return np.copy(embeddings[self.x])\n\n def get_embedded_sequence_annotations_only(self, replacement_embedding='mean'):\n x_embed = self.get_embedded_sequence()\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n modified_seq = np.repeat(replacement_embedding.reshape(\n (1, replacement_embedding.shape[0])),\n x_embed.shape[0], axis=0)\n for x, y in self.get_annotation_idxs():\n x = x + self.num_pad\n y = y + self.num_pad\n modified_seq[x:y+1,:] = x_embed[x:y+1,:]\n return modified_seq\n\n # TODO: fix `return_none_no_annot` param to be consistent with previous function\n def get_embedded_sequence_nonannotations_only(self, replacement_embedding='mean',\n return_none_no_annot=False):\n annot_idxs = self.get_annotation_idxs()\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n if len(annot_idxs) == 0 and return_none_no_annot:\n return None\n modified_seq = self.get_embedded_sequence()\n for x, y in annot_idxs:\n for j in range(x, y + 1):\n modified_seq[j + self.num_pad] = replacement_embedding\n return modified_seq\n\n def get_all_rationale_idxs(self, rationales):\n rationale_idxs = []\n for rationale in rationales:\n rationale_idxs += list(rationale)\n return np.asarray(rationale_idxs)\n\n def get_nonrationale_idxs(self, rationales):\n num_tokens = self.get_num_tokens()\n rationale_idxs = self.get_all_rationale_idxs(rationales)\n non_rationale_idxs = np.delete(np.arange(num_tokens), rationale_idxs)\n return non_rationale_idxs\n\n def get_embedded_sequence_rationale_only(self, rationales,\n replacement_embedding='mean',\n embeddings=None):\n modified_seq = self.get_embedded_sequence(embeddings=embeddings)\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n non_rationale_idxs = self.get_nonrationale_idxs(rationales)\n non_rationale_idxs_with_offset = self.num_pad + non_rationale_idxs\n modified_seq[non_rationale_idxs_with_offset] = replacement_embedding\n return modified_seq\n\n def get_embedded_sequence_nonrationale_only(self, rationales,\n replacement_embedding='mean'):\n modified_seq = self.get_embedded_sequence()\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n rationale_idxs = self.get_all_rationale_idxs(rationales)\n rationale_idxs_with_offset = rationale_idxs + self.num_pad\n if len(rationale_idxs_with_offset) > 0:\n modified_seq[rationale_idxs_with_offset] = replacement_embedding\n return modified_seq\n\n def frac_rationale_in_annotation(self, rationales):\n annot_idxs = self.get_annotation_idxs()\n count_in_annots = 0\n rationale_length = 0\n for rationale in rationales:\n for el in rationale:\n rationale_length += 1\n 
for x, y in annot_idxs:\n if el >= x and el <= y:\n count_in_annots += 1\n break\n frac = float(count_in_annots) / rationale_length\n return frac\n\n def set_predictions(self, model, rationales, replacement_embedding='mean'):\n self.original_prediction = sis.predict_for_embed_sequence(\n [self.get_embedded_sequence()], model)[0]\n self.prediction_rationale_only = sis.predict_for_embed_sequence(\n [self.get_embedded_sequence_rationale_only(rationales,\n replacement_embedding=replacement_embedding)],\n model)[0]\n self.prediction_nonrationale_only = sis.predict_for_embed_sequence(\n [self.get_embedded_sequence_nonrationale_only(rationales,\n replacement_embedding=replacement_embedding)],\n model)[0]\n self.prediction_annotation_only = sis.predict_for_embed_sequence(\n [self.get_embedded_sequence_annotations_only(\n replacement_embedding=replacement_embedding)], model)[0]\n self.prediction_nonannotation_only = sis.predict_for_embed_sequence(\n [self.get_embedded_sequence_nonannotations_only(\n replacement_embedding=replacement_embedding)], model)[0]\n\n def run_sis_rationales(self, model, replacement_embedding='mean',\n first_only=True, verbose=False):\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n all_rationales = self.get_rationales(SIS_RATIONALE_KEY)\n x_nonrationale = self.get_embedded_sequence_nonrationale_only(\n all_rationales,\n replacement_embedding=replacement_embedding)\n current_nonrationale_pred = sis.predict_for_embed_sequence(\n [x_nonrationale], model)[0]\n if first_only and len(all_rationales) >= 1:\n if verbose:\n print('Already >= 1 rationale and first_only=True, returning.')\n return None\n if verbose:\n print('Starting prediction %.3f' % current_nonrationale_pred)\n while self.threshold_f(current_nonrationale_pred):\n # prediction on non-rationale is still beyond threshold\n if verbose:\n print('Prediction beyond threshold, extracting rationale')\n removed_elts, history = sis.sis_removal(\n self.x,\n model,\n self.embeddings,\n embedded_input=x_nonrationale,\n replacement_embedding=replacement_embedding,\n return_history=True,\n verbose=False)\n rationale_length = sis.find_min_words_needed(history,\n self.threshold_f)\n rationale_elems = removed_elts[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(removed_elts, history))\n self.add_rationale(rationale, SIS_RATIONALE_KEY)\n # mask new rationale in the sequence and re-predict\n all_rationales = self.get_rationales(SIS_RATIONALE_KEY)\n x_nonrationale = self.get_embedded_sequence_nonrationale_only(\n all_rationales,\n replacement_embedding=replacement_embedding)\n current_nonrationale_pred = sis.predict_for_embed_sequence(\n [x_nonrationale], model)[0]\n if verbose:\n print('New predicted score %.3f' % current_nonrationale_pred)\n if first_only:\n if verbose:\n print('Only 1 rationale, first_only=True, breaking.')\n break\n if verbose:\n print('Done building rationales.')\n\n def run_perturbative_baseline_rationale(self, embed_model,\n replacement_embedding='mean',\n verbose=False):\n if len(self.get_rationales(PERTURB_SUFF_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have perturbative baseline rationale,',\n 'returning.')\n return None\n if verbose:\n print('Running perturbative baseline rationale.')\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n x_embed = self.get_embedded_sequence()\n removed_scores = sis.removed_word_predictions(x_embed,\n embed_model,\n 
self.num_pad,\n replacement_embedding)\n sorted_words = removed_scores.argsort()\n if self.is_pos:\n # word with biggest drop in score (lowest final score) at end of\n # sorted list\n sorted_words = sorted_words[::-1]\n score_history = sis.find_score_history_given_order(\n self.x,\n sorted_words,\n self.num_pad,\n embed_model,\n replacement_embedding,\n self.get_pad_embedding(),\n self.embeddings)\n rationale_length = sis.find_min_words_needed(score_history,\n self.threshold_f)\n rationale_elems = sorted_words[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(sorted_words, score_history))\n self.add_rationale(rationale, PERTURB_SUFF_RATIONALE_KEY)\n if verbose:\n print('Done with perturbative baseline.')\n\n def run_integrated_gradients_rationale(self, ig_model, embed_model,\n baseline,\n replacement_embedding='mean',\n verbose=False):\n if len(self.get_rationales(IG_SUFF_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have IG rationale, returning.')\n return None\n if verbose:\n print('Running integrated gradients rationale.')\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n x_embed = self.get_embedded_sequence()\n igs = ig_model.explain(x_embed, reference=baseline)\n igs = np.linalg.norm(igs, ord=1, axis=1) # L1 norm along embeddings\n sorted_words = igs[self.num_pad:].argsort()\n score_history = sis.find_score_history_given_order(\n self.x,\n sorted_words,\n self.num_pad,\n embed_model,\n replacement_embedding,\n self.get_pad_embedding(),\n self.embeddings)\n rationale_length = sis.find_min_words_needed(score_history,\n self.threshold_f)\n rationale_elems = sorted_words[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(sorted_words, score_history))\n self.add_rationale(rationale, IG_SUFF_RATIONALE_KEY)\n if verbose:\n print('Done with integrated gradients.')\n\n def run_lime_rationale(self, text_pipeline, embed_model, index_to_token,\n replacement_embedding='mean', verbose=False):\n if len(self.get_rationales(LIME_SUFF_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have LIME rationale, returning.')\n return None\n if verbose:\n print('Running LIME rationale.')\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n text = self.to_text(index_to_token, str_joiner=' ')\n explainer = lime_helper.make_explainer(verbose=False)\n explanation = lime_helper.explain(text, explainer, text_pipeline)\n sorted_words = lime_helper.extract_word_order(explanation)\n score_history = sis.find_score_history_given_order(\n self.x,\n sorted_words,\n self.num_pad,\n embed_model,\n replacement_embedding,\n self.get_pad_embedding(),\n self.embeddings)\n rationale_length = sis.find_min_words_needed(score_history,\n self.threshold_f)\n rationale_elems = sorted_words[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(sorted_words, score_history))\n self.add_rationale(rationale, LIME_SUFF_RATIONALE_KEY)\n if verbose:\n print('Done with LIME.')\n\n def run_integrated_gradients_fixed_length_rationale(self, ig_model,\n embed_model,\n baseline,\n verbose=False):\n if len(self.get_rationales(IG_FIXED_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have fixed length IG rationale, returning.')\n return None\n if verbose:\n print('Running fixed length IG rationale.')\n x_embed = self.get_embedded_sequence()\n igs = ig_model.explain(x_embed, reference=baseline)\n igs = np.linalg.norm(igs, ord=1, axis=1) # 
L1 norm along embeddings\n sorted_words = igs[self.num_pad:].argsort()\n rationale_length = self.get_fixed_baseline_length()\n rationale_elems = sorted_words[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(sorted_words, None))\n self.add_rationale(rationale, IG_FIXED_RATIONALE_KEY)\n if verbose:\n print('Done with fixed length IG.')\n\n # LIME baseline where length is fixed to same as median SIS length\n def run_lime_fixed_length_rationale(self, text_pipeline, embed_model,\n index_to_token, verbose=False):\n if len(self.get_rationales(LIME_FIXED_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have fixed length LIME rationale, returning.')\n return None\n if verbose:\n print('Running fixed length LIME rationale.')\n text = self.to_text(index_to_token, str_joiner=' ')\n explainer = lime_helper.make_explainer(verbose=False)\n explanation = lime_helper.explain(text, explainer, text_pipeline)\n sorted_words = lime_helper.extract_word_order(explanation)\n rationale_length = self.get_fixed_baseline_length()\n rationale_elems = sorted_words[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(sorted_words, None))\n self.add_rationale(rationale, LIME_FIXED_RATIONALE_KEY)\n if verbose:\n print('Done with LIME.')\n\n # Perturbative baseline where length is fixed to same as median SIS length\n def run_perturb_fixed_length_rationale(self, embed_model,\n replacement_embedding='mean',\n verbose=False):\n if len(self.get_rationales(PERTURB_FIXED_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have fixed length perturbative baseline rationale,',\n 'returning.')\n return None\n if verbose:\n print('Running perturbative baseline rationale.')\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n x_embed = self.get_embedded_sequence()\n removed_scores = sis.removed_word_predictions(x_embed,\n embed_model,\n self.num_pad,\n replacement_embedding)\n sorted_words = removed_scores.argsort()\n if self.is_pos:\n # word with biggest drop in score (lowest final score) at end of\n # sorted list\n sorted_words = sorted_words[::-1]\n rationale_length = self.get_fixed_baseline_length()\n rationale_elems = sorted_words[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(sorted_words, None))\n self.add_rationale(rationale, PERTURB_FIXED_RATIONALE_KEY)\n if verbose:\n print('Done with fixed length perturbative baseline.')\n\n # Returns length to use for fixed-length IG, LIME, and perturbative baselines\n # If only single SIS, returns that length. 
If multiple, returns\n # median SIS length rounded to nearest integer.\n def get_fixed_baseline_length(self):\n sis_rationales = self.get_rationales(SIS_RATIONALE_KEY)\n if len(sis_rationales) == 0:\n raise ValueError('Must first compute SIS rationales.')\n if len(sis_rationales) == 1:\n return len(sis_rationales[0])\n med = np.median([len(r) for r in sis_rationales])\n return int(np.rint(med))\n\n def perturbation(self, model, replacement_embedding='mean',\n diffs_transform_f=lambda preds_orig: \\\n preds_orig[1] - preds_orig[0]):\n perturb_idxs_scores = []\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n x_embed = self.get_embedded_sequence()\n preds = sis.removed_word_predictions(x_embed, model, self.num_pad,\n replacement_embedding)\n original_pred = self.original_prediction if self.original_prediction \\\n is not None else sis.predict_for_embed_sequence(\n [self.get_embedded_sequence()], model)[0]\n diffs = diffs_transform_f((np.array(preds), original_pred))\n return diffs\n\n def perturbation_rationale(self, model, rationales,\n replacement_embedding='mean',\n diffs_transform_f=lambda preds_orig: \\\n preds_orig[1] - preds_orig[0]):\n diffs = self.perturbation(model,\n replacement_embedding=replacement_embedding,\n diffs_transform_f=diffs_transform_f)\n rationale_idxs = self.get_all_rationale_idxs(rationales)\n rationale_diffs = np.take(diffs, rationale_idxs)\n nonrationale_diffs = np.delete(diffs, rationale_idxs)\n assert(diffs.shape[0] == \\\n rationale_diffs.shape[0] + nonrationale_diffs.shape[0])\n return rationale_diffs, nonrationale_diffs\n\n def to_text(self, index_to_token, str_joiner=None):\n non_pad_elems = self.x[self.num_pad:]\n text = [index_to_token[e] for e in non_pad_elems]\n if str_joiner is not None:\n text = str_joiner.join(text)\n return text\n\n def to_html(self, rationale):\n pass\n\n def to_json(self, f, include_embeddings=False):\n json_str = json.dumps(self.__dict__, cls=ExampleJSONEncoder)\n # re-create object to guarantee no modifications to __dict__\n json_dict = json.loads(json_str)\n if not include_embeddings:\n json_dict['embeddings'] = None\n json.dump(json_dict, f)\n\n @staticmethod\n def from_json(f, set_threshold_f=True):\n data = json.load(f)\n review = BeerReview()\n for k, v in data.items():\n if k == 'rationales': # construct Rationale objects\n rationales = {}\n for k_ in v.keys():\n for i in range(len(v[k_])):\n v[k_][i] = Rationale.from_json_str(v[k_][i])\n elif (k == 'embeddings' or k == 'x') and v is not None:\n # cast to np array\n v = np.array(v)\n setattr(review, k, v)\n if set_threshold_f:\n try:\n threshold_f = make_threshold_f(review.threshold, review.is_pos)\n review.set_threshold_f(threshold_f)\n except TypeError:\n print('WARNING: cannot set `threshold_f`, `is_pos` is not True or False')\n return review\n\n def __len__(self):\n return self.get_num_tokens()\n\n\n# Container class for storing BeerReview objects\nclass BeerReviewContainer(object):\n def __init__(self, embeddings, index_to_token, aspect, trained_model_path,\n pad_char):\n self.pos_reviews = []\n self.neg_reviews = []\n self.i_to_review = {}\n self.embeddings = embeddings\n self.index_to_token = index_to_token\n self.aspect = aspect\n self.trained_model_path = trained_model_path\n self.pad_char = pad_char\n\n def add_pos_review(self, review):\n if review.i in self.i_to_review:\n raise KeyError('Review %d already in container' % (review.i))\n self.pos_reviews.append(review)\n self.i_to_review[review.i] = 
review\n\n def add_neg_review(self, review):\n if review.i in self.i_to_review:\n raise KeyError('Review %d already in container' % (review.i))\n self.neg_reviews.append(review)\n self.i_to_review[review.i] = review\n\n def get_review(self, i):\n if i not in self.i_to_review:\n raise KeyError('Review %d not in container' % (i))\n return self.i_to_review[i]\n\n def get_pos_reviews(self):\n return self.pos_reviews\n\n def get_neg_reviews(self):\n return self.neg_reviews\n\n def get_all_reviews(self):\n return self.pos_reviews + self.neg_reviews\n\n def get_index_to_token(self):\n return self.index_to_token()\n\n # Set `embeddings` attribute in all reviews in the container\n def set_embeddings_all(self):\n for review in self.get_all_reviews():\n review.set_embeddings(self.embeddings)\n\n def __len__(self):\n return len(self.i_to_review)\n\n @staticmethod\n def metadata_filename():\n return 'metadata.json'\n\n def dump_data(self, dir_path):\n # Make directories to dirpath path if not exists\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n metadata = {}\n\n metadata['trained_model_path'] = self.trained_model_path\n metadata['index_to_token'] = self.index_to_token\n metadata['pad_char'] = self.pad_char\n metadata['aspect'] = self.aspect\n\n # Dump embeddings to numpy npz file\n embeddings_filename = 'embeddings.txt'\n metadata['embeddings_file'] = embeddings_filename\n embeddings_filepath = os.path.join(dir_path, embeddings_filename)\n np.savetxt(embeddings_filepath, self.embeddings)\n\n # Dump pos and neg reviews to JSON files\n metadata['pos_reviews'] = []\n metadata['neg_reviews'] = []\n\n reviews_dir = 'reviews'\n reviews_dir_path = os.path.join(dir_path, reviews_dir)\n if not os.path.isdir(reviews_dir_path):\n os.makedirs(reviews_dir_path)\n\n for review in self.get_pos_reviews():\n review_file = os.path.join(reviews_dir, '%d.json' % (review.i))\n metadata['pos_reviews'].append(review_file)\n review_path = os.path.join(dir_path, review_file)\n with open(review_path, 'w') as f:\n review.to_json(f)\n\n for review in self.get_neg_reviews():\n review_file = os.path.join(reviews_dir, '%d.json' % (review.i))\n metadata['neg_reviews'].append(review_file)\n review_path = os.path.join(dir_path, review_file)\n with open(review_path, 'w') as f:\n review.to_json(f)\n\n metadata_file = os.path.join(dir_path, self.metadata_filename())\n with open(metadata_file, 'w') as outfile:\n json.dump(metadata, outfile)\n\n @staticmethod\n def load_data(dir_path):\n metadata_file = os.path.join(dir_path,\n BeerReviewContainer.metadata_filename())\n with open(metadata_file, 'r') as infile:\n metadata = json.load(infile)\n\n args = {}\n args['trained_model_path'] = metadata['trained_model_path']\n args['index_to_token'] = metadata['index_to_token']\n args['pad_char'] = metadata['pad_char']\n args['aspect'] = metadata['aspect']\n # Keys in index_to_token should be integers\n args['index_to_token'] = {int(k): v for k, v in \\\n args['index_to_token'].items()}\n\n # Load embeddings\n embeddings_filename = metadata['embeddings_file']\n embeddings_filepath = os.path.join(dir_path, embeddings_filename)\n embeddings = np.loadtxt(embeddings_filepath)\n args['embeddings'] = embeddings\n\n container = BeerReviewContainer(**args)\n\n # Load review objects\n for review_file in metadata['pos_reviews']:\n review_path = os.path.join(dir_path, review_file)\n with open(review_path, 'r') as f:\n try:\n review = BeerReview.from_json(f)\n except:\n continue\n container.add_pos_review(review)\n\n for review_file in 
metadata['neg_reviews']:\n review_path = os.path.join(dir_path, review_file)\n with open(review_path, 'r') as f:\n try:\n review = BeerReview.from_json(f)\n except:\n continue\n container.add_neg_review(review)\n\n container.set_embeddings_all()\n\n return container\n\n\nclass DNASequence(Example):\n JSON_NUMPY_ATTRIBS = ['x', 'replacement']\n\n def __init__(self, x=None, i=None, replacement=None,\n threshold=None, threshold_f=None):\n super(DNASequence, self).__init__(x=x, i=i)\n self.rationales = {}\n self.replacement = replacement\n self.original_prediction = None\n self.prediction_rationale_only = None\n self.prediction_nonrationale_only = None\n self.threshold = threshold\n self.threshold_f = threshold_f\n if threshold is not None and threshold_f is None:\n self.make_threshold_f()\n\n @staticmethod\n def replace_at(seq, vec, i):\n sis.replace_at_tf(seq, vec, i)\n\n def get_rationales(self, method):\n if method not in self.rationales:\n return []\n return self.rationales[method]\n\n def get_x(self, copy=True):\n if copy:\n return np.copy(self.x)\n return self.x\n\n def get_shape(self):\n shape = self.x.shape\n return shape\n\n def get_num_bases(self):\n return self.x.shape[0]\n\n def add_rationale(self, rationale, method):\n if method not in self.rationales:\n self.rationales[method] = []\n self.rationales[method].append(rationale)\n\n def set_replacement(self, replacement):\n self.replacement = replacement\n\n def get_replacement(self):\n if self.replacement is None:\n return TypeError('Must set replacement. Cannot be None.')\n return self.replacement\n\n def get_all_rationale_idxs(self, rationales):\n rationale_idxs = []\n for rationale in rationales:\n rationale_idxs += list(rationale)\n return np.asarray(rationale_idxs)\n\n def get_nonrationale_idxs(self, rationales):\n num_bases = self.get_num_bases()\n rationale_idxs = self.get_all_rationale_idxs(rationales)\n non_rationale_idxs = np.delete(np.arange(num_bases), rationale_idxs)\n return non_rationale_idxs\n\n def get_x_rationale_only(self, rationales, replacement=None):\n if replacement is None:\n replacement = self.get_replacement()\n modified_x = np.array(self.get_x(copy=True), dtype='float32')\n non_rationale_idxs = self.get_nonrationale_idxs(rationales)\n for i in non_rationale_idxs:\n self.replace_at(modified_x, replacement, i)\n return modified_x\n\n def get_x_nonrationale_only(self, rationales, replacement=None):\n if replacement is None:\n replacement = self.get_replacement()\n modified_x = np.array(self.get_x(copy=True), dtype='float32')\n rationale_idxs = self.get_all_rationale_idxs(rationales)\n for i in rationale_idxs:\n self.replace_at(modified_x, replacement, i)\n return modified_x\n\n def set_predictions(self, model, rationales, replacement=None):\n self.original_prediction = sis.predict_for_embed_sequence(\n [self.get_x()], model)[0]\n self.prediction_rationale_only = sis.predict_for_embed_sequence(\n [self.get_x_rationale_only(rationales,\n replacement=replacement)], model)[0]\n self.prediction_nonrationale_only = sis.predict_for_embed_sequence(\n [self.get_x_nonrationale_only(rationales,\n replacement=replacement)], model)[0]\n\n def set_threshold(self, threshold):\n self.threshold = threshold\n\n def set_threshold_f(self, threshold_f):\n self.threshold_f = threshold_f\n\n def make_threshold_f(self):\n if self.threshold is None:\n raise TypeError('Must set threshold attribute.')\n threshold_f = lambda prob: prob >= self.threshold\n self.set_threshold_f(threshold_f)\n\n def run_sis_rationales(self, model, 
replacement=None,\n first_only=True, verbose=False):\n if replacement is None:\n replacement = self.get_replacement()\n all_rationales = self.get_rationales(SIS_RATIONALE_KEY)\n if first_only and len(all_rationales) >= 1:\n if verbose:\n print('Already >= 1 rationale and first_only=True, returning.')\n return None\n x_nonrationale = self.get_x_nonrationale_only(all_rationales,\n replacement=replacement)\n current_nonrationale_pred = sis.predict_for_embed_sequence(\n [x_nonrationale], model)[0]\n if verbose:\n print('Starting prediction %.3f' % current_nonrationale_pred)\n while self.threshold_f(current_nonrationale_pred):\n # prediction on non-rationale is still beyond threshold\n if verbose:\n print('Prediction beyond threshold, extracting rationale')\n removed_elts, history = sis.sis_removal_tf(\n x_nonrationale.copy(),\n model,\n replacement,\n return_history=True,\n verbose=False)\n rationale_length = sis.find_min_words_needed(history,\n self.threshold_f)\n rationale_elems = removed_elts[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(removed_elts, history))\n self.add_rationale(rationale, SIS_RATIONALE_KEY)\n # mask new rationale in the sequence and re-predict\n all_rationales = self.get_rationales(SIS_RATIONALE_KEY)\n x_nonrationale = self.get_x_nonrationale_only(\n all_rationales, replacement=replacement)\n current_nonrationale_pred = sis.predict_for_embed_sequence(\n [x_nonrationale], model)[0]\n if verbose:\n print('New predicted score %.3f' % current_nonrationale_pred)\n if first_only:\n if verbose:\n print('Only 1 rationale, first_only=True, breaking.')\n break\n if verbose:\n print('Done building rationales.')\n\n def run_integrated_gradients_rationale(self, ig_model, model, baseline,\n replacement=None, verbose=False):\n if len(self.get_rationales(IG_SUFF_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have IG rationale, returning.')\n return None\n if verbose:\n print('Running integrated gradients rationale.')\n if replacement is None:\n replacement = self.get_replacement()\n x = self.get_x(copy=True)\n ig_vals = ig_model.explain(x, reference=baseline, num_steps=300)\n # L1 norm along the 4-dim one-hot embeddings axis\n ig_vals = np.linalg.norm(ig_vals, ord=1, axis=1)\n ig_order = np.argsort(ig_vals)\n score_history = sis.find_score_history_given_order_tf(\n x, ig_order, model, baseline)\n rationale_length = sis.find_min_words_needed(score_history,\n self.threshold_f)\n rationale_elems = ig_order[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(ig_order, score_history))\n self.add_rationale(rationale, IG_SUFF_RATIONALE_KEY)\n if verbose:\n print('Done with integrated gradients.')\n\n # In this Top IG baseline, determine the rationale length by first\n # ordering positions by masked L1 norm along each position's embedding.\n # So only using abs(IG) value for the correct base at each position.\n # Compute `target_igs = threshold - prediction at IG baseline`\n # Then using the L1 position ordering, add elements into rationale\n # (largest L1 norm first) until sum of all IGs (non-absolute sum)\n # of rationale elements is >= target_igs.\n # Should use a all-zeros baseline here so the IG vals are also one-hot\n # prior to masking / L1 (absolute value).\n def run_integrated_gradients_top_rationale(self, ig_model, model,\n baseline, replacement=None,\n verbose=False):\n if len(self.get_rationales(IG_TOP_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have Top IG rationale, returning.')\n return None\n if 
verbose:\n print('Running Top IG rationale.')\n if replacement is None:\n replacement = self.get_replacement()\n x = self.get_x(copy=True)\n ig_baseline_prediction = sis.predict_for_embed_sequence([baseline],\n model)[0]\n target_igs_sum = self.threshold - ig_baseline_prediction\n ig_vals = ig_model.explain(x, reference=baseline, num_steps=300)\n ig_masked = np.multiply(ig_vals, x)\n # L1 norm along the 4-dim one-hot embeddings axis\n # (only 1 non-zero element though, after masking)\n ig_masked_abs = np.linalg.norm(ig_masked, ord=1, axis=1)\n ig_sums = np.sum(ig_vals, axis=1)\n ig_abs_sums = list(zip(ig_masked_abs, ig_sums))\n ig_abs_sums_sorted = sorted(ig_abs_sums, key=lambda x: x[0])\n ig_abs_sorted, ig_sums_sorted = zip(*ig_abs_sums_sorted)\n ig_sums_sorted_cumsum = np.cumsum(ig_sums_sorted[::-1])[::-1]\n ig_order = np.argsort(ig_masked_abs)\n rationale_length = sis.find_min_words_needed(ig_sums_sorted_cumsum,\n lambda s: s >= target_igs_sum)\n rationale_elems = ig_order[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(ig_order, ig_sums_sorted_cumsum))\n self.add_rationale(rationale, IG_TOP_RATIONALE_KEY)\n if verbose:\n print('Done with Top IG.')\n\n # Integrated gradients baseline where length is fixed to same as median SIS length\n # Should input a baseline of zeros vectors.\n def run_integrated_gradients_fixed_length_rationale(self, ig_model, model,\n baseline,\n replacement=None,\n verbose=False):\n if len(self.get_rationales(IG_FIXED_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have fixed length IG rationale, returning.')\n return None\n if verbose:\n print('Running fixed length IG rationale.')\n if replacement is None:\n replacement = self.get_replacement()\n x = self.get_x(copy=True)\n ig_vals = ig_model.explain(x, reference=baseline, num_steps=300)\n ig_masked = np.multiply(ig_vals, x)\n # L1 norm along the 4-dim one-hot embeddings axis\n # (only 1 non-zero element though, after masking)\n ig_masked_abs = np.linalg.norm(ig_masked, ord=1, axis=1)\n ig_order = np.argsort(ig_masked_abs)\n rationale_length = self.get_fixed_baseline_length()\n rationale_elems = ig_order[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(ig_order, None))\n self.add_rationale(rationale, IG_FIXED_RATIONALE_KEY)\n if verbose:\n print('Done with fixed length IG.')\n\n # LIME baseline where length is fixed to same as median SIS length\n def run_lime_fixed_length_rationale(self, pipeline, decoder, model,\n verbose=False):\n if len(self.get_rationales(LIME_FIXED_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have fixed length LIME rationale, returning.')\n return None\n if verbose:\n print('Running fixed length LIME rationale.')\n seq_string = ' '.join(decoder(self.get_x()))\n explainer = lime_helper.make_explainer(verbose=False)\n explanation = lime_helper.explain(seq_string, explainer, pipeline,\n num_features=101)\n sorted_words = lime_helper.extract_word_order(explanation)\n rationale_length = self.get_fixed_baseline_length()\n rationale_elems = sorted_words[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(sorted_words, None))\n self.add_rationale(rationale, LIME_FIXED_RATIONALE_KEY)\n if verbose:\n print('Done with LIME.')\n\n # Perturbative baseline where length is fixed to same as median SIS length\n def run_perturb_fixed_length_rationale(self, model, replacement=None,\n verbose=False):\n if len(self.get_rationales(PERTURB_FIXED_RATIONALE_KEY)) >= 1:\n if verbose:\n print('Already have 
fixed length perturbative baseline rationale,',\n 'returning.')\n return None\n if verbose:\n print('Running perturbative baseline rationale.')\n if replacement is None:\n replacement = self.get_replacement()\n x = self.get_x()\n removed_scores = sis.removed_word_predictions_tf(x, model,\n replacement)\n sorted_words = removed_scores.argsort()\n # word with biggest drop in score (lowest final score) at end of\n # sorted list\n sorted_words = sorted_words[::-1]\n rationale_length = self.get_fixed_baseline_length()\n rationale_elems = sorted_words[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(sorted_words, None))\n self.add_rationale(rationale, PERTURB_FIXED_RATIONALE_KEY)\n if verbose:\n print('Done with fixed length perturbative baseline.')\n\n # Returns length to use for fixed-length IG, LIME, and perturbative baselines\n # If only single SIS, returns that length. If multiple, returns\n # median rounded to nearest integer.\n def get_fixed_baseline_length(self):\n sis_rationales = self.get_rationales(SIS_RATIONALE_KEY)\n if len(sis_rationales) == 0:\n raise ValueError('Must first compute SIS rationales.')\n if len(sis_rationales) == 1:\n return len(sis_rationales[0])\n med = np.median([len(r) for r in sis_rationales])\n return int(np.rint(med))\n\n def perturbation(self, model, replacement=None,\n diffs_transform_f=lambda preds_orig: \\\n preds_orig[1] - preds_orig[0]):\n perturb_idxs_scores = []\n if replacement is None:\n replacement = self.get_replacement()\n x = self.get_x(copy=True)\n preds = sis.removed_word_predictions_tf(x, model, replacement)\n original_pred = self.original_prediction if self.original_prediction \\\n is not None else sis.predict_for_embed_sequence(\n [self.get_x()], model)[0]\n diffs = diffs_transform_f((np.array(preds), original_pred))\n return diffs\n\n def perturbation_rationale(self, model, rationales,\n replacement=None,\n diffs_transform_f=lambda preds_orig: \\\n preds_orig[1] - preds_orig[0]):\n diffs = self.perturbation(model,\n replacement=replacement,\n diffs_transform_f=diffs_transform_f)\n rationale_idxs = self.get_all_rationale_idxs(rationales)\n rationale_diffs = np.take(diffs, rationale_idxs)\n nonrationale_diffs = np.delete(diffs, rationale_idxs)\n assert(diffs.shape[0] == \\\n rationale_diffs.shape[0] + nonrationale_diffs.shape[0])\n return rationale_diffs, nonrationale_diffs\n\n def to_html(self, rationale):\n pass\n\n def to_text(self, rationale):\n pass\n\n def to_json(self, f):\n json.dump(self.__dict__, f, cls=ExampleJSONEncoder)\n\n @classmethod\n def from_json(cls, f, set_threshold_f=True):\n data = json.load(f)\n seq = cls()\n for k, v in data.items():\n if k == 'rationales': # construct Rationale objects\n rationales = {}\n for k_ in v.keys():\n for i in range(len(v[k_])):\n v[k_][i] = Rationale.from_json_str(v[k_][i])\n elif (k in cls.JSON_NUMPY_ATTRIBS) and v is not None:\n # cast to np array\n v = np.array(v)\n setattr(seq, k, v)\n if set_threshold_f:\n seq.make_threshold_f()\n return seq\n\n\n# Container class for storing DNASequence objects\nclass DNASequenceContainer(object):\n def __init__(self, threshold=None, trained_model_path=None):\n self.sequences = []\n self.i_to_sequence = {}\n self.threshold = threshold\n self.trained_model_path = trained_model_path\n\n def add_sequence(self, sequence):\n if sequence.i in self.i_to_sequence:\n raise KeyError('Review %d already in container' % (sequence.i))\n self.sequences.append(sequence)\n self.i_to_sequence[sequence.i] = sequence\n\n def 
get_sequence(self, i):\n if i not in self.i_to_sequence:\n raise KeyError('Sequence %d not in container' % (i))\n return self.i_to_sequence[i]\n\n def get_sequences(self):\n return self.sequences\n\n def get_threshold(self):\n return self.threshold\n\n def get_idxs(self):\n return sorted(self.i_to_sequence.keys())\n\n def __len__(self):\n return len(self.sequences)\n\n @staticmethod\n def metadata_filename():\n return 'metadata.json'\n\n def dump_data(self, dir_path):\n # Make directories to dirpath path if not exists\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n metadata = {}\n\n metadata['trained_model_path'] = self.trained_model_path\n metadata['threshold'] = self.threshold\n\n # Dump sequences to JSON files\n metadata['sequences'] = []\n\n sequences_dir = 'sequences'\n sequences_dir_path = os.path.join(dir_path, sequences_dir)\n if not os.path.isdir(sequences_dir_path):\n os.makedirs(sequences_dir_path)\n\n for seq in self.get_sequences():\n sequence_file = os.path.join(sequences_dir, '%d.json' % (seq.i))\n metadata['sequences'].append(sequence_file)\n review_path = os.path.join(dir_path, sequence_file)\n with open(review_path, 'w') as f:\n seq.to_json(f)\n\n metadata_file = os.path.join(dir_path, self.metadata_filename())\n with open(metadata_file, 'w') as outfile:\n json.dump(metadata, outfile)\n\n @staticmethod\n def load_data(dir_path, target_cls=DNASequence):\n metadata_file = os.path.join(dir_path,\n DNASequenceContainer.metadata_filename())\n with open(metadata_file, 'r') as infile:\n metadata = json.load(infile)\n\n args = {}\n args['trained_model_path'] = metadata['trained_model_path']\n args['threshold'] = metadata['threshold']\n\n container = DNASequenceContainer(**args)\n\n # Load DNASequence objects\n for sequence_file in metadata['sequences']:\n sequence_path = os.path.join(dir_path, sequence_file)\n with open(sequence_path, 'r') as f:\n seq = target_cls.from_json(f)\n container.add_sequence(seq)\n\n return container\n\n\nclass Image(Example):\n def __init__(self, x=None, i=None, replacement_pixel=None, class_idx=None,\n threshold=None, threshold_f=None):\n super(Image, self).__init__(x=x, i=i)\n self.rationales = {}\n self.replacement_pixel = replacement_pixel\n self.original_prediction = None\n self.prediction_rationale_only = None\n self.prediction_nonrationale_only = None\n self.class_idx = class_idx\n self.threshold = threshold\n self.threshold_f = threshold_f\n if threshold is not None and threshold_f is None:\n self.make_threshold_f()\n\n def get_rationales(self, method):\n if method not in self.rationales:\n return []\n return self.rationales[method]\n\n def get_x(self, copy=True):\n if copy:\n return np.copy(self.x)\n return self.x\n\n def get_shape(self, channels_last=True):\n shape = self.x.shape\n if channels_last:\n shape = shape[:2]\n return shape\n\n def get_num_pixels(self):\n return np.prod(self.get_shape(channels_last=True))\n\n def i_to_pos(self, i):\n return np.unravel_index(i, self.x.shape)\n\n def pos_to_i(self, pos_tuple):\n return np.ravel_multi_index(pos_tuple, self.x.shape)\n\n def add_rationale(self, rationale, method):\n if method not in self.rationales:\n self.rationales[method] = []\n self.rationales[method].append(rationale)\n\n def set_replacement_pixel(self, replacement_pixel):\n self.replacement_pixel = replacement_pixel\n\n def get_replacement_pixel(self):\n if self.replacement_pixel is None:\n return TypeError('Must set replacement_pixel. 
Cannot be None.')\n return self.replacement_pixel\n\n def get_all_rationale_idxs(self, rationales):\n rationale_idxs = []\n for rationale in rationales:\n rationale_idxs += list(rationale)\n return np.asarray(rationale_idxs)\n\n def get_nonrationale_idxs(self, rationales):\n num_pixels = self.get_num_pixels()\n rationale_idxs = self.get_all_rationale_idxs(rationales)\n non_rationale_idxs = np.delete(np.arange(num_pixels), rationale_idxs)\n return non_rationale_idxs\n\n def get_x_rationale_only(self, rationales, replacement=None):\n if replacement is None:\n replacement = self.get_replacement_pixel()\n modified_x = self.get_x(copy=True)\n non_rationale_idxs = self.get_nonrationale_idxs(rationales)\n for i in non_rationale_idxs:\n pos = self.i_to_pos(i)\n modified_x[pos] = replacement\n return modified_x\n\n def get_x_nonrationale_only(self, rationales, replacement=None):\n if replacement is None:\n replacement = self.get_replacement_pixel()\n modified_x = self.get_x(copy=True)\n rationale_idxs = self.get_all_rationale_idxs(rationales)\n for i in rationale_idxs:\n pos = self.i_to_pos(i)\n modified_x[pos] = replacement\n return modified_x\n\n def set_predictions(self, model, rationales, replacement=None):\n self.original_prediction = sis.predict_for_images(\n [self.get_x()], model)[0]\n self.prediction_rationale_only = sis.predict_for_images(\n [self.get_x_rationale_only(rationales,\n replacement=replacement)], model)[0]\n self.prediction_nonrationale_only = sis.predict_for_images(\n [self.get_x_nonrationale_only(rationales,\n replacement=replacement)], model)[0]\n\n def set_threshold(self, threshold):\n self.threshold = threshold\n\n def set_threshold_f(self, threshold_f):\n self.threshold_f = threshold_f\n\n def make_threshold_f(self):\n if self.threshold is None:\n raise TypeError('Must set threshold attribute.')\n threshold_f = lambda prob: prob >= self.threshold\n self.set_threshold_f(threshold_f)\n\n def run_sis_rationales(self, model, replacement=None,\n first_only=True, verbose=False):\n if replacement is None:\n replacement = self.get_replacement_pixel()\n if self.original_prediction is None:\n self.original_prediction = sis.predict_for_images(\n [self.get_x()], model)[0]\n orig_predicted_class, orig_class_prob = sis.pred_class_and_prob(\n self.original_prediction)\n all_rationales = self.get_rationales(SIS_RATIONALE_KEY)\n if first_only and len(all_rationales) >= 1:\n if verbose:\n print('Already >= 1 rationale and first_only=True, returning.')\n return None\n if verbose:\n print('Starting probability for class %d: %.5f' % \\\n (orig_predicted_class, orig_class_prob))\n x_nonrationale = self.get_x_nonrationale_only(all_rationales,\n replacement=replacement)\n current_nonrationale_pred = sis.predict_for_images(\n [x_nonrationale], model)[0][self.class_idx]\n while self.threshold_f(current_nonrationale_pred):\n # prediction on non-rationale is still beyond threshold\n if verbose:\n print('Prediction beyond threshold, extracting rationale')\n removed_elts, history = sis.sis_removal_img_classif(\n x_nonrationale.copy(),\n self,\n self.class_idx,\n model,\n replacement,\n return_history=True,\n verbose=False)\n rationale_length = sis.find_min_words_needed(history,\n self.threshold_f)\n rationale_elems = removed_elts[-rationale_length:]\n rationale = Rationale(elms=rationale_elems[::-1],\n history=(removed_elts, history))\n self.add_rationale(rationale, SIS_RATIONALE_KEY)\n # mask new rationale in the sequence and re-predict\n all_rationales = self.get_rationales(SIS_RATIONALE_KEY)\n 
x_nonrationale = self.get_x_nonrationale_only(\n all_rationales, replacement=replacement)\n current_nonrationale_pred = sis.predict_for_images(\n [x_nonrationale], model)[0][self.class_idx]\n if verbose:\n print('New probability %.5f' % current_nonrationale_pred)\n if first_only:\n if verbose:\n print('Only 1 rationale, first_only=True, breaking.')\n break\n if verbose:\n print('Done building rationales.')\n\n def perturbation(self, model, replacement_embedding='mean',\n diffs_transform_f=lambda preds_orig: \\\n preds_orig[1] - preds_orig[0]):\n perturb_idxs_scores = []\n replacement_embedding = self.get_replacement_embedding(\n replacement_embedding=replacement_embedding)\n x_embed = self.get_embedded_sequence()\n preds = sis.removed_word_predictions(x_embed, model, self.num_pad,\n replacement_embedding)\n original_pred = self.original_prediction if self.original_prediction \\\n is not None else sis.predict_for_embed_sequence(\n [self.get_embedded_sequence()], model)[0]\n diffs = diffs_transform_f((np.array(preds), original_pred))\n return diffs\n\n def perturbation_rationale(self, model, rationales,\n replacement_embedding='mean',\n diffs_transform_f=lambda preds_orig: \\\n preds_orig[1] - preds_orig[0]):\n diffs = self.perturbation(model,\n replacement_embedding=replacement_embedding,\n diffs_transform_f=diffs_transform_f)\n rationale_idxs = self.get_all_rationale_idxs(rationales)\n rationale_diffs = np.take(diffs, rationale_idxs)\n nonrationale_diffs = np.delete(diffs, rationale_idxs)\n assert(diffs.shape[0] == \\\n rationale_diffs.shape[0] + nonrationale_diffs.shape[0])\n return rationale_diffs, nonrationale_diffs\n\n def to_html(self, rationale):\n pass\n\n def to_json(self, f):\n json.dump(self.__dict__, f, cls=ExampleJSONEncoder)\n\n @staticmethod\n def from_json(f, set_threshold_f=True):\n data = json.load(f)\n image = Image()\n for k, v in data.items():\n if k == 'rationales': # construct Rationale objects\n rationales = {}\n for k_ in v.keys():\n for i in range(len(v[k_])):\n v[k_][i] = Rationale.from_json_str(v[k_][i])\n elif (k == 'x') and v is not None:\n # cast to np array\n v = np.array(v)\n setattr(image, k, v)\n if set_threshold_f:\n image.make_threshold_f()\n return image\n\n\nclass ImageContainer(object):\n def __init__(self):\n pass\n" ]
[ [ "numpy.take", "numpy.multiply", "numpy.asarray", "numpy.arange", "numpy.rint", "numpy.linalg.norm", "numpy.cumsum", "numpy.copy", "numpy.delete", "numpy.mean", "numpy.count_nonzero", "numpy.ravel_multi_index", "numpy.savetxt", "numpy.argsort", "numpy.array", "numpy.unravel_index", "numpy.sum", "numpy.loadtxt" ] ]
google/empirical-calibration
[ "0765bbd82377e8b7c613a021d07adcc101779385" ]
[ "empirical_calibration/data/kang_schafer_test.py" ]
[ "# Copyright 2019 The Empirical Calibration Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\nfrom empirical_calibration.data import kang_schafer as ks\nimport numpy as np\nimport unittest\n\n\nclass KangSchaferTest(googletest.TestCase):\n\n def test_simulation(self):\n np.random.seed(123)\n size = 1000\n simulation = ks.Simulation(size)\n\n self.assertEqual((size, 4), simulation.covariates.shape)\n self.assertEqual((size, 4), simulation.transformed_covariates.shape)\n self.assertLen(simulation.treatment, size)\n self.assertEqual([0.0, 1.0], np.unique(simulation.treatment).tolist())\n self.assertLen(simulation.outcome, size)\n self.assertTrue(all(simulation.outcome < 400))\n self.assertTrue(all(simulation.outcome > 0))\n\n\nif __name__ == '__main__':\n googletest.main()\n" ]
[ [ "numpy.random.seed", "numpy.unique" ] ]
ZXin0305/hri
[ "b91d89158fc2d05ca4d3ea3ba4a7b9f69b0221a2" ]
[ "test.py" ]
[ "import numpy as np\nfrom IPython import embed\nfrom time import time\nimport torch\nimport math\nimport random\n\n# xx = np.zeros(shape=(2,15,4), dtype=np.float)\n\n# xx[0, 2, 2] = 1\n# xx[1, 2, 2] = 0.5\n\n# yy = xx[:,2,2].argsort()\n# xx = xx[yy]\n# embed()\n\n# xx = np.ones(shape=(75,45))\n# xx = xx.tolist()\n# # xx.pop(0,20)\n# del xx[0:20]\n# embed()\n\n# xx = {'0':0,'1':0,'2':0,'3':0,'4':0,'5':0,'6':0,'7':0,'8':0,'9':0,'10':0,'11':0,'12':0,'13':0,'14':0,'15':0,'16':0,\n# '17':0,'18':0,'19':0,'20':0,'21':0,'22':0,'23':0,'24':0,'25':0,'26':0,'27':0,'28':0,'29':0,'30':0,'31':0,}\n\n# st = time()\n# if \"0\" in xx.keys():\n# et = time()\n# print(f\"total {(et - st)}\")\n\n# def change_pose(pred_3d_bodys):\n# \"\"\"[summary]\n\n# Args:\n# pred_3d_bodys ([type]): [description]\n# not original \n\n# Returns:\n# [type]: [description]\n# \"\"\"\n# pose_3d = []\n# for i in range(0,1): # 默认都是1个人\n# for j in range(15):\n# pose_3d.append(pred_3d_bodys[i][j][0]) # x\n# pose_3d.append(pred_3d_bodys[i][j][1]) # y\n# pose_3d.append(pred_3d_bodys[i][j][2]) # z\n# return pose_3d\n# xx = np.eye(3)\n# yy = np.random.rand(1, 15,3)\n# yy =yy.transpose(0,2,1)\n# zz = xx @ yy\n# zz[0,1] += 1\n# zz[0,2] += 1\n# embed()\n\n# a = [1,2,3]\n# b = [1,2,3]\n# c = max(b)\n# print(c)\n\n# a = [[1,2,3],[1,2,3]]\n# a = np.array(a)\n\n# b = [[1,5,3],[0,0,0]]\n# b = np.array(b)\n\n# c = np.array([1,2,3])\n# # print(sum(c))\n# print(c)\n# print(c.argmax(0))\n\n# a = torch.tensor([1,2,3])\n# a = 0\n\n\n# xx = (1 / math.sqrt(2 * math.pi)) * math.exp((-1 / 2) * 0.13)\n# xx = math.exp((-1 / 2) * 0.13)\n# pri\n\n# xx = random.randrange(30,54)\n# print(xx)\n\n\n# xx = np.array([[1,2,3],[1,2,3]])\n# yy = np.delete(xx[:,:],1)\n# embed()\n\n# xx = np.array([[ -91.24533081, -9.77925491, 267.06481934, 1. ],\n# [ -82.04265594, -31.73023224, 271.43804932, 1. ],\n# [ -89.02472687, 40.83181763, 284.30203247, 1. ],\n# [-104.65914917, -12.89662933, 276.2901001 , 1. ],\n# [-109.66155243, 9.82787323, 289.22158813, 1. ],\n# [ -85.39533997, 6.52565002, 293.66082764, 1. ],\n# [ -97.42415619, 39.70267487, 290.0920105 , 1. ],\n# [-100.76938629, 71.71859741, 304.46191406, 1. ],\n# [-110.30347443, 106.38193512, 312.63577271, 1. ],\n# [ -77.77825165, -7.06853485, 257.95681763, 1. ],\n# [ -70.11280823, 17.82071495, 265.47302246, 1. ],\n# [ -69.68502808, 12.14739037, 288.32354736, 1. ],\n# [ -80.57032013, 41.87945175, 278.51196289, 1. ],\n# [ -77.47626495, 75.3965683 , 291.89306641, 1. ],\n# [ -78.98562622, 109.38594055, 302.17233276, 1. ]])\n\n# yy = np.array([[ 195.16946411, 240.30853271, 270.02520752, 2. ,\n# -97.80656433, -8.6984005 , 270.02520752, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 222.62481689, 187.98979187, 273.14260864, 2. ,\n# -85.97328186, -32.63896561, 273.14260864, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 213.34616089, 348.8364563 , 293.92376709, 2. ,\n# -97.53807831, 44.09518433, 293.92376709, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 173.70863342, 233.23698425, 280.52893066, 2. ,\n# -112.50686646, -12.4541378 , 280.52893066, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 176.00830078, 279.80993652, 292.9100647 , 2. ,\n# -116.22911072, 10.05602646, 292.9100647 , 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 216.99163818, 286.08230591, 300.94900513, 2. ,\n# -97.40164948, 13.34723759, 300.94900513, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 197.83638 , 343.52972412, 299.24221802, 2. 
,\n# -107.50037384, 42.39523315, 299.24221802, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 207.59408569, 397.00366211, 315.77627563, 2. ,\n# -108.87758636, 73.62127686, 315.77627563, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 202.59803772, 455.28015137, 322.55981445, 2. ,\n# -115.69550323, 108.71553802, 322.55981445, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 215.68159485, 247.08103943, 260.17868042, 2. ,\n# -84.7667923 , -5.39123869, 260.17868042, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 234.49308777, 303.28533936, 262.46777344, 2. ,\n# -77.00579834, 19.09913254, 262.46777344, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 250.81388855, 287.89248657, 284.92578125, 2. ,\n# -75.51580811, 13.37642765, 284.92578125, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 229.62341309, 354.34918213, 288.60528564, 2. ,\n# -87.57569885, 45.7951622 , 288.60528564, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 252.86502075, 412.93890381, 304.94400024, 2. ,\n# -81.10929108, 78.66136169, 304.94400024, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904],\n# [ 265.61761475, 467.32778931, 317.81060791, 2. ,\n# -78.63576508, 112.31228638, 317.81060791, 1427.33996582,\n# 1423.13000488, 949.61798096, 548.13201904]])\n\n\n# xx = xx[:,:3]\n# yy = yy[:,4:7]\n# error = np.linalg.norm(np.abs(xx - yy), axis=1)\n# embed()\n\nxx = torch.tensor([[1,2,3],\n [2,1,1]])\nyy = torch.tensor([[0,0,0],\n [0,0,0]])\nembed()" ]
[ [ "torch.tensor" ] ]
jiajunhua/tensorflow
[ "0a926cbec33cfb4c24f76d519cd72608c2488458" ]
[ "tensorflow/python/client/session_clusterspec_prop_test.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.python.client.session.Session's ClusterSpec Propagation.\n\nThese tests exercise the ClusterSpec Propagation capabilities of distributed\nSessions.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import cluster_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\n# Import resource_variable_ops for the variables-to-tensor implicit conversion.\nfrom tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import server_lib\n\nops._USE_C_API = True\n\n# NOTE(mrry): Dummy shape registration for ops used in the tests, since they\n# don't have C++ op registrations on which to attach C++ shape fns.\nops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)\n\n\nclass SessionClusterSpecPropagationTest(test_util.TensorFlowTestCase):\n\n def testClusterSpecPropagationSimple(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config)\n output = sess.run(const)\n self.assertEqual(17, output)\n\n def testClusterSpecPropagationWorker2Placement(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device('/job:worker/task:1'):\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config, graph=g)\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n output = sess.run(const, options=run_options, 
run_metadata=run_metadata)\n self.assertEqual(17, output)\n self.assertEqual(1,\n len([\n node_stats\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:worker/replica:0/task:1/device:CPU:0' ==\n dev_stats.device and 'Const' == node_stats.node_name\n ]))\n\n def testClusterSpecPropagationWorker1Placement(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device('/job:worker/task:0'):\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config, graph=g)\n output = sess.run(const)\n self.assertEqual(17, output)\n\n def testCanonicalDeviceNames(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device(\n '/job:worker/task:1/device:CPU:0'):\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config, graph=g)\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n output = sess.run(const, options=run_options, run_metadata=run_metadata)\n self.assertEqual(17, output)\n self.assertEqual(1,\n len([\n node_stats\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:worker/replica:0/task:1/device:CPU:0' ==\n dev_stats.device and 'Const' == node_stats.node_name\n ]))\n\n def testFullDeviceNames(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'renamed_worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device(\n '/job:renamed_worker/replica:0/task:1/device:CPU:0'):\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config, graph=g)\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n output = sess.run(const, options=run_options, run_metadata=run_metadata)\n self.assertEqual(17, output)\n self.assertEqual(1,\n len([\n node_stats\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:renamed_worker/replica:0/task:1/device:CPU:0'\n == dev_stats.device and 'Const' == node_stats.node_name\n ]))\n\n @test_util.disable_c_api # Operation._set_device doesn't work with C API\n def testMultipleLocalDevices(self):\n # Note: CPU->CPU transfers have a fast-path in\n # BaseRemoteRendezvous::SameWorkerRecvDone that means the test doesn't\n # actually capture the motivating bug unless run on a GPU machine.\n #\n # Example error message (before bugfix -- linebreaks added because lint):\n #\n # W0718 17:14:41.521534 190121 
device_mgr.cc:107] Unknown device:\n # /job:worker/replica:0/task:0/device:CPU:0 all devices:\n # /job:local/replica:0/task:0/gpu:0,\n # /job:local/replica:0/task:0/device:GPU:0,\n # /job:local/replica:0/task:0/cpu:1, CPU:0, GPU:0,\n # /job:local/replica:0/task:0/device:CPU:1,\n # /job:local/replica:0/task:0/device:CPU:0, CPU:1,\n # /job:local/replica:0/task:0/cpu:0\n server_config = config_pb2.ConfigProto(device_count={'CPU': 2})\n server1 = server_lib.Server.create_local_server(config=server_config)\n server2 = server_lib.Server.create_local_server(config=server_config)\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g:\n with ops.device('/job:worker/task:1/cpu:1'):\n input1 = constant_op.constant(17, dtypes.float32)\n with ops.device('/job:worker/task:0/cpu:1'):\n input2 = constant_op.constant(3, dtypes.float32)\n with ops.device('/job:worker/task:1/cpu:0'):\n sum1 = input1 + input2\n\n if test.is_gpu_available():\n device_str = '/job:worker/task:0/gpu:0'\n else:\n device_str = '/job:worker/task:0/cpu:1'\n with ops.device(device_str):\n sum2 = input2 + input1\n\n with ops.device('/job:worker/task:0/cpu:0'):\n sum3 = sum1 + sum2\n sess = session.Session(server1.target, config=config, graph=g)\n output = sess.run(sum3)\n self.assertEqual(40, output)\n\n @test_util.disable_c_api # Operation._set_device doesn't work with C API\n def testLegacyDeviceNames(self):\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.Graph().as_default() as g, ops.device('/job:worker/task:1/cpu:0'):\n const = constant_op.constant(17)\n sess = session.Session(server1.target, config=config, graph=g)\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n output = sess.run(const, options=run_options, run_metadata=run_metadata)\n self.assertEqual(17, output)\n self.assertEqual(1,\n len([\n node_stats\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:worker/replica:0/task:1/device:CPU:0' ==\n dev_stats.device and 'Const' == node_stats.node_name\n ]))\n\n def testClusterSpecPropagationThreeServers2Graphs(self):\n \"\"\"Boots 3 servers, creates 2 sessions, ensures appropriate operations.\n\n We create 2 clusterspecs:\n 1. server2 as the master, server1 as a worker\n 2. 
server2 as the master, server3 as a worker\n\n We ensure that variables on the workers are independent.\n \"\"\"\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n server3 = server_lib.Server.create_local_server()\n cluster_def1 = cluster_pb2.ClusterDef()\n job1 = cluster_def1.job.add()\n job1.name = 'worker1'\n job1.tasks[0] = server2.target[len('grpc://'):]\n job1.tasks[1] = server1.target[len('grpc://'):]\n\n cluster_def2 = cluster_pb2.ClusterDef()\n job2 = cluster_def2.job.add()\n job2.name = 'worker2'\n job2.tasks[0] = server2.target[len('grpc://'):]\n job2.tasks[1] = server3.target[len('grpc://'):]\n\n config1 = config_pb2.ConfigProto(cluster_def=cluster_def1)\n config2 = config_pb2.ConfigProto(cluster_def=cluster_def2)\n\n with ops.Graph().as_default() as g1:\n with ops.device('/job:worker1/task:1'):\n var1 = variables.Variable(array_ops.zeros([2]), name='var1')\n update_op1 = state_ops.assign_add(\n var1, array_ops.ones([2]), name='var1_assign_add')\n init1 = variables.global_variables_initializer()\n\n with ops.Graph().as_default() as g2:\n with ops.device('/job:worker2/task:1'):\n var2 = variables.Variable(array_ops.zeros([2]), name='var2')\n update_op2 = state_ops.assign_add(\n var2, array_ops.ones([2]), name='var2_assign_add')\n init2 = variables.global_variables_initializer()\n\n sess1 = session.Session(server2.target, graph=g1, config=config1)\n sess2 = session.Session(server2.target, graph=g2, config=config2)\n\n init1.run(session=sess1)\n init2.run(session=sess2)\n\n expected_zeros = np.zeros([2])\n expected_ones = np.ones([2])\n\n self.assertAllEqual(expected_zeros, sess1.run(var1))\n self.assertAllEqual(expected_zeros, sess2.run(var2))\n\n self.assertAllEqual(expected_ones, sess1.run(update_op1))\n self.assertAllEqual(expected_ones, sess1.run(var1))\n self.assertAllEqual(expected_zeros, sess2.run(var2))\n self.assertAllEqual(expected_ones, sess2.run(update_op2))\n self.assertAllEqual(expected_ones + expected_ones, sess1.run(update_op1))\n self.assertAllEqual(expected_ones, sess2.run(var2))\n self.assertAllEqual(expected_ones + expected_ones, sess1.run(var1))\n\n def testClusterSpecPropagationThreeServers(self):\n \"\"\"Boots 3 servers, creates 2 sessions, ensures appropriate operations.\n\n We create 2 clusterspecs:\n 1. server2 as the master, server1 as a worker\n 2. 
server2 as the master, server3 as a worker\n\n We ensure that variables on the workers are independent.\n \"\"\"\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n server3 = server_lib.Server.create_local_server()\n cluster_def1 = cluster_pb2.ClusterDef()\n job1 = cluster_def1.job.add()\n job1.name = 'worker'\n job1.tasks[0] = server2.target[len('grpc://'):]\n job1.tasks[1] = server1.target[len('grpc://'):]\n\n cluster_def2 = cluster_pb2.ClusterDef()\n job2 = cluster_def2.job.add()\n job2.name = 'worker'\n job2.tasks[0] = server2.target[len('grpc://'):]\n job2.tasks[1] = server3.target[len('grpc://'):]\n\n config1 = config_pb2.ConfigProto(cluster_def=cluster_def1)\n config2 = config_pb2.ConfigProto(cluster_def=cluster_def2)\n\n with ops.device('/job:worker/task:1'):\n var = variables.Variable(array_ops.zeros([2]), name='var')\n feed = array_ops.placeholder(dtypes.float32, shape=(2))\n update_op = var.assign_add(feed)\n\n sess1 = session.Session(server2.target, config=config1)\n sess2 = session.Session(server2.target, config=config2)\n\n variables.global_variables_initializer().run(session=sess1)\n variables.global_variables_initializer().run(session=sess2)\n\n expected_zeros = np.zeros([2])\n expected_ones = np.ones([2])\n\n self.assertAllEqual(expected_zeros, sess1.run(var))\n self.assertAllEqual(expected_zeros, sess2.run(var))\n self.assertAllEqual(expected_ones,\n sess1.run(update_op, feed_dict={feed: expected_ones}))\n self.assertAllEqual(expected_ones, sess1.run(var))\n self.assertAllEqual(expected_zeros, sess2.run(var))\n self.assertAllEqual(expected_ones,\n sess2.run(update_op, feed_dict={feed: expected_ones}))\n self.assertAllEqual(expected_ones + expected_ones,\n sess1.run(update_op, feed_dict={feed: expected_ones}))\n self.assertAllEqual(expected_ones, sess2.run(var))\n self.assertAllEqual(expected_ones + expected_ones, sess1.run(var))\n\n def testClusterSpecPropagationThreeServersOneCluster(self):\n \"\"\"Boots 3 servers, ensures appropriate communication across workers.\n\n Additionally, in this cluster, we ensure the master is not the 0-th worker.\n\n Note: this test only uses one session.\n \"\"\"\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n server3 = server_lib.Server.create_local_server()\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server3.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n job.tasks[2] = server1.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n # Add ops to the devices in non-linear order.\n\n with ops.device('/job:worker/task:1'):\n feed1 = array_ops.placeholder(dtypes.float32, shape=(2))\n const1 = constant_op.constant(2.0)\n mul1 = const1 * feed1\n\n with ops.device('/job:worker/task:2'):\n feed2 = array_ops.placeholder(dtypes.float32, shape=(2))\n const2 = constant_op.constant(2.0)\n mul2 = const2 * feed2\n\n with ops.device('/job:worker/task:0'):\n feed0 = array_ops.placeholder(dtypes.float32, shape=(2))\n const0 = constant_op.constant(2.0)\n mul0 = const0 * feed0\n\n sum_op = mul0 + mul1 + mul2\n\n ones = np.ones([2])\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n # Run!\n with session.Session(server1.target, config=config) as sess:\n output = sess.run(\n sum_op,\n options=run_options,\n run_metadata=run_metadata,\n 
feed_dict={feed1: ones,\n feed2: ones,\n feed0: ones})\n self.assertAllEqual(6 * ones, output)\n\n self.assertEqual(\n 3,\n len([\n dev_stats.device\n for dev_stats in run_metadata.step_stats.dev_stats\n for node_stats in dev_stats.node_stats\n if '/job:worker/replica:0/task:' in dev_stats.device and\n node_stats.node_name.startswith('Const')\n ]), run_metadata)\n\n @test_util.disable_c_api # Partial runs don't work with C API\n def testClusterSpecPropagationPartialRun(self):\n \"\"\"Test successful partial run with ClusterSpec propagation.\"\"\"\n server1 = server_lib.Server.create_local_server()\n server2 = server_lib.Server.create_local_server()\n\n cluster_def = cluster_pb2.ClusterDef()\n job = cluster_def.job.add()\n job.name = 'worker'\n job.tasks[0] = server1.target[len('grpc://'):]\n job.tasks[1] = server2.target[len('grpc://'):]\n config = config_pb2.ConfigProto(cluster_def=cluster_def)\n\n with ops.device('/job:worker/task:0'):\n a = array_ops.placeholder(dtypes.float32, shape=[])\n with ops.device('/job:worker/task:1'):\n b = array_ops.placeholder(dtypes.float32, shape=[])\n c = array_ops.placeholder(dtypes.float32, shape=[])\n r1 = math_ops.add(a, b)\n with ops.device('/job:worker/task:0'):\n r2 = math_ops.multiply(r1, c)\n\n with session.Session(server1.target, config=config) as sess:\n h = sess.partial_run_setup([r1, r2], [a, b, c])\n res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})\n self.assertEqual(3, res)\n res = sess.partial_run(h, r2, feed_dict={c: 3})\n self.assertEqual(9, res)\n\n\nif __name__ == '__main__':\n googletest.main()\n" ]
[ [ "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.math_ops.add", "tensorflow.python.platform.googletest.main", "tensorflow.python.ops.array_ops.ones", "numpy.zeros", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.framework.ops.RegisterShape", "tensorflow.python.training.server_lib.Server.create_local_server", "tensorflow.python.client.session.Session", "tensorflow.python.framework.ops.Graph", "numpy.ones", "tensorflow.core.protobuf.cluster_pb2.ClusterDef", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.framework.constant_op.constant" ] ]
Linyhazel/posenet-pytorch
[ "d517137b34ff43be0115b11635e4b262dd9c2051" ]
[ "webcam_demo.py" ]
[ "import torch\nimport cv2\nimport time\nimport argparse\n\nimport posenet\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=int, default=101)\nparser.add_argument('--cam_id', type=int, default=0)\nparser.add_argument('--cam_width', type=int, default=1280)\nparser.add_argument('--cam_height', type=int, default=720)\nparser.add_argument('--scale_factor', type=float, default=0.7125)\nargs = parser.parse_args()\n\n\ndef main():\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n \n model = posenet.load_model(args.model)\n model = model.to(device)#cuda()\n output_stride = model.output_stride\n\n cap = cv2.VideoCapture(args.cam_id)\n cap.set(3, args.cam_width)\n cap.set(4, args.cam_height)\n\n start = time.time()\n frame_count = 0\n while True:\n input_image, display_image, output_scale = posenet.read_cap(\n cap, scale_factor=args.scale_factor, output_stride=output_stride)\n\n with torch.no_grad():\n input_image = torch.Tensor(input_image).to(device)#cuda()\n\n heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = model(input_image)\n\n pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(\n heatmaps_result.squeeze(0),\n offsets_result.squeeze(0),\n displacement_fwd_result.squeeze(0),\n displacement_bwd_result.squeeze(0),\n output_stride=output_stride,\n max_pose_detections=10,\n min_pose_score=0.15)\n\n keypoint_coords *= output_scale\n\n # TODO this isn't particularly fast, use GL for drawing and display someday...\n overlay_image = posenet.draw_skel_and_kp(\n display_image, pose_scores, keypoint_scores, keypoint_coords,\n min_pose_score=0.15, min_part_score=0.1)\n\n cv2.imshow('posenet', overlay_image)\n frame_count += 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n print('Average FPS: ', frame_count / (time.time() - start))\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "torch.Tensor", "torch.no_grad", "torch.cuda.is_available" ] ]
joembis/Learning_Path
[ "3b16d34db622e7c6c53c830b88534ee882c1854a" ]
[ "Python/random_walks.py" ]
[ "from random import choice\r\nimport matplotlib.pyplot as plt\r\n\r\nclass RandomWalk():\r\n\t\"\"\"A class to generate a random walk\"\"\"\r\n\r\n\tdef __init__(self, num_points=5000):\r\n\t\t\"\"\"initialising the random walk attributes\"\"\"\r\n\t\tself.num_points=num_points\r\n\r\n\t\t\"\"\"all walks start at 0, 0\"\"\"\r\n\t\tself.point_num = [i for i in range(num_points)]\r\n\t\tself.x_values = [0]\r\n\t\tself.y_values = [0]\r\n\r\n\tdef fill_walk(self):\r\n\t\t\"\"\"calculates the steps in the random walk\"\"\"\r\n\r\n\t\t\"\"\"refactored lines that calculate steps\"\"\"\r\n\t\tdef get_step():\r\n\t\t\tdirection = choice([-1, 1])\r\n\t\t\tdistance = choice([0, 1, 2, 3, 4])\r\n\t\t\treturn direction*distance\r\n\r\n\t\t\"\"\"keep stepping until the walk length is reached\"\"\"\r\n\t\twhile len(self.x_values)< self.num_points:\r\n\t\t\t\"\"\"calculate new x, y positions\"\"\"\r\n\t\t\tx_change = self.x_values[-1] + get_step()\r\n\t\t\ty_change = self.y_values[-1] + get_step()\r\n\r\n\t\t\t\"\"\"update list of x, y, positions\"\"\"\r\n\t\t\tself.x_values.append(x_change)\r\n\t\t\tself.y_values.append(y_change)\r\n\r\nrand_w = RandomWalk(num_points=1000)\r\nrand_w.fill_walk()\r\n\r\nplt.scatter(rand_w.x_values, rand_w.y_values, c=rand_w.point_num, edgecolor=\"none\", cmap=plt.cm.Blues, s=10)\r\n\"\"\"emphasise the start and end points\"\"\"\r\nplt.scatter(0, 0, c=\"green\", s=100)\r\nplt.scatter(rand_w.x_values[-1], rand_w.y_values[-1], c=\"red\", s=100)\r\nplt.plot(rand_w.x_values, rand_w.y_values, color = \"black\", linewidth = 0.5)\r\nplt.axes().get_xaxis().set_visible(False)\r\nplt.axes().get_yaxis().set_visible(False)\r\nplt.show()" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.axes", "matplotlib.pyplot.show", "matplotlib.pyplot.scatter" ] ]
cbinyu/mriqc_comparison
[ "b0703390a54c9720ecfaa5c590888c461ce50856" ]
[ "mriqc_comparison/utils.py" ]
[ "\"\"\" Definitions to configure the app \"\"\"\n\nfrom pathlib import Path\n\nDEVICE_SERIAL_NO = '166018' # Default 'device_serial_no'\n\n# Keys that are relevant for IQMs\nRELEVANT_KEYS = [\n 'bids_meta.modality',\n 'spacing_x', 'size_x',\n 'spacing_y', 'size_y',\n 'spacing_z', 'size_z',\n 'bids_meta.RepetitionTime',\n 'bids_meta.InversionTime',\n 'bids_meta.EchoTime',\n 'bids_meta.FlipAngle',\n 'bids_meta.PartialFourier',\n 'bids_meta.PixelBandwidth',\n 'bids_meta.ReceiveCoilName',\n 'bids_meta.ParallelReductionFactorInPlane',\n]\n\n# Keys that are irrelevant, so we won't even save to file:\nDISCARD_KEYS = [\n 'Slices'\n]\n\nMRIQC_SERVER = 'mriqc.nimh.nih.gov'\n\nREPOSITORY_PATH = Path('/BIDS/CBI/mriqc/local_stats')\n#REPOSITORY_PATH = Path.home() / 'data/scratch/mriqc/local_stats'\n\nDEFAULT_T1_PROTOCOL = {\n 'size_x': 240, # no. slices\n 'size_y': 256,\n 'size_z': 256,\n 'spacing_x': 0.9,\n 'spacing_y': 0.9,\n 'spacing_z': 0.9,\n 'bids_meta.InversionTime': 0.9,\n 'bids_meta.EchoTime': 0.00232,\n 'bids_meta.FlipAngle': 8,\n# 'bids_meta.ParallelReductionFactorInPlane': 2,\n# 'bids_meta.PartialFourier': 1,\n 'bids_meta.PixelBandwidth': 200,\n# 'bids_meta.ReceiveCoilName': 'HeadNeck_64',\n 'bids_meta.RepetitionTime': 2.3,\n}\n\nDEFAULT_T2_PROTOCOL = {\n 'size_x': 240, # no. slices\n 'size_y': 256,\n 'size_z': 256,\n 'spacing_x': 0.9,\n 'spacing_y': 0.9,\n 'spacing_z': 0.9,\n 'bids_meta.EchoTime': 0.564,\n# 'bids_meta.PartialFourier': 1,\n 'bids_meta.PixelBandwidth': 750,\n# 'bids_meta.ReceiveCoilName': 'HeadNeck_64',\n 'bids_meta.RepetitionTime': 3.2,\n}\n\nDEFAULT_BOLD_PROTOCOL = {\n 'size_x': 104, # R>L\n 'size_y': 104,\n# 'size_z': 64,\n 'spacing_x': 2,\n 'spacing_y': 2,\n 'spacing_z': 2,\n 'bids_meta.EchoTime': 0.035,\n# 'bids_meta.ParallelReductionFactorInPlane': 2,\n# 'bids_meta.PartialFourier': 1,\n 'bids_meta.PixelBandwidth': 2290,\n# 'bids_meta.ReceiveCoilName': 'HeadNeck_64',\n# 'bids_meta.RepetitionTime': 1.3,\n}\n\n\ndef read_mriqc_json(file):\n \"\"\"\n Reads an MRIQC-generated json\n (https://github.com/poldracklab/mriqc/blob/b7bb3a381212ef5bb380b9531fde87dcb4b85254/mriqc/reports/individual.py#L62)\n Parameters\n ----------\n file : Path or str\n Path of the json file or directory with the iqms\n\n Returns\n -------\n iqms : pd.DataFrame\n IQMs from file\n \"\"\"\n import json\n import pandas as pd\n\n with Path(file).open() as json_file:\n iqms_dict = json.load(json_file)\n\n # Extract and prune metadata and provenance:\n metadata = iqms_dict.pop(\"bids_meta\", None)\n _ = metadata.pop(\"global\", None) # don't want this\n for key, value in metadata.items():\n iqms_dict['bids_meta.' + key] = value\n prov = iqms_dict.pop(\"provenance\", None)\n for key in 'md5sum', 'software', 'version':\n iqms_dict['provenance.' + key] = prov[key]\n\n # dict -> DataFrame, return as a row (\".T\"):\n iqms = pd.DataFrame.from_dict(iqms_dict, orient='index').T\n\n return iqms\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
gustavopdias/global-indicators
[ "adcf1f5638a254f0d6df7234f31d6e87be9ec10e" ]
[ "process/pre_process/11_urban_covariates.py" ]
[ "'''\r\n -- Create layer of additional urban study region covariates\r\n'''\r\n\r\nimport time \r\nimport psycopg2\r\nimport pandas as pd\r\nimport geopandas as gpd\r\n\r\nfrom _project_setup import *\r\nfrom script_running_log import script_running_log\r\n\r\ndef main():\r\n start = time.time()\r\n script = os.path.basename(sys.argv[0])\r\n task = 'Create layer of additional urban study region covariates'\r\n conn = psycopg2.connect(database=db, user=db_user, password=db_pwd)\r\n curs = conn.cursor()\r\n covariate_list = linkage_covariate_list.split(',')\r\n if (len(covariate_list)>0):\r\n if covariate_data.startswith('GHS:'):\r\n covariate_path = urban_region\r\n # load covariate data\r\n covariates = gpd.read_file(urban_region)\r\n # filter and retrieve covariate data for study region\r\n covariates = covariates.query(covariate_data.split(':')[1].replace('=','=='))[covariate_list]\r\n elif (str(covariate_data) not in ['','nan']):\r\n # if this field has been completed, and is not GHS, then assuming it is a csv file\r\n # localted in the city's study region folder, containg records only for this study region, \r\n # and with the covariate list included in the available variables\r\n covariates = pd.read_csv(f'{locale_dir}/{covariate_data}')[covariate_list]\r\n else:\r\n print(\"Study region covariate data input is either null or not recognised, \"\r\n \"and null values will be returned for covariate list\")\r\n covariates = pd.DataFrame(zip(covariate_list,[np.nan]*len(covariate_list))).set_index(0).transpose()\r\n covariates = list(covariates[covariate_list].transpose().to_dict().values())[0]\r\n covariates_sql = ',\\r\\n'+',\\r\\n'.join([f'{covariates[x]} \"{x}\"' if str(covariates[x])!=\"nan\" else f'NULL \"{x}\"' for x in covariates ])\r\n else:\r\n covariates_sql = ''\r\n \r\n sql = f'''\r\n DROP TABLE IF EXISTS urban_covariates;\r\n CREATE TABLE urban_covariates AS\r\n SELECT '{continent}'::text \"Continent\",\r\n '{country}'::text \"Country\",\r\n '{region}'::text \"ISO 3166-1 alpha-2\",\r\n u.study_region \"City\",\r\n u.area_sqkm \"Area (sqkm)\", \r\n u.urban_pop_est \"Population estimate\",\r\n u.pop_per_sqkm \"Population per sqkm\",\r\n i.intersections \"Intersections\",\r\n i.intersections/u.area_sqkm \"Intersections per sqkm\"\r\n {covariates_sql}\r\n FROM urban_study_region_pop u,\r\n (SELECT COUNT(c.geom) intersections\r\n FROM clean_intersections_12m c,\r\n urban_study_region_pop\r\n WHERE ST_Intersects(urban_study_region_pop.geom, c.geom)) i\r\n '''\r\n curs.execute(sql)\r\n conn.commit()\r\n \r\n script_running_log(script, task, start, locale)\r\n conn.close()\r\n \r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "pandas.read_csv" ] ]
MiroK/nEuronMI
[ "227b26598fa2cde5aabec68db898f308fb44aa31" ]
[ "neuronmi/mesh/meshconvert.py" ]
[ "\"\"\" Module for converting various mesh formats.\"\"\"\n\n# Copyright (C) 2006 Anders Logg\n#\n# This file is part of DOLFIN.\n#\n# DOLFIN is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# DOLFIN is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.\n#\n# Modified by Garth N. Wells (gmsh function)\n# Modified by Alexander H. Jarosch (gmsh fix)\n# Modified by Angelo Simone (Gmsh and Medit fix)\n# Modified by Andy R. Terrel (gmsh fix and triangle function)\n# Modified by Magnus Vikstrom (metis and scotch function)\n# Modified by Bartosz Sawicki (diffpack function)\n# Modified by Gideon Simpson (Exodus II function)\n# Modified by Kent-Andre Mardal (Star-CD function)\n# Modified by Nuno Lopes (fix for emc2 mesh format (medit version 0))\n# Modified by Neilen Marais (add gmsh support for reading physical region)\n# Modified by Evan Lezar (add support for reading gmsh physical regions on facets)\n# Modified by Jan Blechta (add triangle support for marker on edges and attributes on triangles)\n#\n# Last changed: 2014-02-06\n\n# NOTE: This module does not depend on (py)dolfin beeing installed.\n# NOTE: If future additions need that please import dolfin in a try: except:\n# NOTE: clause and tell the user to install dolfin if it is not installed.\n\nimport getopt\nimport sys\nimport re\nimport warnings\nimport os.path\n\nfrom dolfin_utils.meshconvert import abaqus\nfrom dolfin_utils.meshconvert import xml_writer\nimport numpy\n\n\ndef format_from_suffix(suffix):\n \"Return format for given suffix\"\n if suffix == \"xml\":\n return \"xml\"\n elif suffix == \"mesh\":\n return \"mesh\"\n elif suffix == \"gmsh\":\n return \"gmsh\"\n elif suffix == \"msh\":\n return \"gmsh\"\n elif suffix == \"gra\":\n return \"metis\"\n elif suffix == \"grf\":\n return \"scotch\"\n elif suffix == \"grid\":\n return \"diffpack\"\n elif suffix == \"inp\":\n return \"abaqus\"\n elif suffix == \"ncdf\":\n return \"NetCDF\"\n elif suffix == \"exo\":\n return \"ExodusII\"\n elif suffix == \"e\":\n return \"ExodusII\"\n elif suffix == \"vrt\" or suffix == \"cel\":\n return \"StarCD\"\n elif suffix == \"ele\" or suffix == \"node\":\n return \"Triangle\"\n else:\n _error(\"Sorry, unknown suffix %s.\" % suffix)\n\n\ndef mesh2xml(ifilename, ofilename):\n \"\"\"Convert between .mesh and .xml, parser implemented as a\n state machine:\n\n 0 = read 'Dimension'\n 1 = read dimension\n 2 = read 'Vertices'\n 3 = read number of vertices\n 4 = read next vertex\n 5 = read 'Triangles' or 'Tetrahedra'\n 6 = read number of cells\n 7 = read next cell\n 8 = done\n\n \"\"\"\n\n print(\"Converting from Medit format (.mesh) to DOLFIN XML format\")\n\n # Open files\n ifile = open(ifilename, \"r\")\n ofile = open(ofilename, \"w\")\n\n # Scan file for cell type\n cell_type = None\n dim = 0\n while 1:\n\n # Read next line\n line = ifile.readline()\n if not line: break\n\n # Remove newline\n if line[-1] == \"\\n\":\n line = line[:-1]\n\n # Read dimension\n if line == \"Dimension\" or line == \" Dimension\":\n line = 
ifile.readline()\n num_dims = int(line)\n if num_dims == 2:\n cell_type = \"triangle\"\n dim = 2\n elif num_dims == 3:\n cell_type = \"tetrahedron\"\n dim = 3\n break\n\n # Check that we got the cell type\n if cell_type == None:\n _error(\"Unable to find cell type.\")\n\n # Step to beginning of file\n ifile.seek(0)\n\n # Write header\n xml_writer.write_header_mesh(ofile, cell_type, dim)\n\n # Current state\n state = 0\n\n # Write data\n num_vertices_read = 0\n num_cells_read = 0\n\n while 1:\n\n # Read next line\n line = ifile.readline()\n if not line: break\n\n # Skip comments\n if line[0] == '#':\n continue\n\n # Remove newline\n if line[-1] == \"\\n\":\n line = line[:-1]\n\n if state == 0:\n if line == \"Dimension\" or line == \" Dimension\":\n state += 1\n elif state == 1:\n num_dims = int(line)\n state += 1\n elif state == 2:\n if line == \"Vertices\" or line == \" Vertices\":\n state += 1\n elif state == 3:\n num_vertices = int(line)\n xml_writer.write_header_vertices(ofile, num_vertices)\n state += 1\n elif state == 4:\n if num_dims == 2:\n (x, y, tmp) = line.split()\n x = float(x)\n y = float(y)\n z = 0.0\n elif num_dims == 3:\n (x, y, z, tmp) = line.split()\n x = float(x)\n y = float(y)\n z = float(z)\n xml_writer.write_vertex(ofile, num_vertices_read, x, y, z)\n num_vertices_read += 1\n if num_vertices == num_vertices_read:\n xml_writer.write_footer_vertices(ofile)\n state += 1\n elif state == 5:\n if (line == \"Triangles\" or line == \" Triangles\") and num_dims == 2:\n state += 1\n if line == \"Tetrahedra\" and num_dims == 3:\n state += 1\n elif state == 6:\n num_cells = int(line)\n xml_writer.write_header_cells(ofile, num_cells)\n state += 1\n elif state == 7:\n if num_dims == 2:\n (n0, n1, n2, tmp) = line.split()\n n0 = int(n0) - 1\n n1 = int(n1) - 1\n n2 = int(n2) - 1\n xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)\n elif num_dims == 3:\n (n0, n1, n2, n3, tmp) = line.split()\n n0 = int(n0) - 1\n n1 = int(n1) - 1\n n2 = int(n2) - 1\n n3 = int(n3) - 1\n xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)\n num_cells_read += 1\n if num_cells == num_cells_read:\n xml_writer.write_footer_cells(ofile)\n state += 1\n elif state == 8:\n break\n\n # Check that we got all data\n if state == 8:\n print(\"Conversion done\")\n else:\n _error(\"Missing data, unable to convert\")\n\n # Write footer\n xml_writer.write_footer_mesh(ofile)\n\n # Close files\n ifile.close()\n ofile.close()\n\n\ndef gmsh2xml(ifilename, handler):\n \"\"\"Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,\n parser implemented as a state machine:\n\n 0 = read 'MeshFormat'\n 1 = read mesh format data\n 2 = read 'EndMeshFormat'\n 3 = read 'Nodes'\n 4 = read number of vertices\n 5 = read vertices\n 6 = read 'EndNodes'\n 7 = read 'Elements'\n 8 = read number of cells\n 9 = read cells\n 10 = done\n\n Afterwards, extract physical region numbers if they are defined in\n the mesh file as a mesh function.\n\n \"\"\"\n\n print(\"Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format\")\n\n # The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension\n gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}\n cell_type_for_dim = {1: \"interval\", 2: \"triangle\", 3: \"tetrahedron\"}\n # the gmsh element types supported for conversion\n supported_gmsh_element_types = [1, 2, 4, 15]\n\n # Open files\n ifile = open(ifilename, \"r\")\n\n # Scan file for cell type\n cell_type = None\n highest_dim = 0\n line = ifile.readline()\n while 
line:\n\n # Remove newline\n if line[-1] == \"\\n\":\n line = line[:-1]\n\n # Read dimension\n if line.find(\"$Elements\") == 0:\n\n line = ifile.readline()\n num_elements = int(line)\n if num_elements == 0:\n _error(\"No elements found in gmsh file.\")\n line = ifile.readline()\n\n # Now iterate through elements to find largest dimension. Gmsh\n # format might include elements of lower dimensions in the element list.\n # We also need to count number of elements of correct dimensions.\n # Also determine which vertices are not used.\n dim_count = {0: 0, 1: 0, 2: 0, 3: 0}\n vertices_used_for_dim = {0: [], 1: [], 2: [], 3: []}\n # Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements\n tags_for_dim = {0: [], 1: [], 2: [], 3: []}\n\n while line.find(\"$EndElements\") == -1:\n element = line.split()\n elem_type = int(element[1])\n num_tags = int(element[2])\n if elem_type in supported_gmsh_element_types:\n dim = gmsh_dim[elem_type]\n if highest_dim < dim:\n highest_dim = dim\n node_num_list = [int(node) for node in element[3 + num_tags:]]\n vertices_used_for_dim[dim].extend(node_num_list)\n if num_tags > 0:\n tags_for_dim[dim].append(tuple(int(tag) for tag in element[3:3 + num_tags]))\n dim_count[dim] += 1\n else:\n # TODO: output a warning here. \"gmsh element type %d not supported\" % elem_type\n pass\n line = ifile.readline()\n else:\n # Read next line\n line = ifile.readline()\n\n # Check that we got the cell type and set num_cells_counted\n if highest_dim == 0:\n _error(\"Unable to find cells of supported type.\")\n\n num_cells_counted = dim_count[highest_dim]\n vertex_set = set(vertices_used_for_dim[highest_dim])\n vertices_used_for_dim[highest_dim] = None\n\n vertex_dict = {}\n for n, v in enumerate(vertex_set):\n vertex_dict[v] = n\n\n # Step to beginning of file\n ifile.seek(0)\n\n # Set mesh type\n handler.set_mesh_type(cell_type_for_dim[highest_dim], highest_dim)\n\n # Initialise node list (gmsh does not export all vertexes in order)\n nodelist = {}\n\n # Current state\n state = 0\n\n # Write data\n num_vertices_read = 0\n num_cells_read = 0\n\n # Only import the dolfin objects if facet markings exist\n process_facets = False\n if any(len(tags_for_dim[dim]) > 0 for dim in (highest_dim - 1, 1)):\n # first construct the mesh\n try:\n from dolfin import MeshEditor, Mesh\n except ImportError:\n _error(\"DOLFIN must be installed to handle Gmsh boundary regions\")\n mesh = Mesh()\n mesh_editor = MeshEditor()\n cell_type = {1: 'interval', 2: 'triangle', 3: 'tetrahedron'}[highest_dim]\n mesh_editor.open(mesh, cell_type, highest_dim, highest_dim)\n process_facets = True\n else:\n # TODO: Output a warning or an error here\n me = None\n\n while state != 10:\n\n # Read next line\n line = ifile.readline()\n if not line: break\n\n # Skip comments\n if line[0] == '#':\n continue\n\n # Remove newline\n if line[-1] == \"\\n\":\n line = line[:-1]\n\n if state == 0:\n if line == \"$MeshFormat\":\n state = 1\n elif state == 1:\n (version, file_type, data_size) = line.split()\n state = 2\n elif state == 2:\n if line == \"$EndMeshFormat\":\n state = 3\n elif state == 3:\n if line == \"$Nodes\":\n state = 4\n elif state == 4:\n num_vertices = len(vertex_dict)\n handler.start_vertices(num_vertices)\n if process_facets:\n mesh_editor.init_vertices_global(num_vertices, num_vertices)\n state = 5\n elif state == 5:\n (node_no, x, y, z) = line.split()\n node_no = int(node_no)\n x, y, z = [float(xx) for xx in (x, y, z)]\n if node_no in vertex_dict:\n 
node_no = vertex_dict[node_no]\n else:\n continue\n nodelist[int(node_no)] = num_vertices_read\n handler.add_vertex(num_vertices_read, [x, y, z])\n if process_facets:\n if highest_dim == 1:\n coords = numpy.array([x])\n elif highest_dim == 2:\n coords = numpy.array([x, y])\n elif highest_dim == 3:\n coords = numpy.array([x, y, z])\n mesh_editor.add_vertex(num_vertices_read, coords)\n\n num_vertices_read += 1\n\n if num_vertices == num_vertices_read:\n handler.end_vertices()\n state = 6\n elif state == 6:\n if line == \"$EndNodes\":\n state = 7\n elif state == 7:\n if line == \"$Elements\":\n state = 8\n elif state == 8:\n handler.start_cells(num_cells_counted)\n if process_facets:\n mesh_editor.init_cells_global(num_cells_counted, num_cells_counted)\n\n state = 9\n elif state == 9:\n element = line.split()\n elem_type = int(element[1])\n num_tags = int(element[2])\n if elem_type in supported_gmsh_element_types:\n dim = gmsh_dim[elem_type]\n else:\n dim = 0\n if dim == highest_dim:\n node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]\n for node in node_num_list:\n if not node in nodelist:\n _error(\"Vertex %d of %s %d not previously defined.\" %\n (node, cell_type_for_dim[dim], num_cells_read))\n cell_nodes = [nodelist[n] for n in node_num_list]\n handler.add_cell(num_cells_read, cell_nodes)\n\n if process_facets:\n cell_nodes = numpy.array([nodelist[n] for n in node_num_list], dtype=numpy.uintp)\n mesh_editor.add_cell(num_cells_read, cell_nodes)\n\n num_cells_read += 1\n\n if num_cells_counted == num_cells_read:\n handler.end_cells()\n if process_facets:\n mesh_editor.close()\n state = 10\n elif state == 10:\n break\n\n # Write mesh function based on the Physical Regions defined by\n # gmsh, but only if they are not all zero. All zero physical\n # regions indicate that no physical regions were defined.\n if highest_dim not in [1, 2, 3]:\n _error(\"Gmsh tags not supported for dimension %i. 
Probably a bug\" % dim)\n\n tags = tags_for_dim[highest_dim]\n physical_regions = tuple(tag[0] for tag in tags)\n if not all(tag == 0 for tag in physical_regions):\n handler.start_meshfunction(\"volume_region\", dim, num_cells_counted)\n for i, physical_region in enumerate(physical_regions):\n handler.add_entity_meshfunction(i, physical_region)\n handler.end_meshfunction()\n\n # Now process the facet markers\n tags = tags_for_dim[highest_dim - 1]\n if (len(tags) > 0) and (mesh is not None):\n physical_regions = tuple(tag[0] for tag in tags)\n if not all(tag == 0 for tag in physical_regions):\n mesh.init(highest_dim - 1, 0)\n\n # Get the facet-node connectivity information (reshape as a row of node indices per facet)\n if highest_dim == 1:\n # for 1d meshes the mesh topology returns the vertex to vertex map, which isn't what we want\n # as facets are vertices\n facets_as_nodes = numpy.array([[i] for i in range(mesh.num_facets())])\n else:\n facets_as_nodes = numpy.array(mesh.topology()(highest_dim - 1, 0)(),\n dtype='uintp').reshape(mesh.num_facets(), highest_dim)\n\n # Build the reverse map\n nodes_as_facets = {}\n for facet in range(mesh.num_facets()):\n nodes_as_facets[tuple(facets_as_nodes[facet, :])] = facet\n\n data = [int(0 * k) for k in range(mesh.num_facets())]\n for i, physical_region in enumerate(physical_regions):\n nodes = [n - 1 for n in\n vertices_used_for_dim[highest_dim - 1][highest_dim * i:(highest_dim * i + highest_dim)]]\n nodes.sort()\n\n if physical_region != 0:\n try:\n index = nodes_as_facets[tuple(nodes)]\n data[index] = physical_region\n except IndexError:\n raise Exception(\"The facet (%d) was not found to mark: %s\" % (i, nodes))\n\n # Create and initialise the mesh function\n handler.start_meshfunction(\"facet_region\", highest_dim - 1, mesh.num_facets())\n for index, physical_region in enumerate(data):\n handler.add_entity_meshfunction(index, physical_region)\n handler.end_meshfunction()\n\n # Edge markers\n if highest_dim == 3:\n tags = tags_for_dim[1]\n if (len(tags) > 0) and (mesh is not None):\n physical_regions = tuple(tag[0] for tag in tags)\n if not all(tag == 0 for tag in physical_regions):\n mesh.init(1, 0)\n\n # Get the edge-node connectivity information (reshape as a row\n # of node indices per edge)\n edges_as_nodes = \\\n mesh.topology()(1, 0)().reshape(mesh.num_edges(), 2)\n\n # Build the reverse map\n nodes_as_edges = {}\n for edge in range(mesh.num_edges()):\n nodes_as_edges[tuple(edges_as_nodes[edge])] = edge\n\n data = numpy.zeros(mesh.num_edges())\n for i, physical_region in enumerate(physical_regions):\n nodes = [n - 1 for n in vertices_used_for_dim[1][2 * i:(2 * i + 2)]]\n nodes.sort()\n\n if physical_region != 0:\n try:\n index = nodes_as_edges[tuple(nodes)]\n data[index] = physical_region\n except IndexError:\n raise Exception(\"The edge (%d) was not found to mark: %s\" % (i, nodes))\n\n # Create and initialise the mesh function\n handler.start_meshfunction(\"curve_region\", 1, mesh.num_edges())\n for index, physical_region in enumerate(data):\n handler.add_entity_meshfunction(index, physical_region)\n handler.end_meshfunction()\n\n # Check that we got all data\n if state == 10:\n print(\"Conversion done\")\n else:\n _error(\"Missing data, unable to convert \\n\\ Did you use version 2.0 of the gmsh file format?\")\n\n # Close files\n ifile.close()\n\n\ndef triangle2xml(ifilename, ofilename):\n \"\"\"Convert between triangle format\n (http://www.cs.cmu.edu/~quake/triangle.html) and .xml. 
The\n given ifilename should be the prefix for the corresponding\n .node, and .ele files.\n \"\"\"\n\n def get_next_line(fp):\n \"\"\"Helper function for skipping comments and blank lines\"\"\"\n line = fp.readline()\n if line == '':\n _error(\"Hit end of file prematurely.\")\n line = line.strip()\n if not (line.startswith('#') or line == ''):\n return line\n return get_next_line(fp)\n\n print(\"Converting from Triangle format {.node, .ele} to DOLFIN XML format\")\n\n # Open files\n for suffix in [\".node\", \".ele\"]:\n if suffix in ifilename and ifilename[-len(suffix):] == suffix:\n ifilename = ifilename.replace(suffix, \"\")\n node_file = open(ifilename + \".node\", \"r\")\n ele_file = open(ifilename + \".ele\", \"r\")\n ofile = open(ofilename, \"w\")\n try:\n edge_file = open(ifilename + \".edge\", \"r\")\n print(\"Found .edge file\")\n except IOError:\n edge_file = None\n\n # Read all the nodes\n nodes = {}\n num_nodes, dim, attr, bound = map(int, get_next_line(node_file).split())\n while len(nodes) < num_nodes:\n node, x, y = get_next_line(node_file).split()[:3]\n nodes[int(node)] = (float(x), float(y))\n\n # Read all the triangles\n tris = {}\n tri_attrs = {}\n num_tris, n_per_tri, attrs = map(int, get_next_line(ele_file).split())\n while len(tris) < num_tris:\n line = get_next_line(ele_file).split()\n tri, n1, n2, n3 = map(int, line[:4])\n # vertices are ordered according to current UFC ordering scheme -\n # - may change in future!\n tris[tri] = tuple(sorted((n1, n2, n3)))\n tri_attrs[tri] = tuple(map(float, line[4:4 + attrs]))\n\n # Read all the boundary markers from edges\n edge_markers_global = {}\n edge_markers_local = []\n got_negative_edge_markers = False\n if edge_file is not None:\n num_edges, num_edge_markers = map(int, get_next_line(edge_file).split())\n if num_edge_markers == 1:\n while len(edge_markers_global) < num_edges:\n edge, v1, v2, marker = map(int, get_next_line(edge_file).split())\n if marker < 0: got_negative_edge_markers = True\n edge_markers_global[tuple(sorted((v1, v2)))] = marker\n if got_negative_edge_markers:\n print(\"Some edge markers are negative! dolfin will increase \" \\\n \"them by probably 2**32 when loading xml. \" \\\n \"Consider using non-negative edge markers only.\")\n for tri, vertices in tris.iteritems():\n v0, v1, v2 = sorted((vertices[0:3]))\n try:\n edge_markers_local.append((tri, 0, \\\n edge_markers_global[(v1, v2)]))\n edge_markers_local.append((tri, 1, \\\n edge_markers_global[(v0, v2)]))\n edge_markers_local.append((tri, 2, \\\n edge_markers_global[(v0, v1)]))\n except IndexError:\n raise Exception(\"meshconvert.py: The facet was not found.\")\n elif num_edge_markers == 0:\n print(\"...but no markers in it. Ignoring it\")\n else:\n print(\"...but %d markers specified in it. 
It won't be processed.\" \\\n % num_edge_markers)\n\n # Write everything out\n xml_writer.write_header_mesh(ofile, \"triangle\", 2)\n xml_writer.write_header_vertices(ofile, num_nodes)\n node_off = 0 if nodes.has_key(0) else -1\n for node, node_t in nodes.iteritems():\n xml_writer.write_vertex(ofile, node + node_off, node_t[0], node_t[1], 0.0)\n xml_writer.write_footer_vertices(ofile)\n xml_writer.write_header_cells(ofile, num_tris)\n tri_off = 0 if tris.has_key(0) else -1\n for tri, tri_t in tris.iteritems():\n xml_writer.write_cell_triangle(ofile, tri + tri_off, tri_t[0] + node_off,\n tri_t[1] + node_off, tri_t[2] + node_off)\n xml_writer.write_footer_cells(ofile)\n if len(edge_markers_local) > 0:\n xml_writer.write_header_domains(ofile)\n xml_writer.write_header_meshvaluecollection(ofile, \\\n \"edge markers\", 1, len(edge_markers_local), \"uint\")\n for tri, local_edge, marker in edge_markers_local:\n xml_writer.write_entity_meshvaluecollection(ofile, \\\n 1, tri + tri_off, marker, local_edge)\n xml_writer.write_footer_meshvaluecollection(ofile)\n xml_writer.write_footer_domains(ofile)\n xml_writer.write_footer_mesh(ofile)\n for i in range(attrs):\n afilename = ofilename.replace(\".xml\", \".attr\" + str(i) + \".xml\")\n afile = open(afilename, \"w\")\n xml_writer.write_header_meshfunction2(afile)\n xml_writer.write_header_meshvaluecollection(afile, \\\n \"triangle attribs \" + str(i), 2, num_tris, \"double\")\n for tri, tri_a in tri_attrs.iteritems():\n xml_writer.write_entity_meshvaluecollection(afile, \\\n 2, tri + tri_off, tri_a[i], 0)\n xml_writer.write_footer_meshvaluecollection(afile)\n xml_writer.write_footer_meshfunction(afile)\n print(\"triangle attributes from .ele file written to \" + afilename)\n afile.close()\n\n # Close files\n node_file.close()\n ele_file.close()\n if edge_file is not None:\n edge_file.close()\n ofile.close()\n\n\ndef xml_old2xml(ifilename, ofilename):\n \"Convert from old DOLFIN XML format to new.\"\n\n print(\"Converting from old (pre DOLFIN 0.6.2) to new DOLFIN XML format...\")\n\n # Open files\n ifile = open(ifilename, \"r\")\n ofile = open(ofilename, \"w\")\n\n # Scan file for cell type (assuming there is just one)\n cell_type = None\n dim = 0\n while 1:\n\n # Read next line\n line = ifile.readline()\n if not line: break\n\n # Read dimension\n if \"<triangle\" in line:\n cell_type = \"triangle\"\n dim = 2\n break\n elif \"<tetrahedron\" in line:\n cell_type = \"tetrahedron\"\n dim = 3\n break\n\n # Step to beginning of file\n ifile.seek(0)\n\n # Read lines and make changes\n while 1:\n\n # Read next line\n line = ifile.readline()\n if not line: break\n\n # Modify line\n if \"xmlns\" in line:\n line = \"<dolfin xmlns:dolfin=\\\"http://fenicsproject.org\\\">\\n\"\n if \"<mesh>\" in line:\n line = \" <mesh celltype=\\\"%s\\\" dim=\\\"%d\\\">\\n\" % (cell_type, dim)\n if dim == 2 and \" z=\\\"0.0\\\"\" in line:\n line = line.replace(\" z=\\\"0.0\\\"\", \"\")\n if \" name=\" in line:\n line = line.replace(\" name=\", \" index=\")\n if \" name =\" in line:\n line = line.replace(\" name =\", \" index=\")\n if \"n0\" in line:\n line = line.replace(\"n0\", \"v0\")\n if \"n1\" in line:\n line = line.replace(\"n1\", \"v1\")\n if \"n2\" in line:\n line = line.replace(\"n2\", \"v2\")\n if \"n3\" in line:\n line = line.replace(\"n3\", \"v3\")\n\n # Write line\n ofile.write(line)\n\n # Close files\n ifile.close();\n ofile.close();\n print(\"Conversion done\")\n\n\ndef metis_graph2graph_xml(ifilename, ofilename):\n \"Convert from Metis graph format to DOLFIN Graph 
XML.\"\n\n print(\"Converting from Metis graph format to DOLFIN Graph XML.\")\n\n # Open files\n ifile = open(ifilename, \"r\")\n ofile = open(ofilename, \"w\")\n\n # Read number of vertices and edges\n line = ifile.readline()\n if not line:\n _error(\"Empty file\")\n\n (num_vertices, num_edges) = line.split()\n\n xml_writer.write_header_graph(ofile, \"directed\")\n xml_writer.write_header_vertices(ofile, int(num_vertices))\n\n for i in range(int(num_vertices)):\n line = ifile.readline()\n edges = line.split()\n xml_writer.write_graph_vertex(ofile, i, len(edges))\n\n xml_writer.write_footer_vertices(ofile)\n xml_writer.write_header_edges(ofile, 2 * int(num_edges))\n\n # Step to beginning of file and skip header info\n ifile.seek(0)\n ifile.readline()\n for i in range(int(num_vertices)):\n print(\"vertex %g\", i)\n line = ifile.readline()\n edges = line.split()\n for e in edges:\n xml_writer.write_graph_edge(ofile, i, int(e))\n\n xml_writer.write_footer_edges(ofile)\n xml_writer.write_footer_graph(ofile)\n\n # Close files\n ifile.close();\n ofile.close();\n\n\ndef scotch_graph2graph_xml(ifilename, ofilename):\n \"Convert from Scotch graph format to DOLFIN Graph XML.\"\n\n print(\"Converting from Scotch graph format to DOLFIN Graph XML.\")\n\n # Open files\n ifile = open(ifilename, \"r\")\n ofile = open(ofilename, \"w\")\n\n # Skip graph file version number\n ifile.readline()\n\n # Read number of vertices and edges\n line = ifile.readline()\n if not line:\n _error(\"Empty file\")\n\n (num_vertices, num_edges) = line.split()\n\n # Read start index and numeric flag\n # Start index is 0 or 1 (C/Fortran)\n # Numeric flag is 3 bits where bit 1 enables vertex labels\n # bit 2 enables edge weights and bit 3 enables vertex weights\n\n line = ifile.readline()\n (start_index, numeric_flag) = line.split()\n\n # Handling not implented\n if not numeric_flag == \"000\":\n _error(\"Handling of scotch vertex labels, edge- and vertex weights not implemented\")\n\n xml_writer.write_header_graph(ofile, \"undirected\")\n xml_writer.write_header_vertices(ofile, int(num_vertices))\n\n # Read vertices and edges, first number gives number of edges from this vertex (not used)\n for i in range(int(num_vertices)):\n line = ifile.readline()\n edges = line.split()\n xml_writer.write_graph_vertex(ofile, i, len(edges) - 1)\n\n xml_writer.write_footer_vertices(ofile)\n xml_writer.write_header_edges(ofile, int(num_edges))\n\n # Step to beginning of file and skip header info\n ifile.seek(0)\n ifile.readline()\n ifile.readline()\n ifile.readline()\n for i in range(int(num_vertices)):\n line = ifile.readline()\n\n edges = line.split()\n for j in range(1, len(edges)):\n xml_writer.write_graph_edge(ofile, i, int(edges[j]))\n\n xml_writer.write_footer_edges(ofile)\n xml_writer.write_footer_graph(ofile)\n\n # Close files\n ifile.close();\n ofile.close();\n\n\ndef diffpack2xml(ifilename, ofilename):\n \"Convert from Diffpack tetrahedral/triangle grid format to DOLFIN XML.\"\n\n print(diffpack2xml.__doc__)\n\n # Format strings for MeshFunction XML files\n meshfunction_header = \"\"\"\\\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n\n<dolfin xmlns:dolfin=\"http://www.fenics.org/dolfin/\">\n <mesh_function type=\"uint\" dim=\"%d\" size=\"%d\">\\n\"\"\"\n meshfunction_entity = \" <entity index=\\\"%d\\\" value=\\\"%d\\\"/>\\n\"\n meshfunction_footer = \" </mesh_function>\\n</dolfin>\"\n\n # Open files\n ifile = open(ifilename, \"r\")\n ofile = open(ofilename, \"w\")\n\n # Read and analyze header\n while 1:\n line = ifile.readline()\n 
if not line:\n _error(\"Empty file\")\n if line[0] == \"#\":\n break\n if re.search(r\"Number of elements\", line):\n num_cells = int(re.match(r\".*\\s(\\d+).*\", line).group(1))\n if re.search(r\"Number of nodes\", line):\n num_vertices = int(re.match(r\".*\\s(\\d+).*\", line).group(1))\n if re.search(r\"Number of space dim.\", line):\n num_dims = int(re.match(r\".*\\s(\\d+).*\", line).group(1))\n\n if num_dims == 3:\n xml_writer.write_header_mesh(ofile, \"tetrahedron\", 3)\n elem_type = \"ElmT4n3D\"\n write_cell_func = xml_writer.write_cell_tetrahedron\n else:\n xml_writer.write_header_mesh(ofile, \"triangle\", 2)\n elem_type = \"ElmT3n2D\"\n write_cell_func = xml_writer.write_cell_triangle\n\n xml_writer.write_header_vertices(ofile, num_vertices)\n\n # Read & write vertices and collect markers for vertices\n vertex_markers = []\n unique_vertex_markers = set()\n for i in range(num_vertices):\n line = ifile.readline()\n m = re.match(r\"^.*\\(\\s*(.*)\\s*\\).*\\](.*)$\", line)\n x = map(float, re.split(\"[\\s,]+\", m.group(1)))\n xml_writer.write_vertex(ofile, i, *x)\n markers = map(int, m.group(2).split())\n vertex_markers.append(markers)\n unique_vertex_markers.update(markers)\n\n xml_writer.write_footer_vertices(ofile)\n xml_writer.write_header_cells(ofile, num_cells)\n\n # Output unique vertex markers as individual VertexFunctions\n unique_vertex_markers.difference_update([0])\n for unique_marker in unique_vertex_markers:\n ofile_marker = open(ofilename.replace(\".xml\", \"\") + \\\n \"_marker_\" + str(unique_marker) + \".xml\", \"w\")\n xml_writer.write_header_meshfunction(ofile_marker, 0, num_vertices)\n for ind, markers in enumerate(vertex_markers):\n if unique_marker in markers:\n xml_writer.write_entity_meshfunction(ofile_marker, ind, unique_marker)\n else:\n xml_writer.write_entity_meshfunction(ofile_marker, ind, 0)\n xml_writer.write_footer_meshfunction(ofile_marker)\n\n # Ignore comment lines\n while 1:\n line = ifile.readline()\n if not line:\n _error(\"Empty file\")\n if line[0] == \"#\":\n break\n\n # Read & write cells and collect cell and face markers\n cell_markers = []\n facet_markers = []\n facet_to_vert = [[1, 2, 3], [0, 2, 3], [0, 1, 3], [0, 1, 2]]\n vert_to_facet = facet_to_vert # The same!\n\n cell_ind = 0\n while cell_ind < num_cells:\n line = ifile.readline()\n v = line.split()\n if not v:\n continue\n\n if v[1] != elem_type:\n _error(\"Only tetrahedral (ElmT4n3D) and triangular (ElmT3n2D) elements are implemented.\")\n\n # Store Cell markers\n cell_markers.append(int(v[2]))\n\n # Sort vertex indices\n cell_indices = sorted(map(lambda x: int(x) - 1, v[3:]))\n write_cell_func(ofile, cell_ind, *cell_indices)\n\n if num_dims == 2:\n cell_ind += 1\n continue\n\n # Check Facet info\n process_facet = set(range(4))\n for local_vert_ind, global_vert_ind in enumerate(cell_indices):\n\n # If no marker is included for vertex skip corresponding facet\n if not vertex_markers[global_vert_ind]:\n process_facet.difference_update(facet_to_vert[local_vert_ind])\n\n # Process facets\n for local_facet in process_facet:\n\n # Start with markers from first vertex\n global_first_vertex = cell_indices[facet_to_vert[local_facet][0]]\n marker_intersection = set(vertex_markers[global_first_vertex])\n\n # Process the other vertices\n for local_vert in facet_to_vert[local_facet][1:]:\n marker_intersection.intersection_update( \\\n vertex_markers[cell_indices[local_vert]])\n\n if not marker_intersection:\n break\n\n # If not break we have a marker on local_facet\n else:\n assert 
(len(marker_intersection) == 1)\n facet_markers.append((cell_ind, local_facet, \\\n marker_intersection.pop()))\n # Bump cell_ind\n cell_ind += 1\n\n xml_writer.write_footer_cells(ofile)\n xml_writer.write_header_domains(ofile)\n\n # Write facet markers if any\n if facet_markers:\n xml_writer.write_header_meshvaluecollection(ofile, \"m\", 2, \\\n len(facet_markers), \"uint\")\n for cell, local_facet, marker in facet_markers:\n xml_writer.write_entity_meshvaluecollection(ofile, 2, cell, \\\n marker, local_facet)\n xml_writer.write_footer_meshvaluecollection(ofile)\n\n xml_writer.write_header_meshvaluecollection(ofile, \"m\", num_dims, \\\n len(cell_markers), \"uint\")\n for cell, marker in enumerate(cell_markers):\n xml_writer.write_entity_meshvaluecollection(ofile, num_dims, cell, \\\n marker)\n xml_writer.write_footer_meshvaluecollection(ofile)\n xml_writer.write_footer_domains(ofile)\n xml_writer.write_footer_mesh(ofile)\n\n # Close files\n ifile.close()\n ofile.close()\n\n\nclass ParseError(Exception):\n \"\"\" Error encountered in source file.\n \"\"\"\n\n\nclass DataHandler(object):\n \"\"\" Baseclass for handlers of mesh data.\n\n The actual handling of mesh data encountered in the source file is\n delegated to a polymorfic object. Typically, the delegate will write the\n data to XML.\n @ivar _state: the state which the handler is in, one of State_*.\n @ivar _cell_type: cell type in mesh. One of CellType_*.\n @ivar _dim: mesh dimensions.\n \"\"\"\n State_Invalid, State_Init, State_Vertices, State_Cells, \\\n State_MeshFunction, State_MeshValueCollection = range(6)\n CellType_Tetrahedron, CellType_Triangle, CellType_Interval = range(3)\n\n def __init__(self):\n self._state = self.State_Invalid\n\n def set_mesh_type(self, cell_type, dim):\n assert self._state == self.State_Invalid\n self._state = self.State_Init\n if cell_type == \"tetrahedron\":\n self._cell_type = self.CellType_Tetrahedron\n elif cell_type == \"triangle\":\n self._cell_type = self.CellType_Triangle\n elif cell_type == \"interval\":\n self._cell_type = self.CellType_Interval\n self._dim = dim\n\n def start_vertices(self, num_vertices):\n assert self._state == self.State_Init\n self._state = self.State_Vertices\n\n def add_vertex(self, vertex, coords):\n assert self._state == self.State_Vertices\n\n def end_vertices(self):\n assert self._state == self.State_Vertices\n self._state = self.State_Init\n\n def start_cells(self, num_cells):\n assert self._state == self.State_Init\n self._state = self.State_Cells\n\n def add_cell(self, cell, nodes):\n assert self._state == self.State_Cells\n\n def end_cells(self):\n assert self._state == self.State_Cells\n self._state = self.State_Init\n\n def start_domains(self):\n assert self._state == self.State_Init\n\n def end_domains(self):\n self._state = self.State_Init\n\n def start_meshfunction(self, name, dim, size):\n assert self._state == self.State_Init\n self._state = self.State_MeshFunction\n\n def add_entity_meshfunction(self, index, value):\n assert self._state == self.State_MeshFunction\n\n def end_meshfunction(self):\n assert self._state == self.State_MeshFunction\n self._state = self.State_Init\n\n def start_mesh_value_collection(self, name, dim, size, etype):\n assert self._state == self.State_Init\n self._state = self.State_MeshValueCollection\n\n def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):\n assert self._state == self.State_MeshValueCollection\n\n def end_mesh_value_collection(self):\n assert self._state == 
self.State_MeshValueCollection\n self._state = self.State_Init\n\n def warn(self, msg):\n \"\"\" Issue warning during parse.\n \"\"\"\n warnings.warn(msg)\n\n def error(self, msg):\n \"\"\" Raise error during parse.\n\n This method is expected to raise ParseError.\n \"\"\"\n raise ParseError(msg)\n\n def close(self):\n self._state = self.State_Invalid\n\n\nclass XmlHandler(DataHandler):\n \"\"\" Data handler class which writes to Dolfin XML.\n \"\"\"\n\n def __init__(self, ofilename):\n DataHandler.__init__(self)\n self._ofilename = ofilename\n self.__ofile = open(ofilename, \"w\")\n self.__ofile_meshfunc = None\n\n def ofile(self):\n return self.__ofile\n\n def set_mesh_type(self, cell_type, dim):\n DataHandler.set_mesh_type(self, cell_type, dim)\n xml_writer.write_header_mesh(self.__ofile, cell_type, dim)\n\n def start_vertices(self, num_vertices):\n DataHandler.start_vertices(self, num_vertices)\n xml_writer.write_header_vertices(self.__ofile, num_vertices)\n\n def add_vertex(self, vertex, coords):\n DataHandler.add_vertex(self, vertex, coords)\n xml_writer.write_vertex(self.__ofile, vertex, *coords)\n\n def end_vertices(self):\n DataHandler.end_vertices(self)\n xml_writer.write_footer_vertices(self.__ofile)\n\n def start_cells(self, num_cells):\n DataHandler.start_cells(self, num_cells)\n xml_writer.write_header_cells(self.__ofile, num_cells)\n\n def add_cell(self, cell, nodes):\n DataHandler.add_cell(self, cell, nodes)\n if self._cell_type == self.CellType_Tetrahedron:\n func = xml_writer.write_cell_tetrahedron\n elif self._cell_type == self.CellType_Triangle:\n func = xml_writer.write_cell_triangle\n elif self._cell_type == self.CellType_Interval:\n func = xml_writer.write_cell_interval\n\n func(self.__ofile, cell, *nodes)\n\n def end_cells(self):\n DataHandler.end_cells(self)\n xml_writer.write_footer_cells(self.__ofile)\n\n def start_meshfunction(self, name, dim, size):\n DataHandler.start_meshfunction(self, name, dim, size)\n fname = os.path.splitext(self.__ofile.name)[0]\n self.__ofile_meshfunc = open(\"%s_%s.xml\" % (fname, name), \"w\")\n xml_writer.write_header_meshfunction(self.__ofile_meshfunc, dim, size)\n\n def add_entity_meshfunction(self, index, value):\n DataHandler.add_entity_meshfunction(self, index, value)\n xml_writer.write_entity_meshfunction(self.__ofile_meshfunc, index, value)\n\n def end_meshfunction(self):\n DataHandler.end_meshfunction(self)\n xml_writer.write_footer_meshfunction(self.__ofile_meshfunc)\n self.__ofile_meshfunc.close()\n self.__ofile_meshfunc = None\n\n def start_domains(self):\n # DataHandler.start_domains(self)\n xml_writer.write_header_domains(self.__ofile)\n\n def end_domains(self):\n # DataHandler.end_domains(self)\n xml_writer.write_footer_domains(self.__ofile)\n\n def start_mesh_value_collection(self, name, dim, size, etype):\n DataHandler.start_mesh_value_collection(self, name, dim, size, etype)\n xml_writer.write_header_meshvaluecollection(self.__ofile, name, dim, size, etype)\n\n def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):\n DataHandler.add_entity_mesh_value_collection(self, dim, index, value)\n xml_writer.write_entity_meshvaluecollection(self.__ofile, dim, index, value, local_entity=local_entity)\n\n def end_mesh_value_collection(self):\n DataHandler.end_mesh_value_collection(self)\n xml_writer.write_footer_meshvaluecollection(self.__ofile)\n\n def close(self):\n DataHandler.close(self)\n if self.__ofile.closed:\n return\n xml_writer.write_footer_mesh(self.__ofile)\n self.__ofile.close()\n if 
self.__ofile_meshfunc is not None:\n self.__ofile_meshfunc.close()\n\n\ndef netcdf2xml(ifilename, ofilename):\n \"Convert from NetCDF format to DOLFIN XML.\"\n\n print(\"Converting from NetCDF format (.ncdf) to DOLFIN XML format\")\n\n # Open files\n ifile = open(ifilename, \"r\")\n ofile = open(ofilename, \"w\")\n\n cell_type = None\n dim = 0\n\n # Scan file for dimension, number of nodes, number of elements\n while 1:\n line = ifile.readline()\n if not line:\n _error(\"Empty file\")\n if re.search(r\"num_dim.*=\", line):\n dim = int(re.match(\".*\\s=\\s(\\d+)\\s;\", line).group(1))\n if re.search(r\"num_nodes.*=\", line):\n num_vertices = int(re.match(\".*\\s=\\s(\\d+)\\s;\", line).group(1))\n if re.search(r\"num_elem.*=\", line):\n num_cells = int(re.match(\".*\\s=\\s(\\d+)\\s;\", line).group(1))\n if re.search(r\"connect1 =\", line):\n break\n\n num_dims = dim\n\n # Set cell type\n if dim == 2:\n cell_type = \"triangle\"\n if dim == 3:\n cell_type = \"tetrahedron\"\n\n # Check that we got the cell type\n if cell_type == None:\n _error(\"Unable to find cell type.\")\n\n # Write header\n xml_writer.write_header_mesh(ofile, cell_type, dim)\n xml_writer.write_header_cells(ofile, num_cells)\n num_cells_read = 0\n\n # Read and write cells\n while 1:\n # Read next line\n line = ifile.readline()\n if not line:\n break\n connect = re.split(\"[,;]\", line)\n if num_dims == 2:\n n0 = int(connect[0]) - 1\n n1 = int(connect[1]) - 1\n n2 = int(connect[2]) - 1\n xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)\n elif num_dims == 3:\n n0 = int(connect[0]) - 1\n n1 = int(connect[1]) - 1\n n2 = int(connect[2]) - 1\n n3 = int(connect[3]) - 1\n xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)\n num_cells_read += 1\n if num_cells == num_cells_read:\n xml_writer.write_footer_cells(ofile)\n xml_writer.write_header_vertices(ofile, num_vertices)\n break\n\n num_vertices_read = 0\n coords = [[], [], []]\n coord = -1\n\n while 1:\n line = ifile.readline()\n if not line:\n _error(\"Missing data\")\n if re.search(r\"coord =\", line):\n break\n\n # Read vertices\n while 1:\n line = ifile.readline()\n if not line:\n break\n if re.search(r\"\\A\\s\\s\\S+,\", line):\n coord += 1\n print(\"Found x_\" + str(coord) + \" coordinates\")\n coords[coord] += line.split()\n if re.search(r\";\", line):\n break\n\n # Write vertices\n for i in range(num_vertices):\n if num_dims == 2:\n x = float(re.split(\",\", coords[0].pop(0))[0])\n y = float(re.split(\",\", coords[1].pop(0))[0])\n z = 0\n if num_dims == 3:\n x = float(re.split(\",\", coords[0].pop(0))[0])\n y = float(re.split(\",\", coords[1].pop(0))[0])\n z = float(re.split(\",\", coords[2].pop(0))[0])\n xml_writer.write_vertex(ofile, i, x, y, z)\n\n # Write footer\n xml_writer.write_footer_vertices(ofile)\n xml_writer.write_footer_mesh(ofile)\n\n # Close files\n ifile.close()\n ofile.close()\n\n\ndef _error(message):\n \"Write an error message\"\n for line in message.split(\"\\n\"):\n print(\"*** %s\" % line)\n sys.exit(2)\n\n\ndef convert2xml(ifilename, ofilename, iformat=None):\n \"\"\" Convert a file to the DOLFIN XML format.\n \"\"\"\n convert(ifilename, XmlHandler(ofilename), iformat=iformat)\n\n\ndef convert(ifilename, handler, iformat=None):\n \"\"\" Convert a file using a provided data handler.\n\n Note that handler.close is called when this function finishes.\n @param ifilename: Name of input file.\n @param handler: The data handler (instance of L{DataHandler}).\n @param iformat: Format of input file.\n \"\"\"\n if iformat is 
None:\n iformat = format_from_suffix(os.path.splitext(ifilename)[1][1:])\n # XXX: Backwards-compat\n if hasattr(handler, \"_ofilename\"):\n ofilename = handler._ofilename\n # Choose conversion\n if iformat == \"mesh\":\n # Convert from mesh to xml format\n mesh2xml(ifilename, ofilename)\n elif iformat == \"gmsh\":\n # Convert from gmsh to xml format\n gmsh2xml(ifilename, handler)\n elif iformat == \"Triangle\":\n # Convert from Triangle to xml format\n triangle2xml(ifilename, ofilename)\n elif iformat == \"xml-old\":\n # Convert from old to new xml format\n xml_old2xml(ifilename, ofilename)\n elif iformat == \"metis\":\n # Convert from metis graph to dolfin graph xml format\n metis_graph2graph_xml(ifilename, ofilename)\n elif iformat == \"scotch\":\n # Convert from scotch graph to dolfin graph xml format\n scotch_graph2graph_xml(ifilename, ofilename)\n elif iformat == \"diffpack\":\n # Convert from Diffpack tetrahedral grid format to xml format\n diffpack2xml(ifilename, ofilename)\n elif iformat == \"abaqus\":\n # Convert from abaqus to xml format\n abaqus.convert(ifilename, handler)\n elif iformat == \"NetCDF\":\n # Convert from NetCDF generated from ExodusII format to xml format\n netcdf2xml(ifilename, ofilename)\n elif iformat == \"ExodusII\":\n # Convert from ExodusII format to xml format via NetCDF\n exodus2xml(ifilename, ofilename)\n elif iformat == \"StarCD\":\n # Convert from Star-CD tetrahedral grid format to xml format\n starcd2xml(ifilename, ofilename)\n else:\n _error(\"Sorry, cannot convert between %s and DOLFIN xml file formats.\" % iformat)\n\n # XXX: handler.close messes things for other input formats than abaqus or gmsh\n if iformat in (\"abaqus\", \"gmsh\"):\n handler.close()\n\n\ndef starcd2xml(ifilename, ofilename):\n \"Convert from Star-CD tetrahedral grid format to DOLFIN XML.\"\n\n print(starcd2xml.__doc__)\n\n if not os.path.isfile(ifilename[:-3] + \"vrt\") or not os.path.isfile(ifilename[:-3] + \"cel\"):\n print(\"StarCD format requires one .vrt file and one .cel file\")\n sys.exit(2)\n\n # open output file\n ofile = open(ofilename, \"w\")\n\n # Open file, the vertices are in a .vrt file\n ifile = open(ifilename[:-3] + \"vrt\", \"r\")\n\n write_header_mesh(ofile, \"tetrahedron\", 3)\n\n # Read & write vertices\n\n # first, read all lines (need to sweep to times through the file)\n lines = ifile.readlines()\n\n # second, find the number of vertices\n num_vertices = -1\n counter = 0\n # nodenr_map is needed because starcd support node numbering like 1,2,4 (ie 3 is missing)\n nodenr_map = {}\n for line in lines:\n nodenr = int(line[0:15])\n nodenr_map[nodenr] = counter\n counter += 1\n num_vertices = counter\n\n # third, run over all vertices\n xml_writer.write_header_vertices(ofile, num_vertices)\n for line in lines:\n nodenr = int(line[0:15])\n vertex0 = float(line[15:31])\n vertex1 = float(line[31:47])\n vertex2 = float(line[47:63])\n xml_writer.write_vertex(ofile, nodenr_map[nodenr], float(vertex0), float(vertex1), float(vertex2))\n xml_writer.write_footer_vertices(ofile)\n\n # Open file, the cells are in a .cel file\n ifile = open(ifilename[:-3] + \"cel\", \"r\")\n\n # Read & write cells\n\n # first, read all lines (need to sweep to times through the file)\n lines = ifile.readlines()\n\n # second, find the number of cells\n num_cells = -1\n counter = 0\n for line in lines:\n l = [int(a) for a in line.split()]\n cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l\n if node4 > 0:\n if node2 == node3 and node4 == node5 and node5 == node6 
and node6 == node7: # these nodes should be equal\n counter += 1\n else:\n print(\"The file does contain cells that are not tetraheders. The cell number is \", cellnr,\n \" the line read was \", line)\n else:\n # triangles on the surface\n # print(\"The file does contain cells that are not tetraheders node4==0. The cell number is \", cellnr, \" the line read was \", line\n # sys.exit(2)\n pass\n\n num_cells = counter\n\n # third, run over all cells\n xml_writer.write_header_cells(ofile, num_cells)\n counter = 0\n for line in lines:\n l = [int(a) for a in line.split()]\n cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l\n if (node4 > 0):\n if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal\n\n xml_writer.write_cell_tetrahedron(ofile, counter, nodenr_map[node0], nodenr_map[node1],\n nodenr_map[node2], nodenr_map[node4])\n counter += 1\n\n xml_writer.write_footer_cells(ofile)\n xml_writer.write_footer_mesh(ofile)\n\n # Close files\n ifile.close()\n ofile.close()\n" ]
[ [ "numpy.array" ] ]
heilongjiangjs/GeetChinese_crack
[ "9654fdd5ca4b7990c5a9ccbffaf0b6a055f79b73" ]
[ "lib/datasets/imdb.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport os.path as osp\n\nimport PIL\nimport numpy as np\nimport scipy.sparse\nfrom lib.config import config as cfg\nfrom lib.utils.cython_bbox import bbox_overlaps\n\n\nclass imdb(object):\n \"\"\"Image database.\"\"\"\n\n def __init__(self, name, classes=None):\n self._name = name\n self._num_classes = 0\n if not classes:\n self._classes = []\n else:\n self._classes = classes\n self._image_index = []\n self._obj_proposer = 'gt'\n self._roidb = None\n self._roidb_handler = self.default_roidb\n # Use this dict for storing dataset specific config options\n self.config = {}\n\n @property\n def name(self):\n return self._name\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n def set_proposal_method(self, method):#'gt'\n method = eval('self.' + method + '_roidb')\n self.roidb_handler = method\n\n @property\n def roidb(self):\n # A roidb is a list of dictionaries, each with the following keys:\n # boxes\n # gt_overlaps\n # gt_classes\n # flipped\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(cfg.FLAGS2[\"data_dir\"], 'cache'))##abspath 当前文件的父路径\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n\n @property\n def num_images(self):\n return len(self.image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def default_roidb(self):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0]\n for i in range(self.num_images)]\n\n #图片翻转并加入数据库\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes,\n 'gt_overlaps': self.roidb[i]['gt_overlaps'],\n 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n def evaluate_recall(self, candidate_boxes=None, thresholds=None,\n area='all', limit=None):\n \"\"\"Evaluate detection proposal recall metrics.\n\n Returns:\n results: dictionary of results with keys\n 'ar': average recall\n 'recalls': vector recalls at each IoU overlap threshold\n 'thresholds': vector of IoU overlap thresholds\n 
'gt_overlaps': vector of all ground-truth overlaps\n \"\"\"\n # Record max overlap value for each gt box\n # Return vector of overlap values\n #这里记录了一些框的大小,0就是不限制大小\n areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,\n '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}\n area_ranges = [[0 ** 2, 1e5 ** 2], # all\n [0 ** 2, 32 ** 2], # small\n [32 ** 2, 96 ** 2], # medium\n [96 ** 2, 1e5 ** 2], # large\n [96 ** 2, 128 ** 2], # 96-128\n [128 ** 2, 256 ** 2], # 128-256\n [256 ** 2, 512 ** 2], # 256-512\n [512 ** 2, 1e5 ** 2], # 512-inf\n ]\n assert area in areas, 'unknown area range: {}'.format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = np.zeros(0)\n num_pos = 0\n for i in range(self.num_images):\n # Checking for max_overlaps == 1 avoids including crowd annotations\n # (...pretty hacking :/)\n max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)\n #gt_overlaps=1,和gt 100%重叠\n gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &\n (max_gt_overlaps == 1))[0]\n gt_boxes = self.roidb[i]['boxes'][gt_inds, :]\n gt_areas = self.roidb[i]['seg_areas'][gt_inds]\n valid_gt_inds = np.where((gt_areas >= area_range[0]) &\n (gt_areas <= area_range[1]))[0]\n gt_boxes = gt_boxes[valid_gt_inds, :]\n num_pos += len(valid_gt_inds)\n\n if candidate_boxes is None:\n # If candidate_boxes is not supplied, the default is to use the\n # non-ground-truth boxes from this roidb\n non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]\n boxes = self.roidb[i]['boxes'][non_gt_inds, :]\n else:\n boxes = candidate_boxes[i]\n if boxes.shape[0] == 0:\n continue\n if limit is not None and boxes.shape[0] > limit:\n boxes = boxes[:limit, :]\n ###计算重合程度,两个框之间的重合区域的面积 / 两个区域一共加起来的面积 IOU GT和n个anchors overlaps[n, k] n--anchor k--GT\n overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n\n _gt_overlaps = np.zeros((gt_boxes.shape[0]))\n for j in range(gt_boxes.shape[0]):\n # find which proposal box maximally covers each gt box\n #argmax返回的是最大数的索引 #从GT找到最大的一个anchor对应的IOU索引\n argmax_overlaps = overlaps.argmax(axis=0)\n # and get the iou amount of coverage for each gt box\n #np.max:求序列的最值,axis:默认为列向(也即 axis=0),axis = 1 时为行方向的最值;这里就是找到最大的一个anchor\n max_overlaps = overlaps.max(axis=0)\n # find which gt box is 'best' covered (i.e. 
'best' = most iou)\n gt_ind = max_overlaps.argmax()#anchor对应的最大GT索引\n gt_ovr = max_overlaps.max()#anchor对应的最大GT\n assert (gt_ovr >= 0)\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]#最大的一个anchor对应的最大GT索引\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert (_gt_overlaps[j] == gt_ovr)\n # mark the proposal box and the gt box as used\n #这些已经查找过了,不需要在查找\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n # append recorded iou coverage level\n gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))\n #np.sort\n gt_overlaps = np.sort(gt_overlaps)\n if thresholds is None:\n step = 0.05\n thresholds = np.arange(0.5, 0.95 + 1e-5, step)\n #zeros_like,返回和thresholds结构一致的0数组\n recalls = np.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,\n 'gt_overlaps': gt_overlaps}\n\n def create_roidb_from_box_list(self, box_list, gt_roidb):\n assert len(box_list) == self.num_images, \\\n 'Number of boxes must match number of ground-truth images'\n roidb = []\n for i in range(self.num_images):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)\n\n if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:\n gt_boxes = gt_roidb[i]['boxes']\n gt_classes = gt_roidb[i]['gt_classes']\n gt_overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n argmaxes = gt_overlaps.argmax(axis=1)#返回的是计算每个anchor和哪个GTobject的IOU最大,这个是相对GT的索引\n maxes = gt_overlaps.max(axis=1)#返回的是计算每个anchor和GTobject的IOU最大值\n I = np.where(maxes > 0)[0]#返回max>0时,对应的索引,这个索引也是相对anchor的索引\n overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]#[anchorid,Classid[GTid]]=maxiOU\n #对于那些零元素数目远远多于非零元素数目,并且非零元素的分布没有规律的矩阵称为稀疏矩阵(sparse)。仅存储非零元素可使矩阵操作效率更高。也就是稀疏矩阵的计算速度更快\n #csr_matrix,创建稀疏矩阵\n overlaps = scipy.sparse.csr_matrix(overlaps)\n roidb.append({\n 'boxes': boxes,\n 'gt_classes': np.zeros((num_boxes,), dtype=np.int32),\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': np.zeros((num_boxes,), dtype=np.float32),\n })\n return roidb\n\n @staticmethod\n def merge_roidbs(a, b):\n assert len(a) == len(b)\n for i in range(len(a)):\n a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))\n a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],\n b[i]['gt_classes']))\n a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],\n b[i]['gt_overlaps']])\n a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],\n b[i]['seg_areas']))\n return a\n\n def competition_mode(self, on):\n \"\"\"Turn competition mode on or off.\"\"\"\n pass\n" ]
[ [ "numpy.hstack", "numpy.arange", "numpy.sort", "numpy.zeros_like", "numpy.zeros", "numpy.where", "numpy.vstack" ] ]
Derikka/lcls-tools
[ "76c58cfeb35e792509c11ae6bf5a3ca3448faa12" ]
[ "lcls_tools/image_processing/image_processing_test.py" ]
[ "import sys\nimport unittest\nimport numpy as np\nimport image_processing as ip\nfrom mat_image import MatImage\n\nFILE = 'test_image.mat'\nCAMERA = 'CAMR:LGUN:210'\n\nclass ImageProcessingTest(unittest.TestCase):\n \n def setUp(self):\n \"\"\"Use test image\"\"\"\n self.MI = MatImage()\n self.MI.load_mat_image('test_image.mat')\n\n def test_fliplr(self):\n \"\"\"Test that fliplr does the right thing\"\"\"\n col_init = self.MI.image[:, 0]\n col_final = ip.fliplr(self.MI.image)[:, -1]\n self.assertEqual(np.array_equal(col_init, col_final), True)\n\n def test_flipud(self):\n \"\"\"Test that flipud does the right thing\"\"\"\n row_init = self.MI.image[0]\n row_final = ip.flipud(self.MI.image)[-1]\n self.assertEqual(np.array_equal(row_init, row_final), True)\n\n def test_center_of_mass(self):\n \"\"\"Test that we get correct x and y centroids\"\"\"\n (x1, y1) = ip.center_of_mass(self.MI.image)\n self.assertEqual((int(x1), int(y1)), (522, 669))\n (x2, y2) = ip.center_of_mass(self.MI.image, sigma=2)\n self.assertEqual((int(x2), int(y2)), (540, 660))\n\n def test_average_image(self):\n \"\"\"Test that we can average a number of images\"\"\"\n images = []\n while len(images) < 10:\n images.append(self.MI.image)\n \n ave_image = ip.average_image(images)\n self.assertEqual(np.array_equal(ave_image, self.MI.image), True)\n\n def test_shape_image(self):\n \"\"\"Test that we can reshape our ndarray\"\"\"\n self.assertEqual(self.MI.image.shape, (1024, 1392))\n image = ip.shape_image(self.MI.image, 16, 89088)\n self.assertEqual(image.shape, (89088, 16))\n\n def test_x_projection(self):\n \"\"\"Test we get expected value for x projection\"\"\"\n x_proj = ip.x_projection(self.MI.image)\n self.assertEqual(x_proj.sum(), 9792279)\n self.assertEqual(int(x_proj.mean()), 7034)\n self.assertEqual(int(x_proj.std()), 10005)\n\n def test_y_projection(self):\n \"\"\"Test that we get expected value for y projection\"\"\"\n y_proj = ip.y_projection(self.MI.image)\n self.assertEqual(y_proj.sum(), 9623943)\n self.assertEqual(int(y_proj.mean()), 9398)\n self.assertEqual(int(y_proj.std()), 10398)\n\n def test_gauss_func(self):\n \"\"\"Test we get correct value for a gaussian evaluation\"\"\"\n ans = ip.gauss_func(1.0, 2.0, 3.0, 4.0)\n self.assertEqual(round(ans, 2), 1.76)\n\n def test_gauss_fit(self):\n \"\"\"Test that we get the correct gaussian fit parameters\"\"\"\n x_proj = ip.x_projection(self.MI.image)\n y_proj = ip.y_projection(self.MI.image)\n _, a_x, x0_x, sigma_x = ip.gauss_fit(x_proj)\n _, a_y, y0_y, sigma_y = ip.gauss_fit(y_proj)\n self.assertEqual(int(a_x), 30373)\n self.assertEqual(int(x0_x), 660)\n self.assertEqual(int(sigma_x), 124)\n self.assertEqual(int(a_y), 29528)\n self.assertEqual(int(y0_y), 541)\n self.assertEqual(int(sigma_y), 127)\n\nif __name__ == '__main__':\n unittest.main()\n \n" ]
[ [ "numpy.array_equal" ] ]
daemon/neurometer
[ "c839765492b0ae5bdbcc826808b2ae196b9b1c0c" ]
[ "examples/measure_component.py" ]
[ "import gc\nimport random\n\nfrom easydict import EasyDict as edict\nfrom matplotlib.lines import Line2D\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy import stats\nfrom tqdm import tqdm\nimport pandas as pd\nimport fire\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom neurometer import LatencyWatch, GridSearch\n\n\nclass LeNet5(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.convs = [nn.Conv2d(1, config.conv1_out, 5),\n nn.ReLU(), nn.MaxPool2d(2),\n nn.Conv2d(config.conv1_out, config.conv2_out, 5),\n nn.ReLU(), nn.MaxPool2d(2)]\n self._convs = nn.Sequential(*self.convs)\n self.fcs = [nn.Linear(config.conv2_out * 16, config.lin1_out), nn.ReLU(),\n nn.Linear(config.lin1_out, 10)]\n self._fcs = nn.Sequential(*self.fcs)\n self.watch = LatencyWatch()\n\n def dummy_input(self):\n return torch.zeros(1, 1, 28, 28)\n\n def forward(self, x):\n with self.watch:\n for conv in self.convs:\n x = conv(x)\n x = x.view(x.size(0), -1)\n for fc in self.fcs:\n x = fc(x)\n return x\n\n\nclass LeNet5Conv1(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.convs = [nn.Conv2d(1, config.conv1_out, 5),\n nn.ReLU(), nn.MaxPool2d(2)]\n self._convs = nn.Sequential(*self.convs)\n self.watch = LatencyWatch()\n\n def dummy_input(self):\n return torch.zeros(1, 1, 28, 28)\n\n def forward(self, x):\n with self.watch:\n for conv in self.convs:\n x = conv(x)\n return x\n\n\nclass LeNet5Conv2(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.convs = [nn.Conv2d(config.conv1_out, config.conv2_out, 5),\n nn.ReLU(), nn.MaxPool2d(2)]\n self._convs = nn.Sequential(*self.convs)\n self.watch = LatencyWatch()\n self.conv1_out = config.conv1_out\n self.conv2_out = config.conv2_out\n\n def dummy_input(self):\n return torch.zeros(1, self.conv1_out, 12, 12)\n\n def forward(self, x):\n with self.watch:\n for conv in self.convs:\n x = conv(x)\n return x\n\n\nclass LeNet5Fc1(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.fcs = [nn.Linear(config.conv2_out * 16, config.lin1_out), nn.ReLU(),\n nn.Linear(config.lin1_out, 10)]\n self._fcs = nn.Sequential(*self.fcs)\n self.lin1_out = config.lin1_out\n self.conv2_out = config.conv2_out\n self.watch = LatencyWatch()\n\n def dummy_input(self):\n return torch.zeros(1, self.conv2_out * 16)\n\n def forward(self, x):\n with self.watch:\n for fc in self.fcs:\n x = fc(x)\n return x\n\n\nclass BasicBlock(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.in_planes = in_planes = config.in_planes\n out_planes = config.out_planes\n stride = config.stride\n self.watch = LatencyWatch()\n self.height = config.height\n self.width = config.width\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)\n\n def dummy_input(self):\n return torch.zeros(1, self.in_planes, self.height, self.width)\n\n def forward(self, x):\n with self.watch:\n out = self.conv1(x)\n out = self.conv2(F.relu(self.bn2(out)))\n return out\n\n\nclass MeasureComponentBenchmark(object):\n\n def run(self, component_name, cuda=False, n_trials=100, burn_in=10, clear_cache=True, main=True, 
input_size=tuple(), **component_kwargs):\n torch.set_grad_enabled(False)\n model = components[component_name](edict(component_kwargs))\n model.eval()\n x = model.dummy_input()\n if cuda:\n x = x.cuda()\n model.cuda()\n for _ in tqdm(range(burn_in)):\n model(x)\n if cuda:\n torch.cuda.synchronize()\n model.watch.measurements = []\n for _ in tqdm(range(n_trials), position=0):\n model(x)\n if cuda:\n torch.cuda.synchronize()\n if clear_cache:\n x = model.dummy_input()\n if cuda:\n x = x.cuda()\n if main:\n model.watch.write()\n else:\n return model.watch.measurements\n\n def build_table(self, component_name, method=\"random\", cuda=False, ranges={}, n_samples=1000, n_trials=20, input_size=tuple(), \n seed=0, output_file=\"output.csv\", **component_kwargs):\n if method == \"random\":\n rand = random.Random(seed)\n elif method == \"grid\":\n grid_keys = list(ranges.keys())\n grid_iter = GridSearch([list(range(*range_args)) for range_args in ranges.values()])\n frames = []\n gc.disable()\n if method == \"random\":\n for idx in tqdm(range(n_samples), position=1):\n sample = {}\n cols = {}\n for key, range_ in ranges.items():\n sample[key] = rand.randint(*range_)\n cols[key] = [sample[key]] * n_trials\n sample.update(component_kwargs)\n cols[\"measurements\"] = self.run(component_name, cuda=cuda, n_trials=n_trials + 20, main=False, input_size=input_size, **sample)[20:]\n frames.append(pd.DataFrame(cols))\n if idx % 100 == 0:\n gc.collect()\n elif method == \"grid\":\n pbar = tqdm(total=len(grid_iter), position=1)\n for idx, args in enumerate(grid_iter):\n comp_args = {k: v for k, v in zip(grid_keys, args)}\n cols = comp_args.copy()\n comp_args.update(component_kwargs)\n cols[\"measurements\"] = self.run(component_name, cuda=cuda, n_trials=n_trials + 20, main=False, input_size=input_size, **comp_args)[20:]\n frames.append(pd.DataFrame(cols))\n if idx % 100 == 0:\n gc.collect()\n pbar.update(1)\n pbar.close()\n pd.concat(frames).to_csv(output_file, index_label=\"idx\")\n\n def plot_scatter(self, filename):\n df = pd.read_csv(filename)\n sns.violinplot(x=df[\"conv1_out\"], y=df[\"measurements\"])\n plt.show()\n\n def plot3d(self, *filenames, title=\"\", legend_names=[]):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n colors = [\"red\", \"blue\", \"green\", \"orange\", \"purple\", \"black\"]\n for idx, filename in enumerate(filenames):\n df = pd.read_csv(filename)\n df50 = df.groupby([\"conv1_out\", \"conv2_out\"]).quantile(0.75).reset_index()\n x, y = df50[\"conv1_out\"], df50[\"conv2_out\"]\n ax.scatter(x, y, df50[\"measurements\"], color=colors[idx % len(colors)])\n if title:\n plt.title(title)\n if legend_names:\n legend_elements = []\n for idx, name in enumerate(legend_names):\n legend_elements.append(Line2D([0], [0], color=colors[idx % len(colors)], lw=4, label=name))\n ax.legend(handles=legend_elements)\n plt.show()\n\n\ncomponents = dict(lenet5_conv1=LeNet5Conv1, lenet5_conv2=LeNet5Conv2, lenet5_fc1=LeNet5Fc1, lenet5=LeNet5, wrn_block=BasicBlock)\n\nif __name__ == \"__main__\":\n fire.Fire(MeasureComponentBenchmark)\n" ]
[ [ "torch.nn.Sequential", "torch.cuda.synchronize", "pandas.read_csv", "pandas.concat", "matplotlib.pyplot.title", "torch.zeros", "torch.nn.Conv2d", "pandas.DataFrame", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.set_grad_enabled", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
theawless/tweet-rank
[ "e02dcdd9914933e68243794725ed7b27cf332a7a" ]
[ "network/ltr.py" ]
[ "import re\nimport time\n\nimport nltk\nimport nltk.sentiment\nimport pandas as pd\nfrom geopy.distance import great_circle\nfrom pymongo import MongoClient\nfrom tqdm import tqdm\n\nclient = MongoClient()\ndb = client['vegas']\n\nEVENT_TIME = 1506837900 # 10:05 PM 1st October, 2017 (PDT)\nEVENT_COORDINATE = (36.11470649999999, 115.17284840000002)\nDIST_THRESHOLD = 20 # in km\n\nsentimentAnalyser = nltk.sentiment.vader.SentimentIntensityAnalyzer()\n\n\ndef tokenize_text(tweet, ascii=True, ignore_rt_char=True, ignore_url=True,\n ignore_mention=True, ignore_hashtag=True, letter_only=True,\n remove_stopwords=True, min_tweet_len=3):\n global model\n sword = nltk.corpus.stopwords.words('english')\n\n if ascii: # maybe remove lines with ANY non-ascii character\n for c in tweet:\n if not (0 < ord(c) < 127):\n return ''\n\n tokens = tweet.lower().split() # to lower, split\n res = []\n\n for token in tokens:\n if remove_stopwords and token in sword:\n continue\n if ignore_rt_char and token == 'rt':\n continue\n if ignore_url and token.startswith('https:'):\n continue\n if ignore_mention and token.startswith('@'):\n continue\n if ignore_hashtag and token.startswith('#'):\n continue\n if letter_only:\n if not token.isalpha():\n continue\n elif token.isdigit():\n token = '<num>'\n\n res += token,\n\n return res\n\n\ndef tweet_length(tweet):\n return len(tweet['text'])\n\n\ndef word_count(tweet):\n return len(re.findall(r'\\w+', tweet['text']))\n\n\ndef contains_question_mark(tweet):\n return '?' in tweet['text']\n\n\ndef contains_exclamation_mark(tweet):\n return '!' in tweet['text']\n\n\ndef contains_multiple_ques_excl_mark(tweet):\n return tweet['text'].count('?') > 1 or tweet['text'].count('!') > 1\n\n\ndef contains_second_pronoun(tweet):\n second_pronouns = ['you', 'yours', 'yourself']\n for word in tweet['text'].strip().split():\n for pronoun in second_pronouns:\n if word == pronoun:\n return True\n\n return False\n\n\ndef contains_third_pronoun(tweet):\n third_pronouns = ['she', 'her', 'him', 'it', 'he', 'they', 'them']\n for word in tweet['text'].strip().split():\n for pronoun in third_pronouns:\n if word == pronoun:\n return True\n\n return False\n\n\ndef fraction_of_uppercase(tweet):\n text = re.sub(r\"http\\S+\", \"\", tweet['text'])\n text = re.sub(r\"RT\", \"\", tweet['text'])\n t_count = len(text)\n u_count = sum(1 for c in text if c.isupper())\n return float(u_count) / t_count\n\n\ndef url_count(tweet):\n return len(re.findall(r'http\\S+', tweet['text']))\n\n\ndef contains_user_mention(tweet):\n return len(tweet['entities']['user_mentions']) > 0\n\n\ndef contains_hashtag(tweet):\n if 'hashtags' in tweet['entities']:\n return len(tweet['entities']['hashtags']) > 0\n\n return False\n\n\ndef is_retweet(tweet):\n if 'retweeted_status' in tweet:\n return True\n\n return False\n\n\ndef is_reply(tweet):\n if 'in_reply_status_id' in tweet:\n return True\n\n return False\n\n\ndef retweet_count(tweet):\n if 'retweet_count' in tweet:\n return tweet['retweet_count']\n\n return 0\n\n\ndef reply_count(tweet):\n if 'reply_count' in tweet:\n return tweet['reply_count']\n\n return 0\n\n\ndef tweet_sentiment(tweet):\n labels = {'pos': 1, 'neg': -1, 'neu': 0}\n sentence = ' '.join(tokenize_text(tweet['text']))\n scores = sentimentAnalyser.polarity_scores(sentence)\n scores.pop('compound', None)\n sentiment = max(scores, key=scores.get)\n return labels[sentiment]\n\n\ndef tweeted_before_event(tweet):\n struct_time = time.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')\n tweet_time = 
int(time.mktime(struct_time))\n\n return tweet_time < EVENT_TIME\n\n\ndef is_quoted_status(tweet):\n return 'quoted_status_id' in tweet\n\n\n# If tweet posted near event location\n# None if coordinates not in tweet\ndef vicinity_of_event(tweet):\n if not tweet['coordinates']:\n return None\n\n tweet_coord = (tweet['coordinates'][0], tweet['coordinates'][1])\n return great_circle(tweet_coord, EVENT_COORDINATE).km < DIST_THRESHOLD\n\n\ndef days_since_join(tweet):\n struct_time = time.strptime(tweet['user']['created_at'], '%a %b %d %H:%M:%S +0000 %Y')\n join_time = int(time.mktime(struct_time))\n\n return (EVENT_TIME - join_time) // 86400\n\n\ndef status_count(tweet):\n return tweet['user']['statuses_count']\n\n\ndef is_verified(tweet):\n return tweet['user']['verified']\n\n\ndef non_empty_bio(tweet):\n if tweet['user']['description']:\n return len(tweet['user']['description']) > 0\n\n return False\n\n\ndef follower_count(tweet):\n return tweet['user']['followers_count']\n\n\ndef friend_count(tweet):\n return tweet['user']['friends_count']\n\n\ndef extract_features(tweet):\n return {\n 'id': tweet['id_str'],\n 'score': tweet['score'],\n 'tweet_length': tweet_length(tweet),\n 'word_count': word_count(tweet),\n 'contains_question_mark': contains_question_mark(tweet),\n 'contains_exclamation_mark': contains_exclamation_mark(tweet),\n 'contains_multiple_ques_excl_mark': contains_multiple_ques_excl_mark(tweet),\n 'contains_second_pronoun': contains_second_pronoun(tweet),\n 'contains_third_pronoun': contains_third_pronoun(tweet),\n 'fraction_of_uppercase': fraction_of_uppercase(tweet),\n 'url_count': url_count(tweet),\n 'contains_user_mention': contains_user_mention(tweet),\n 'contains_hashtag': contains_hashtag(tweet),\n 'is_retweet': is_retweet(tweet),\n 'is_reply': is_reply(tweet),\n 'retweet_count': retweet_count(tweet),\n 'reply_count': reply_count(tweet),\n 'tweet_sentiment': tweet_sentiment(tweet),\n 'tweeted_before_event': tweeted_before_event(tweet),\n 'is_quoted_status': is_quoted_status(tweet),\n 'vicinity_of_event': vicinity_of_event(tweet),\n 'days_since_join': days_since_join(tweet),\n 'status_count': status_count(tweet),\n 'is_verified': is_verified(tweet),\n 'non_empty_bio': non_empty_bio(tweet),\n 'follower_count': follower_count(tweet),\n 'friend_count': friend_count(tweet)\n }\n\n\n# Returns dataframe from list of tweets\ndef compute_dataframe(tweets):\n data = []\n for tweet in tqdm(tweets):\n data.append(extract_features(tweet))\n\n return pd.DataFrame(data)\n\n\ndef saveToLTR(dataframe, file):\n datafile = open(file, 'w+')\n count = 0\n for idx, row in dataframe.iterrows():\n count += 1\n if count == 500:\n break\n score, features = row['score'], []\n for key, value in row.items():\n if key != 'score':\n features.append(value)\n\n line = str(score) + ' qid:1'\n for f in range(len(features)):\n line += ' ' + str(f) + ':' + str(features[f])\n line += '\\n'\n datafile.write(line)\n\n\ndef evaluate(tweets):\n dataframe = compute_dataframe(tweets)\n dataframe = dataframe.apply(lambda col: pd.factorize(col, sort=False)[0])\n saveToLTR(dataframe, 'test.txt')\n\n\ndef main():\n tweets = []\n for annotation in db[\"annotations\"].find():\n tweet = db[\"full_tweets_clean\"].find_one({\"id_str\": annotation[\"id_str\"]})\n if tweet is None:\n continue\n tweet[\"score\"] = annotation[\"annotation\"]\n tweets.append(tweet)\n\n dataframe = compute_dataframe(tweets)\n dataframe = dataframe.apply(lambda col: pd.factorize(col, sort=False)[0])\n\n dataframe.to_csv('tweets_ann.csv', 
sep='\\t')\n\n saveToLTR(dataframe, 'train.txt')\n # evaluate(tweets)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.factorize", "pandas.DataFrame" ] ]
3loi/MSP_Face
[ "f44300f05f6ec4e59374387b39702a419f73e5f0" ]
[ "AudioVisual_Modality/fusion_model_train.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: winston\n\"\"\"\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import Model\nfrom keras.layers import Dense, Input\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam\nfrom scipy.io import loadmat\nfrom utils import getPaths, cc_coef\nfrom utils import class2onehot_5class, class2onehot_8class \nimport argparse\n\n\n\ndef fusion_network_MTL(num_nodes):\n inputs = Input((768,))\n encode = Dense(num_nodes, activation='relu')(inputs)\n encode = Dense(num_nodes, activation='relu')(encode)\n output_act = Dense(units=1, activation='linear')(encode)\n output_dom = Dense(units=1, activation='linear')(encode)\n output_val = Dense(units=1, activation='linear')(encode)\n adam = Adam(lr=0.0001)\n model = Model(inputs=inputs, outputs=[output_act, output_dom, output_val])\n model.compile(optimizer=adam, loss=[cc_coef, cc_coef, cc_coef])\n return model \n\ndef fusion_network_class(num_nodes, num_class):\n inputs = Input((768,))\n encode = Dense(num_nodes, activation='relu')(inputs)\n encode = Dense(num_nodes, activation='relu')(encode) \n outputs = Dense(units=num_class, activation='softmax')(encode)\n adam = Adam(lr=0.0001)\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(optimizer=adam, loss='categorical_crossentropy')\n return model\n###############################################################################\n\n\n\nargparse = argparse.ArgumentParser()\nargparse.add_argument(\"-ep\", \"--epoch\", required=True)\nargparse.add_argument(\"-batch\", \"--batch_size\", required=True)\nargparse.add_argument(\"-emo\", \"--emo_type\", required=True)\nargparse.add_argument(\"-nodes\", \"--num_nodes\", required=True)\nargparse.add_argument(\"-nc\", \"--num_class\")\nargs = vars(argparse.parse_args())\n\n# Parameters\nshuffle = True\nrandom_seed = 99\nbatch_size = int(args['batch_size'])\nepochs = int(args['epoch'])\nnum_nodes = int(args['num_nodes'])\nlabel_type = args['emo_type']\ntry:\n num_class = args['num_class']\nexcept:\n pass\n\n# Hidden Features Paths Setting\nif label_type == 'attr':\n root_dir = './Fusion_Features/3-attribute'\nelif label_type == 'class':\n if num_class == '5-class':\n root_dir = './Fusion_Features/5-class'\n elif num_class == '8-class':\n root_dir = './Fusion_Features/8-class'\n\n# Loading Paths & Labels\nif label_type == 'class':\n paths_valid, labels_class_valid = getPaths(label_type, split_set='Validation', num_class=num_class)\n paths_train, labels_class_train = getPaths(label_type, split_set='Train', num_class=num_class)\nelif label_type == 'attr':\n # Loading Norm-Label\n Label_mean_act = loadmat('./NormTerm/act_norm_means.mat')['normal_para'][0][0]\n Label_std_act = loadmat('./NormTerm/act_norm_stds.mat')['normal_para'][0][0]\n Label_mean_dom = loadmat('./NormTerm/dom_norm_means.mat')['normal_para'][0][0]\n Label_std_dom = loadmat('./NormTerm/dom_norm_stds.mat')['normal_para'][0][0]\n Label_mean_val = loadmat('./NormTerm/val_norm_means.mat')['normal_para'][0][0]\n Label_std_val = loadmat('./NormTerm/val_norm_stds.mat')['normal_para'][0][0] \n paths_valid, labels_act_valid, labels_dom_valid, labels_val_valid = getPaths(label_type, split_set='Validation', num_class=num_class)\n paths_train, labels_act_train, labels_dom_train, labels_val_train = getPaths(label_type, split_set='Train', num_class=num_class) \n\n# shuffle the training set\nindexes = np.arange(len(paths_train))\nif shuffle:\n np.random.seed(random_seed)\n 
np.random.shuffle(indexes)\n \nif label_type == 'class':\n shuffle_paths_train = [paths_train[k] for k in indexes]\n shuffle_class_train = [labels_class_train[k] for k in indexes]\nelif label_type == 'attr':\n shuffle_paths_train = [paths_train[k] for k in indexes]\n shuffle_act_train = [labels_act_train[k] for k in indexes]\n shuffle_dom_train = [labels_dom_train[k] for k in indexes]\n shuffle_val_train = [labels_val_train[k] for k in indexes]\n\n# Loading Hidden Features (Training set)\nX_Train = []\nY_Train_Class = []\nY_Train_Act = []\nY_Train_Dom = []\nY_Train_Val = []\nfor i in range(len(shuffle_paths_train)):\n try: # deal with missing files\n x_audio = loadmat(root_dir + '/Audios/' + shuffle_paths_train[i].replace('.wav','.mat'))['Feat']\n x_video = loadmat(root_dir + '/Videos/' + shuffle_paths_train[i].replace('.wav','.mat'))['Feat']\n # fusing audio-visual hidden features\n x = np.concatenate((x_audio, x_video),axis=1)\n x = x.reshape(-1) \n X_Train.append(x)\n if label_type == 'class': # STL\n # class to one-hot label\n if num_class == '5-class':\n y = class2onehot_5class(shuffle_class_train[i])\n elif num_class == '8-class':\n y = class2onehot_8class(shuffle_class_train[i])\n Y_Train_Class.append(y)\n \n elif label_type == 'attr': # MTL \n # normalize regression label\n y_act = (shuffle_act_train[i]-Label_mean_act)/Label_std_act\n y_dom = (shuffle_dom_train[i]-Label_mean_dom)/Label_std_dom\n y_val = (shuffle_val_train[i]-Label_mean_val)/Label_std_val \n Y_Train_Act.append(y_act)\n Y_Train_Dom.append(y_dom)\n Y_Train_Val.append(y_val)\n except:\n pass\n\nif label_type == 'class':\n X_Train = np.array(X_Train)\n Y_Train_Class = np.array(Y_Train_Class)\nelif label_type == 'attr':\n X_Train = np.array(X_Train)\n Y_Train_Act = np.array(Y_Train_Act) \n Y_Train_Dom = np.array(Y_Train_Dom)\n Y_Train_Val = np.array(Y_Train_Val)\n\n# Loading Hidden Features (Validation set)\nX_Valid = []\nY_Valid_Class = []\nY_Valid_Act = []\nY_Valid_Dom = []\nY_Valid_Val = []\nfor i in range(len(paths_valid)):\n try: # deal with missing files\n x_audio = loadmat(root_dir + '/Audios/' + paths_valid[i].replace('.wav','.mat'))['Feat']\n x_video = loadmat(root_dir + '/Videos/' + paths_valid[i].replace('.wav','.mat'))['Feat']\n # fusing audio-visual hidden features\n x = np.concatenate((x_audio, x_video),axis=1)\n x = x.reshape(-1)\n X_Valid.append(x)\n if label_type == 'class':\n # class to one-hot label\n if num_class == '5-class':\n y = class2onehot_5class(labels_class_valid[i])\n elif num_class == '8-class':\n y = class2onehot_8class(labels_class_valid[i])\n Y_Valid_Class.append(y)\n elif label_type == 'attr': \n y_act = (labels_act_valid[i]-Label_mean_act)/Label_std_act\n y_dom = (labels_dom_valid[i]-Label_mean_dom)/Label_std_dom\n y_val = (labels_val_valid[i]-Label_mean_val)/Label_std_val \n Y_Valid_Act.append(y_act)\n Y_Valid_Dom.append(y_dom)\n Y_Valid_Val.append(y_val) \n except:\n pass\n \nif label_type == 'class':\n X_Valid = np.array(X_Valid)\n Y_Valid_Class = np.array(Y_Valid_Class)\nelif label_type == 'attr':\n X_Valid = np.array(X_Valid)\n Y_Valid_Act = np.array(Y_Valid_Act) \n Y_Valid_Dom = np.array(Y_Valid_Dom)\n Y_Valid_Val = np.array(Y_Valid_Val)\n\n# loading model structure\nif label_type == 'class':\n model = fusion_network_class(num_nodes=num_nodes, num_class=int(num_class.split('-')[0]))\nelif label_type == 'attr':\n model = fusion_network_MTL(num_nodes=num_nodes)\n#print(model.summary()) \n\n# Output fusion models saving folder\nif not os.path.isdir('./Fusion_Models/'):\n 
os.makedirs('./Fusion_Models/') \n\n# setting model checkpoints\nif label_type == 'attr':\n filepath='./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+label_type+'.hdf5'\nelif label_type == 'class':\n filepath='./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+num_class+'.hdf5'\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]\n\n# model fitting\nif label_type == 'class':\n model.fit(x=X_Train, \n y=Y_Train_Class, \n batch_size=batch_size, \n epochs=epochs,\n validation_data=(X_Valid, Y_Valid_Class),\n verbose=1,\n callbacks=callbacks_list)\n \nelif label_type == 'attr':\n model.fit(x=X_Train, \n y=([Y_Train_Act, Y_Train_Dom, Y_Train_Val]), \n batch_size=batch_size, \n epochs=epochs,\n validation_data=(X_Valid, [Y_Valid_Act, Y_Valid_Dom, Y_Valid_Val]),\n verbose=1,\n callbacks=callbacks_list)\n\n# Show training & validation loss\nv_loss = model.history.history['val_loss']\nt_loss = model.history.history['loss']\nplt.plot(t_loss,'b')\nplt.plot(v_loss,'r')\nif label_type == 'attr':\n plt.savefig('./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+label_type+'.png')\nelif label_type == 'class':\n plt.savefig('./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+num_class+'.png')\n" ]
[ [ "numpy.random.seed", "scipy.io.loadmat", "numpy.random.shuffle", "matplotlib.pyplot.plot", "numpy.concatenate", "numpy.array" ] ]
hot-cheeto/leo
[ "29acfd7bb3779ea2048fa253dc8347cd621a8a17" ]
[ "utils.py" ]
[ "# Copyright 2018 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Short utility functions for LEO.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pickle\n\nfrom six.moves import range\nimport tensorflow as tf\n\nimport config\nimport data\n\n\ndef unpack_data(problem_instance):\n \"\"\"Map data.ProblemInstance to a list of Tensors, to process with map_fn.\"\"\"\n if isinstance(problem_instance, data.ProblemInstance):\n return list(problem_instance)\n return problem_instance\n\n\ndef copy_checkpoint(checkpoint_path, global_step, accuracy):\n \"\"\"Copies the checkpoint to a separate directory.\"\"\"\n tmp_checkpoint_path = os.path.join(checkpoint_path, \"tmp_best_checkpoint\")\n best_checkpoint_path = os.path.join(checkpoint_path, \"best_checkpoint\")\n if _is_previous_accuracy_better(best_checkpoint_path, accuracy):\n tf.logging.info(\"Not copying the checkpoint: there is a better one from \"\n \"before a preemption.\")\n return\n\n checkpoint_regex = os.path.join(checkpoint_path,\n \"model.ckpt-{}.*\".format(global_step))\n checkpoint_files = tf.gfile.Glob(checkpoint_regex)\n graph_file = os.path.join(checkpoint_path, \"graph.pbtxt\")\n checkpoint_files.append(graph_file)\n\n _save_files_in_tmp_directory(tmp_checkpoint_path, checkpoint_files, accuracy)\n\n new_checkpoint_index_file = os.path.join(tmp_checkpoint_path, \"checkpoint\")\n with tf.gfile.Open(new_checkpoint_index_file, \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"{}/model.ckpt-{}\\\"\\n\".format(\n best_checkpoint_path, global_step))\n\n # We first copy the better checkpoint to a temporary directory, and only\n # when it's created move it to avoid inconsistent state when job is preempted\n # when copying the checkpoint.\n if tf.gfile.Exists(best_checkpoint_path):\n tf.gfile.DeleteRecursively(best_checkpoint_path)\n tf.gfile.Rename(tmp_checkpoint_path, best_checkpoint_path)\n tf.logging.info(\"Copied new best checkpoint with accuracy %.5f\", accuracy)\n\n\ndef _save_files_in_tmp_directory(tmp_checkpoint_path, checkpoint_files,\n accuracy):\n \"\"\"Saves the checkpoint files and accuracy in a temporary directory.\"\"\"\n\n if tf.gfile.Exists(tmp_checkpoint_path):\n tf.logging.info(\"The temporary directory exists, because job was preempted \"\n \"before it managed to move it. 
We're removing it.\")\n tf.gfile.DeleteRecursively(tmp_checkpoint_path)\n tf.gfile.MkDir(tmp_checkpoint_path)\n\n def dump_in_best_checkpoint_path(obj, filename):\n full_path = os.path.join(tmp_checkpoint_path, filename)\n with tf.gfile.Open(full_path, \"wb\") as f:\n pickle.dump(obj, f)\n\n for file_ in checkpoint_files:\n just_filename = file_.split(\"/\")[-1]\n tf.gfile.Copy(\n file_,\n os.path.join(tmp_checkpoint_path, just_filename),\n overwrite=False)\n dump_in_best_checkpoint_path(config.get_inner_model_config(), \"inner_config\")\n dump_in_best_checkpoint_path(config.get_outer_model_config(), \"outer_config\")\n dump_in_best_checkpoint_path(accuracy, \"accuracy\")\n\n\ndef _is_previous_accuracy_better(best_checkpoint_path, accuracy):\n if not tf.gfile.Exists(best_checkpoint_path):\n return False\n\n previous_accuracy_file = os.path.join(best_checkpoint_path, \"accuracy\")\n with tf.gfile.Open(previous_accuracy_file, \"rb\") as f:\n previous_accuracy = pickle.load(f)\n\n return previous_accuracy > accuracy\n\n\ndef evaluate_and_average(session, tensor, num_estimates):\n tensor_value_estimates = [session.run(tensor) for _ in range(num_estimates)]\n average_tensor_value = sum(tensor_value_estimates) / num_estimates\n return average_tensor_value\n" ]
[ [ "tensorflow.gfile.DeleteRecursively", "tensorflow.gfile.Open", "tensorflow.gfile.Exists", "tensorflow.gfile.MkDir", "tensorflow.gfile.Rename", "tensorflow.gfile.Glob", "tensorflow.logging.info" ] ]
mahdihosseini/CONet
[ "d1dabd074c5bee080ec5d58b382243b95a66993f" ]
[ "CONet/operations.py" ]
[ "import torch\nimport torch.nn as nn\n\nOPS = {\n 'none': lambda C, stride, affine: Zero(stride),\n 'avg_pool_3x3': lambda Cin, Cout, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1,\n count_include_pad=False),\n 'max_pool_3x3': lambda Cin, Cout, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),\n 'skip_connect': lambda Cin, Cout, stride, affine: Identity() if stride == 1 else FactorizedReduce(Cin, Cout,\n affine=affine),\n 'sep_conv_3x3': lambda Cin, Cmid, Cout, stride, affine: SepConv(Cin, Cmid, Cout, 3, stride, 1, affine=affine),\n 'sep_conv_5x5': lambda Cin, Cmid, Cout, stride, affine: SepConv(Cin, Cmid, Cout, 5, stride, 2, affine=affine),\n 'sep_conv_7x7': lambda Cin, Cmid, Cout, stride, affine: SepConv(Cin, Cmid, Cout, 7, stride, 3, affine=affine),\n 'dil_conv_3x3': lambda Cin, Cout, stride, affine: DilConv(Cin, Cout, 3, stride, 2, 2, affine=affine),\n 'dil_conv_5x5': lambda Cin, Cout, stride, affine: DilConv(Cin, Cout, 5, stride, 4, 2, affine=affine),\n 'conv_7x1_1x7': lambda C, stride, affine: nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C, C, (1, 7), stride=(1, stride), padding=(0, 3), bias=False),\n nn.Conv2d(C, C, (7, 1), stride=(stride, 1), padding=(3, 0), bias=False),\n nn.BatchNorm2d(C, affine=affine)\n ),\n}\n\n\nclass ReLUConvBN(nn.Module):\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(ReLUConvBN, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass DilConv(nn.Module):\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):\n super(DilConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation,\n groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass SepConv(nn.Module):\n\n def __init__(self, C_in, C_mid, C_out, kernel_size, stride, padding, affine=True):\n super(SepConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_mid, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_mid, affine=affine),\n\n nn.ReLU(inplace=False),\n nn.Conv2d(C_mid, C_mid, kernel_size=kernel_size, stride=1, padding=padding, groups=C_mid, bias=False),\n # (CIN, 1, 5, 5)\n nn.Conv2d(C_mid, C_out, kernel_size=1, padding=0, bias=False),\n # (COUT, CIN, 1, 1)\n nn.BatchNorm2d(C_out, affine=affine),\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass Identity(nn.Module):\n\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass Zero(nn.Module):\n\n def __init__(self, stride):\n super(Zero, self).__init__()\n self.stride = stride\n\n def forward(self, x):\n if self.stride == 1:\n return x.mul(0.)\n return x[:, :, ::self.stride, ::self.stride].mul(0.)\n\n\nclass FactorizedReduce(nn.Module):\n\n def __init__(self, C_in, C_out, affine=True):\n super(FactorizedReduce, self).__init__()\n assert C_out % 2 == 0\n self.relu = nn.ReLU(inplace=False)\n self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)\n self.conv_2 = nn.Conv2d(C_in, 
C_out // 2, 1, stride=2, padding=0, bias=False)\n self.bn = nn.BatchNorm2d(C_out, affine=affine)\n\n def forward(self, x):\n x = self.relu(x)\n\n\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\n out = self.bn(out)\n return out" ]
[ [ "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
Marianna-Karavangeli/Earthquake_scraper
[ "95a6399fcd15ca99188e8f1a4b536dd16d732a4e" ]
[ "Earthquake_scraper/scraper/selectscrapeandsave.py" ]
[ "from Earthquake_scraper.visit_main_page import visit_main_page\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nimport time\nimport os\nimport pandas as pd\n\n\nclass SelectScrapeandSave:\n\n def __init__(self):\n self.driver = visit_main_page()\n\n def selection_settings(self):\n \"\"\"\n Selects the desired options step-by-step (e.g region of interest, start datetime and time zone).\n\n Returns\n -------\n Driver (remote control interface)\n \"\"\"\n options = self.driver.find_element_by_xpath(\n \"/html/body/usgs-root/usgs-header/header/usgs-panel-chooser/nav/i[3]\")\n options.click()\n\n earthquake_catalog = self.driver.find_element_by_xpath(\n \"/html/body/usgs-root/div/usgs-settings/section/usgs-earthquakes-filter/a\")\n earthquake_catalog.click()\n\n custom_selection = self.driver.find_element_by_xpath(\n \"/html/body/main/div/form/section/div[2]/section/ul[1]/li[3]/label\")\n custom_selection.click()\n\n start_datetime = self.driver.find_element_by_xpath(\n \"/html/body/main/div/form/section/div[2]/section/ul[2]/li[1]/input\")\n start_datetime.click()\n start_datetime.clear()\n start_datetime.send_keys(input(\"Datetime:\"))\n start_datetime.send_keys(Keys.RETURN)\n time.sleep(1)\n\n search = self.driver.find_element_by_xpath(\n \"/html/body/main/div/form/footer/button\")\n search.click()\n\n time.sleep(1)\n\n options = self.driver.find_element_by_xpath(\n \"/html/body/usgs-root/usgs-header/header/usgs-panel-chooser/nav/i[3]\")\n options.click()\n\n time_zone = self.driver.find_element_by_xpath(\n \"/html/body/usgs-root/div/usgs-settings/section/usgs-time-zone/mat-radio-group/mat-list/mat-list-item[2]/div/mat-radio-button\")\n time_zone.click()\n time.sleep(3)\n\n return self.driver\n\n def scrapeandsave(self):\n \"\"\"\n This function creates an empty dictionary in which the data will be stored.\n It iterates through the results of an infinitely loading page extracting the text from the elements of interest (Magnitude, Place\n Datetime and Depth).\n It created a pandas dataframe from the populated dictionary and saves it in a .csv file\n after checking if that file already exists.\n\n Returns\n -------\n Message\n \"\"\"\n data = {\"Magnitude\": [], \"Place\": [], \"Datetime\": [], \"Depth\": []}\n iter = 0\n\n # The number of iterations depends on the amount of results. 
The number 400 was chosen to accommodate the iteration through 15500 results (approximately).\n while iter < 400:\n list_eq = self.driver.find_element_by_xpath(\n '//mat-list[@class=\"mat-list mat-list-base ng-star-inserted\"]')\n earthquakes = list_eq.find_elements_by_xpath('./mat-list-item')\n for earth in earthquakes:\n data[\"Magnitude\"].append(\n earth.find_element_by_tag_name(\"span\").text)\n data[\"Place\"].append(earth.find_element_by_tag_name(\"h6\").text)\n data[\"Datetime\"].append(\n earth.find_element_by_class_name(\"time\").text)\n data[\"Depth\"].append(earth.find_element_by_xpath(\n \".//div[2]/div/aside/span\").text)\n iter += 1\n ActionChains(self.driver).move_to_element(earthquakes[-1]).perform()\n time.sleep(1)\n\n df = pd.DataFrame.from_dict(data)\n\n # Looks for the df.csv file and if it finds it, the new results are appended and if the file does not exist it is created with the scraping results.\n if os.path.isfile('df.csv'):\n # Removes duplicates bsaed on the pair of Place and Datetime.\n df = df.drop_duplicates(subset=[\"Place\", \"Datetime\"])\n df.to_csv('df.csv', mode='a', header=False)\n return f'Your file has been saved successfully!'\n else:\n df = df.drop_duplicates(subset=[\"Place\", \"Datetime\"])\n df.to_csv('df.csv', mode='a', header=True)\n return f'Your file has been saved successfully!'\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
throughput-ec/UnacquiredSites
[ "a2eb8e5116a7df701b1c84b94edfa7172bd188a9" ]
[ "src/modules/predicting/predict.py" ]
[ "import argparse\nimport os\nimport cProfile\nimport pstats\nimport io\nimport pandas as pd\nimport json\nimport utils as utils\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nimport nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.snowball import SnowballStemmer\n\nimport time\nimport pickle\n\nimport argparse\nimport sys\nimport os\n\nimport cProfile, pstats, io\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n## USAGE\n## python3 src/modules/modelling/predict.py \\\n## --input_name = 'data/sentences_nlp3522' \\\n## --bib_file = 'data/bibjson2' \\\n## --output_file='output/predictions/predicted_labels_new_data.tsv'\n\nfile = r'data/sentences_nlp3522'\nbib_file = r'data/bibjson2'\nout_file = r'output/predictions/'\nt = time.localtime()\ntimestamp = time.strftime('%b_%d_%Y_%H%M%S', t)\nout_file_name = r'predictions_new_data_'+timestamp+'.tsv'\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--input_name', type=str, default=file,\n help='Path + Name of the file as tsv.')\n parser.add_argument('--bib_file', type=str, default=bib_file,\n help='Directory where bibliography json file is.')\n parser.add_argument('--output_file', type=str, default=out_file,\n help='Directory where your output data is.')\n\n args = parser.parse_args()\n\n nlp_sentences = preprocessed_sentences_tsv(args.input_name)\n bibliography = preprocessed_bibliography(args.bib_file)\n nlp_bib = nlp_sentences.merge(bibliography, on='gddid')\n original_sentences = nlp_bib[['gddid', 'title', 'sentid','sentence']]\n y_pred, y_proba = predict(data_test = nlp_bib)\n\n predicted_label = pd.DataFrame(y_pred)\n prediction_proba = pd.DataFrame(y_proba)\n original_sentences = original_sentences.reset_index()\n\n prediction_comp = pd.merge(predicted_label, prediction_proba, left_index=True, right_index=True)\n prediction_comp = pd.merge(original_sentences, prediction_comp, left_index=True, right_index=True)\n prediction_comp = prediction_comp.reset_index(drop=True)\n prediction_comp = prediction_comp.rename(columns={'0_x':'predicted_label', '0_y':'prediction_proba'})\n prediction_comp = prediction_comp[['sentid','sentence', 'predicted_label', 'prediction_proba', 'gddid', 'title']]\n\n selection1 = prediction_comp[(prediction_comp['prediction_proba'] > 0.000) & (prediction_comp['prediction_proba'] < 0.1)]\n selection1 = selection1.sample(frac = 0.15)\n selection2 = prediction_comp[prediction_comp['prediction_proba'] >= 0.1]\n prediction_comp = pd.concat([selection1, selection2])\n prediction_comp['true_label']='unknown'\n prediction_comp['found_lat']='unknown'\n prediction_comp['latnorth']='unknown'\n prediction_comp['found_long']='unknown'\n prediction_comp['longeast'] = 'unknown'\n prediction_comp['Train/Pred']='Pred'\n\n prediction_comp = prediction_comp[['gddid', 'title', 'sentid', 'sentence', 'predicted_label', 'prediction_proba', 'true_label', 'found_lat', 'latnorth', 'found_long', 'longeast', 'Train/Pred']]\n prediction_comp.sort_values(by=['gddid', 'sentid'], inplace = True)\n\n output_file = os.path.join(args.output_file, out_file_name)\n prediction_comp.to_csv(output_file, sep='\\t', index = False)\n\n print(f\"Saving predictions: {output_file}\")\n\ndef preprocessed_bibliography(path):\n \"\"\"\n Loads and formats bibliography json file and converts to a dataframe\n\n Parameters\n ----------\n path : string\n Path where the bibliography database is stored.\n\n Returns\n -------\n bibliography: 
pd.DataFrame\n pd.DataFrame with all bibliography information\n \"\"\"\n with open(path, 'r') as f:\n bib_dict = json.load(f)\n # Normalizing data so that we have access to the 'identifier'\n [elem.update({'identifier':[{'_type':None,'_id':None}]}) for elem in bib_dict if 'identifier' not in elem.keys()]\n\n bibliography = pd.json_normalize(bib_dict,\n 'identifier',\n ['publisher', 'title',\n ['journal', 'name', 'name'],\n ['author'],\n 'year', 'number', 'volume',\n ['link'],\n '_gddid', 'type', 'pages'],\n record_prefix='_',\n errors='ignore')\n\n bibliography['link'] = bibliography['link'].astype(str)\n\n url = bibliography['link'].str.split(\", \", expand=True)\n\n bibliography['link_url'] = url[0]\n bibliography['link_type'] = url[1]\n\n bibliography['link_url'] = bibliography['link_url']\\\n .replace(r\"\\[{'url': '\", \"\", regex=True)\\\n .replace(\"'\", \"\", regex=True)\n\n bibliography['link_type'] = bibliography['link_type']\\\n .replace(\"'type': '\", \"\", regex=True)\\\n .replace(\"'}]\", \"\", regex=True)\n\n bibliography['author'] = bibliography['author'].astype(str)\n bibliography['author'] = bibliography['author']\\\n .replace(r\"\\[{'name': '\", \"\", regex=True)\\\n .replace(\"{'name': '\", \"\", regex=True)\\\n .replace(\"'},\", \";\", regex=True)\\\n .replace(\"'}]\", \"\", regex=True)\n\n bibliography = bibliography[['_type', '_id', 'publisher', 'title',\n 'journal.name.name',\n 'author',\n 'year', 'number', 'volume',\n '_gddid', 'type', 'pages',\n 'link_url', 'link_type']]\n\n bibliography = bibliography.rename(columns={'_id': 'doi', '_gddid':'gddid'})\n\n return bibliography\n\ndef preprocessed_sentences_tsv(path = file):\n nlp_sentences = pd.read_csv(path, sep='\\t', names = ['gddid', 'sentid', 'wordidx', 'words', 'part_of_speech', 'special_class',\n 'lemmas', 'word_type', 'word_modified'], usecols = ['gddid', 'sentid', 'words'])\n nlp_sentences = nlp_sentences.replace('\"', '', regex = True)\\\n .replace('\\{', '', regex = True)\\\n .replace('}', '', regex = True)\\\n .replace(',', ',', regex = True)\\\n .replace(r'\\W{4,}', '', regex=True)\\\n .replace(',,,', 'comma_sym', regex=True)\\\n .replace(',', ' ', regex=True)\\\n .replace('comma_sym', ', ', regex=True)\\\n .replace('-LRB- ', '(', regex=True)\\\n .replace('LRB', '(', regex=True)\\\n .replace(' -RRB-', r')', regex=True)\\\n .replace('RRB', r')', regex=True)\\\n .replace('-RRB', r')', regex=True)\n nlp_sentences['words']= nlp_sentences['words'].str.split(\",\")\n # Sentences - not words.\n nlp_sentences['sentence'] = nlp_sentences['words'].apply(lambda x: ','.join(map(str, x)))\n\n # REGEX Values\n nlp_sentences = utils.find_regex(nlp_sentences, find_val = 'dms_regex',\\\n search_col = 'sentence', new_col_name = 'dms_re')\n nlp_sentences = utils.find_regex(nlp_sentences, find_val = 'dd_regex',\\\n search_col = 'sentence', new_col_name = 'dd_re')\n nlp_sentences = utils.find_regex(nlp_sentences, find_val = 'digits_regex',\\\n search_col = 'sentence', new_col_name = 'digits_re')\n\n # NLP Taks\n stop = stopwords.words('english')\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n stemmer = SnowballStemmer(\"english\")\n\n nlp_sentences['nltk'] = nlp_sentences.apply(lambda row: tokenizer.tokenize(row['sentence']), axis=1)\n nlp_sentences['nltk']=nlp_sentences['nltk'].apply(lambda x: [item for item in x if item not in stop])\n nlp_sentences['nltk']=nlp_sentences['nltk'].apply(lambda x: [stemmer.stem(y) for y in x])\n nlp_sentences = nlp_sentences[['gddid', 'sentid', 'sentence', 'nltk', 'dms_re', 'dd_re', 
'digits_re']]\n\n return nlp_sentences\n\n\ndef predict(data_test):\n vec = pickle.load(open('output/count_vec_model.sav', 'rb'))\n X_test = vec.transform(data_test['sentence'].fillna(' '))\n\n loaded_model = pickle.load(open('output/NB_model.sav', 'rb'))\n y_pred = loaded_model.predict(X_test)\n y_proba = loaded_model.predict_proba(X_test)[:,1]\n return y_pred, y_proba\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.concat", "pandas.json_normalize", "pandas.DataFrame" ] ]
CQCL/aqua
[ "f6dd74434068fdba9100b60cfd8d85fdbbe52ef9" ]
[ "qiskit_aqua/algorithms/adaptive/qaoa/varform.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport numpy as np\nfrom functools import reduce\nfrom qiskit import QuantumRegister, QuantumCircuit\nfrom qiskit.quantum_info import Pauli\nfrom qiskit_aqua.operator import Operator\n\n\nclass QAOAVarForm:\n \"\"\"Global X phases and parameterized problem hamiltonian.\"\"\"\n\n def __init__(self, cost_operator, p, initial_state=None):\n self._cost_operator = cost_operator\n self._p = p\n self._initial_state = initial_state\n self.num_parameters = 2 * p\n self.parameter_bounds = [(0, np.pi)] * p + [(0, 2 * np.pi)] * p\n self.preferred_init_points = [0] * p * 2\n\n # prepare the mixer operator\n v = np.zeros(self._cost_operator.num_qubits)\n ws = np.eye(self._cost_operator.num_qubits)\n self._mixer_operator = reduce(\n lambda x, y: x + y,\n [\n Operator([[1, Pauli(v, ws[i, :])]])\n for i in range(self._cost_operator.num_qubits)\n ]\n )\n\n def construct_circuit(self, angles):\n if not len(angles) == self.num_parameters:\n raise ValueError('Incorrect number of angles: expecting {}, but {} given.'.format(\n self.num_parameters, len(angles)\n ))\n q = QuantumRegister(self._cost_operator.num_qubits, name='q')\n circuit = QuantumCircuit(q)\n if self._initial_state:\n circuit += self._initial_state.construct_circuit('circuit', q)\n else:\n circuit.u2(0, np.pi, q)\n for idx in range(self._p):\n beta, gamma = angles[idx], angles[idx + self._p]\n circuit += self._cost_operator.evolve(None, gamma, 'circuit', 1, quantum_registers=q)\n circuit += self._mixer_operator.evolve(None, beta, 'circuit', 1, quantum_registers=q)\n return circuit\n\n @property\n def setting(self):\n ret = \"Variational Form: {}\\n\".format(self.__class__.__name__)\n params = \"\"\n for key, value in self.__dict__.items():\n if key != \"_configuration\" and key[0] == \"_\":\n params += \"-- {}: {}\\n\".format(key[1:], value)\n ret += \"{}\".format(params)\n return ret\n" ]
[ [ "numpy.eye", "numpy.zeros" ] ]
edgardeng/python-data-science-days
[ "726451c827da502b585605f2ada1160817d25479" ]
[ "day13-matplotlib-introduction/index.py" ]
[ "# matplotlib introduction\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib as matplot\nfrom matplotlib.ticker import NullFormatter\n\n\ndef simple_plt():\n x = np.linspace(0, 2, 10)\n plt.plot(x, x, label='linear')\n plt.plot(x, x**2, label='quadratic')\n plt.plot(x, x**3, label='cubic')\n plt.xlabel('x label')\n plt.ylabel('y label')\n plt.title(\"Simple Plot\")\n plt.legend()\n plt.show()\n\n\ndef format_style():\n # plt.plot([1, 2, 3, 4], [1, 4, 9, 16], 'ro')\n # plt.axis([0, 6, 0, 20])\n # plt.show()\n\n t = np.arange(0., 5., 0.2)\n # red dashes, blue squares and green triangles\n plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^')\n plt.show()\n\n\ndef keyword_plot():\n data = {'a': np.arange(50),\n 'c': np.random.randint(0, 50, 50),\n 'd': np.random.randn(50)}\n data['b'] = data['a'] + 10 * np.random.randn(50)\n data['d'] = np.abs(data['d']) * 100\n\n plt.scatter('a', 'b', c='c', s='d', data=data)\n plt.xlabel('entry a')\n plt.ylabel('entry b')\n plt.show()\n\n\ndef category_value():\n names = ['group_a', 'group_b', 'group_c']\n values = [1, 10, 100]\n\n plt.figure(1, figsize=(9, 3))\n\n plt.subplot(131)\n plt.bar(names, values)\n plt.subplot(132)\n plt.scatter(names, values)\n plt.subplot(133)\n plt.plot(names, values)\n plt.suptitle('Categorical Plotting')\n plt.show()\n\n\ndef plot_text():\n mu, sigma = 100, 15\n x = mu + sigma * np.random.randn(10000)\n n, bins, patches = plt.hist(x, 50, density=1, facecolor='g', alpha=0.75)\n # plt.xlabel('Smarts')\n t = plt.xlabel('my data', fontsize=14, color='red')\n plt.ylabel('Probability')\n plt.title('Histogram of IQ')\n plt.text(60, .025, r'$\\mu=100,\\ \\sigma=15$')\n plt.axis([40, 160, 0, 0.03])\n plt.grid(True)\n plt.show()\n\n\ndef nonlinear_axes():\n # Fixing random state for reproducibility\n np.random.seed(19680801)\n y = np.random.normal(loc=0.5, scale=0.4, size=1000)\n y = y[(y > 0) & (y < 1)]\n y.sort()\n x = np.arange(len(y))\n\n # plot with various axes scales\n plt.figure(1)\n\n # linear\n plt.subplot(221)\n plt.plot(x, y)\n plt.yscale('linear')\n plt.title('linear')\n plt.grid(True)\n\n # log\n plt.subplot(222)\n plt.plot(x, y)\n plt.yscale('log')\n plt.title('log')\n plt.grid(True)\n\n # symmetric log\n plt.subplot(223)\n plt.plot(x, y - y.mean())\n plt.yscale('symlog', linthreshy=0.01)\n plt.title('symlog')\n plt.grid(True)\n\n # logit\n plt.subplot(224)\n plt.plot(x, y)\n plt.yscale('logit')\n plt.title('logit')\n plt.grid(True)\n # Format the minor tick labels of the y-axis into empty strings with\n # `NullFormatter`, to avoid cumbering the axis with too many labels.\n plt.gca().yaxis.set_minor_formatter(NullFormatter())\n # Adjust the subplot layout, because the logit one may take more space\n # than usual, due to y-tick labels like \"1 - 10^{-3}\"\n plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,\n wspace=0.35)\n plt.show()\n\n\nif __name__ == '__main__':\n print('Numpy Version:', np.__version__)\n print('Matplot Version:', matplot.__version__)\n # simple_plt()\n # format_style()\n # keyword_plot()\n # category_value()\n # plot_text()\n nonlinear_axes()\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "matplotlib.pyplot.plot", "numpy.random.randn", "numpy.random.randint", "matplotlib.pyplot.gca", "numpy.arange", "matplotlib.pyplot.subplot", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.text", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.ticker.NullFormatter", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel", "numpy.abs", "matplotlib.pyplot.scatter", "numpy.random.seed", "matplotlib.pyplot.yscale", "numpy.random.normal", "matplotlib.pyplot.bar", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel" ] ]
resibots/kaushik_2020_famle
[ "bbedd22a1eb6bba3cc0728fba5025b91186b6628" ]
[ "online_adaptation/ant_env_meta_v2.py" ]
[ "import fast_adaptation_embedding.env\nimport fast_adaptation_embedding.models.famle as nn_model\nfrom fast_adaptation_embedding.controllers.random_shooting import RS_opt\nimport torch\nimport numpy as np\nimport copy\nimport gym\nimport time\nfrom datetime import datetime\nimport pickle\nimport os\nfrom os import path\n# from pyprind import ProgBar\nfrom utils import ProgBar\nfrom gym.wrappers.monitoring.video_recorder import VideoRecorder\nimport argparse\n\n\nclass Cost(object):\n def __init__(self, model, init_state, horizon, action_dim, goal, task_likelihoods, pred_high, pred_low):\n self.__models = model\n # self.__goal = torch.FloatTensor([goal]).cuda() if self.__models[0].cuda_enabled else torch.FloatTensor([goal])\n self.__init_state = init_state\n self.__horizon = horizon\n self.__action_dim = action_dim\n self.__task_likelihoods = np.array(task_likelihoods)\n self.__n_tasks = len(task_likelihoods)\n self.__pred_high = pred_high\n self.__pred_low = pred_low\n self.__obs_dim = len(init_state)\n\n def cost_fn(self, samples):\n action_samples = torch.FloatTensor(samples).cuda(\n ) if self.__models[0].cuda_enabled else torch.FloatTensor(samples)\n init_states = torch.FloatTensor(np.repeat([self.__init_state], len(samples), axis=0)).cuda(\n ) if self.__models[0].cuda_enabled else torch.FloatTensor(np.repeat([self.__init_state], len(samples), axis=0))\n all_costs = torch.FloatTensor(np.zeros(len(samples))).cuda(\n ) if self.__models[0].cuda_enabled else torch.FloatTensor(np.zeros(len(samples)))\n\n n_batch = max(1, int(len(samples)/1024))\n per_batch = len(samples)/n_batch\n\n for i in range(n_batch):\n start_index = int(i*per_batch)\n end_index = len(samples) if i == n_batch - \\\n 1 else int(i*per_batch + per_batch)\n action_batch = action_samples[start_index:end_index]\n start_states = init_states[start_index:end_index]\n dyn_model = self.__models[np.argmax(self.__task_likelihoods)]\n for h in range(self.__horizon):\n actions = action_batch[:, h*self.__action_dim: h *\n self.__action_dim + self.__action_dim]\n model_input = torch.cat((start_states, actions), dim=1)\n diff_state = dyn_model.predict_tensor(model_input)\n start_states += diff_state\n\n for dim in range(self.__obs_dim):\n start_states[:, dim].clamp_(\n self.__pred_low[dim], self.__pred_high[dim])\n\n action_cost = torch.sum(actions * actions, dim=1) * 0.0\n x_vel_cost = -start_states[:, 13]\n survive_cost = (start_states[:, 0] < 0.26).type(\n start_states.dtype) * 2.0\n all_costs[start_index: end_index] += x_vel_cost * config[\"discount\"]**h + \\\n action_cost * config[\"discount\"]**h + \\\n survive_cost * config[\"discount\"]**h\n return all_costs.cpu().detach().numpy()\n\n\ndef train_meta(tasks_in, tasks_out, config):\n model = nn_model.Embedding_NN(dim_in=config[\"dim_in\"],\n hidden=config[\"hidden_layers\"],\n dim_out=config[\"dim_out\"],\n embedding_dim=config[\"embedding_size\"],\n num_tasks=len(tasks_in),\n CUDA=config[\"cuda\"],\n SEED=None,\n output_limit=config[\"output_limit\"],\n dropout=0.0,\n hidden_activation=config[\"hidden_activation\"])\n nn_model.train_meta(model,\n tasks_in,\n tasks_out,\n meta_iter=config[\"meta_iter\"],\n inner_iter=config[\"inner_iter\"],\n inner_step=config[\"inner_step\"],\n meta_step=config[\"meta_step\"],\n minibatch=config[\"meta_batch_size\"],\n inner_sample_size=config[\"inner_sample_size\"])\n return model\n\n\ndef train_model(model, train_in, train_out, task_id, config):\n cloned_model = copy.deepcopy(model)\n nn_model.train(cloned_model,\n train_in,\n train_out,\n 
task_id=task_id,\n inner_iter=config[\"epoch\"],\n inner_lr=config[\"learning_rate\"],\n minibatch=config[\"minibatch_size\"])\n return cloned_model\n\n\ndef process_data(data):\n '''Assuming dada: an array containing [state, action, state_transition, cost] '''\n training_in = []\n training_out = []\n for d in data:\n s = d[0]\n a = d[1]\n training_in.append(np.concatenate((s, a)))\n training_out.append(d[2])\n return np.array(training_in), np.array(training_out), np.max(training_in, axis=0), np.min(training_in, axis=0)\n\n\ndef execute_random(env, steps, init_state):\n current_state = env.reset()\n trajectory = []\n traject_cost = 0\n for i in range(steps):\n a = env.action_space.sample()\n next_state, r = 0, 0\n for k in range(1):\n next_state, r, _, _ = env.step(a)\n # env.joint_reset()\n trajectory.append([current_state.copy(), a.copy(),\n next_state-current_state, -r])\n current_state = next_state\n traject_cost += -r\n\n return np.array(trajectory), traject_cost\n\n\ndef execute(env, init_state, steps, init_mean, init_var, model, config, last_action_seq, task_likelihoods, pred_high, pred_low, recorder):\n # current_state = env.reset()\n current_state = copy.copy(env.state) if config['online'] else env.reset()\n trajectory = []\n traject_cost = 0\n sliding_mean = init_mean # np.zeros(config[\"sol_dim\"])\n\n temp_config = copy.deepcopy(config)\n temp_config[\"popsize\"] = 20000\n optimizer = None\n sol = None\n bar = ProgBar(steps, track_time=True,\n title='\\nExecuting....', bar_char='▒')\n for i in range(steps):\n cost_object = Cost(model=model, init_state=current_state, horizon=config[\"horizon\"], task_likelihoods=task_likelihoods,\n action_dim=env.action_space.shape[0], goal=config[\"goal\"], pred_high=pred_high, pred_low=pred_low)\n config[\"cost_fn\"] = cost_object.cost_fn\n optimizer = RS_opt(config)\n # sol = optimizer.obtain_solution(sliding_mean, init_var)\n sol = optimizer.obtain_solution()\n\n a = sol[0:env.action_space.shape[0]]\n next_state, r = 0, 0\n for k in range(1):\n if config[\"record_video\"]:\n recorder.capture_frame()\n next_state, r, _, _ = env.step(a)\n\n # env.joint_reset()\n trajectory.append([current_state.copy(), a.copy(),\n next_state-current_state, -r])\n current_state = next_state\n traject_cost += -r\n\n # sliding_mean = last_action_seq[i*config[\"sol_dim\"] : (i+1) * config[\"sol_dim\"]]\n # sliding_mean[0:-len(a)] = sol[len(a)::]\n # sliding_mean[-len(a)::] = sol[-len(a)::]\n bar.update(item_id=\" Step \" + str(i) + \" \")\n\n if config[\"record_video\"]:\n recorder.capture_frame()\n recorder.close()\n return trajectory, traject_cost\n\n\ndef test_model(ensemble_model, init_state, action, state_diff):\n x = np.concatenate(([init_state], [action]), axis=1)\n y = state_diff.reshape(1, -1)\n y_pred = ensemble_model.get_models()[0].predict(x)\n # print(\"True: \", y.flatten())\n # print(\"pred: \", y_pred.flatten())\n # input()\n return np.power(y-y_pred, 2).sum()\n\n\ndef extract_action_seq(data):\n actions = []\n for d in data:\n actions += d[1].tolist()\n return np.array(actions)\n\n\ndef compute_likelihood(data, models, adapt_steps, beta=1.0):\n '''\n Computes MSE loss and then softmax to have a probability\n '''\n data_size = config['adapt_steps']\n if data_size is None:\n data_size = len(data)\n lik = np.zeros(len(models))\n x, y, _, _ = process_data(data[-data_size::])\n for i, m in enumerate(models):\n y_pred = m.predict(x)\n lik[i] = np.exp(- beta * m.loss_function_numpy(y, y_pred)/len(x))\n return lik/np.sum(lik)\n\n\ndef 
sample_model_index(likelihoods):\n cum_sum = np.cumsum(likelihoods)\n num = np.random.rand()\n for i, cum_prob in enumerate(cum_sum):\n if num <= cum_prob:\n return i\n\n\ndef main(env, config, mismatch_fn):\n '''---------Prepare the directories------------------'''\n now = datetime.now()\n timestamp = now.strftime(\"%d_%m_%Y_%H_%M_%S\")\n experiment_name = timestamp + \"_\" + config[\"exp_suffix\"]\n res_dir = os.path.join(\n os.getcwd(), config[\"result_dir\"], config[\"env_name\"], experiment_name)\n try:\n i = 0\n while True:\n res_dir += \"_\" + str(i)\n i += 1\n if not os.path.isdir(res_dir):\n os.makedirs(res_dir)\n os.makedirs(res_dir+\"/videos\")\n break\n except:\n print(\"Could not make the result directory!!!\")\n\n with open(res_dir + \"/details.txt\", \"w+\") as f:\n f.write(config[\"exp_details\"])\n\n with open(res_dir + '/config.json', 'w') as fp:\n import json\n json.dump(config, fp)\n\n '''---------Prepare the test environment---------------'''\n trained_mismatches = np.load(config[\"data_dir\"] + \"/mismatches.npy\")\n n_training_tasks = len(trained_mismatches)\n try:\n s = os.environ['DISPLAY']\n print(\"Display available\")\n # env.render(mode=\"rgb_array\")\n env.render(mode=\"human\")\n env.reset()\n except:\n print(\"Display not available\")\n env.reset()\n\n print(\"\\n\\n\\n\")\n '''---------Initialize global variables------------------'''\n data = []\n models = []\n best_action_seq = np.random.rand(config[\"sol_dim\"])*2.0 - 1.0\n best_cost = 10000\n last_action_seq = None\n all_action_seq = []\n all_costs = []\n with open(res_dir + \"/costs.txt\", \"w+\") as f:\n f.write(\"\")\n\n '''--------------------Meta learn the models---------------------------'''\n meta_model = None\n if not path.exists(config[\"data_dir\"] + \"/\" + config[\"model_name\"]+\".pt\"):\n print(\"Model not found. Learning from data...\")\n meta_data = np.load(config[\"data_dir\"] + \"/trajectories.npy\")\n tasks_in, tasks_out = [], []\n for n in range(n_training_tasks):\n x, y, high, low = process_data(meta_data[n])\n tasks_in.append(x)\n tasks_out.append(y)\n print(\"task \", n, \" data: \", len(tasks_in[n]), len(tasks_out[n]))\n meta_model = train_meta(tasks_in, tasks_out, config)\n meta_model.save(config[\"data_dir\"] + \"/\" + config[\"model_name\"]+\".pt\")\n else:\n print(\"Model found. 
Loading from '.pt' file...\")\n device = torch.device(\n \"cuda\") if config[\"cuda\"] else torch.device(\"cpu\")\n meta_model = nn_model.load_model(\n config[\"data_dir\"] + \"/\" + config[\"model_name\"]+\".pt\", device)\n\n raw_models = [copy.deepcopy(meta_model) for _ in range(n_training_tasks)]\n models = [copy.deepcopy(meta_model) for _ in range(n_training_tasks)]\n for task_id, m in enumerate(raw_models):\n m.fix_task(task_id)\n\n for task_id, m in enumerate(models):\n m.fix_task(task_id)\n\n '''------------------------Test time------------------------------------'''\n\n high, low = np.ones(config[\"dim_out\"])*1000., - \\\n np.ones(config[\"dim_out\"])*1000.\n task_likelihoods = np.random.rand(n_training_tasks)\n\n for index_iter in range(config[\"iterations\"]):\n print(\"Episode: \", index_iter)\n new_mismatch = mismatch_fn(config)\n print(\"Mismatch: \", new_mismatch.tolist())\n env.set_mismatch(new_mismatch)\n recorder = VideoRecorder(\n env, res_dir + \"/videos/\" + str(index_iter) + \".mp4\") if config[\"record_video\"] else None\n trajectory, c = execute(env=env,\n init_state=config[\"init_state\"],\n model=models,\n steps=config[\"episode_length\"],\n init_mean=np.zeros(config[\"sol_dim\"]),\n init_var=0.01 * np.ones(config[\"sol_dim\"]),\n config=config,\n last_action_seq=None,\n task_likelihoods=task_likelihoods,\n pred_high=high,\n pred_low=low,\n recorder=recorder)\n\n data += trajectory\n '''-----------------Compute likelihood before relearning the models-------'''\n task_likelihoods = compute_likelihood(\n data, raw_models, config['adapt_steps'])\n print(\"\\nlikelihoods: \", task_likelihoods)\n\n x, y, high, low = process_data(data)\n\n task_index = sample_model_index(\n task_likelihoods) if config[\"sample_model\"] else np.argmax(task_likelihoods)\n print(\"\\nEstimated task-id: \", task_index)\n task_likelihoods = task_likelihoods * 0\n task_likelihoods[task_index] = 1.0\n data_size = config['adapt_steps']\n if data_size is None:\n data_size = len(x)\n print(\"Learning model with recent \", data_size, \" data\")\n models[task_index] = train_model(model=copy.deepcopy(\n raw_models[task_index]), train_in=x[-data_size::], train_out=y[-data_size::], task_id=task_index, config=config)\n\n print(\"\\nCost : \", c)\n with open(res_dir + \"/costs.txt\", \"a+\") as f:\n f.write(str(c)+\"\\n\")\n\n if c < best_cost:\n best_cost = c\n best_action_seq = []\n for d in trajectory:\n best_action_seq += d[1].tolist()\n best_action_seq = np.array(best_action_seq)\n last_action_seq = extract_action_seq(trajectory)\n\n all_action_seq.append(extract_action_seq(trajectory))\n all_costs.append(c)\n\n np.save(res_dir + \"/trajectories.npy\", data)\n print(\"\\n********************************************************\\n\")\n\n#######################################################################################################\n\n\nconfig = {\n # exp parameters:\n \"horizon\": 20, # NOTE: \"sol_dim\" must be adjusted\n \"iterations\": 1000,\n \"episode_length\": 1000,\n \"online\": False,\n \"adapt_steps\": None,\n \"init_state\": None, # Must be updated before passing config as param\n \"action_dim\": 8,\n \"goal\": None, # NOTE: Note used here.\n \"record_video\": False,\n \"online_damage_probability\": 0.0,\n \"sample_model\": False,\n\n # logging\n \"result_dir\": \"results\",\n \"data_dir\": \"data/ant_data\",\n \"model_name\": \"ant_meta_embedding_model\",\n \"env_name\": \"meta_ant\",\n \"exp_suffix\": \"experiment\",\n \"exp_details\": \"Default experiment.\",\n\n # 
Model_parameters\n \"dim_in\": 8+27,\n \"dim_out\": 27,\n \"hidden_layers\": [200, 200, 100],\n \"embedding_size\": 5,\n \"cuda\": True,\n \"output_limit\": 10.0,\n\n # Meta learning parameters\n \"meta_iter\": 20000, # 5000,\n \"meta_step\": 0.3,\n \"inner_iter\": 10, # 10,\n \"inner_step\": 0.0001,\n \"meta_batch_size\": 32,\n \"inner_sample_size\": 500,\n\n # Model learning parameters\n \"epoch\": 20,\n \"learning_rate\": 1e-4,\n \"minibatch_size\": 512,\n \"hidden_activation\": \"relu\",\n\n # Optimizer parameters\n \"max_iters\": 5,\n \"epsilon\": 0.0001,\n \"lb\": -1.,\n \"ub\": 1.,\n \"popsize\": 2000,\n \"sol_dim\": 8*20, # NOTE: Depends on Horizon\n \"num_elites\": 30,\n \"cost_fn\": None,\n \"alpha\": 0.1,\n \"discount\": 1.0\n}\n\n# optional arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--iterations\",\n help='Total episodes in episodic learning. Total MPC steps in the experiment.',\n type=int)\nparser.add_argument(\"--data_dir\",\n help='Path to load dynamics data and/or model',\n type=str)\nparser.add_argument(\"--exp_details\",\n help='Details about the experiment',\n type=str)\nparser.add_argument(\"--online\",\n action='store_true',\n help='Will not reset back to init position', )\nparser.add_argument(\"--adapt_steps\",\n help='Past steps to be used to learn a new model from the meta model',\n type=int)\nparser.add_argument(\"--control_steps\",\n help='Steps after which learn a new model => Learning frequency.',\n type=int)\nparser.add_argument(\"--rand_motor_damage\",\n action='store_true',\n help='Sample a random joint damage.')\nparser.add_argument(\"--rand_orientation_fault\",\n action='store_true',\n help='Sample a random orientation estimation fault.')\nparser.add_argument(\"--sample_model\",\n action='store_true',\n help='Sample a model (task-id) using the likelihood information. Default: Picks the most likely model.')\nparser.add_argument(\"--online_damage_probability\",\n help='Sample probabilistically random mismatch during mission. 
NOT used for episodic testing',\n default=0.0,\n type=float)\n\narguments = parser.parse_args()\nif arguments.data_dir is not None:\n config['data_dir'] = arguments.data_dir\nif arguments.iterations is not None:\n config['iterations'] = arguments.iterations\nif arguments.exp_details is not None:\n config['exp_details'] = arguments.exp_details\nif arguments.online is True:\n config['online'] = True\n if arguments.adapt_steps is not None:\n config['adapt_steps'] = arguments.adapt_steps\n if arguments.control_steps is not None:\n config['episode_length'] = arguments.control_steps\n if arguments.online_damage_probability is not None:\n config['online_damage_probability'] = arguments.online_damage_probability\n print(\"Online learning with adaptation steps: \",\n config['adapt_steps'], \" control steps: \", config['episode_length'])\nelse:\n print(\"Episodic learning with episode length: \", config['episode_length'])\n\nif arguments.rand_motor_damage is not None:\n config['rand_motor_damage'] = arguments.rand_motor_damage\nif arguments.rand_orientation_fault is not None:\n config['rand_orientation_fault'] = arguments.rand_orientation_fault\nif arguments.sample_model is not None:\n config['sample_model'] = arguments.sample_model\n\n'''----------- Environment specific setup --------------'''\n\n\ndef sample_mismatch(conf):\n '''\n If beginning of the experiment, then samples a damage.\n Else sample according to the given probability.\n '''\n if np.random.rand() < config['online_damage_probability'] or not conf.get(\"mismatch\"):\n mismatches = np.array([1., 1., 1., 1., 1., 1., 1., 1., 0.])\n # if conf['rand_motor_damage']:\n # mismatches[np.random.randint(0,8)] = 0\n # if conf['rand_orientation_fault'] is True:\n # mismatches[-1] = np.random.rand() * 2. - 1.0\n conf[\"mismatch\"] = mismatches.tolist()\n return np.array(conf[\"mismatch\"])\n\n\nenvironment = gym.make(\"AntMuJoCoEnv_fastAdapt-v0\")\nmain(env=environment, config=config, mismatch_fn=sample_mismatch)\n" ]
[ [ "numpy.min", "numpy.power", "torch.cat", "torch.sum", "numpy.cumsum", "numpy.save", "numpy.ones", "numpy.concatenate", "numpy.max", "numpy.argmax", "torch.FloatTensor", "numpy.random.rand", "torch.device", "numpy.load", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
yoelcortes/biosteam
[ "8d90788f87efb3b23428387d79097817962cad97" ]
[ "biosteam/units/heat_exchange.py" ]
[ "# -*- coding: utf-8 -*-\n# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules\n# Copyright (C) 2020-2021, Yoel Cortes-Pena <[email protected]>\n# \n# This module is under the UIUC open-source license. See \n# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt\n# for license details.\n\"\"\"\nThis module contains heat exchanger unit operations.\n\n.. contents:: :local:\n \nUnit operations\n---------------\n.. autoclass:: biosteam.units.heat_exchange.HX\n.. autoclass:: biosteam.units.heat_exchange.HXutility\n.. autoclass:: biosteam.units.heat_exchange.HXprocess \n\n\"\"\"\nfrom .. import Unit\nfrom .._graphics import utility_heat_exchanger_graphics, process_heat_exchanger_graphics\nfrom .design_tools.specification_factors import (\n shell_and_tube_material_factor_coefficients,\n compute_shell_and_tube_material_factor)\nfrom .design_tools import heat_transfer as ht\nimport numpy as np\nimport biosteam as bst\nfrom math import exp, log as ln\n\n__all__ = ('HX', 'HXutility', 'HXprocess')\n\n# Lenght factor \nx = np.array((8, 13, 16, 20)) \ny = np.array((1.25, 1.12,1.05,1))\np2 = np.polyfit(x, y, 2)\n\n# %% Purchase price\n\ndef compute_floating_head_purchase_price(A, CE):\n return exp(12.0310 - 0.8709*ln(A) + 0.09005 * ln(A)**2)*CE/567\n\ndef compute_fixed_head_purchase_price(A, CE):\n return exp(11.4185 - 0.9228*ln(A) + 0.09861 * ln(A)**2)*CE/567\n\ndef compute_u_tube_purchase_price(A, CE):\n return exp(11.5510 - 0.9186*ln(A) + 0.09790 * ln(A)**2)*CE/567\n\ndef compute_kettle_vaporizer_purchase_price(A, CE):\n return exp(12.3310 - 0.8709*ln(A) + 0.09005 * ln(A)**2)*CE/567\n\ndef compute_double_pipe_purchase_price(A, CE):\n return exp( 7.2718 + 0.16*ln(A))*CE/567\n\n# Purchase price\nCb_dict = {'Floating head': compute_floating_head_purchase_price,\n 'Fixed head': compute_fixed_head_purchase_price,\n 'U tube': compute_u_tube_purchase_price,\n 'Kettle vaporizer': compute_kettle_vaporizer_purchase_price,\n 'Double pipe': compute_double_pipe_purchase_price}\n\n# %% Classes\n\nclass HX(Unit, isabstract=True):\n \"\"\"\n Abstract class for counter current heat exchanger.\n\n **Abstract methods**\n \n get_streams()\n Should return two inlet streams and two outlet streams that exchange\n heat.\n\n \"\"\"\n line = 'Heat exchanger'\n _units = {'Area': 'ft^2',\n 'Overall heat transfer coefficient': 'kW/m^2/K',\n 'Log-mean temperature difference': 'K',\n 'Tube side pressure drop': 'psi',\n 'Shell side pressure drop': 'psi',\n 'Operating pressure': 'psi',\n 'Total tube length': 'ft'}\n _N_ins = 1\n _N_outs = 1\n _N_heat_utilities = 1\n _F_BM_default = {'Double pipe': 1.8,\n 'Floating head': 3.17,\n 'Fixed head': 3.17,\n 'U tube': 3.17,\n 'Kettle vaporizer': 3.17}\n \n @property\n def material(self):\n \"\"\"Default 'Carbon steel/carbon steel'\"\"\"\n return self.material\n @material.setter\n def material(self, material):\n try:\n self._F_Mab = shell_and_tube_material_factor_coefficients[material]\n except KeyError:\n raise AttributeError(\"material must be one of the following: \"\n f\"{', '.join(shell_and_tube_material_factor_coefficients)}\")\n self._material = material \n \n @property\n def heat_exchanger_type(self):\n \"\"\"[str] Heat exchanger type. 
Purchase cost depends on this selection.\"\"\"\n return self._heat_exchanger_type\n @heat_exchanger_type.setter\n def heat_exchanger_type(self, heat_exchanger_type):\n try:\n self._Cb_func = Cb_dict[heat_exchanger_type]\n except KeyError:\n raise AttributeError(\"heat exchange type must be one of the following: \"\n f\"{', '.join(Cb_dict)}\")\n self._heat_exchanger_type = heat_exchanger_type \n\n def reset_cache(self, isdynamic=None):\n for i in self.outs: i.reset_cache()\n\n def _assert_compatible_property_package(self):\n assert all([i.chemicals is j.chemicals for i, j in zip(self._ins, self._outs) if (i and j)]), (\n \"inlet and outlet stream chemicals are incompatible; \"\n \"try using the `thermo` keyword argument to initialize the unit operation \"\n \"with a compatible thermodynamic property package\"\n )\n\n def _design(self):\n # Get duty (kW)\n Q = abs(self.Q) / 3600\n \n if Q <= 1e-12: \n self.design_results.clear()\n return\n \n ### Use LMTD correction factor method ###\n Design = self.design_results\n \n # Get cold and hot inlet and outlet streams\n ci, hi, co, ho = ht.order_streams(*self.get_streams())\n \n # Get log mean temperature difference\n Tci = ci.T\n Thi = hi.T\n Tco = co.T\n Tho = ho.T\n LMTD = ht.compute_LMTD(Thi, Tho, Tci, Tco)\n \n # Get correction factor\n ft = self.ft\n if not ft:\n N_shells = self.N_shells\n ft = ht.compute_Fahkeri_LMTD_correction_factor(Tci, Thi, Tco, Tho, N_shells)\n \n # Get overall heat transfer coefficient\n U = self.U or ht.heuristic_overall_heat_transfer_coefficient(ci, hi, co, ho)\n dP_tube, dP_shell = ht.heuristic_tubeside_and_shellside_pressure_drops(ci, hi, co, ho)\n \n # TODO: Complete design of heat exchanger to find L\n # For now assume lenght is 20 ft\n L = 20\n \n # Design pressure\n P = max((ci.P, hi.P))\n Design['Area'] = 10.763 * ht.compute_heat_transfer_area(abs(LMTD), U, Q, ft)\n Design['Overall heat transfer coefficient'] = U\n Design['Log-mean temperature difference'] = LMTD\n Design['Fouling correction factor'] = ft\n Design['Tube side pressure drop'] = dP_tube\n Design['Shell side pressure drop'] = dP_shell\n Design['Operating pressure'] = P * 14.7/101325 # psi\n Design['Total tube length'] = L\n\n def _cost(self):\n Design = self.design_results\n if not Design: return\n \n A = Design['Area']\n L = Design['Total tube length']\n P = Design['Operating pressure']\n \n if A < 150: # Double pipe\n P = P/600\n F_p = 0.8510 + 0.1292*P + 0.0198*P**2\n # Assume outer pipe carbon steel, inner pipe stainless steel\n F_m = 2 \n A_min = 2.1718\n if A < A_min:\n F_l = A/A_min\n A = A_min\n else: \n F_l = 1\n heat_exchanger_type = 'Double pipe'\n C_b = compute_double_pipe_purchase_price(A, bst.CE)\n else: # Shell and tube\n F_m = compute_shell_and_tube_material_factor(A, *self._F_Mab)\n F_l = 1 if L > 20 else np.polyval(p2, L)\n P = P/100\n F_p = 0.9803 + 0.018*P + 0.0017*P**2\n heat_exchanger_type = self.heat_exchanger_type\n C_b = self._Cb_func(A, bst.CE)\n \n # Free on board purchase prize\n self.F_M[heat_exchanger_type] = F_m\n self.F_P[heat_exchanger_type] = F_p\n self.F_D[heat_exchanger_type] = F_l\n self.baseline_purchase_costs[heat_exchanger_type] = C_b\n\n\nclass HXutility(HX):\n \"\"\"\n Create a heat exchanger that changes temperature of the\n outlet stream using a heat utility.\n\n Parameters\n ----------\n ins : stream\n Inlet.\n outs : stream\n Outlet.\n T=None : float\n Temperature of outlet stream [K].\n V=None : float\n Vapor fraction of outlet stream.\n rigorous=False : bool\n If true, calculate vapor liquid 
equilibrium\n U=None : float, optional\n Enforced overall heat transfer coefficent [kW/m^2/K].\n heat_exchanger_type : str, optional\n Heat exchanger type. Defaults to \"Floating head\".\n N_shells : int, optional\n Number of shells. Defaults to 2.\n ft : float, optional\n User imposed correction factor.\n heat_only : bool, optional\n If True, heat exchanger can only heat.\n cool_only : bool, optional\n If True, heat exchanger can only cool.\n \n Notes\n -----\n Must specify either `T` or `V` when creating a HXutility object.\n \n Examples\n --------\n Run heat exchanger by temperature:\n \n >>> from biosteam.units import HXutility\n >>> from biosteam import Stream, settings\n >>> settings.set_thermo(['Water', 'Ethanol'], cache=True)\n >>> feed = Stream('feed', Water=200, Ethanol=200)\n >>> hx = HXutility('hx', ins=feed, outs='product', T=50+273.15,\n ... rigorous=False) # Ignore VLE\n >>> hx.simulate()\n >>> hx.show()\n HXutility: hx\n ins...\n [0] feed\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): Water 200\n Ethanol 200\n outs...\n [0] product\n phase: 'l', T: 323.15 K, P: 101325 Pa\n flow (kmol/hr): Water 200\n Ethanol 200\n >>> hx.results()\n Heat exchanger Units hx\n Low pressure steam Duty kJ/hr 1.01e+06\n Flow kmol/hr 26.1\n Cost USD/hr 6.21\n Design Area ft^2 57\n Overall heat transfer coefficient kW/m^2/K 0.5\n Log-mean temperature difference K 101\n Fouling correction factor 1\n Tube side pressure drop psi 1.5\n Shell side pressure drop psi 5\n Operating pressure psi 50\n Total tube length ft 20\n Purchase cost Double pipe USD 4.74e+03\n Total purchase cost USD 4.74e+03\n Utility cost USD/hr 6.21\n \n Run heat exchanger by vapor fraction:\n \n >>> feed = Stream('feed', Water=200, Ethanol=200)\n >>> hx = HXutility('hx', ins=feed, outs='product', V=1,\n ... rigorous=True) # Include VLE\n >>> hx.simulate()\n >>> hx.show()\n HXutility: hx\n ins...\n [0] feed\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): Water 200\n Ethanol 200\n outs...\n [0] product\n phase: 'g', T: 357.42 K, P: 101325 Pa\n flow (kmol/hr): Water 200\n Ethanol 200\n >>> hx.results()\n Heat exchanger Units hx\n Low pressure steam Duty kJ/hr 1.94e+07\n Flow kmol/hr 499\n Cost USD/hr 119\n Design Area ft^2 680\n Overall heat transfer coefficient kW/m^2/K 1\n Log-mean temperature difference K 80.8\n Fouling correction factor 1\n Tube side pressure drop psi 1.5\n Shell side pressure drop psi 1.5\n Operating pressure psi 50\n Total tube length ft 20\n Purchase cost Floating head USD 2.61e+04\n Total purchase cost USD 2.61e+04\n Utility cost USD/hr 119\n\n \"\"\"\n line = 'Heat exchanger'\n _graphics = utility_heat_exchanger_graphics\n \n def __init__(self, ID='', ins=None, outs=(), thermo=None, *,\n T=None, V=None, rigorous=False, U=None, H=None,\n heat_exchanger_type=\"Floating head\",\n material=\"Carbon steel/carbon steel\",\n N_shells=2,\n ft=None,\n heat_only=None,\n cool_only=None,\n ):\n super().__init__(ID, ins, outs, thermo)\n self.T = T #: [float] Temperature of outlet stream (K).\n self.V = V #: [float] Vapor fraction of outlet stream.\n self.H = H #: [float] Enthalpy of outlet stream.\n \n #: [bool] If true, calculate vapor liquid equilibrium\n self.rigorous = rigorous\n \n #: [float] Enforced overall heat transfer coefficent (kW/m^2/K)\n self.U = U\n \n #: [int] Number of shells for LMTD correction factor method.\n self.N_shells = N_shells\n \n #: [float] User imposed correction factor.\n self.ft = ft\n\n #: [bool] If True, heat exchanger can only heat. 
\n self.heat_only = heat_only\n \n #: [bool] If True, heat exchanger can only cool. \n self.cool_only = cool_only\n \n self.material = material\n self.heat_exchanger_type = heat_exchanger_type\n \n def _init_utils(self):\n # tuple[HeatUtility] All heat utilities associated to unit\n self._heat_utilities = tuple([bst.HeatUtility(heat_exchanger=self)\n for i in range(self._N_heat_utilities)])\n \n # [PowerUtility] Electric utility associated to unit\n self.power_utility = bst.PowerUtility()\n \n @property\n def Q(self):\n \"\"\"[float] Total heat transfered.\"\"\"\n return abs(self.heat_utilities[0].unit_duty)\n \n @property\n def heat_utilities(self):\n return self._heat_utilities\n @heat_utilities.setter\n def heat_utilities(self, heat_utilities):\n self._heat_utilities = heat_utilities = tuple(heat_utilities)\n for i in heat_utilities: i.heat_exchanger = self\n \n def simulate_as_auxiliary_exchanger(self, duty, stream):\n self.outs[0] = stream.proxy()\n self.ins[0] = stream.proxy()\n hu = self.heat_utilities[0]\n hu.heat_exchanger = None\n hu(duty, stream.T)\n super()._design()\n self._cost()\n self._load_capital_costs()\n \n def _run(self):\n feed = self.ins[0]\n outlet = self.outs[0]\n outlet.copy_flow(feed)\n outlet.P = feed.P\n T = self.T\n V = self.V\n H = self.H\n T_given = T is not None\n V_given = V is not None\n H_given = H is not None\n N_given = T_given + V_given + H_given\n if N_given == 0:\n raise RuntimeError(\"no specification available; must define at either \"\n \"temperature 'T', vapor fraction, 'V', or enthalpy 'H'\")\n if outlet.has_user_equilibrium:\n outlet.user_equilibrium(T=T, H=H, P=outlet.P, V=V)\n elif self.rigorous:\n if N_given > 1:\n raise RuntimeError(\"may only specify either temperature, 'T', \"\n \"vapor fraction 'V', or enthalpy 'H', \"\n \"in a rigorous simulation\")\n if V_given:\n if V == 0:\n outlet.phase = 'l'\n outlet.T = outlet.bubble_point_at_P().T\n elif V == 1:\n outlet.phase = 'g'\n outlet.T = outlet.dew_point_at_P().T\n elif 0 < V < 1:\n outlet.vle(V=V, P=outlet.P)\n else:\n raise RuntimeError(\"vapor fraction, 'V', must be a \"\n \"positive fraction\")\n elif T_given:\n if outlet.isempty():\n outlet.T = T\n else:\n try:\n outlet.vle(T=T, P=outlet.P)\n except RuntimeError as e:\n if len(outlet.phases) > 1: raise e\n T_bubble = outlet.bubble_point_at_P().T\n if T <= T_bubble:\n outlet.phase = 'l'\n else:\n T_dew = outlet.dew_point_at_P().T\n if T_dew >= T:\n outlet.phase = 'g'\n else:\n raise RuntimeError('outlet in vapor-liquid equilibrium, but stream is linked')\n outlet.T = T\n except ValueError as e:\n outlet.vle(T=T, P=outlet.P)\n else:\n outlet.vle(H=H, P=outlet.P)\n else:\n if T_given and H_given:\n raise RuntimeError(\"cannot specify both temperature, 'T' \"\n \"and enthalpy 'H'\")\n if T_given:\n outlet.T = T\n else:\n outlet.T = feed.T\n if V_given:\n if V == 0:\n outlet.phase = 'l'\n elif V == 1:\n outlet.phase = 'g'\n else:\n raise RuntimeError(\"vapor fraction, 'V', must be either \"\n \"0 or 1 in a non-rigorous simulation\")\n if V == 1 and feed.vapor_fraction < 1. and (outlet.T + 1e-6) < feed.T:\n raise ValueError('outlet cannot be cooler than inlet if boiling')\n if V == 0 and feed.vapor_fraction > 0. 
and outlet.T > feed.T + 1e-6:\n raise ValueError('outlet cannot be hotter than inlet if condensing')\n else:\n phase = feed.phase\n if len(phase) == 1: outlet.phase = phase\n if H_given:\n outlet.H = H\n \n if self.heat_only and outlet.H - feed.H < 0.: \n outlet.copy_like(feed)\n return\n if self.cool_only and outlet.H - feed.H > 0.: \n outlet.copy_like(feed)\n return\n\n def get_streams(self):\n \"\"\"\n Return inlet and outlet streams.\n \n Returns\n -------\n in_a : Stream\n Inlet a.\n in_b : Stream\n Inlet b.\n out_a : Stream\n Outlet a.\n out_b : Stream\n Outlet b.\n \n \"\"\"\n in_a = self.ins[0]\n out_a = self.outs[0]\n hu = self.heat_utilities[0]\n in_b = hu.inlet_utility_stream\n out_b = hu.outlet_utility_stream\n return in_a, in_b, out_a, out_b\n\n def _design(self, duty=None):\n # Set duty and run heat utility\n if duty is None:\n duty = self.H_out - self.H_in\n inlet = self.ins[0]\n outlet = self.outs[0] \n T_in = inlet.T\n T_out = outlet.T\n iscooling = duty < 0.\n if iscooling: # Assume there is a pressure drop before the heat exchanger\n if T_out > T_in: T_in = T_out\n else:\n if T_out < T_in: T_out = T_in\n self.heat_utilities[0](duty, T_in, T_out)\n super()._design()\n\n\nclass HXprocess(HX):\n \"\"\"\n Counter current heat exchanger for process fluids. Rigorously transfers\n heat until the pinch temperature or a user set temperature limit is reached.\n \n Parameters\n ----------\n ins : stream sequence\n * [0] Inlet process fluid a\n * [1] Inlet process fluid b \n outs : stream sequence\n * [0] Outlet process fluid a \n * [1] Outlet process fluid b\n U=None : float, optional\n Enforced overall heat transfer coefficent [kW/m^2/K].\n dT=5. : float\n Pinch temperature difference (i.e. dT = abs(outs[0].T - outs[1].T)).\n T_lim0 : float, optional\n Temperature limit of outlet stream at index 0.\n T_lim1 : float, optional\n Temperature limit of outlet stream at index 1.\n heat_exchanger_type : str, optional\n Heat exchanger type. 
Defaults to 'Floating head'.\n N_shells=2 : int, optional\n Number of shells.\n ft=None : float, optional\n User enforced correction factor.\n phase0=None : 'l' or 'g', optional\n User enforced phase of outlet stream at index 0.\n phase1=None : 'l' or 'g', optional\n User enforced phase of outlet stream at index 1.\n H_lim0 : float, optional\n Enthalpy limit of outlet stream at index 0.\n H_lim1 : float, optional\n Enthalpy limit of outlet stream at index 1.\n \n Examples\n --------\n Rigorous heat exchange until pinch temperature is reached:\n \n >>> from biosteam.units import HXprocess\n >>> from biosteam import Stream, settings\n >>> settings.set_thermo(['Water', 'Ethanol'])\n >>> in_a = Stream('in_a', Ethanol=50, T=351.43, phase='g')\n >>> in_b = Stream('in_b', Water=200)\n >>> hx = HXprocess('hx', ins=(in_a, in_b), outs=('out_a', 'out_b'))\n >>> hx.simulate()\n >>> hx.show()\n HXprocess: hx\n ins...\n [0] in_a\n phase: 'g', T: 351.43 K, P: 101325 Pa\n flow (kmol/hr): Ethanol 50\n [1] in_b\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): Water 200\n outs...\n [0] out_a\n phases: ('g', 'l'), T: 351.39 K, P: 101325 Pa\n flow (kmol/hr): (g) Ethanol 31.3\n (l) Ethanol 18.7\n [1] out_b\n phase: 'l', T: 346.43 K, P: 101325 Pa\n flow (kmol/hr): Water 200\n \n >>> hx.results()\n Heat exchanger Units hx\n Design Area ft^2 213\n Overall heat transfer coefficient kW/m^2/K 0.5\n Log-mean temperature difference K 20.4\n Fouling correction factor 1\n Tube side pressure drop psi 1.5\n Shell side pressure drop psi 5\n Operating pressure psi 14.7\n Total tube length ft 20\n Purchase cost Floating head USD 2.06e+04\n Total purchase cost USD 2.06e+04\n Utility cost USD/hr 0\n \n Sensible fluids case with user enfored outlet phases \n (more computationally efficient):\n \n >>> from biosteam.units import HXprocess\n >>> from biosteam import Stream, settings\n >>> settings.set_thermo(['Water', 'Ethanol'])\n >>> in_a = Stream('in_a', Water=200, T=350)\n >>> in_b = Stream('in_b', Ethanol=200)\n >>> hx = HXprocess('hx', ins=(in_a, in_b), outs=('out_a', 'out_b'),\n ... 
phase0='l', phase1='l')\n >>> hx.simulate()\n >>> hx.show()\n HXprocess: hx\n ins...\n [0] in_a\n phase: 'l', T: 350 K, P: 101325 Pa\n flow (kmol/hr): Water 200\n [1] in_b\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): Ethanol 200\n outs...\n [0] out_a\n phase: 'l', T: 303.15 K, P: 101325 Pa\n flow (kmol/hr): Water 200\n [1] out_b\n phase: 'l', T: 327.92 K, P: 101325 Pa\n flow (kmol/hr): Ethanol 200\n >>> hx.results()\n Heat exchanger Units hx\n Design Area ft^2 367\n Overall heat transfer coefficient kW/m^2/K 0.5\n Log-mean temperature difference K 11.5\n Fouling correction factor 1\n Tube side pressure drop psi 5\n Shell side pressure drop psi 5\n Operating pressure psi 14.7\n Total tube length ft 20\n Purchase cost Floating head USD 2.23e+04\n Total purchase cost USD 2.23e+04\n Utility cost USD/hr 0\n \n \"\"\"\n line = 'Heat exchanger'\n _graphics = process_heat_exchanger_graphics\n _N_heat_utilities = 0\n _N_ins = 2\n _N_outs = 2\n \n def __init__(self, ID='', ins=None, outs=(), thermo=None, *,\n U=None, dT=5., T_lim0=None, T_lim1=None,\n material=\"Carbon steel/carbon steel\",\n heat_exchanger_type=\"Floating head\",\n N_shells=2, ft=None, \n phase0=None,\n phase1=None,\n H_lim0=None,\n H_lim1=None,\n ):\n super().__init__(ID, ins, outs, thermo)\n \n #: [float] Enforced overall heat transfer coefficent (kW/m^2/K)\n self.U = U\n \n #: [float] Total heat transfered.\n self.Q = None\n \n #: Number of shells for LMTD correction factor method.\n self.N_shells = N_shells\n \n #: User imposed correction factor.\n self.ft = ft\n \n #: [float] Pinch temperature difference.\n self.dT = dT \n \n #: [float] Temperature limit of outlet stream at index 0.\n self.T_lim0 = T_lim0\n \n #: [float] Temperature limit of outlet stream at index 1.\n self.T_lim1 = T_lim1\n \n #: [float] Temperature limit of outlet stream at index 0.\n self.H_lim0 = H_lim0\n \n #: [float] Temperature limit of outlet stream at index 1.\n self.H_lim1 = H_lim1\n \n #: [str] Enforced phase of outlet at index 0\n self.phase0 = phase0\n \n #: [str] Enforced phase of outlet at index 1\n self.phase1 = phase1\n \n self.material = material\n self.heat_exchanger_type = heat_exchanger_type\n self.reset_source = True\n \n def get_streams(self):\n s_in_a, s_in_b = self.ins\n s_out_a, s_out_b = self.outs\n return s_in_a, s_in_b, s_out_a, s_out_b\n \n def _setup(self):\n super()._setup()\n if self.reset_source:\n for i in self._ins:\n if i.source: i.empty()\n \n def simulate(self):\n self._run()\n self._summary()\n \n def _run(self):\n s1_in, s2_in = self._ins\n s1_out, s2_out = self._outs\n if s1_in.isempty():\n s1_out.empty()\n s2_out.copy_like(s2_in)\n self.Q = 0.\n elif s2_in.isempty():\n s2_out.empty()\n s1_out.copy_like(s1_in)\n self.Q = 0.\n else:\n s1_out.copy_like(s1_in)\n s2_out.copy_like(s2_in)\n self._run_counter_current_heat_exchange()\n for s_out in (s1_out, s2_out):\n if isinstance(s_out, bst.MultiStream):\n phase = s_out.phase\n if len(phase) == 1: s_out.phase = phase\n \n def _run_counter_current_heat_exchange(self):\n self.Q = ht.counter_current_heat_exchange(*self._ins, *self._outs,\n self.dT, self.T_lim0, self.T_lim1,\n self.phase0, self.phase1, \n self.H_lim0, self.H_lim1)\n" ]
[ [ "numpy.polyfit", "numpy.array", "numpy.polyval" ] ]
marathomas/meerkat_umap
[ "1c1c23eba6e6219d777464f5afdb6c778198e09e" ]
[ "notebooks/run_all_datasets.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# Evaluate an embedding\n\nimport os\nimport pandas as pd\nimport sys\nimport numpy as np\nfrom pandas.core.common import flatten\nimport pickle\nfrom pathlib import Path\nimport datetime\nimport scipy\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib\nimport librosa.display\nimport random\nfrom scipy.spatial.distance import pdist, squareform\nimport umap\n\nfrom plot_functions import umap_2Dplot, mara_3Dplot, plotly_viz\nfrom preprocessing_functions import pad_spectro, calc_zscore, create_padded_data\nfrom evaluation_functions import nn,sil, plot_within_without\n\n\n# Setting project, input and output folders.\nwd = os.getcwd()\nDATA = os.path.join(os.path.sep, str(Path(wd).parents[0]), \"data\", \"processed\")\nFIGURES = os.path.join(os.path.sep, str(Path(wd).parents[0]), \"reports\", \"figures\")\n\nLOAD_EXISTING = True\nPLOTTING = True\n\nfor embedding_id in ['full', 'reduced', 'balanced', 'reducedrandom']:\n \n if embedding_id==\"reducedrandom\":\n spec_df = pd.read_pickle(os.path.join(os.path.sep, DATA, \"df_focal_reduced.pkl\"))\n else:\n spec_df = pd.read_pickle(os.path.join(os.path.sep, DATA, \"df_focal_\"+embedding_id+\".pkl\"))\n \n labels = spec_df.call_lable.values\n \n # Prepare data\n specs = spec_df.spectrograms.copy()\n specs = [calc_zscore(x) for x in specs] \n data = create_padded_data(specs)\n \n # UMAP\n #embedding_filename = os.path.join(os.path.sep, DATA,'basic_UMAP_'+embedding_id+'_default_params.csv')\n \n embeddings = {}\n \n for n_dims in [2,3]:\n dim_type = str(int(n_dims))+'D'\n embedding_filename = os.path.join(os.path.sep, DATA, 'basic_UMAP_'+dim_type+'_'+embedding_id+'_default_params.csv')\n print(embedding_filename)\n \n if (LOAD_EXISTING and os.path.isfile(embedding_filename)):\n embeddings[dim_type] = np.loadtxt(embedding_filename, delimiter=\";\")\n print(\"File already exists\")\n else:\n if embedding_id=='reducedrandom':\n distmat = squareform(pdist(data, metric='euclidean'))\n\n flattened_dists = distmat[np.triu_indices(n=distmat.shape[0], k=1)]\n random.seed(100)\n np.random.shuffle(flattened_dists)\n random_distmat = np.zeros(distmat.shape)\n random_distmat[np.triu_indices(n=distmat.shape[0], k=1)] = flattened_dists\n for i in range(random_distmat.shape[0]):\n for j in range(i,random_distmat.shape[1]):\n random_distmat[j,i] = random_distmat[i,j] \n\n reducer = umap.UMAP(n_components=n_dims, min_dist=0, metric='precomputed', random_state=2204)\n embeddings[dim_type] = reducer.fit_transform(random_distmat)\n\n else:\n reducer = umap.UMAP(n_components=3, min_dist = 0, random_state=2204)\n embeddings[dim_type] = reducer.fit_transform(data)\n\n np.savetxt(embedding_filename, embeddings[dim_type], delimiter=\";\")\n \n \n embedding = embeddings['3D']\n embedding_2D = embeddings['2D']\n # Plotting\n pal=\"Set2\"\n\n ## 2D Plots\n if PLOTTING:\n umap_2Dplot(embedding_2D[:,0], \n embedding_2D[:,1], \n labels, \n pal, \n os.path.join(os.path.sep, FIGURES, 'UMAP_2D_plot_'+embedding_id+'_nolegend.jpg'), \n showlegend=False)\n plt.close()\n\n\n ## 3D Plot\n mara_3Dplot(embedding[:,0],\n embedding[:,1],\n embedding[:,2],\n labels,\n pal,\n os.path.join(os.path.sep, FIGURES, 'UMAP_3D_plot_'+embedding_id+'_nolegend.jpg'),\n showlegend=False)\n plt.close()\n\n\n # Embedding evaluation\n\n # Evaluate the embedding based on calltype labels of nearest neighbors.\n\n nn_stats = nn(embedding, np.asarray(labels), k=5)\n print(\"Log final metric (unweighted):\",nn_stats.get_S())\n print(\"Abs final 
metric (unweighted):\",nn_stats.get_Snorm())\n \n\n if PLOTTING:\n nn_stats.plot_heat_S(outname=os.path.join(os.path.sep, FIGURES, 'heatS_UMAP_'+embedding_id+'.png'))\n nn_stats.plot_heat_Snorm(outname=os.path.join(os.path.sep, FIGURES, 'heatSnorm_UMAP_'+embedding_id+'.png'))\n nn_stats.plot_heat_fold(outname=os.path.join(os.path.sep, FIGURES, 'heatfold_UMAP_'+embedding_id+'.png'))\n\n\n ## Within vs. outside distances\n plot_within_without(embedding=embedding, labels=labels, outname=\"distanceswithinwithout_\"+embedding_id+\"_.png\")\n plt.close()\n\n\n ## Silhouette Plot\n sil_stats = sil(embedding, labels) \n print(\"SIL: \", sil_stats.get_avrg_score())\n \n if PLOTTING:\n sil_stats.plot_sil(outname=os.path.join(os.path.sep, FIGURES, 'silplot_UMAP_'+embedding_id+'.png'))\n plt.close()\n\n ## Graph from embedding evaluation\n outname = os.path.join(os.path.sep,FIGURES,'simgraph_'+embedding_id+'.png')\n nn_stats.draw_simgraph(outname)\n plt.close()" ]
[ [ "numpy.triu_indices", "numpy.asarray", "numpy.random.shuffle", "scipy.spatial.distance.pdist", "matplotlib.pyplot.close", "numpy.savetxt", "numpy.zeros", "numpy.loadtxt" ] ]
SabrinaRichter/batchglm
[ "2da429f895f7eb577a835da334f4ae146a9422ce" ]
[ "batchglm/train/tf/stats.py" ]
[ "import tensorflow as tf\n\n\ndef normalize(measure: tf.Tensor, data: tf.Tensor, name=\"normalize\") -> tf.Tensor:\n \"\"\"\n Normalize measure (e.g. `RMSD` or `MAE`) with the range of `data`\n\n :param measure: the measure which should be normalized\n :param data: Tensor representing the data by which the measure should be normalized\n :param name: name of this operation\n :return: \\frac{RMSD}{max(data) - min(data)}\n \"\"\"\n with tf.name_scope(name):\n retval = measure / (tf.maximum(data) - tf.minimum(data))\n return retval\n\n\ndef rmsd(estim: tf.Tensor, obs: tf.Tensor, axis=None, name=\"RMSD\") -> tf.Tensor:\n \"\"\"\n Calculate the root of the mean squared deviation between the estimated and the observed data\n\n :param estim: Tensor representing the estimated data\n :param obs: Tensor representing the observed data\n :param axis: axis to reduce\n :param name: name of this operation\n :return: \\sqrt{mean{(estim - obs)^2}}\n \"\"\"\n with tf.name_scope(name):\n retval = tf.sqrt(tf.reduce_mean(tf.squared_difference(estim, obs), axis=axis))\n return retval\n\n\ndef mae(estim: tf.Tensor, obs: tf.Tensor, axis=None, name=\"MAE\") -> tf.Tensor:\n \"\"\"\n Calculate the mean absolute error between the estimated weights `b` and the true `b`\n\n :param estim: Tensor representing the estimated data\n :param obs: Tensor representing the observed data\n :param axis: axis to reduce\n :param name: name of this operation\n :return: mean{|estim - obs|}\n \"\"\"\n with tf.name_scope(name):\n retval = tf.reduce_mean(tf.abs(estim - obs), axis=axis)\n return retval\n\n\ndef normalized_rmsd(estim: tf.Tensor, obs: tf.Tensor, axis=None, name=\"NRMSD\") -> tf.Tensor:\n \"\"\"\n Calculate the normalized RMSD between estimated and observed data\n\n :param estim: Tensor representing the estimated data\n :param obs: Tensor representing the observed data\n :param axis: axis to reduce\n :param name: name of this operation\n :return: \\frac{RMSD}{max(obs) - min(obs)}\n \"\"\"\n with tf.name_scope(name):\n retval = normalize(rmsd(estim, obs, axis=axis), obs)\n return retval\n\n\ndef normalized_mae(estim: tf.Tensor, obs: tf.Tensor, axis=None, name=\"NMAE\") -> tf.Tensor:\n \"\"\"\n Calculate the normalized MAE between estimated and observed data\n\n :param estim: Tensor representing the estimated data\n :param obs: Tensor representing the observed data\n :param axis: axis to reduce\n :param name: name of this operation\n :return: \\frac{MAE}{max(obs) - min(obs)}\n \"\"\"\n with tf.name_scope(name):\n retval = normalize(mae(estim, obs, axis=axis), obs)\n return retval\n\n\ndef mapd(estim: tf.Tensor, obs: tf.Tensor, axis=None, name=\"MAPD\") -> tf.Tensor:\n \"\"\"\n Calculate the mean absolute percentage deviation between the estimated and the observed data\n\n :param estim: ndarray representing the estimated data\n :param obs: ndarray representing the observed data\n :param axis: axis to reduce\n :param name: name of this operation\n :return: mean{|estim - obs| / obs}\n \"\"\"\n with tf.name_scope(name):\n retval = tf.reduce_mean(tf.abs(estim - obs) / obs, axis=axis)\n return retval\n" ]
[ [ "tensorflow.maximum", "tensorflow.minimum", "tensorflow.name_scope", "tensorflow.squared_difference", "tensorflow.abs" ] ]
showintime/MachineLearningToolkit
[ "cb265f8b0d3ca5aa16ad92cdbe74e138b5f56023" ]
[ "MyModel.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 30 18:40:01 2019\n\n@author: ZWH\n\"\"\"\n\n'''\n#LinearRegression\n\n\nimport numpy as np\nW=np.array([0.54,0.76,0.534,0.675]).reshape(4,1)\nB=1.78\ndef func(x):\n return x@W+B+np.random.random(size=(x.shape[0],1))*0.1\nx=np.random.random(size=(1000,4))\ny=func(x)\n\nfrom LinearRegression import LinearRegression\n\nlr=LinearRegression()\n\nTRAIN_NUM=1000\nEPOCHES=100\nBATCH_SIZE=10\ntemplate='Epoch:{:>4}, Train_loss:{:.6}'\nfor epoch in range(EPOCHES):\n for l in range(0,TRAIN_NUM,BATCH_SIZE):\n \n \n r=min(l+BATCH_SIZE,TRAIN_NUM)\n train_loss=lr.train(x[l:r],y[l:r])\n \n train_loss=lr.loss(lr.predict(x),y)\n print(template.format(epoch+1,train_loss))\n \n \n'''\n\n\n\n\n\n#分类\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata_path='C:/Users/ZWH/Desktop/MyNeuralNetwork/ANN/pkldata/'\ndef openpkl(data_path,name):\n import pickle\n with open(data_path+name+'.pkl','rb') as f:\n data=pickle.load(f)\n return data\ntrainx,trainy=openpkl(data_path,'train')\nvalidx,validy=openpkl(data_path,'valid')\ntestx,testy=openpkl(data_path,'test')\n\n\nfrom mnist_network import smallnetwork\n\nsn=smallnetwork()\n\ndef show(r=4,c=4):\n plt.figure(figsize=(10,10))\n for i in range(r*c):\n plt.subplot(r,c,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n index=np.random.randint(0,len(trainy))\n pre=np.argmax(sn.predict(trainx[index].reshape(1,-1)))\n lab=np.argmax(trainy[index])\n plt.imshow(trainx[index].reshape(28,-1))\n plt.xlabel('pre:{},lab:{}'.format(pre,lab))\n plt.show()\n \nkey=np.identity(10)\ntrainy=key[trainy]\nvalidy=key[validy]\ntesty=key[testy]\n\n#show()\n\ndef acc(x,y):\n predictions=sn.predict(x)\n labels=y\n pre=np.argmax(predictions,axis=1)\n lab=np.argmax(labels,axis=1)\n tem=np.mean((pre==lab)*1.0)\n\n return tem\n\n\natrain_acc=acc(trainx,trainy)\navalid_acc=acc(validx,validy)\natest_acc=acc(testx,testy)\n\nprint('train:{},valid:{},test:{}'.format(atrain_acc,avalid_acc,atest_acc))\n\n\n\n\nTRAIN_NUM=len(trainy)\nEPOCHES=1000\nBATCH_SIZE=16\ntemplate='Epoch:{:>4}, Train_loss:{:.6}'\n\n\nfor epoch in range(EPOCHES):\n for l in range(0,TRAIN_NUM,BATCH_SIZE):\n \n \n r=min(l+BATCH_SIZE,TRAIN_NUM)\n train_loss=sn.train(trainx[l:r],trainy[l:r])\n \n btrain_acc=acc(trainx,trainy)\n bvalid_acc=acc(validx,validy)\n btest_acc=acc(testx,testy)\n train_loss=sn.loss(sn.predict(trainx),trainy)\n print(template.format(epoch+1,train_loss))\n print('train:{},valid:{},test:{}'.format(btrain_acc,bvalid_acc,btest_acc))\n\n#show()\n \ndel data_path\ndel trainx,trainy,testx,testy,validx,validy\n\n\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.yticks", "numpy.argmax", "numpy.mean", "numpy.identity", "matplotlib.pyplot.subplot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
fikrirazor/Complete-Gini-Indext-Text-klasifikasi-email-spam
[ "76a0e2cde0d216eb905ca4bb2fca218b899c7b73" ]
[ "gita.py" ]
[ "'''\r\nCreated on Thu Feb 14 18:34:19 2019\r\n\r\n@Author : Fikri Rozan Imadudin\r\n'''\r\nimport numpy as np\r\nimport preprocessing\r\n\r\nclass gita(object):\r\n '''\r\n Menghitung Complete Gini Index Text A antara dua variabel kontinu\r\n\r\n Parameters\r\n ----------\r\n alpha : smoothing agar tidak terjadi 0/0 infinite\r\n X : berupa sparsing matrix term document frequency\r\n y : kelas harus berupa angka\r\n Returns\r\n -------\r\n gita : matrix kelas X banyaknya fitur\r\n \r\n References\r\n ----------\r\n [1] Park, H., Kwon, S., & Kwon, H. C. (2010, June). Complete gini-index text (git) feature-selection algorithm for text classification. \r\n In The 2nd International Conference on Software Engineering and Data Mining (pp. 366-371). IEEE.\r\n\r\n '''\r\n def __init__(self,alpha=1.0):\r\n self.alpha=alpha\r\n \r\n def fit(self, X, y):\r\n y = preprocessing.label_ke_numerik(y)\r\n # Mengambil index Ham dan Spam dari masing-masing data\r\n y_index = {}\r\n for c in np.unique(y):\r\n y_index[c] = np.where(y == c)[0]\r\n # Mencocokan Index ke data X\r\n X_index = []\r\n for i in range(len(y_index)):\r\n X_index.append(X[y_index[i]])\r\n # Menjumlahkan Tiap Fitur/Row\r\n sumrow = []\r\n for i in range(len(X_index)):\r\n sumrow.append(np.array(X_index[i].sum(axis=0)))\r\n # Menggabungkan menjadi matrix\r\n count = np.vstack(sumrow) + self.alpha \r\n # Menjumlahkan semua Fitur Spam dan Ham\r\n jumlahw = np.sum(count, axis=0)\r\n #np.seterr(divide='ignore', invalid='ignore')\r\n pcw=count/(jumlahw+count)\r\n # Gini Index Text A\r\n self.gita=(pcw)**2\r\n return self.gita\r\n\r\n" ]
[ [ "numpy.where", "numpy.sum", "numpy.vstack", "numpy.unique" ] ]
ShuhuaGao/rfBFE
[ "74100cca045fdba4479f07633e4418848ab161a9" ]
[ "src/feature_selection.py" ]
[ "import pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n\ndef random_forest_classification(x_train, y_train):\n params = {'n_estimators': [10, 20, 30, 50],\n 'max_depth': [3],\n 'min_samples_split': [2],\n }\n # k-fold cross validation based model selection\n kfold = 5\n grid_search = GridSearchCV(RandomForestClassifier(), params, cv=kfold,\n n_jobs=-1, return_train_score=True)\n grid_search.fit(x_train, y_train)\n print('best params:')\n print(grid_search.best_params_)\n print('best score:')\n print(grid_search.best_score_)\n return grid_search.best_estimator_\n\n\n\ndef obtain_feature_importances(random_forest, features):\n \"\"\"\n Get the feature importance generated by a trained random forest regressor.\n Return: a pandas.Series containing the importance for each feature\n \"\"\"\n return pd.Series(random_forest.feature_importances_, index=features).sort_values(ascending=False)" ]
[ [ "pandas.Series", "sklearn.ensemble.RandomForestClassifier" ] ]
d-sot/nwb-jupyter-widgets
[ "f9bf5c036c39f29e26b3cdb78198cccfa1b13cef" ]
[ "nwbwidgets/controllers.py" ]
[ "from ipywidgets import widgets, Layout\nimport numpy as np\n\n\ndef move_range_slider_up(slider):\n value = slider.get_interact_value()\n value_range = value[1] - value[0]\n max_val = slider.get_state()['max']\n if value[1] + value_range < max_val:\n slider.set_state({'value': (value[0] + value_range, value[1] + value_range)})\n else:\n slider.set_state({'value': (max_val - value_range, max_val)})\n\n\ndef move_int_slider_up(slider: widgets.IntSlider):\n value = slider.get_interact_value()\n max_val = slider.get_state()['max']\n if value + 1 < max_val:\n slider.value = value + 1\n\n\ndef move_int_slider_down(slider: widgets.IntSlider):\n value = slider.get_interact_value()\n min_val = slider.get_state()['min']\n if value - 1 > min_val:\n slider.value = value - 1\n\n\ndef move_range_slider_down(slider):\n value = slider.get_interact_value()\n value_range = value[1] - value[0]\n min_val = slider.get_state()['min']\n if value[0] - value_range > min_val:\n slider.set_state({'value': (value[0] - value_range, value[1] - value_range)})\n else:\n slider.set_state({'value': (min_val, min_val + value_range)})\n\n\ndef move_slider_up(slider, dur):\n value = slider.get_interact_value()\n max_val = slider.get_state()['max']\n if value + 2 * dur < max_val:\n slider.value = value + dur\n else:\n slider.value = max_val - dur\n\n\ndef move_slider_down(slider, dur):\n value = slider.get_interact_value()\n min_val = slider.get_state()['min']\n if value - dur > min_val:\n slider.value = value - dur\n else:\n slider.value = min_val\n\n\ndef float_range_controller(tmin, tmax, start_value=None):\n if start_value is None:\n start_value = [tmin, min(tmin + 50, tmax)]\n\n slider = widgets.FloatRangeSlider(\n value=start_value,\n min=tmin,\n max=tmax,\n step=0.1,\n description='time window',\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n layout=Layout(width='90%'))\n\n forward_button = widgets.Button(description='▶', layout=Layout(width='50px'))\n forward_button.on_click(lambda b: move_range_slider_up(slider))\n\n backwards_button = widgets.Button(description='◀', layout=Layout(width='50px'))\n backwards_button.on_click(lambda b: move_range_slider_down(slider))\n\n button_box = widgets.HBox(children=[backwards_button, forward_button])\n button_box.layout.align_items = 'center'\n\n controller = widgets.VBox(\n layout=Layout(width='250px'),\n children=[slider, button_box])\n\n return controller\n\n\ndef make_time_window_controller(tmin, tmax, start=0, duration=5.):\n slider = widgets.FloatSlider(\n value=start,\n min=tmin,\n max=tmax,\n step=0.1,\n description='window start (s):',\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f')\n\n duration_widget = widgets.BoundedFloatText(\n value=duration,\n min=0,\n max=tmax - tmin,\n step=0.1,\n description='duration (s):',\n )\n\n forward_button = widgets.Button(description='▶')\n forward_button.on_click(lambda b: move_slider_up(slider, duration_widget.get_interact_value()))\n\n backwards_button = widgets.Button(description='◀')\n backwards_button.on_click(lambda b: move_slider_down(slider, duration_widget.get_interact_value()))\n\n controller = widgets.VBox(\n children=[\n widgets.VBox(children=[slider, duration_widget]),\n widgets.HBox(children=[backwards_button, forward_button])])\n\n return controller\n\n\ndef int_range_controller(max, min=0, start_range=(0, 30), description='units', orientation='horizontal',\n continuous_update=False):\n\n slider = widgets.IntRangeSlider(\n 
value=start_range,\n min=min,\n max=max,\n description=description,\n continuous_update=continuous_update,\n orientation=orientation,\n readout=True,\n style={'description_width': 'initial'},\n layout=Layout(width='100%'))\n\n up_button = widgets.Button(description='▲', layout=Layout(width='100%'))\n up_button.on_click(lambda b: move_range_slider_up(slider))\n\n down_button = widgets.Button(description='▼', layout=Layout(width='100%'))\n down_button.on_click(lambda b: move_range_slider_down(slider))\n\n controller = widgets.VBox(\n layout=Layout(width='175px'),\n children=[\n slider,\n widgets.VBox(children=[up_button, down_button])])\n\n return controller\n\n\ndef int_controller(max, min=0, value=0, description='unit', orientation='horizontal', continuous_update=False):\n slider = widgets.IntSlider(\n value=value,\n min=min,\n max=max,\n description=description,\n continuous_update=continuous_update,\n orientation=orientation,\n readout=True\n )\n\n up_button = widgets.Button(description='▲', layout=Layout(width='auto'))\n up_button.on_click(lambda b: move_int_slider_up(slider))\n\n down_button = widgets.Button(description='▼', layout=Layout(width='auto'))\n down_button.on_click(lambda b: move_int_slider_down(slider))\n\n controller = widgets.VBox(\n children=[\n slider,\n widgets.VBox(children=[up_button, down_button])])\n\n return controller\n\n\ndef make_trial_event_controller(trials):\n trial_events = ['start_time']\n if not np.all(np.isnan(trials['stop_time'].data)):\n trial_events.append('stop_time')\n trial_events += [x.name for x in trials.columns if\n (('_time' in x.name) and (x.name not in ('start_time', 'stop_time')))]\n trial_event_controller = widgets.Dropdown(options=trial_events,\n value='start_time',\n description='align to: ')\n return trial_event_controller\n" ]
[ [ "numpy.isnan" ] ]
YinghuaHuang/daguan-2019
[ "b0f6b36dba6467fcb1b26c92c78c6be3bd3063b1" ]
[ "opennmt/models/sequence_tagger.py" ]
[ "\"\"\"Sequence tagger.\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom opennmt import inputters\nfrom opennmt.models.model import Model\nfrom opennmt.utils.misc import print_bytes\nfrom opennmt.utils.losses import cross_entropy_sequence_loss\n\n\nclass SequenceTagger(Model):\n \"\"\"A sequence tagger.\"\"\"\n\n def __init__(self,\n inputter,\n encoder,\n labels_vocabulary_file_key,\n tagging_scheme=None,\n crf_decoding=False,\n daisy_chain_variables=False,\n name=\"seqtagger\"):\n \"\"\"Initializes a sequence tagger.\n\n Args:\n inputter: A :class:`opennmt.inputters.inputter.Inputter` to process the\n input data.\n encoder: A :class:`opennmt.encoders.encoder.Encoder` to encode the input.\n labels_vocabulary_file_key: The data configuration key of the labels\n vocabulary file containing one label per line.\n tagging_scheme: The tagging scheme used. For supported schemes (currently\n only BIOES), additional evaluation metrics could be computed such as\n precision, recall, etc.\n crf_decoding: If ``True``, add a CRF layer after the encoder.\n daisy_chain_variables: If ``True``, copy variables in a daisy chain\n between devices for this model. Not compatible with RNN based models.\n name: The name of this model.\n \"\"\"\n super(SequenceTagger, self).__init__(\n name,\n features_inputter=inputter,\n labels_inputter=TagsInputter(labels_vocabulary_file_key),\n daisy_chain_variables=daisy_chain_variables)\n self.encoder = encoder\n self.crf_decoding = crf_decoding\n self.transition_params = None\n if tagging_scheme:\n self.tagging_scheme = tagging_scheme.lower()\n else:\n self.tagging_scheme = None\n\n def initialize(self, metadata):\n self.tagging_scheme = metadata.get(\"tagging_scheme\", self.tagging_scheme)\n super(SequenceTagger, self).initialize(metadata)\n\n def mask_dense(self, inputs, mask=None, output_dim=None):\n \"\"\"\n Args:\n inputs: [batch_size, seq_len, dims]\n mask : [batch_size, seq_len]\n output_dim: int\n Returns:\n outputs: [batch_size, seq_len, output_dim]\n \"\"\"\n mask = None\n if mask is None:\n outputs = tf.layers.dense(inputs, output_dim)\n return outputs\n input_shape = tf.shape(inputs)\n batch_size = input_shape[0]\n seq_len = input_shape[1]\n dims = inputs.get_shape()[-1]\n flat_inputs = tf.reshape(inputs, [-1, dims])\n flat_mask = tf.reshape(mask, [-1])\n # [item_numbers]\n mask_indices = tf.where(flat_mask)\n # [item_numbers, dims]\n mask_flat_inputs = tf.gather(flat_inputs, mask_indices)\n # [item_numbers, output_dim]\n mask_flat_outputs = tf.layers.dense(mask_flat_inputs, output_dim)\n output_shape = [batch_size * seq_len, output_dim]\n flat_outputs = tf.scatter_nd(mask_indices[:, None], mask_flat_outputs, output_shape)\n outputs = tf.reshape(flat_outputs, [batch_size, seq_len, output_dim])\n return outputs\n\n def _call(self, features, labels, params, mode):\n training = mode == tf.estimator.ModeKeys.TRAIN\n length = self.features_inputter.get_length(features)\n mask = features.get(\"mask\", None)\n with tf.variable_scope(\"encoder\"):\n inputs = self.features_inputter.make_inputs(features, training=training)\n encoder_outputs, _, encoder_sequence_length = self.encoder.encode(\n inputs,\n sequence_length=length,\n mode=mode)\n with tf.variable_scope(\"generator\"):\n logits = self.mask_dense(encoder_outputs, mask, self.labels_inputter.vocabulary_size)\n\n num_tags = self.labels_inputter.vocabulary_size\n if self.crf_decoding:\n self.transition_params = tf.get_variable(\"transitions\", shape=[num_tags, num_tags])\n init_transition_path = 
params.get(\"init_transition_params\")\n if init_transition_path is not None:\n init_transition_params = np.load(init_transition_path)\n mask = np.isinf(init_transition_params)\n self.transition_params = tf.where(mask,\n x=init_transition_params,\n y=self.transition_params)\n\n if mode != tf.estimator.ModeKeys.TRAIN:\n if self.crf_decoding:\n tags_id, _ = tf.contrib.crf.crf_decode(\n logits,\n self.transition_params,\n encoder_sequence_length)\n tags_id = tf.cast(tags_id, tf.int64)\n else:\n tags_prob = tf.nn.softmax(logits)\n tags_id = tf.argmax(tags_prob, axis=2)\n\n labels_vocab_rev = self.labels_inputter.vocabulary_lookup_reverse()\n\n # A tensor can not be both fed and fetched,\n # so identity a new tensor of \"length\" for export model to predict\n output_sequence_length = tf.identity(encoder_sequence_length)\n\n predictions = {\n \"length\": output_sequence_length,\n \"tags\": labels_vocab_rev.lookup(tags_id)\n }\n else:\n predictions = None\n\n return (logits, mask), predictions\n\n def compute_loss(self, outputs, labels, training=True, params=None):\n outputs, mask = outputs\n\n if params is None:\n params = {}\n\n if self.crf_decoding:\n log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(\n outputs,\n tf.cast(labels[\"tags_id\"], tf.int32),\n labels[\"length\"],\n transition_params=self.transition_params)\n loss = tf.reduce_sum(-log_likelihood)\n\n loss_normalizer = tf.cast(tf.shape(log_likelihood)[0], loss.dtype)\n return loss, loss_normalizer\n else:\n return cross_entropy_sequence_loss(\n outputs,\n labels[\"tags_id\"],\n labels[\"length\"],\n label_smoothing=params.get(\"label_smoothing\", 0.0),\n average_in_time=params.get(\"average_loss_in_time\", False),\n mask=mask,\n training=training)\n\n def compute_metrics(self, predictions, labels):\n weights = tf.sequence_mask(\n labels[\"length\"], maxlen=tf.shape(labels[\"tags\"])[1], dtype=tf.float32)\n\n eval_metric_ops = {}\n eval_metric_ops[\"accuracy\"] = tf.metrics.accuracy(\n labels[\"tags\"], predictions[\"tags\"], weights=weights)\n\n if self.tagging_scheme in (\"bioes\", \"bio\"):\n flag_fn = get_flag_bioes_tags_fn(self.tagging_scheme)\n\n gold_flags, predicted_flags = tf.py_func(\n flag_fn,\n [labels[\"tags\"], predictions[\"tags\"], labels[\"length\"]],\n [tf.bool, tf.bool],\n stateful=False)\n\n precision_metric = tf.metrics.precision(gold_flags, predicted_flags)\n recall_metric = tf.metrics.recall(gold_flags, predicted_flags)\n\n precision = precision_metric[0]\n recall = recall_metric[0]\n f1 = (2 * precision * recall) / (recall + precision)\n\n eval_metric_ops[\"precision\"] = precision_metric\n eval_metric_ops[\"recall\"] = recall_metric\n eval_metric_ops[\"f1\"] = (f1, tf.no_op())\n\n return eval_metric_ops\n\n def print_prediction(self, prediction, params=None, stream=None):\n tags = prediction[\"tags\"][:prediction[\"length\"]]\n sent = b\" \".join(tags)\n print_bytes(sent, stream=stream)\n\n\nclass TagsInputter(inputters.TextInputter):\n \"\"\"Reading space-separated tags.\"\"\"\n\n def __init__(self, vocabulary_file_key):\n super(TagsInputter, self).__init__(\n vocabulary_file_key=vocabulary_file_key, num_oov_buckets=0)\n\n def make_features(self, element=None, features=None, training=None):\n features = super(TagsInputter, self).make_features(\n element=element, features=features, training=training)\n return {\n \"length\": features[\"length\"],\n \"tags\": features[\"tokens\"],\n \"tags_id\": self.vocabulary.lookup(features[\"tokens\"])\n }\n\ndef get_flag_bioes_tags_fn(tagging_scheme):\n \"\"\"support 
bioes and bio.\"\"\"\n return lambda gold, predicted, sequence_length=None: \\\n flag_bioes_tags(gold, predicted, sequence_length, tagging_scheme)\n\ndef flag_bioes_tags(gold, predicted, sequence_length=None, tagging_scheme=\"bioes\"):\n \"\"\"Flags chunk matches for the BIOES tagging scheme.\n\n This function will produce the gold flags and the predicted flags. For each aligned\n gold flag ``g`` and predicted flag ``p``:\n\n * when ``g == p == True``, the chunk has been correctly identified (true positive).\n * when ``g == False and p == True``, the chunk has been incorrectly identified (false positive).\n * when ``g == True and p == False``, the chunk has been missed (false negative).\n * when ``g == p == False``, the chunk has been correctly ignored (true negative).\n\n Args:\n gold: The gold tags as a Numpy 2D string array.\n predicted: The predicted tags as a Numpy 2D string array.\n sequence_length: The length of each sequence as Numpy array.\n tagging_scheme: str. \"bioes\" or \"bio\".\n Returns:\n A tuple ``(gold_flags, predicted_flags)``.\n \"\"\"\n gold_flags = []\n predicted_flags = []\n\n def _add_true_positive():\n gold_flags.append(True)\n predicted_flags.append(True)\n def _add_false_positive():\n gold_flags.append(False)\n predicted_flags.append(True)\n def _add_true_negative():\n gold_flags.append(False)\n predicted_flags.append(False)\n def _add_false_negative():\n gold_flags.append(True)\n predicted_flags.append(False)\n\n def _match(ref, hyp, index, length):\n if ref[index].startswith(b\"B\"):\n match = True\n #bioes_flag_fn = lambda index: index < length and not ref[index].startswith(b\"E\")\n bio_flag_fn = \\\n lambda index: (index < length - 1 and ref[index + 1][0] in [ord(b\"I\"), ord(b\"X\"), ord(b\"E\")]) \\\n or index == length - 1\n bioes_flag_fn = bio_flag_fn\n flag_fn = None\n if tagging_scheme == \"bioes\":\n flag_fn = bioes_flag_fn\n elif tagging_scheme == \"bio\":\n flag_fn = bio_flag_fn\n while flag_fn(index):\n if ref[index] != hyp[index]:\n match = False\n index += 1\n match = match and index < length and ref[index] == hyp[index]\n return match, index\n return ref[index] == hyp[index], index\n\n for b in range(gold.shape[0]):\n length = sequence_length[b] if sequence_length is not None else gold.shape[1]\n\n # First pass to detect true positives and true/false negatives.\n index = 0\n while index < length:\n gold_tag = gold[b][index]\n match, index = _match(gold[b], predicted[b], index, length)\n if match:\n if gold_tag in [b\"O\", b\"X\"]:\n _add_true_negative()\n else:\n _add_true_positive()\n else:\n if gold_tag not in [b\"O\", b\"X\"]:\n _add_false_negative()\n index += 1\n\n # Second pass to detect false postives.\n index = 0\n while index < length:\n pred_tag = predicted[b][index]\n match, index = _match(predicted[b], gold[b], index, length)\n if not match and pred_tag not in [b\"O\", b\"X\"]:\n _add_false_positive()\n index += 1\n\n return np.array(gold_flags), np.array(predicted_flags)\n" ]
[ [ "tensorflow.get_variable", "tensorflow.metrics.accuracy", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.where", "tensorflow.contrib.crf.crf_decode", "tensorflow.py_func", "tensorflow.metrics.precision", "tensorflow.layers.dense", "tensorflow.gather", "numpy.load", "tensorflow.argmax", "tensorflow.shape", "tensorflow.identity", "tensorflow.scatter_nd", "tensorflow.no_op", "numpy.array", "tensorflow.nn.softmax", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.metrics.recall", "numpy.isinf" ] ]
kjappelbaum/MMD-critic
[ "ac8e48adc8b8eefdfb517baa92cb2e045a861747" ]
[ "mmdcritic/helper.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\nfrom scipy.spatial.distance import euclidean\nfrom six.moves import range\n\n\ndef write_outputfile(array, filename):\n \"\"\"writes array to file\"\"\"\n np.save(filename, array)\n\n\ndef heuristic_guess_gamma(X, iterations=5000):\n distances = []\n length = len(X)\n for _ in range(iterations):\n index0 = np.random.randint(0, length - 1)\n index1 = np.random.randint(0, length - 1)\n distances.append(euclidean(X[index0], X[index1]))\n\n quantile01 = np.quantile(distances, 0.1)\n quantile05 = np.quantile(distances, 0.5)\n quantile09 = np.quantile(distances, 0.9)\n\n print(\n (\n 'the 0.1, 0.5 and 0.9 quantiles are {:.4f}, {:.4f}, {:.4f}'.format(\n 1 / quantile01, 1 / quantile05, 1 / quantile09\n )\n )\n )\n" ]
[ [ "numpy.quantile", "scipy.spatial.distance.euclidean", "numpy.save", "numpy.random.randint" ] ]