repo_name | hexsha | file_path | code | apis
---|---|---|---|---
qx-teo/covidcast-indicators | [
"6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a"
] | [
"cdc_covidnet/delphi_cdc_covidnet/covidnet.py"
] | [
"\"\"\"\nGenerate COVID-NET sensors.\n\nAuthor: Eu Jing Chua\nCreated: 2020-06-12\n\"\"\"\n\nimport json\nimport logging\nimport os\nfrom typing import Tuple, List\nfrom multiprocessing import cpu_count, Pool\n\nimport requests\nimport pandas as pd\n\nfrom .api_config import APIConfig\n\nclass CovidNet:\n \"\"\"Methods for downloading and loading COVID-NET data.\"\"\"\n\n @staticmethod\n def download_mappings(\n url: str = APIConfig.INIT_URL,\n outfile: str = \"./init.json\"):\n \"\"\"\n Download the JSON file with all mappings (age, mmwr, catchments etc.) to disk.\n\n Args:\n url: The API URL to GET from\n outfile: The output JSON file to write to\n \"\"\"\n params = {\"appVersion\": \"Public\"}\n data = requests.get(url, params).json()\n with open(outfile, \"w\") as f_json:\n json.dump(data, f_json)\n\n @staticmethod\n def read_mappings(infile: str) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Read the mappings JSON file from disk to produce formatted DataFrames for relevant mappings.\n\n Args:\n infile: Mappings JSON file\n\n Returns:\n age_info: Age-related mappings\n mmwr_info: Date-related mappings\n catchment_info: Geography-related mappings\n \"\"\"\n with open(infile, \"r\") as f_json:\n data = json.load(f_json)\n\n # Network, catchment & area mappings\n catchment_info = pd.DataFrame.from_records(data[\"catchments\"])\n\n # MMWR date mappings\n mmwr_info = pd.DataFrame.from_records(data[\"mmwr\"], columns=APIConfig.MMWR_COLS)\n mmwr_info[\"weekstart\"] = pd.to_datetime(mmwr_info[\"weekstart\"])\n mmwr_info[\"weekend\"] = pd.to_datetime(mmwr_info[\"weekend\"])\n\n # Age mappings\n age_info = pd.DataFrame.from_records(data[\"ages\"], columns=APIConfig.AGE_COLS)\n\n return catchment_info, mmwr_info, age_info\n\n @staticmethod\n def download_hosp_data(\n network_id: int, catchment_id: str,\n age_groups: List[int], seasons: List[int],\n outfile: str,\n url: str = APIConfig.HOSP_URL):\n \"\"\"\n Download hospitalization data to disk for a particular network or state.\n\n Refer to catchment_info for network & catchment ID mappings\n Refer to age_info for age-group mappings\n Seasons are enumerated in original mappings JSON file\n\n Args:\n network_id: Network ID of intended network / state\n catchment_id: Catchment ID of intended network / state.\n age_groups: List of age-group IDs to request for\n seasons: List of season IDs to request for\n outfile: JSON file to write the results to\n url: The API URL to POST to for downloading hospitalization data\n \"\"\"\n download_params = {\n \"AppVersion\": \"Public\",\n \"networkid\": network_id,\n # Catchment_id are \"numbers\", but they represent it as a string\n \"catchmentid\": catchment_id,\n \"seasons\": [{\"ID\": season_id} for season_id in seasons],\n \"agegroups\": [{\"ID\": ag_id} for ag_id in age_groups]\n }\n data = requests.post(url, json=download_params).json()\n\n with open(outfile, \"w\") as f_json:\n json.dump(data, f_json)\n\n @staticmethod\n def download_all_hosp_data(\n mappings_file: str, cache_path: str, parallel: bool = False\n ) -> List[str]:\n \"\"\"\n Download hospitalization data for all states listed in the mappings JSON file to disk.\n\n Args:\n mappings_file: Mappings JSON file\n cache_path: Cache directory to write all state hosp. 
JSON files to\n parallel: Download each file in parallel\n\n Returns:\n List of all downloaded JSON filenames (including the cache_path)\n \"\"\"\n catchment_info, _, age_info = CovidNet.read_mappings(mappings_file)\n\n # By state\n states_idx = catchment_info[\"area\"] != \"Entire Network\"\n args = catchment_info.loc[states_idx, [\"networkid\", \"catchmentid\"]]\n\n # All age groups\n age_groups = list(age_info.loc[age_info[\"label\"] == \"Overall\", \"ageid\"])\n\n # Set up arguments for download, and file names for return\n state_files = []\n state_args = []\n for nid, cid in args.itertuples(index=False, name=None):\n outfile = os.path.join(cache_path, f\"networkid_{nid}_catchmentid_{cid}.json\")\n state_files.append(outfile)\n if not os.path.exists(outfile):\n args = (nid, cid, age_groups, APIConfig.SEASONS, outfile, APIConfig.HOSP_URL)\n state_args.append(args)\n\n # Download all state files\n if parallel:\n # Originally used context-manager API, but does not work well with pytest-cov\n # https://pytest-cov.readthedocs.io/en/latest/subprocess-support.html#if-you-use-multiprocessing-pool\n # However seems to still produce .coverage.<HOSTNAME>... files on python 3.8 at least\n pool = Pool(min(10, cpu_count()))\n try:\n pool.starmap(CovidNet.download_hosp_data, state_args)\n finally:\n pool.close()\n pool.join()\n else:\n for args in state_args:\n CovidNet.download_hosp_data(*args)\n logging.debug(\"Downloading for nid=%s, cid=%s\", args[0], args[1])\n\n return state_files\n\n @staticmethod\n def read_all_hosp_data(state_files: List[str]) -> pd.DataFrame:\n \"\"\"\n Read and combine hospitalization JSON files for each state into a pd.DataFrame.\n\n Args:\n state_files: List of hospitalization JSON files for each state to read from disk\n\n Returns:\n Single pd.DataFrame with all the hospitalization data combined\n \"\"\"\n dfs = []\n for state_file in state_files:\n # Read json\n with open(state_file, \"r\") as f_json:\n data = json.load(f_json)[\"datadownload\"]\n\n # Make dataframe out of json\n state_df = pd.DataFrame.from_records(data).astype(APIConfig.HOSP_DTYPES)\n dfs.append(state_df)\n\n # Combine dataframes\n return pd.concat(dfs)\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame.from_records",
"pandas.concat"
]
] |
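The `apis` cell for this row lists only the pandas entry points the file calls; a minimal, hedged sketch of how the `CovidNet` helpers above are meant to be chained is shown below. It assumes the `delphi_cdc_covidnet` package is importable and its default `APIConfig` endpoints are reachable; the cache directory and filenames are illustrative, not part of the original file.

```python
# Hedged usage sketch for the covidnet.py module above; assumes the
# delphi_cdc_covidnet package is installed and the default APIConfig
# endpoints are reachable. Paths below are illustrative.
import os

from delphi_cdc_covidnet.covidnet import CovidNet

CACHE_DIR = "./cache"                         # assumed scratch directory
MAPPINGS = os.path.join(CACHE_DIR, "init.json")

os.makedirs(CACHE_DIR, exist_ok=True)

# 1. Fetch the mappings JSON (age groups, MMWR weeks, catchments) once.
CovidNet.download_mappings(outfile=MAPPINGS)

# 2. Download per-state hospitalization JSON files, reusing cached ones.
state_files = CovidNet.download_all_hosp_data(MAPPINGS, CACHE_DIR, parallel=False)

# 3. Combine everything into a single DataFrame for downstream sensors.
hosp_df = CovidNet.read_all_hosp_data(state_files)
print(hosp_df.shape)
```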
microsoft/msrflute | [
"4eda3eaccd2c7e76b412668a9a6d7b1571209372"
] | [
"extensions/RL/RL.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport logging\nimport os\nimport json\nimport random\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom collections import OrderedDict\nfrom utils import ( make_lr_scheduler,\n print_rank,\n torch_save,\n try_except_save,\n make_optimizer,\n to_device)\n\nclass SequenceWise(nn.Module):\n def __init__(self, module):\n \"\"\"\n Collapses input of dim T*N*H to (T*N)*H, and applies to a module.\n Allows handling of variable sequence lengths and minibatch sizes.\n :param module: Module to apply input to.\n \"\"\"\n super(SequenceWise, self).__init__()\n self.module = module\n\n def forward(self, x):\n t, n = x.size(0), x.size(1)\n x = x.view(t * n, -1)\n x = x.contiguous()\n x = self.module(x)\n x = x.view(t, n, -1)\n return x\n\n def __repr__(self):\n tmpstr = self.__class__.__name__ + ' (\\n'\n tmpstr += self.module.__repr__()\n tmpstr += ')'\n return tmpstr\n\n\nclass BatchRNN(nn.Module):\n def __init__(self, input_size, hidden_size, rnn_type=nn.LSTM, bidirectional=False, batch_norm=True,dropout=0.0,multi=1):\n super(BatchRNN, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.batch_norm_activate = batch_norm\n self.bidirectional = bidirectional\n self.multi = multi\n self.dropout = dropout\n\n if self.batch_norm_activate:\n self.batch_norm = SequenceWise(nn.BatchNorm1d(input_size))\n self.rnn = rnn_type(input_size = input_size,\n hidden_size = hidden_size,\n bidirectional= bidirectional,\n bias = True,\n batch_first = True,\n dropout = self.dropout)\n self.num_directions = 2 if bidirectional else 1\n\n\n def forward(self, x):\n if x.dim()==2:\n x=x.unsqueeze(1)\n\n if self.batch_norm_activate:\n x = x.contiguous()\n x = self.batch_norm(x)\n x, _ = self.rnn(x)\n\n if self.bidirectional and self.multi<2:\n x = x.view(x.size(0), x.size(1), 2, -1).sum(2).view(x.size(0), x.size(1), -1)\n return x\n\n\nclass NeuralNetwork(nn.Module):\n def __init__(self, params, wantLSTM=False, batch_norm=False):\n super(NeuralNetwork, self).__init__()\n\n \"\"\"\n The following parameters need revisiting\n self.number_of_actions = 2\n self.gamma = 0.99\n self.final_epsilon = 0.0001\n self.initial_epsilon = 0.1\n self.number_of_iterations = 2000000\n self.replay_memory_size = 10000\n self.minibatch_size = 32\n\n optimizer = optim.Adam(model.parameters(), lr=1e-6)\n criterion = nn.MSELoss()\n\n \"\"\"\n self.wantLSTM = wantLSTM\n self.batch_norm= batch_norm\n params = [int(x) for x in params.split(',')]\n layers = []\n\n self.softmax = nn.Softmax(dim = 1)\n if self.wantLSTM:\n # Recurrent Component of the architecture\n rnns = []\n for i in range(1, len(params) - 2):\n multi = 1 if i==1 else 1\n rnn = BatchRNN(input_size = params[i-1]*multi,\n hidden_size = params[i],\n rnn_type = nn.LSTM,\n bidirectional= True,\n batch_norm = batch_norm,\n multi = 1,\n dropout = 0.0)\n rnns.append(('%d' %(i-1), rnn))\n self.rnn = nn.Sequential(OrderedDict(rnns))\n\n layers.append(nn.Linear(params[-3], params[-2], bias=True))\n layers.append(nn.ReLU(inplace=True))\n layers.append(nn.Linear(params[-2], params[-1], bias=True))\n mlp = nn.Sequential(*layers)\n self.mlp = nn.Sequential(SequenceWise(mlp),)\n\n else:\n if self.batch_norm:\n self.batch_norm = nn.BatchNorm1d(params[0])\n\n for i in range(1, len(params)-1):\n layers.append(nn.Linear(params[i-1], params[i], bias=True))\n layers.append(nn.ReLU(inplace=True))\n layers.append(nn.Linear(params[-2], params[-1], bias=True))\n self.mlp = 
nn.Sequential(*layers) \n\n\n def forward(self, x):\n if self.wantLSTM:\n x = self.rnn(x)\n\n if self.batch_norm:\n x = self.batch_norm(x)\n out = self.mlp(x)\n out = out.squeeze()\n\n return out\n\n\n\n\nclass RL:\n def __init__(self, config=None):\n\n # Finalized config-file\n self.config= config\n\n self.out_size = config[\"num_clients_per_iteration\"]\n self.wantLSTM = config['RL']['wantLSTM'] if 'wantLSTM' in config['RL'] else False\n self.replay_memory= []\n self.state_memory = []\n self.epsilon= config['RL']['initial_epsilon']\n self.step =0 \n self.runningLoss =0\n\n model_descriptor = config['RL']['model_descriptor_RL'] if 'model_descriptor_RL' in config['RL'] else 'Default'\n self.model_name = os.path.join(config['RL']['RL_path'], 'rl_{}.{}.model'.format(self.out_size, model_descriptor))\n self.stats_name = os.path.join(config['RL']['RL_path'], 'rl_{}.{}.stats'.format(self.out_size, model_descriptor))\n\n # Initialize RL model\n self.make_model()\n self.load_saved_status()\n\n # Set the RL weights\n self.rl_weights=None\n self.rl_losses=None\n\n self.criterion = nn.MSELoss()\n\n def set_losses(self, losses):\n self.rl_losses=losses\n\n def set_weights(self, weights):\n self.rl_weights = weights\n\n def forward(self, state=None):\n # epsilon greedy exploration\n\n if self.wantLSTM:\n N = len(state)\n state.resize(1, N)\n if len(self.state_memory)==0:\n self.state_memory = np.zeros((self.config['RL']['minibatch_size'], N))\n self.state_memory = np.concatenate((self.state_memory[1:], state), axis=0)\n state = self.state_memory\n\n if random.random() <= self.epsilon:\n print_rank(\"Performed random action!\")\n action= to_device(torch.rand(self.out_size))\n else:\n state = to_device(torch.from_numpy(state))\n print_rank(f'RL_state: {state.shape}')\n action= self.model(state.float())\n return action\n\n\n\n def train(self, batch=None):\n # save transition to replay memory\n self.replay_memory.append(batch)\n\n # if replay memory is full, remove the oldest transition\n if len(self.replay_memory) > self.config['RL']['max_replay_memory_size']:\n self.replay_memory.pop(0)\n\n # epsilon annealing\n self.epsilon *= self.config['RL']['epsilon_gamma'] if self.epsilon*self.config['RL']['epsilon_gamma']>self.config['RL']['final_epsilon'] else 1.0\n\n # sample random minibatch\n if self.wantLSTM:\n if len(self.replay_memory)>= self.config['RL']['minibatch_size']:\n minibatch = self.replay_memory[-self.config['RL']['minibatch_size']:]\n else:\n minibatch = self.replay_memory \n else:\n minibatch = random.sample(self.replay_memory, min(len(self.replay_memory), self.config['RL']['minibatch_size']))\n\n # unpack minibatch\n state_batch = torch.tensor(tuple(d[0] for d in minibatch)).float()\n action_batch = torch.tensor(tuple(d[1] for d in minibatch)).float()\n reward_batch = torch.tensor(tuple(d[2] for d in minibatch)).float()\n\n state_batch = to_device(state_batch)\n action_batch = to_device(action_batch)\n reward_batch = to_device(reward_batch)\n\n\n # set y_j to r_j for terminal state, otherwise to r_j + gamma*max(Q)\n y_batch = reward_batch\n\n # extract Q-value\n print_rank(f'RL state_batch: {state_batch.shape}', loglevel=logging.DEBUG)\n state_output = self.model(state_batch)\n print_rank(f'RL train shapes: {state_batch.shape} {action_batch.shape} {state_output.shape}', loglevel=logging.DEBUG)\n q_value = torch.sum(state_output * action_batch, dim=1)\n\n # reset gradient\n self.optimizer.zero_grad()\n\n # returns a new Tensor, detached from the current graph, the result will never require 
gradient\n y_batch = y_batch.detach()\n\n # calculate loss\n loss = self.criterion(q_value, y_batch)\n\n # do backward pass\n loss.backward()\n self.optimizer.step()\n\n # Tracking a running average of loss\n if self.runningLoss==0:\n self.runningLoss = loss.item()\n else:\n self.runningLoss = 0.95 * self.runningLoss + 0.05 * loss.item()\n print_rank('Running Loss for RL training process: {}'.format(self.runningLoss))\n\n # Decay learning rate\n self.lr_scheduler.step()\n\n\n def make_model(self):\n # make model\n self.model = NeuralNetwork(self.config['RL']['network_params'], \\\n self.config['RL']['wantLSTM'] if 'wantLSTM' in self.config['RL'] else False, \\\n self.config['RL']['batchNorm'] if 'batchNorm' in self.config['RL'] else False)\n print(self.model)\n model = to_device(model)\n\n # make optimizer\n self.optimizer = make_optimizer(self.config['RL'][\"optimizer_config\"], self.model)\n\n # make lr_scheduler\n self.lr_scheduler = make_lr_scheduler(\n self.config['RL']['annealing_config'],\n self.optimizer,\n num_batches=1)\n\n\n def load_saved_status(self):\n if os.path.exists(self.model_name):\n print_rank(\"Resuming from checkpoint model {}\".format(self.model_name))\n self.load()\n\n if os.path.exists(self.stats_name):\n with open(self.stats_name, 'r') as logfp: # loading the iteration no., val_loss and lr_weight\n elems = json.load(logfp)\n self.cur_iter_no= elems[\"i\"]\n self.val_loss = elems[\"val_loss\"]\n self.val_cer = elems[\"val_cer\"]\n self.runningLoss= elems[\"weight\"]\n\n\n\n def load(self):\n print_rank(\"Loading checkpoint: {}\".format(self.model_name))\n checkpoint = torch.load(self.model_name)\n\n self.model.load_state_dict(checkpoint['model_state_dict'])\n if self.optimizer is not None:\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n anl_st_dict = checkpoint.get('lr_scheduler_state_dict')\n if anl_st_dict and self.lr_scheduler is not None:\n self.lr_scheduler.load_state_dict(anl_st_dict)\n\n\n def save(self, i):\n \"\"\"\n Save a model as well as training information\n \"\"\"\n\n save_state = {\n 'model_state_dict' : self.model.state_dict(),\n 'optimizer_state_dict' : self.optimizer.state_dict() if self.optimizer is not None else None,\n 'lr_scheduler_state_dict' : self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None\n }\n\n outputdir = os.path.dirname(self.model_name)\n if os.path.exists(outputdir) is False:\n os.makedirs(outputdir, exist_ok=True)\n\n print_rank(\"Saving model to: {}\".format(self.model_name))\n try_except_save(torch_save, state_or_model=save_state,\n save_path=self.model_name)\n\n # logging the latest best values\n print_rank(f'Saving stats to {self.stats_name}')\n with open(self.stats_name, 'w') as logfp:\n json.dump({\"i\":i+1,\n \"val_loss\":float(self.rl_losses[0]),\n \"val_cer\":float(self.rl_losses[1]),\n \"weight\":float(self.runningLoss)},\n logfp)\n\n\n\n"
] | [
[
"numpy.concatenate",
"torch.nn.Linear",
"torch.rand",
"torch.nn.MSELoss",
"torch.nn.Softmax",
"numpy.zeros",
"torch.nn.Sequential",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.load",
"torch.sum"
]
] |
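A minimal, hedged sketch of driving the `NeuralNetwork` Q-network defined in this row follows. It assumes the msrflute repository root is on `sys.path` (the module imports helpers from the repo's `utils` package) and that `extensions.RL.RL` resolves as a module path; the layer sizes and batch size are illustrative.

```python
# Hedged sketch of the NeuralNetwork Q-network from extensions/RL/RL.py above.
# Assumes the msrflute repo root is on sys.path so `utils` resolves and the
# module can be imported as extensions.RL.RL; sizes are illustrative.
import torch

from extensions.RL.RL import NeuralNetwork

# "10,64,64,5": 10 input features -> two 64-unit hidden layers -> 5 actions.
q_net = NeuralNetwork("10,64,64,5", wantLSTM=False, batch_norm=False)

state = torch.rand(32, 10)      # a minibatch of 32 illustrative states
q_values = q_net(state)         # forward() squeezes the MLP output
print(q_values.shape)           # expected: torch.Size([32, 5])
```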
Spico197/REx | [
"bb3cdb845765a63e9bd18070068af52a1b2db3f3"
] | [
"rex/modules/ffn.py"
] | [
"from typing import Optional, Iterable\n\nimport torch\nimport torch.nn as nn\n\nfrom rex.modules.dropout import SharedDropout\n\n\nclass FFN(nn.Module):\n \"\"\"\n Multi-layer feed-forward neural networks\n\n Args:\n input_dim: input dimension\n output_dim: output dimension\n mid_dims: middle dimensions, if None, FFN is equals to `nn.Linear` with dropout\n dropout: dropout rate\n act_fn: activation function (module class without instantiated)\n\n Input:\n hidden: hidden states\n \"\"\"\n\n def __init__(\n self,\n input_dim: int,\n output_dim: int,\n mid_dims: Optional[Iterable[int]] = None,\n dropout: Optional[float] = 0.5,\n act_fn: Optional[nn.Module] = nn.ReLU,\n ):\n super().__init__()\n\n if mid_dims is None:\n self.ffn = nn.Sequential(\n nn.Linear(input_dim, output_dim), nn.Dropout(dropout)\n )\n else:\n mid_dims = list(mid_dims)\n mid_dims.insert(0, input_dim)\n mid_dims.append(output_dim)\n len_mid_dims = len(mid_dims)\n modules = []\n for i in range(len_mid_dims - 2):\n modules.extend(\n [\n nn.Linear(mid_dims[i], mid_dims[i + 1]),\n nn.Dropout(dropout),\n act_fn(),\n ]\n )\n modules.append(nn.Linear(mid_dims[-2], mid_dims[-1]))\n self.ffn = nn.Sequential(*modules)\n\n def forward(self, hidden):\n return self.ffn(hidden)\n\n\nclass MLP(nn.Module):\n \"\"\"Implements Multi-layer Perception.\"\"\"\n\n def __init__(\n self,\n input_size,\n output_size,\n mid_size=None,\n num_mid_layer=1,\n act_fn=torch.relu,\n dropout=0.1,\n ):\n super(MLP, self).__init__()\n\n assert num_mid_layer >= 1\n if mid_size is None:\n mid_size = input_size\n\n self.act_fn = act_fn\n self.input_fc = nn.Linear(input_size, mid_size)\n self.out_fc = nn.Linear(mid_size, output_size)\n if num_mid_layer > 1:\n self.mid_fcs = nn.ModuleList(\n nn.Linear(mid_size, mid_size) for _ in range(num_mid_layer - 1)\n )\n else:\n self.mid_fcs = []\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n x = self.dropout(self.act_fn(self.input_fc(x)))\n for mid_fc in self.mid_fcs:\n x = self.dropout(self.act_fn(mid_fc(x)))\n x = self.out_fc(x)\n return x\n\n\nclass SharedDropoutMLP(nn.Module):\n r\"\"\"\n Applies a linear transformation together with a non-linear activation to the incoming tensor:\n :math:`y = \\mathrm{Activation}(x A^T + b)`\n Args:\n n_in (~torch.Tensor):\n The size of each input feature.\n n_out (~torch.Tensor):\n The size of each output feature.\n dropout (float):\n If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.\n activation (bool):\n Whether to use activations. Default: True.\n \"\"\"\n\n def __init__(self, n_in, n_out, dropout=0, activation=True):\n super().__init__()\n\n self.n_in = n_in\n self.n_out = n_out\n self.linear = nn.Linear(n_in, n_out)\n self.activation = (\n nn.LeakyReLU(negative_slope=0.1) if activation else nn.Identity()\n )\n self.dropout = SharedDropout(p=dropout)\n\n self.reset_parameters()\n\n def __repr__(self):\n s = f\"n_in={self.n_in}, n_out={self.n_out}\"\n if self.dropout.p > 0:\n s += f\", dropout={self.dropout.p}\"\n\n return f\"{self.__class__.__name__}({s})\"\n\n def reset_parameters(self):\n nn.init.orthogonal_(self.linear.weight)\n nn.init.zeros_(self.linear.bias)\n\n def forward(self, x):\n r\"\"\"\n Args:\n x (~torch.Tensor):\n The size of each input feature is `n_in`.\n Returns:\n A tensor with the size of each output feature `n_out`.\n \"\"\"\n\n x = self.linear(x)\n x = self.activation(x)\n x = self.dropout(x)\n\n return x\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Identity",
"torch.nn.Sequential",
"torch.nn.LeakyReLU",
"torch.nn.init.orthogonal_",
"torch.nn.init.zeros_"
]
] |
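A short, hedged sketch exercising the `FFN` and `MLP` modules from this row is shown below; it assumes the REx package is installed so that `rex.modules.ffn` is importable, and the dimensions are illustrative.

```python
# Hedged sketch for rex/modules/ffn.py above; assumes the REx package is
# installed so rex.modules.ffn imports cleanly. Dimensions are illustrative.
import torch

from rex.modules.ffn import FFN, MLP

# FFN with two hidden layers: 128 -> 256 -> 64 -> 10.
ffn = FFN(input_dim=128, output_dim=10, mid_dims=[256, 64], dropout=0.1)

# MLP with one extra mid layer: 128 -> 128 -> 128 -> 10.
mlp = MLP(input_size=128, output_size=10, num_mid_layer=2, dropout=0.1)

x = torch.randn(4, 128)              # a small illustrative batch
print(ffn(x).shape, mlp(x).shape)    # expected: torch.Size([4, 10]) twice
```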
shiyeli/machine_learn | [
"3975c678d985c468deecd03560d882e9d316bb63"
] | [
"projects/nets/resnet_utils.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Contains building blocks for various versions of Residual Networks.\r\n\r\nResidual networks (ResNets) were proposed in:\r\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\r\n Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015\r\n\r\nMore variants were introduced in:\r\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\r\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016\r\n\r\nWe can obtain different ResNet variants by changing the network depth, width,\r\nand form of residual unit. This module implements the infrastructure for\r\nbuilding them. Concrete ResNet units and full ResNet networks are implemented in\r\nthe accompanying resnet_v1.py and resnet_v2.py modules.\r\n\r\nCompared to https://github.com/KaimingHe/deep-residual-networks, in the current\r\nimplementation we subsample the output activations in the last residual unit of\r\neach block, instead of subsampling the input activations in the first residual\r\nunit of each block. The two implementations give identical results but our\r\nimplementation is more memory efficient.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport tensorflow as tf\r\n\r\nslim = tf.contrib.slim\r\n\r\n\r\nclass Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):\r\n \"\"\"A named tuple describing a ResNet block.\r\n\r\n Its parts are:\r\n scope: The scope of the `Block`.\r\n unit_fn: The ResNet unit function which takes as input a `Tensor` and\r\n returns another `Tensor` with the output of the ResNet unit.\r\n args: A list of length equal to the number of units in the `Block`. 
The list\r\n contains one (depth, depth_bottleneck, stride) tuple for each unit in the\r\n block to serve as argument to unit_fn.\r\n \"\"\"\r\n\r\n\r\ndef subsample(inputs, factor, scope=None):\r\n \"\"\"Subsamples the input along the spatial dimensions.\r\n\r\n Args:\r\n inputs: A `Tensor` of size [batch, height_in, width_in, channels].\r\n factor: The subsampling factor.\r\n scope: Optional variable_scope.\r\n\r\n Returns:\r\n output: A `Tensor` of size [batch, height_out, width_out, channels] with the\r\n input, either intact (if factor == 1) or subsampled (if factor > 1).\r\n \"\"\"\r\n if factor == 1:\r\n return inputs\r\n else:\r\n return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)\r\n\r\n\r\ndef conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):\r\n \"\"\"Strided 2-D convolution with 'SAME' padding.\r\n\r\n When stride > 1, then we do explicit zero-padding, followed by conv2d with\r\n 'VALID' padding.\r\n\r\n Note that\r\n\r\n net = conv2d_same(inputs, num_outputs, 3, stride=stride)\r\n\r\n is equivalent to\r\n\r\n net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')\r\n net = subsample(net, factor=stride)\r\n\r\n whereas\r\n\r\n net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')\r\n\r\n is different when the input's height or width is even, which is why we add the\r\n current function. For more details, see ResnetUtilsTest.testConv2DSameEven().\r\n\r\n Args:\r\n inputs: A 4-D tensor of size [batch, height_in, width_in, channels].\r\n num_outputs: An integer, the number of output filters.\r\n kernel_size: An int with the kernel_size of the filters.\r\n stride: An integer, the output stride.\r\n rate: An integer, rate for atrous convolution.\r\n scope: Scope.\r\n\r\n Returns:\r\n output: A 4-D tensor of size [batch, height_out, width_out, channels] with\r\n the convolution output.\r\n \"\"\"\r\n if stride == 1:\r\n return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,\r\n padding='SAME', scope=scope)\r\n else:\r\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\r\n pad_total = kernel_size_effective - 1\r\n pad_beg = pad_total // 2\r\n pad_end = pad_total - pad_beg\r\n inputs = tf.pad(inputs,\r\n [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\r\n return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,\r\n rate=rate, padding='VALID', scope=scope)\r\n\r\n\r\[email protected]_arg_scope\r\ndef stack_blocks_dense(net, blocks, output_stride=None,\r\n outputs_collections=None):\r\n \"\"\"Stacks ResNet `Blocks` and controls output feature density.\r\n\r\n First, this function creates scopes for the ResNet in the form of\r\n 'block_name/unit_1', 'block_name/unit_2', etc.\r\n\r\n Second, this function allows the user to explicitly control the ResNet\r\n output_stride, which is the ratio of the input to output spatial resolution.\r\n This is useful for dense prediction tasks such as semantic segmentation or\r\n object detection.\r\n\r\n Most ResNets consist of 4 ResNet blocks and subsample the activations by a\r\n factor of 2 when transitioning between consecutive ResNet blocks. This results\r\n to a nominal ResNet output_stride equal to 8. 
If we set the output_stride to\r\n half the nominal network stride (e.g., output_stride=4), then we compute\r\n responses twice.\r\n\r\n Control of the output feature density is implemented by atrous convolution.\r\n\r\n Args:\r\n net: A `Tensor` of size [batch, height, width, channels].\r\n blocks: A list of length equal to the number of ResNet `Blocks`. Each\r\n element is a ResNet `Block` object describing the units in the `Block`.\r\n output_stride: If `None`, then the output will be computed at the nominal\r\n network stride. If output_stride is not `None`, it specifies the requested\r\n ratio of input to output spatial resolution, which needs to be equal to\r\n the product of unit strides from the start up to some level of the ResNet.\r\n For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,\r\n then valid values for the output_stride are 1, 2, 6, 24 or None (which\r\n is equivalent to output_stride=24).\r\n outputs_collections: Collection to add the ResNet block outputs.\r\n\r\n Returns:\r\n net: Output tensor with stride equal to the specified output_stride.\r\n\r\n Raises:\r\n ValueError: If the target output_stride is not valid.\r\n \"\"\"\r\n # The current_stride variable keeps track of the effective stride of the\r\n # activations. This allows us to invoke atrous convolution whenever applying\r\n # the next residual unit would result in the activations having stride larger\r\n # than the target output_stride.\r\n current_stride = 1\r\n\r\n # The atrous convolution rate parameter.\r\n rate = 1\r\n\r\n for block in blocks:\r\n with tf.variable_scope(block.scope, 'block', [net]) as sc:\r\n for i, unit in enumerate(block.args):\r\n if output_stride is not None and current_stride > output_stride:\r\n raise ValueError('The target output_stride cannot be reached.')\r\n\r\n with tf.variable_scope('unit_%d' % (i + 1), values=[net]):\r\n unit_depth, unit_depth_bottleneck, unit_stride = unit\r\n\r\n # If we have reached the target output_stride, then we need to employ\r\n # atrous convolution with stride=1 and multiply the atrous rate by the\r\n # current unit's stride for use in subsequent layers.\r\n if output_stride is not None and current_stride == output_stride:\r\n net = block.unit_fn(net,\r\n depth=unit_depth,\r\n depth_bottleneck=unit_depth_bottleneck,\r\n stride=1,\r\n rate=rate)\r\n rate *= unit_stride\r\n\r\n else:\r\n net = block.unit_fn(net,\r\n depth=unit_depth,\r\n depth_bottleneck=unit_depth_bottleneck,\r\n stride=unit_stride,\r\n rate=1)\r\n current_stride *= unit_stride\r\n net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)\r\n\r\n if output_stride is not None and current_stride != output_stride:\r\n raise ValueError('The target output_stride cannot be reached.')\r\n\r\n return net\r\n\r\n\r\ndef resnet_arg_scope(weight_decay=0.0001,\r\n batch_norm_decay=0.997,\r\n batch_norm_epsilon=1e-5,\r\n batch_norm_scale=True):\r\n \"\"\"Defines the default ResNet arg scope.\r\n\r\n TODO(gpapan): The batch-normalization related default values above are\r\n appropriate for use in conjunction with the reference ResNet models\r\n released at https://github.com/KaimingHe/deep-residual-networks. 
When\r\n training ResNets from scratch, they might need to be tuned.\r\n\r\n Args:\r\n weight_decay: The weight decay to use for regularizing the model.\r\n batch_norm_decay: The moving average decay when estimating layer activation\r\n statistics in batch normalization.\r\n batch_norm_epsilon: Small constant to prevent division by zero when\r\n normalizing activations by their variance in batch normalization.\r\n batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the\r\n activations in the batch normalization layer.\r\n\r\n Returns:\r\n An `arg_scope` to use for the resnet models.\r\n \"\"\"\r\n batch_norm_params = {\r\n 'decay': batch_norm_decay,\r\n 'epsilon': batch_norm_epsilon,\r\n 'scale': batch_norm_scale,\r\n 'updates_collections': tf.GraphKeys.UPDATE_OPS,\r\n }\r\n\r\n with slim.arg_scope(\r\n [slim.conv2d],\r\n weights_regularizer=slim.l2_regularizer(weight_decay),\r\n weights_initializer=slim.variance_scaling_initializer(),\r\n activation_fn=tf.nn.relu,\r\n normalizer_fn=slim.batch_norm,\r\n normalizer_params=batch_norm_params):\r\n with slim.arg_scope([slim.batch_norm], **batch_norm_params):\r\n # The following implies padding='SAME' for pool1, which makes feature\r\n # alignment easier for dense prediction tasks. This is also used in\r\n # https://github.com/facebook/fb.resnet.torch. However the accompanying\r\n # code of 'Deep Residual Learning for Image Recognition' uses\r\n # padding='VALID' for pool1. You can switch to that choice by setting\r\n # slim.arg_scope([slim.max_pool2d], padding='VALID').\r\n with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:\r\n return arg_sc\r\n"
] | [
[
"tensorflow.pad",
"tensorflow.variable_scope"
]
] |
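A hedged sketch of the `resnet_utils` building blocks in this row follows. It assumes a TensorFlow 1.x environment where `tf.contrib.slim` exists and that the file is importable as `nets.resnet_utils` (the import path is an assumption); a full ResNet would additionally pass `Block` definitions with a `unit_fn` from the accompanying `resnet_v1.py`/`resnet_v2.py` into `stack_blocks_dense`.

```python
# Hedged sketch of the resnet_utils helpers above; assumes TensorFlow 1.x
# (tf.contrib.slim) and that the file is importable as nets.resnet_utils.
import tensorflow as tf

from nets import resnet_utils

slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])

with slim.arg_scope(resnet_utils.resnet_arg_scope(weight_decay=1e-4)):
    # 'SAME'-padded strided stem convolution, as conv2d_same documents.
    net = resnet_utils.conv2d_same(inputs, 64, 7, stride=2, scope='conv1')
    # Halve the spatial resolution again without extra parameters.
    net = resnet_utils.subsample(net, factor=2, scope='pool1')

print(net.shape)   # expected: (?, 56, 56, 64)
```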
cevheri/superset | [
"34542db3b615ff556281f80410f322f41f5a97a6"
] | [
"superset/connectors/druid/models.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: skip-file\nimport json\nimport logging\nimport re\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom datetime import datetime, timedelta\nfrom distutils.version import LooseVersion\nfrom multiprocessing.pool import ThreadPool\nfrom typing import Any, cast, Dict, Iterable, List, Optional, Set, Tuple, Union\n\nimport pandas as pd\nimport sqlalchemy as sa\nfrom dateutil.parser import parse as dparse\nfrom flask import escape, Markup\nfrom flask_appbuilder import Model\nfrom flask_appbuilder.models.decorators import renders\nfrom flask_appbuilder.security.sqla.models import User\nfrom flask_babel import lazy_gettext as _\nfrom sqlalchemy import (\n Boolean,\n Column,\n DateTime,\n ForeignKey,\n Integer,\n String,\n Table,\n Text,\n UniqueConstraint,\n)\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import backref, relationship, Session\nfrom sqlalchemy.sql import expression\n\nfrom superset import conf, db, security_manager\nfrom superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric\nfrom superset.constants import NULL_STRING\nfrom superset.exceptions import SupersetException\nfrom superset.extensions import encrypted_field_factory\nfrom superset.models.core import Database\nfrom superset.models.helpers import AuditMixinNullable, ImportExportMixin, QueryResult\nfrom superset.typing import (\n AdhocMetric,\n FilterValues,\n Granularity,\n Metric,\n QueryObjectDict,\n)\nfrom superset.utils import core as utils\nfrom superset.utils.date_parser import parse_human_datetime, parse_human_timedelta\nfrom superset.utils.memoized import memoized\n\ntry:\n import requests\n from pydruid.client import PyDruid\n from pydruid.utils.aggregators import count\n from pydruid.utils.dimensions import (\n MapLookupExtraction,\n RegexExtraction,\n RegisteredLookupExtraction,\n TimeFormatExtraction,\n )\n from pydruid.utils.filters import Bound, Dimension, Filter\n from pydruid.utils.having import Aggregation, Having\n from pydruid.utils.postaggregator import (\n Const,\n Field,\n HyperUniqueCardinality,\n Postaggregator,\n Quantile,\n Quantiles,\n )\nexcept ImportError:\n pass\n\ntry:\n from superset.utils.core import DimSelector, DTTM_ALIAS, FilterOperator, flasher\nexcept ImportError:\n pass\n\nDRUID_TZ = conf.get(\"DRUID_TZ\")\nPOST_AGG_TYPE = \"postagg\"\nmetadata = Model.metadata # pylint: disable=no-member\nlogger = logging.getLogger(__name__)\n\ntry:\n # Postaggregator might not have been imported.\n class JavascriptPostAggregator(Postaggregator):\n def __init__(self, name: str, field_names: List[str], function: str) -> None:\n self.post_aggregator = {\n \"type\": \"javascript\",\n \"fieldNames\": field_names,\n \"name\": name,\n \"function\": 
function,\n }\n self.name = name\n\n class CustomPostAggregator(Postaggregator):\n \"\"\"A way to allow users to specify completely custom PostAggregators\"\"\"\n\n def __init__(self, name: str, post_aggregator: Dict[str, Any]) -> None:\n self.name = name\n self.post_aggregator = post_aggregator\n\n\nexcept NameError:\n pass\n\n# Function wrapper because bound methods cannot\n# be passed to processes\ndef _fetch_metadata_for(datasource: \"DruidDatasource\") -> Optional[Dict[str, Any]]:\n return datasource.latest_metadata()\n\n\nclass DruidCluster(Model, AuditMixinNullable, ImportExportMixin):\n\n \"\"\"ORM object referencing the Druid clusters\"\"\"\n\n __tablename__ = \"clusters\"\n type = \"druid\"\n\n id = Column(Integer, primary_key=True)\n verbose_name = Column(String(250), unique=True)\n # short unique name, used in permissions\n cluster_name = Column(String(250), unique=True, nullable=False)\n broker_host = Column(String(255))\n broker_port = Column(Integer, default=8082)\n broker_endpoint = Column(String(255), default=\"druid/v2\")\n metadata_last_refreshed = Column(DateTime)\n cache_timeout = Column(Integer)\n broker_user = Column(String(255))\n broker_pass = Column(encrypted_field_factory.create(String(255)))\n\n export_fields = [\n \"cluster_name\",\n \"broker_host\",\n \"broker_port\",\n \"broker_endpoint\",\n \"cache_timeout\",\n \"broker_user\",\n ]\n update_from_object_fields = export_fields\n export_children = [\"datasources\"]\n\n def __repr__(self) -> str:\n return self.verbose_name if self.verbose_name else self.cluster_name\n\n def __html__(self) -> str:\n return self.__repr__()\n\n @property\n def data(self) -> Dict[str, Any]:\n return {\"id\": self.id, \"name\": self.cluster_name, \"backend\": \"druid\"}\n\n @staticmethod\n def get_base_url(host: str, port: int) -> str:\n if not re.match(\"http(s)?://\", host):\n host = \"http://\" + host\n\n url = \"{0}:{1}\".format(host, port) if port else host\n return url\n\n def get_base_broker_url(self) -> str:\n base_url = self.get_base_url(self.broker_host, self.broker_port)\n return f\"{base_url}/{self.broker_endpoint}\"\n\n def get_pydruid_client(self) -> \"PyDruid\":\n cli = PyDruid(\n self.get_base_url(self.broker_host, self.broker_port), self.broker_endpoint\n )\n if self.broker_user and self.broker_pass:\n cli.set_basic_auth_credentials(self.broker_user, self.broker_pass)\n return cli\n\n def get_datasources(self) -> List[str]:\n endpoint = self.get_base_broker_url() + \"/datasources\"\n auth = requests.auth.HTTPBasicAuth(self.broker_user, self.broker_pass)\n return json.loads(requests.get(endpoint, auth=auth).text)\n\n def get_druid_version(self) -> str:\n endpoint = self.get_base_url(self.broker_host, self.broker_port) + \"/status\"\n auth = requests.auth.HTTPBasicAuth(self.broker_user, self.broker_pass)\n return json.loads(requests.get(endpoint, auth=auth).text)[\"version\"]\n\n @property # type: ignore\n @memoized\n def druid_version(self) -> str:\n return self.get_druid_version()\n\n def refresh_datasources(\n self,\n datasource_name: Optional[str] = None,\n merge_flag: bool = True,\n refresh_all: bool = True,\n ) -> None:\n \"\"\"Refresh metadata of all datasources in the cluster\n If ``datasource_name`` is specified, only that datasource is updated\n \"\"\"\n ds_list = self.get_datasources()\n denylist = conf.get(\"DRUID_DATA_SOURCE_DENYLIST\", [])\n ds_refresh: List[str] = []\n if not datasource_name:\n ds_refresh = list(filter(lambda ds: ds not in denylist, ds_list))\n elif datasource_name not in denylist and 
datasource_name in ds_list:\n ds_refresh.append(datasource_name)\n else:\n return\n self.refresh(ds_refresh, merge_flag, refresh_all)\n\n def refresh(\n self, datasource_names: List[str], merge_flag: bool, refresh_all: bool\n ) -> None:\n \"\"\"\n Fetches metadata for the specified datasources and\n merges to the Superset database\n \"\"\"\n session = db.session\n ds_list = (\n session.query(DruidDatasource)\n .filter(DruidDatasource.cluster_id == self.id)\n .filter(DruidDatasource.datasource_name.in_(datasource_names))\n )\n ds_map = {ds.name: ds for ds in ds_list}\n for ds_name in datasource_names:\n datasource = ds_map.get(ds_name, None)\n if not datasource:\n datasource = DruidDatasource(datasource_name=ds_name)\n with session.no_autoflush:\n session.add(datasource)\n flasher(_(\"Adding new datasource [{}]\").format(ds_name), \"success\")\n ds_map[ds_name] = datasource\n elif refresh_all:\n flasher(_(\"Refreshing datasource [{}]\").format(ds_name), \"info\")\n else:\n del ds_map[ds_name]\n continue\n datasource.cluster = self\n datasource.merge_flag = merge_flag\n session.flush()\n\n # Prepare multithreaded executation\n pool = ThreadPool()\n ds_refresh = list(ds_map.values())\n metadata = pool.map(_fetch_metadata_for, ds_refresh)\n pool.close()\n pool.join()\n\n for i in range(0, len(ds_refresh)):\n datasource = ds_refresh[i]\n cols = metadata[i]\n if cols:\n col_objs_list = (\n session.query(DruidColumn)\n .filter(DruidColumn.datasource_id == datasource.id)\n .filter(DruidColumn.column_name.in_(cols.keys()))\n )\n col_objs = {col.column_name: col for col in col_objs_list}\n for col in cols:\n if col == \"__time\": # skip the time column\n continue\n col_obj = col_objs.get(col)\n if not col_obj:\n col_obj = DruidColumn(\n datasource_id=datasource.id, column_name=col\n )\n with session.no_autoflush:\n session.add(col_obj)\n col_obj.type = cols[col][\"type\"]\n col_obj.datasource = datasource\n if col_obj.type == \"STRING\":\n col_obj.groupby = True\n col_obj.filterable = True\n datasource.refresh_metrics()\n session.commit()\n\n @hybrid_property\n def perm(self) -> str:\n return f\"[{self.cluster_name}].(id:{self.id})\"\n\n @perm.expression # type: ignore\n def perm(cls) -> str: # pylint: disable=no-self-argument\n return \"[\" + cls.cluster_name + \"].(id:\" + expression.cast(cls.id, String) + \")\"\n\n def get_perm(self) -> str:\n return self.perm # type: ignore\n\n @property\n def name(self) -> str:\n return self.verbose_name or self.cluster_name\n\n @property\n def unique_name(self) -> str:\n return self.verbose_name or self.cluster_name\n\n\nsa.event.listen(DruidCluster, \"after_insert\", security_manager.set_perm)\nsa.event.listen(DruidCluster, \"after_update\", security_manager.set_perm)\n\n\nclass DruidColumn(Model, BaseColumn):\n \"\"\"ORM model for storing Druid datasource column metadata\"\"\"\n\n __tablename__ = \"columns\"\n __table_args__ = (UniqueConstraint(\"column_name\", \"datasource_id\"),)\n\n datasource_id = Column(Integer, ForeignKey(\"datasources.id\"))\n # Setting enable_typechecks=False disables polymorphic inheritance.\n datasource = relationship(\n \"DruidDatasource\",\n backref=backref(\"columns\", cascade=\"all, delete-orphan\"),\n enable_typechecks=False,\n )\n dimension_spec_json = Column(Text)\n\n export_fields = [\n \"datasource_id\",\n \"column_name\",\n \"is_active\",\n \"type\",\n \"groupby\",\n \"filterable\",\n \"description\",\n \"dimension_spec_json\",\n \"verbose_name\",\n ]\n update_from_object_fields = export_fields\n export_parent = 
\"datasource\"\n\n def __repr__(self) -> str:\n return self.column_name or str(self.id)\n\n @property\n def expression(self) -> str:\n return self.dimension_spec_json\n\n @property\n def dimension_spec(self) -> Optional[Dict[str, Any]]:\n if self.dimension_spec_json:\n return json.loads(self.dimension_spec_json)\n return None\n\n def get_metrics(self) -> Dict[str, \"DruidMetric\"]:\n metrics = {\n \"count\": DruidMetric(\n metric_name=\"count\",\n verbose_name=\"COUNT(*)\",\n metric_type=\"count\",\n json=json.dumps({\"type\": \"count\", \"name\": \"count\"}),\n )\n }\n return metrics\n\n def refresh_metrics(self) -> None:\n \"\"\"Refresh metrics based on the column metadata\"\"\"\n metrics = self.get_metrics()\n dbmetrics = (\n db.session.query(DruidMetric)\n .filter(DruidMetric.datasource_id == self.datasource_id)\n .filter(DruidMetric.metric_name.in_(metrics.keys()))\n )\n dbmetrics = {metric.metric_name: metric for metric in dbmetrics}\n for metric in metrics.values():\n dbmetric = dbmetrics.get(metric.metric_name)\n if dbmetric:\n for attr in [\"json\", \"metric_type\"]:\n setattr(dbmetric, attr, getattr(metric, attr))\n else:\n with db.session.no_autoflush:\n metric.datasource_id = self.datasource_id\n db.session.add(metric)\n\n\nclass DruidMetric(Model, BaseMetric):\n\n \"\"\"ORM object referencing Druid metrics for a datasource\"\"\"\n\n __tablename__ = \"metrics\"\n __table_args__ = (UniqueConstraint(\"metric_name\", \"datasource_id\"),)\n datasource_id = Column(Integer, ForeignKey(\"datasources.id\"))\n\n # Setting enable_typechecks=False disables polymorphic inheritance.\n datasource = relationship(\n \"DruidDatasource\",\n backref=backref(\"metrics\", cascade=\"all, delete-orphan\"),\n enable_typechecks=False,\n )\n json = Column(Text, nullable=False)\n\n export_fields = [\n \"metric_name\",\n \"verbose_name\",\n \"metric_type\",\n \"datasource_id\",\n \"json\",\n \"description\",\n \"d3format\",\n \"warning_text\",\n ]\n update_from_object_fields = export_fields\n export_parent = \"datasource\"\n\n @property\n def expression(self) -> Column:\n return self.json\n\n @property\n def json_obj(self) -> Dict[str, Any]:\n try:\n obj = json.loads(self.json)\n except Exception:\n obj = {}\n return obj\n\n @property\n def perm(self) -> Optional[str]:\n return (\n (\"{parent_name}.[{obj.metric_name}](id:{obj.id})\").format(\n obj=self, parent_name=self.datasource.full_name\n )\n if self.datasource\n else None\n )\n\n def get_perm(self) -> Optional[str]:\n return self.perm\n\n\ndruiddatasource_user = Table(\n \"druiddatasource_user\",\n metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"user_id\", Integer, ForeignKey(\"ab_user.id\")),\n Column(\"datasource_id\", Integer, ForeignKey(\"datasources.id\")),\n)\n\n\nclass DruidDatasource(Model, BaseDatasource):\n\n \"\"\"ORM object referencing Druid datasources (tables)\"\"\"\n\n __tablename__ = \"datasources\"\n __table_args__ = (UniqueConstraint(\"datasource_name\", \"cluster_id\"),)\n\n type = \"druid\"\n query_language = \"json\"\n cluster_class = DruidCluster\n columns: List[DruidColumn] = []\n metrics: List[DruidMetric] = []\n metric_class = DruidMetric\n column_class = DruidColumn\n owner_class = security_manager.user_model\n\n baselink = \"druiddatasourcemodelview\"\n\n # Columns\n datasource_name = Column(String(255), nullable=False)\n is_hidden = Column(Boolean, default=False)\n filter_select_enabled = Column(Boolean, default=True) # override default\n fetch_values_from = Column(String(100))\n cluster_id = 
Column(Integer, ForeignKey(\"clusters.id\"), nullable=False)\n cluster = relationship(\n \"DruidCluster\", backref=\"datasources\", foreign_keys=[cluster_id]\n )\n owners = relationship(\n owner_class, secondary=druiddatasource_user, backref=\"druiddatasources\"\n )\n\n export_fields = [\n \"datasource_name\",\n \"is_hidden\",\n \"description\",\n \"default_endpoint\",\n \"cluster_id\",\n \"offset\",\n \"cache_timeout\",\n \"params\",\n \"filter_select_enabled\",\n ]\n update_from_object_fields = export_fields\n\n export_parent = \"cluster\"\n export_children = [\"columns\", \"metrics\"]\n\n @property\n def cluster_name(self) -> str:\n cluster = (\n self.cluster\n or db.session.query(DruidCluster).filter_by(id=self.cluster_id).one()\n )\n return cluster.cluster_name\n\n @property\n def database(self) -> DruidCluster:\n return self.cluster\n\n @property\n def connection(self) -> str:\n return str(self.database)\n\n @property\n def num_cols(self) -> List[str]:\n return [c.column_name for c in self.columns if c.is_numeric]\n\n @property\n def name(self) -> str:\n return self.datasource_name\n\n @property\n def datasource_type(self) -> str:\n return self.type\n\n @property\n def schema(self) -> Optional[str]:\n ds_name = self.datasource_name or \"\"\n name_pieces = ds_name.split(\".\")\n if len(name_pieces) > 1:\n return name_pieces[0]\n else:\n return None\n\n def get_schema_perm(self) -> Optional[str]:\n \"\"\"Returns schema permission if present, cluster one otherwise.\"\"\"\n return security_manager.get_schema_perm(self.cluster, self.schema)\n\n def get_perm(self) -> str:\n return (\"[{obj.cluster_name}].[{obj.datasource_name}]\" \"(id:{obj.id})\").format(\n obj=self\n )\n\n def update_from_object(self, obj: Dict[str, Any]) -> None:\n raise NotImplementedError()\n\n @property\n def link(self) -> Markup:\n name = escape(self.datasource_name)\n return Markup(f'<a href=\"{self.url}\">{name}</a>')\n\n @property\n def full_name(self) -> str:\n return utils.get_datasource_full_name(self.cluster_name, self.datasource_name)\n\n @property\n def time_column_grains(self) -> Dict[str, List[str]]:\n return {\n \"time_columns\": [\n \"all\",\n \"5 seconds\",\n \"30 seconds\",\n \"1 minute\",\n \"5 minutes\",\n \"30 minutes\",\n \"1 hour\",\n \"6 hour\",\n \"1 day\",\n \"7 days\",\n \"week\",\n \"week_starting_sunday\",\n \"week_ending_saturday\",\n \"month\",\n \"quarter\",\n \"year\",\n ],\n \"time_grains\": [\"now\"],\n }\n\n def __repr__(self) -> str:\n return self.datasource_name\n\n @renders(\"datasource_name\")\n def datasource_link(self) -> str:\n url = f\"/superset/explore/{self.type}/{self.id}/\"\n name = escape(self.datasource_name)\n return Markup(f'<a href=\"{url}\">{name}</a>')\n\n def get_metric_obj(self, metric_name: str) -> Dict[str, Any]:\n return [m.json_obj for m in self.metrics if m.metric_name == metric_name][0]\n\n def latest_metadata(self) -> Optional[Dict[str, Any]]:\n \"\"\"Returns segment metadata from the latest segment\"\"\"\n logger.info(\"Syncing datasource [{}]\".format(self.datasource_name))\n client = self.cluster.get_pydruid_client()\n try:\n results = client.time_boundary(datasource=self.datasource_name)\n except IOError:\n results = None\n if results:\n max_time = results[0][\"result\"][\"maxTime\"]\n max_time = dparse(max_time)\n else:\n max_time = datetime.now()\n # Query segmentMetadata for 7 days back. 
However, due to a bug,\n # we need to set this interval to more than 1 day ago to exclude\n # realtime segments, which triggered a bug (fixed in druid 0.8.2).\n # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ\n lbound = (max_time - timedelta(days=7)).isoformat()\n if LooseVersion(self.cluster.druid_version) < LooseVersion(\"0.8.2\"):\n rbound = (max_time - timedelta(1)).isoformat()\n else:\n rbound = max_time.isoformat()\n segment_metadata = None\n try:\n segment_metadata = client.segment_metadata(\n datasource=self.datasource_name,\n intervals=lbound + \"/\" + rbound,\n merge=self.merge_flag,\n analysisTypes=[],\n )\n except Exception as ex:\n logger.warning(\"Failed first attempt to get latest segment\")\n logger.exception(ex)\n if not segment_metadata:\n # if no segments in the past 7 days, look at all segments\n lbound = datetime(1901, 1, 1).isoformat()[:10]\n if LooseVersion(self.cluster.druid_version) < LooseVersion(\"0.8.2\"):\n rbound = datetime.now().isoformat()\n else:\n rbound = datetime(2050, 1, 1).isoformat()[:10]\n try:\n segment_metadata = client.segment_metadata(\n datasource=self.datasource_name,\n intervals=lbound + \"/\" + rbound,\n merge=self.merge_flag,\n analysisTypes=[],\n )\n except Exception as ex:\n logger.warning(\"Failed 2nd attempt to get latest segment\")\n logger.exception(ex)\n if segment_metadata:\n return segment_metadata[-1][\"columns\"]\n return None\n\n def refresh_metrics(self) -> None:\n for col in self.columns:\n col.refresh_metrics()\n\n @classmethod\n def sync_to_db_from_config(\n cls,\n druid_config: Dict[str, Any],\n user: User,\n cluster: DruidCluster,\n refresh: bool = True,\n ) -> None:\n \"\"\"Merges the ds config from druid_config into one stored in the db.\"\"\"\n session = db.session\n datasource = (\n session.query(cls).filter_by(datasource_name=druid_config[\"name\"]).first()\n )\n # Create a new datasource.\n if not datasource:\n datasource = cls(\n datasource_name=druid_config[\"name\"],\n cluster=cluster,\n owners=[user],\n changed_by_fk=user.id,\n created_by_fk=user.id,\n )\n session.add(datasource)\n elif not refresh:\n return\n\n dimensions = druid_config[\"dimensions\"]\n col_objs = (\n session.query(DruidColumn)\n .filter(DruidColumn.datasource_id == datasource.id)\n .filter(DruidColumn.column_name.in_(dimensions))\n )\n col_objs = {col.column_name: col for col in col_objs}\n for dim in dimensions:\n col_obj = col_objs.get(dim, None)\n if not col_obj:\n col_obj = DruidColumn(\n datasource_id=datasource.id,\n column_name=dim,\n groupby=True,\n filterable=True,\n # TODO: fetch type from Hive.\n type=\"STRING\",\n datasource=datasource,\n )\n session.add(col_obj)\n # Import Druid metrics\n metric_objs = (\n session.query(DruidMetric)\n .filter(DruidMetric.datasource_id == datasource.id)\n .filter(\n DruidMetric.metric_name.in_(\n spec[\"name\"] for spec in druid_config[\"metrics_spec\"]\n )\n )\n )\n metric_objs = {metric.metric_name: metric for metric in metric_objs}\n for metric_spec in druid_config[\"metrics_spec\"]:\n metric_name = metric_spec[\"name\"]\n metric_type = metric_spec[\"type\"]\n metric_json = json.dumps(metric_spec)\n\n if metric_type == \"count\":\n metric_type = \"longSum\"\n metric_json = json.dumps(\n {\"type\": \"longSum\", \"name\": metric_name, \"fieldName\": metric_name}\n )\n\n metric_obj = metric_objs.get(metric_name, None)\n if not metric_obj:\n metric_obj = DruidMetric(\n metric_name=metric_name,\n metric_type=metric_type,\n verbose_name=\"%s(%s)\" % (metric_type, metric_name),\n 
datasource=datasource,\n json=metric_json,\n description=(\n \"Imported from the airolap config dir for %s\"\n % druid_config[\"name\"]\n ),\n )\n session.add(metric_obj)\n session.commit()\n\n @staticmethod\n def time_offset(granularity: Granularity) -> int:\n if granularity == \"week_ending_saturday\":\n return 6 * 24 * 3600 * 1000 # 6 days\n return 0\n\n @classmethod\n def get_datasource_by_name(\n cls, session: Session, datasource_name: str, schema: str, database_name: str\n ) -> Optional[\"DruidDatasource\"]:\n query = (\n session.query(cls)\n .join(DruidCluster)\n .filter(cls.datasource_name == datasource_name)\n .filter(DruidCluster.cluster_name == database_name)\n )\n return query.first()\n\n # uses https://en.wikipedia.org/wiki/ISO_8601\n # http://druid.io/docs/0.8.0/querying/granularities.html\n # TODO: pass origin from the UI\n @staticmethod\n def granularity(\n period_name: str, timezone: Optional[str] = None, origin: Optional[str] = None\n ) -> Union[Dict[str, str], str]:\n if not period_name or period_name == \"all\":\n return \"all\"\n iso_8601_dict = {\n \"5 seconds\": \"PT5S\",\n \"30 seconds\": \"PT30S\",\n \"1 minute\": \"PT1M\",\n \"5 minutes\": \"PT5M\",\n \"30 minutes\": \"PT30M\",\n \"1 hour\": \"PT1H\",\n \"6 hour\": \"PT6H\",\n \"one day\": \"P1D\",\n \"1 day\": \"P1D\",\n \"7 days\": \"P7D\",\n \"week\": \"P1W\",\n \"week_starting_sunday\": \"P1W\",\n \"week_ending_saturday\": \"P1W\",\n \"month\": \"P1M\",\n \"quarter\": \"P3M\",\n \"year\": \"P1Y\",\n }\n\n granularity = {\"type\": \"period\"}\n if timezone:\n granularity[\"timeZone\"] = timezone\n\n if origin:\n dttm = parse_human_datetime(origin)\n assert dttm\n granularity[\"origin\"] = dttm.isoformat()\n\n if period_name in iso_8601_dict:\n granularity[\"period\"] = iso_8601_dict[period_name]\n if period_name in (\"week_ending_saturday\", \"week_starting_sunday\"):\n # use Sunday as start of the week\n granularity[\"origin\"] = \"2016-01-03T00:00:00\"\n elif not isinstance(period_name, str):\n granularity[\"type\"] = \"duration\"\n granularity[\"duration\"] = period_name\n elif period_name.startswith(\"P\"):\n # identify if the string is the iso_8601 period\n granularity[\"period\"] = period_name\n else:\n granularity[\"type\"] = \"duration\"\n granularity[\"duration\"] = (\n parse_human_timedelta(period_name).total_seconds() # type: ignore\n * 1000\n )\n return granularity\n\n @staticmethod\n def get_post_agg(mconf: Dict[str, Any]) -> \"Postaggregator\":\n \"\"\"\n For a metric specified as `postagg` returns the\n kind of post aggregation for pydruid.\n \"\"\"\n if mconf.get(\"type\") == \"javascript\":\n return JavascriptPostAggregator(\n name=mconf.get(\"name\", \"\"),\n field_names=mconf.get(\"fieldNames\", []),\n function=mconf.get(\"function\", \"\"),\n )\n elif mconf.get(\"type\") == \"quantile\":\n return Quantile(mconf.get(\"name\", \"\"), mconf.get(\"probability\", \"\"))\n elif mconf.get(\"type\") == \"quantiles\":\n return Quantiles(mconf.get(\"name\", \"\"), mconf.get(\"probabilities\", \"\"))\n elif mconf.get(\"type\") == \"fieldAccess\":\n return Field(mconf.get(\"name\"))\n elif mconf.get(\"type\") == \"constant\":\n return Const(mconf.get(\"value\"), output_name=mconf.get(\"name\", \"\"))\n elif mconf.get(\"type\") == \"hyperUniqueCardinality\":\n return HyperUniqueCardinality(mconf.get(\"name\"))\n elif mconf.get(\"type\") == \"arithmetic\":\n return Postaggregator(\n mconf.get(\"fn\", \"/\"), mconf.get(\"fields\", []), mconf.get(\"name\", \"\")\n )\n else:\n return 
CustomPostAggregator(mconf.get(\"name\", \"\"), mconf)\n\n @staticmethod\n def find_postaggs_for(\n postagg_names: Set[str], metrics_dict: Dict[str, DruidMetric]\n ) -> List[DruidMetric]:\n \"\"\"Return a list of metrics that are post aggregations\"\"\"\n postagg_metrics = [\n metrics_dict[name]\n for name in postagg_names\n if metrics_dict[name].metric_type == POST_AGG_TYPE\n ]\n # Remove post aggregations that were found\n for postagg in postagg_metrics:\n postagg_names.remove(postagg.metric_name)\n return postagg_metrics\n\n @staticmethod\n def recursive_get_fields(_conf: Dict[str, Any]) -> List[str]:\n _type = _conf.get(\"type\")\n _field = _conf.get(\"field\")\n _fields = _conf.get(\"fields\")\n field_names = []\n if _type in [\"fieldAccess\", \"hyperUniqueCardinality\", \"quantile\", \"quantiles\"]:\n field_names.append(_conf.get(\"fieldName\", \"\"))\n if _field:\n field_names += DruidDatasource.recursive_get_fields(_field)\n if _fields:\n for _f in _fields:\n field_names += DruidDatasource.recursive_get_fields(_f)\n return list(set(field_names))\n\n @staticmethod\n def resolve_postagg(\n postagg: DruidMetric,\n post_aggs: Dict[str, Any],\n agg_names: Set[str],\n visited_postaggs: Set[str],\n metrics_dict: Dict[str, DruidMetric],\n ) -> None:\n mconf = postagg.json_obj\n required_fields = set(\n DruidDatasource.recursive_get_fields(mconf) + mconf.get(\"fieldNames\", [])\n )\n # Check if the fields are already in aggs\n # or is a previous postagg\n required_fields = set(\n field\n for field in required_fields\n if field not in visited_postaggs and field not in agg_names\n )\n # First try to find postaggs that match\n if len(required_fields) > 0:\n missing_postaggs = DruidDatasource.find_postaggs_for(\n required_fields, metrics_dict\n )\n for missing_metric in required_fields:\n agg_names.add(missing_metric)\n for missing_postagg in missing_postaggs:\n # Add to visited first to avoid infinite recursion\n # if post aggregations are cyclicly dependent\n visited_postaggs.add(missing_postagg.metric_name)\n for missing_postagg in missing_postaggs:\n DruidDatasource.resolve_postagg(\n missing_postagg,\n post_aggs,\n agg_names,\n visited_postaggs,\n metrics_dict,\n )\n post_aggs[postagg.metric_name] = DruidDatasource.get_post_agg(postagg.json_obj)\n\n @staticmethod\n def metrics_and_post_aggs(\n metrics: List[Metric], metrics_dict: Dict[str, DruidMetric]\n ) -> Tuple[\"OrderedDict[str, Any]\", \"OrderedDict[str, Any]\"]:\n # Separate metrics into those that are aggregations\n # and those that are post aggregations\n saved_agg_names = set()\n adhoc_agg_configs = []\n postagg_names = []\n for metric in metrics:\n if isinstance(metric, dict) and utils.is_adhoc_metric(metric):\n adhoc_agg_configs.append(metric)\n elif isinstance(metric, str):\n if metrics_dict[metric].metric_type != POST_AGG_TYPE:\n saved_agg_names.add(metric)\n else:\n postagg_names.append(metric)\n # Create the post aggregations, maintain order since postaggs\n # may depend on previous ones\n post_aggs: \"OrderedDict[str, Postaggregator]\" = OrderedDict()\n visited_postaggs = set()\n for postagg_name in postagg_names:\n postagg = metrics_dict[postagg_name]\n visited_postaggs.add(postagg_name)\n DruidDatasource.resolve_postagg(\n postagg, post_aggs, saved_agg_names, visited_postaggs, metrics_dict\n )\n aggs = DruidDatasource.get_aggregations(\n metrics_dict, saved_agg_names, adhoc_agg_configs\n )\n return aggs, post_aggs\n\n def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]:\n \"\"\"Retrieve some 
values for the given column\"\"\"\n logger.info(\n \"Getting values for columns [{}] limited to [{}]\".format(column_name, limit)\n )\n # TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid\n if self.fetch_values_from:\n from_dttm = parse_human_datetime(self.fetch_values_from)\n assert from_dttm\n else:\n from_dttm = datetime(1970, 1, 1)\n\n qry = dict(\n datasource=self.datasource_name,\n granularity=\"all\",\n intervals=from_dttm.isoformat() + \"/\" + datetime.now().isoformat(),\n aggregations=dict(count=count(\"count\")),\n dimension=column_name,\n metric=\"count\",\n threshold=limit,\n )\n\n client = self.cluster.get_pydruid_client()\n client.topn(**qry)\n df = client.export_pandas()\n return df[column_name].to_list()\n\n def get_query_str(\n self,\n query_obj: QueryObjectDict,\n phase: int = 1,\n client: Optional[\"PyDruid\"] = None,\n ) -> str:\n return self.run_query(client=client, phase=phase, **query_obj)\n\n def _add_filter_from_pre_query_data(\n self, df: pd.DataFrame, dimensions: List[Any], dim_filter: \"Filter\"\n ) -> \"Filter\":\n ret = dim_filter\n if not df.empty:\n new_filters = []\n for unused, row in df.iterrows():\n fields = []\n for dim in dimensions:\n f = None\n # Check if this dimension uses an extraction function\n # If so, create the appropriate pydruid extraction object\n if isinstance(dim, dict) and \"extractionFn\" in dim:\n (col, extraction_fn) = DruidDatasource._create_extraction_fn(\n dim\n )\n dim_val = dim[\"outputName\"]\n f = Filter(\n dimension=col,\n value=row[dim_val],\n extraction_function=extraction_fn,\n )\n elif isinstance(dim, dict):\n dim_val = dim[\"outputName\"]\n if dim_val:\n f = Dimension(dim_val) == row[dim_val]\n else:\n f = Dimension(dim) == row[dim]\n if f:\n fields.append(f)\n if len(fields) > 1:\n term = Filter(type=\"and\", fields=fields)\n new_filters.append(term)\n elif fields:\n new_filters.append(fields[0])\n if new_filters:\n ff = Filter(type=\"or\", fields=new_filters)\n if not dim_filter:\n ret = ff\n else:\n ret = Filter(type=\"and\", fields=[ff, dim_filter])\n return ret\n\n @staticmethod\n def druid_type_from_adhoc_metric(adhoc_metric: AdhocMetric) -> str:\n column_type = adhoc_metric[\"column\"][\"type\"].lower()\n aggregate = adhoc_metric[\"aggregate\"].lower()\n\n if aggregate == \"count\":\n return \"count\"\n if aggregate == \"count_distinct\":\n return \"hyperUnique\" if column_type == \"hyperunique\" else \"cardinality\"\n else:\n return column_type + aggregate.capitalize()\n\n @staticmethod\n def get_aggregations(\n metrics_dict: Dict[str, Any],\n saved_metrics: Set[str],\n adhoc_metrics: Optional[List[AdhocMetric]] = None,\n ) -> \"OrderedDict[str, Any]\":\n \"\"\"\n Returns a dictionary of aggregation metric names to aggregation json objects\n\n :param metrics_dict: dictionary of all the metrics\n :param saved_metrics: list of saved metric names\n :param adhoc_metrics: list of adhoc metric names\n :raise SupersetException: if one or more metric names are not aggregations\n \"\"\"\n if not adhoc_metrics:\n adhoc_metrics = []\n aggregations = OrderedDict()\n invalid_metric_names = []\n for metric_name in saved_metrics:\n if metric_name in metrics_dict:\n metric = metrics_dict[metric_name]\n if metric.metric_type == POST_AGG_TYPE:\n invalid_metric_names.append(metric_name)\n else:\n aggregations[metric_name] = metric.json_obj\n else:\n invalid_metric_names.append(metric_name)\n if len(invalid_metric_names) > 0:\n raise SupersetException(\n _(\"Metric(s) {} must be 
aggregations.\").format(invalid_metric_names)\n )\n for adhoc_metric in adhoc_metrics:\n aggregations[adhoc_metric[\"label\"]] = {\n \"fieldName\": adhoc_metric[\"column\"][\"column_name\"],\n \"fieldNames\": [adhoc_metric[\"column\"][\"column_name\"]],\n \"type\": DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),\n \"name\": adhoc_metric[\"label\"],\n }\n return aggregations\n\n def get_dimensions(\n self, columns: List[str], columns_dict: Dict[str, DruidColumn]\n ) -> List[Union[str, Dict[str, Any]]]:\n dimensions = []\n columns = [col for col in columns if col in columns_dict]\n for column_name in columns:\n col = columns_dict.get(column_name)\n dim_spec = col.dimension_spec if col else None\n dimensions.append(dim_spec or column_name)\n return dimensions\n\n def intervals_from_dttms(self, from_dttm: datetime, to_dttm: datetime) -> str:\n # Couldn't find a way to just not filter on time...\n from_dttm = from_dttm or datetime(1901, 1, 1)\n to_dttm = to_dttm or datetime(2101, 1, 1)\n\n # add tzinfo to native datetime with config\n from_dttm = from_dttm.replace(tzinfo=DRUID_TZ)\n to_dttm = to_dttm.replace(tzinfo=DRUID_TZ)\n return \"{}/{}\".format(\n from_dttm.isoformat() if from_dttm else \"\",\n to_dttm.isoformat() if to_dttm else \"\",\n )\n\n @staticmethod\n def _dimensions_to_values(\n dimensions: List[Union[Dict[str, str], str]]\n ) -> List[Union[Dict[str, str], str]]:\n \"\"\"\n Replace dimensions specs with their `dimension`\n values, and ignore those without\n \"\"\"\n values: List[Union[Dict[str, str], str]] = []\n for dimension in dimensions:\n if isinstance(dimension, dict):\n if \"extractionFn\" in dimension:\n values.append(dimension)\n elif \"dimension\" in dimension:\n values.append(dimension[\"dimension\"])\n else:\n values.append(dimension)\n\n return values\n\n @staticmethod\n def sanitize_metric_object(metric: Metric) -> None:\n \"\"\"\n Update a metric with the correct type if necessary.\n :param dict metric: The metric to sanitize\n \"\"\"\n if (\n utils.is_adhoc_metric(metric)\n and metric[\"column\"][\"type\"].upper() == \"FLOAT\" # type: ignore\n ):\n metric[\"column\"][\"type\"] = \"DOUBLE\" # type: ignore\n\n def run_query( # druid\n self,\n metrics: List[Metric],\n granularity: str,\n from_dttm: datetime,\n to_dttm: datetime,\n columns: Optional[List[str]] = None,\n groupby: Optional[List[str]] = None,\n filter: Optional[List[Dict[str, Any]]] = None,\n is_timeseries: Optional[bool] = True,\n timeseries_limit: Optional[int] = None,\n timeseries_limit_metric: Optional[Metric] = None,\n row_limit: Optional[int] = None,\n row_offset: Optional[int] = None,\n inner_from_dttm: Optional[datetime] = None,\n inner_to_dttm: Optional[datetime] = None,\n orderby: Optional[Any] = None,\n extras: Optional[Dict[str, Any]] = None,\n phase: int = 2,\n client: Optional[\"PyDruid\"] = None,\n order_desc: bool = True,\n is_rowcount: bool = False,\n apply_fetch_values_predicate: bool = False,\n ) -> str:\n \"\"\"Runs a query against Druid and returns a dataframe.\"\"\"\n # is_rowcount and apply_fetch_values_predicate is only\n # supported on SQL connector\n if is_rowcount:\n raise SupersetException(\"is_rowcount is not supported on Druid connector\")\n if apply_fetch_values_predicate:\n raise SupersetException(\n \"apply_fetch_values_predicate is not supported on Druid connector\"\n )\n\n # TODO refactor into using a TBD Query object\n client = client or self.cluster.get_pydruid_client()\n row_limit = row_limit or conf.get(\"ROW_LIMIT\")\n if row_offset:\n raise 
SupersetException(\"Offset not implemented for Druid connector\")\n\n if not is_timeseries:\n granularity = \"all\"\n\n if granularity == \"all\":\n phase = 1\n inner_from_dttm = inner_from_dttm or from_dttm\n inner_to_dttm = inner_to_dttm or to_dttm\n\n timezone = from_dttm.replace(tzinfo=DRUID_TZ).tzname() if from_dttm else None\n\n query_str = \"\"\n metrics_dict = {m.metric_name: m for m in self.metrics}\n columns_dict = {c.column_name: c for c in self.columns}\n\n if self.cluster and LooseVersion(\n self.cluster.get_druid_version()\n ) < LooseVersion(\"0.11.0\"):\n for metric in metrics:\n self.sanitize_metric_object(metric)\n if timeseries_limit_metric:\n self.sanitize_metric_object(timeseries_limit_metric)\n\n aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs(\n metrics, metrics_dict\n )\n\n # the dimensions list with dimensionSpecs expanded\n dimensions = self.get_dimensions(groupby, columns_dict) if groupby else []\n\n extras = extras or {}\n qry = dict(\n datasource=self.datasource_name,\n dimensions=dimensions,\n aggregations=aggregations,\n granularity=DruidDatasource.granularity(\n granularity, timezone=timezone, origin=extras.get(\"druid_time_origin\")\n ),\n post_aggregations=post_aggs,\n intervals=self.intervals_from_dttms(from_dttm, to_dttm),\n )\n\n if is_timeseries:\n qry[\"context\"] = dict(skipEmptyBuckets=True)\n\n filters = (\n DruidDatasource.get_filters(filter, self.num_cols, columns_dict)\n if filter\n else None\n )\n if filters:\n qry[\"filter\"] = filters\n\n if \"having_druid\" in extras:\n having_filters = self.get_having_filters(extras[\"having_druid\"])\n if having_filters:\n qry[\"having\"] = having_filters\n else:\n having_filters = None\n\n order_direction = \"descending\" if order_desc else \"ascending\"\n\n if columns:\n columns.append(\"__time\")\n del qry[\"post_aggregations\"]\n del qry[\"aggregations\"]\n del qry[\"dimensions\"]\n qry[\"columns\"] = columns\n qry[\"metrics\"] = []\n qry[\"granularity\"] = \"all\"\n qry[\"limit\"] = row_limit\n client.scan(**qry)\n elif not groupby and not having_filters:\n logger.info(\"Running timeseries query for no groupby values\")\n del qry[\"dimensions\"]\n client.timeseries(**qry)\n elif not having_filters and order_desc and (groupby and len(groupby) == 1):\n dim = list(qry[\"dimensions\"])[0]\n logger.info(\"Running two-phase topn query for dimension [{}]\".format(dim))\n pre_qry = deepcopy(qry)\n order_by: Optional[str] = None\n if timeseries_limit_metric:\n order_by = utils.get_metric_name(timeseries_limit_metric)\n aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(\n [timeseries_limit_metric], metrics_dict\n )\n if phase == 1:\n pre_qry[\"aggregations\"].update(aggs_dict)\n pre_qry[\"post_aggregations\"].update(post_aggs_dict)\n else:\n pre_qry[\"aggregations\"] = aggs_dict\n pre_qry[\"post_aggregations\"] = post_aggs_dict\n else:\n agg_keys = qry[\"aggregations\"].keys()\n order_by = list(agg_keys)[0] if agg_keys else None\n\n # Limit on the number of timeseries, doing a two-phases query\n pre_qry[\"granularity\"] = \"all\"\n pre_qry[\"threshold\"] = min(row_limit, timeseries_limit or row_limit)\n pre_qry[\"metric\"] = order_by\n pre_qry[\"dimension\"] = self._dimensions_to_values(qry[\"dimensions\"])[0]\n del pre_qry[\"dimensions\"]\n\n client.topn(**pre_qry)\n logger.info(\"Phase 1 Complete\")\n if phase == 2:\n query_str += \"// Two phase query\\n// Phase 1\\n\"\n query_str += json.dumps(\n client.query_builder.last_query.query_dict, indent=2\n )\n query_str += \"\\n\"\n 
if phase == 1:\n return query_str\n query_str += \"// Phase 2 (built based on phase one's results)\\n\"\n df = client.export_pandas()\n if df is None:\n df = pd.DataFrame()\n qry[\"filter\"] = self._add_filter_from_pre_query_data(\n df, [pre_qry[\"dimension\"]], filters\n )\n qry[\"threshold\"] = timeseries_limit or 1000\n if row_limit and granularity == \"all\":\n qry[\"threshold\"] = row_limit\n qry[\"dimension\"] = dim\n del qry[\"dimensions\"]\n qry[\"metric\"] = list(qry[\"aggregations\"].keys())[0]\n client.topn(**qry)\n logger.info(\"Phase 2 Complete\")\n elif having_filters or groupby:\n # If grouping on multiple fields or using a having filter\n # we have to force a groupby query\n logger.info(\"Running groupby query for dimensions [{}]\".format(dimensions))\n if timeseries_limit and is_timeseries:\n logger.info(\"Running two-phase query for timeseries\")\n\n pre_qry = deepcopy(qry)\n pre_qry_dims = self._dimensions_to_values(qry[\"dimensions\"])\n\n # Can't use set on an array with dicts\n # Use set with non-dict items only\n non_dict_dims = list(\n set([x for x in pre_qry_dims if not isinstance(x, dict)])\n )\n dict_dims = [x for x in pre_qry_dims if isinstance(x, dict)]\n pre_qry[\"dimensions\"] = non_dict_dims + dict_dims # type: ignore\n\n order_by = None\n if metrics:\n order_by = utils.get_metric_name(metrics[0])\n else:\n order_by = pre_qry_dims[0] # type: ignore\n\n if timeseries_limit_metric:\n order_by = utils.get_metric_name(timeseries_limit_metric)\n aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(\n [timeseries_limit_metric], metrics_dict\n )\n if phase == 1:\n pre_qry[\"aggregations\"].update(aggs_dict)\n pre_qry[\"post_aggregations\"].update(post_aggs_dict)\n else:\n pre_qry[\"aggregations\"] = aggs_dict\n pre_qry[\"post_aggregations\"] = post_aggs_dict\n\n # Limit on the number of timeseries, doing a two-phases query\n pre_qry[\"granularity\"] = \"all\"\n pre_qry[\"limit_spec\"] = {\n \"type\": \"default\",\n \"limit\": min(timeseries_limit, row_limit),\n \"intervals\": self.intervals_from_dttms(\n inner_from_dttm, inner_to_dttm\n ),\n \"columns\": [{\"dimension\": order_by, \"direction\": order_direction}],\n }\n client.groupby(**pre_qry)\n logger.info(\"Phase 1 Complete\")\n query_str += \"// Two phase query\\n// Phase 1\\n\"\n query_str += json.dumps(\n client.query_builder.last_query.query_dict, indent=2\n )\n query_str += \"\\n\"\n if phase == 1:\n return query_str\n query_str += \"// Phase 2 (built based on phase one's results)\\n\"\n df = client.export_pandas()\n if df is None:\n df = pd.DataFrame()\n qry[\"filter\"] = self._add_filter_from_pre_query_data(\n df, pre_qry[\"dimensions\"], filters\n )\n qry[\"limit_spec\"] = None\n if row_limit:\n dimension_values = self._dimensions_to_values(dimensions)\n qry[\"limit_spec\"] = {\n \"type\": \"default\",\n \"limit\": row_limit,\n \"columns\": [\n {\n \"dimension\": (\n utils.get_metric_name(metrics[0])\n if metrics\n else dimension_values[0]\n ),\n \"direction\": order_direction,\n }\n ],\n }\n client.groupby(**qry)\n logger.info(\"Query Complete\")\n query_str += json.dumps(client.query_builder.last_query.query_dict, indent=2)\n return query_str\n\n @staticmethod\n def homogenize_types(df: pd.DataFrame, columns: Iterable[str]) -> pd.DataFrame:\n \"\"\"Converting all columns to strings\n\n When grouping by a numeric (say FLOAT) column, pydruid returns\n strings in the dataframe. 
This creates issues downstream related\n to having mixed types in the dataframe\n\n Here we replace None with <NULL> and make the whole series a\n str instead of an object.\n \"\"\"\n df[columns] = df[columns].fillna(NULL_STRING).astype(\"unicode\")\n return df\n\n def query(self, query_obj: QueryObjectDict) -> QueryResult:\n qry_start_dttm = datetime.now()\n client = self.cluster.get_pydruid_client()\n query_str = self.get_query_str(client=client, query_obj=query_obj, phase=2)\n df = client.export_pandas()\n if df is None:\n df = pd.DataFrame()\n\n if df.empty:\n return QueryResult(\n df=df, query=query_str, duration=datetime.now() - qry_start_dttm\n )\n\n df = self.homogenize_types(df, query_obj.get(\"groupby\", []))\n df.columns = [\n DTTM_ALIAS if c in (\"timestamp\", \"__time\") else c for c in df.columns\n ]\n\n is_timeseries = (\n query_obj[\"is_timeseries\"] if \"is_timeseries\" in query_obj else True\n )\n if not is_timeseries and DTTM_ALIAS in df.columns:\n del df[DTTM_ALIAS]\n\n # Reordering columns\n cols: List[str] = []\n if DTTM_ALIAS in df.columns:\n cols += [DTTM_ALIAS]\n\n cols += query_obj.get(\"groupby\") or []\n cols += query_obj.get(\"columns\") or []\n cols += query_obj.get(\"metrics\") or []\n\n cols = utils.get_metric_names(cols)\n cols = [col for col in cols if col in df.columns]\n df = df[cols]\n\n time_offset = DruidDatasource.time_offset(query_obj[\"granularity\"])\n\n def increment_timestamp(ts: str) -> datetime:\n dt = parse_human_datetime(ts).replace(tzinfo=DRUID_TZ)\n return dt + timedelta(milliseconds=time_offset)\n\n if DTTM_ALIAS in df.columns and time_offset:\n df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(increment_timestamp)\n\n return QueryResult(\n df=df, query=query_str, duration=datetime.now() - qry_start_dttm\n )\n\n @staticmethod\n def _create_extraction_fn(\n dim_spec: Dict[str, Any]\n ) -> Tuple[\n str,\n Union[\n \"MapLookupExtraction\",\n \"RegexExtraction\",\n \"RegisteredLookupExtraction\",\n \"TimeFormatExtraction\",\n ],\n ]:\n extraction_fn = None\n if dim_spec and \"extractionFn\" in dim_spec:\n col = dim_spec[\"dimension\"]\n fn = dim_spec[\"extractionFn\"]\n ext_type = fn.get(\"type\")\n if ext_type == \"lookup\" and fn[\"lookup\"].get(\"type\") == \"map\":\n replace_missing_values = fn.get(\"replaceMissingValueWith\")\n retain_missing_values = fn.get(\"retainMissingValue\", False)\n injective = fn.get(\"isOneToOne\", False)\n extraction_fn = MapLookupExtraction(\n fn[\"lookup\"][\"map\"],\n replace_missing_values=replace_missing_values,\n retain_missing_values=retain_missing_values,\n injective=injective,\n )\n elif ext_type == \"regex\":\n extraction_fn = RegexExtraction(fn[\"expr\"])\n elif ext_type == \"registeredLookup\":\n extraction_fn = RegisteredLookupExtraction(fn.get(\"lookup\"))\n elif ext_type == \"timeFormat\":\n extraction_fn = TimeFormatExtraction(\n fn.get(\"format\"), fn.get(\"locale\"), fn.get(\"timeZone\")\n )\n else:\n raise Exception(_(\"Unsupported extraction function: \" + ext_type))\n return (col, extraction_fn)\n\n @classmethod\n def get_filters(\n cls,\n raw_filters: List[Dict[str, Any]],\n num_cols: List[str],\n columns_dict: Dict[str, DruidColumn],\n ) -> \"Filter\":\n \"\"\"Given Superset filter data structure, returns pydruid Filter(s)\"\"\"\n filters = None\n for flt in raw_filters:\n col: Optional[str] = flt.get(\"col\")\n op: Optional[str] = flt[\"op\"].upper() if \"op\" in flt else None\n eq: Optional[FilterValues] = flt.get(\"val\")\n if (\n not col\n or not op\n or (\n eq is None\n and op\n not in (\n 
FilterOperator.IS_NULL.value,\n FilterOperator.IS_NOT_NULL.value,\n )\n )\n ):\n continue\n\n # Check if this dimension uses an extraction function\n # If so, create the appropriate pydruid extraction object\n column_def = columns_dict.get(col)\n dim_spec = column_def.dimension_spec if column_def else None\n extraction_fn = None\n if dim_spec and \"extractionFn\" in dim_spec:\n (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)\n\n cond = None\n is_numeric_col = col in num_cols\n is_list_target = op in (\n FilterOperator.IN.value,\n FilterOperator.NOT_IN.value,\n )\n eq = cls.filter_values_handler(\n eq,\n is_list_target=is_list_target,\n target_column_type=utils.GenericDataType.NUMERIC\n if is_numeric_col\n else utils.GenericDataType.STRING,\n )\n\n # For these two ops, could have used Dimension,\n # but it doesn't support extraction functions\n if op == FilterOperator.EQUALS.value:\n cond = Filter(\n dimension=col, value=eq, extraction_function=extraction_fn\n )\n elif op == FilterOperator.NOT_EQUALS.value:\n cond = ~Filter(\n dimension=col, value=eq, extraction_function=extraction_fn\n )\n elif is_list_target:\n eq = cast(List[Any], eq)\n fields = []\n # ignore the filter if it has no value\n if not len(eq):\n continue\n # if it uses an extraction fn, use the \"in\" operator\n # as Dimension isn't supported\n elif extraction_fn is not None:\n cond = Filter(\n dimension=col,\n values=eq,\n type=\"in\",\n extraction_function=extraction_fn,\n )\n elif len(eq) == 1:\n cond = Dimension(col) == eq[0]\n else:\n for s in eq:\n fields.append(Dimension(col) == s)\n cond = Filter(type=\"or\", fields=fields)\n if op == FilterOperator.NOT_IN.value:\n cond = ~cond\n elif op == FilterOperator.REGEX.value:\n cond = Filter(\n extraction_function=extraction_fn,\n type=\"regex\",\n pattern=eq,\n dimension=col,\n )\n\n # For the ops below, could have used pydruid's Bound,\n # but it doesn't support extraction functions\n elif op == FilterOperator.GREATER_THAN_OR_EQUALS.value:\n cond = Bound(\n extraction_function=extraction_fn,\n dimension=col,\n lowerStrict=False,\n upperStrict=False,\n lower=eq,\n upper=None,\n ordering=cls._get_ordering(is_numeric_col),\n )\n elif op == FilterOperator.LESS_THAN_OR_EQUALS.value:\n cond = Bound(\n extraction_function=extraction_fn,\n dimension=col,\n lowerStrict=False,\n upperStrict=False,\n lower=None,\n upper=eq,\n ordering=cls._get_ordering(is_numeric_col),\n )\n elif op == FilterOperator.GREATER_THAN.value:\n cond = Bound(\n extraction_function=extraction_fn,\n lowerStrict=True,\n upperStrict=False,\n dimension=col,\n lower=eq,\n upper=None,\n ordering=cls._get_ordering(is_numeric_col),\n )\n elif op == FilterOperator.LESS_THAN.value:\n cond = Bound(\n extraction_function=extraction_fn,\n upperStrict=True,\n lowerStrict=False,\n dimension=col,\n lower=None,\n upper=eq,\n ordering=cls._get_ordering(is_numeric_col),\n )\n elif op == FilterOperator.IS_NULL.value:\n cond = Filter(dimension=col, value=\"\")\n elif op == FilterOperator.IS_NOT_NULL.value:\n cond = ~Filter(dimension=col, value=\"\")\n\n if filters:\n filters = Filter(type=\"and\", fields=[cond, filters])\n else:\n filters = cond\n\n return filters\n\n @staticmethod\n def _get_ordering(is_numeric_col: bool) -> str:\n return \"numeric\" if is_numeric_col else \"lexicographic\"\n\n def _get_having_obj(self, col: str, op: str, eq: str) -> \"Having\":\n cond = None\n if op == FilterOperator.EQUALS.value:\n if col in self.column_names:\n cond = DimSelector(dimension=col, value=eq)\n else:\n cond 
= Aggregation(col) == eq\n elif op == FilterOperator.GREATER_THAN.value:\n cond = Aggregation(col) > eq\n elif op == FilterOperator.LESS_THAN.value:\n cond = Aggregation(col) < eq\n\n return cond\n\n def get_having_filters(\n self, raw_filters: List[Dict[str, Any]]\n ) -> Optional[\"Having\"]:\n filters = None\n reversed_op_map = {\n FilterOperator.NOT_EQUALS.value: FilterOperator.EQUALS.value,\n FilterOperator.GREATER_THAN_OR_EQUALS.value: FilterOperator.LESS_THAN.value,\n FilterOperator.LESS_THAN_OR_EQUALS.value: FilterOperator.GREATER_THAN.value,\n }\n\n for flt in raw_filters:\n if not all(f in flt for f in [\"col\", \"op\", \"val\"]):\n continue\n col = flt[\"col\"]\n op = flt[\"op\"]\n eq = flt[\"val\"]\n cond = None\n if op in [\n FilterOperator.EQUALS.value,\n FilterOperator.GREATER_THAN.value,\n FilterOperator.LESS_THAN.value,\n ]:\n cond = self._get_having_obj(col, op, eq)\n elif op in reversed_op_map:\n cond = ~self._get_having_obj(col, reversed_op_map[op], eq)\n\n if filters:\n filters = filters & cond\n else:\n filters = cond\n return filters\n\n @classmethod\n def query_datasources_by_name(\n cls,\n session: Session,\n database: Database,\n datasource_name: str,\n schema: Optional[str] = None,\n ) -> List[\"DruidDatasource\"]:\n return []\n\n def external_metadata(self) -> List[Dict[str, Any]]:\n self.merge_flag = True\n latest_metadata = self.latest_metadata() or {}\n return [{\"name\": k, \"type\": v.get(\"type\")} for k, v in latest_metadata.items()]\n\n\nsa.event.listen(DruidDatasource, \"after_insert\", security_manager.set_perm)\nsa.event.listen(DruidDatasource, \"after_update\", security_manager.set_perm)\n"
] | [
[
"pandas.DataFrame"
]
] |
bencoster/DAIN | [
"9699f6ded59da7f6f273e2f453f4f73e2aa1d81e"
] | [
"load_functions/prepare_split_aug_images.py"
] | [
"from skimage import io\nimport numpy as np\nfrom tqdm import tqdm\nimport shutil\nimport os\nfrom aicsimageio import AICSImage, imread\nimport shutil\nimport time\nimport numpy\nimport random\nfrom aicsimageio import AICSImage, imread\nfrom aicsimageio.writers import png_writer \nfrom tqdm import tqdm\nfrom google.colab.patches import cv2_imshow\nfrom aicsimageio.writers.ome_tiff_writer import OmeTiffWriter\nfrom tqdm import tqdm\nfrom timeit import default_timer as timer\nimport imageio\nimport tifffile \nfrom aicsimageio.transforms import reshape_data\nfrom datetime import datetime\n\n\ndef make_folder_with_date(save_location, name):\n today = datetime.now()\n if today.hour < 12:\n h = \"00\"\n else:\n h = \"12\"\n sub_save_location = save_location + \"/\" + today.strftime('%Y%m%d%H')+ \"_\"+ today.strftime('%H%M%S')+ \"_%s\"%name\n os.mkdir(sub_save_location)\n return sub_save_location\n\n\ndef diplay_img_info(img, divisor):\n ### display image data\n image_resolution = img.shape[-1]\n nr_z_slices = img.shape[2]\n nr_channels = img.shape[0]\n nr_timepoints = img.shape[1]\n x_dim = img.shape[-1]\n y_dim = img.shape[-2] \n x_div = x_dim//divisor\n y_div = y_dim//divisor\n print(img.shape)\n print(\"The Resolution is: \" + str(image_resolution))\n print(\"The number of z-slizes is: \" + str(nr_z_slices))\n print(\"The number of timepoints: \" + str(nr_timepoints))\n print(\"The number of channels: \" + str(nr_channels))\n return nr_z_slices, nr_channels, nr_timepoints, x_dim, y_dim, x_div, y_div \n\n\ndef rotation_aug(source_img, name, path, flip=False):\n print(source_img.shape)\n # Source Rotation\n source_img_90 = np.rot90(source_img,axes=(2,3))\n source_img_180 = np.rot90(source_img_90,axes=(2,3))\n source_img_270 = np.rot90(source_img_180,axes=(2,3))\n # Add a flip to the rotation\n if flip == True:\n source_img_lr = np.fliplr(source_img)\n source_img_90_lr = np.fliplr(source_img_90)\n source_img_180_lr = np.fliplr(source_img_180)\n source_img_270_lr = np.fliplr(source_img_270)\n\n #source_img_90_ud = np.flipud(source_img_90)\n # Save the augmented files\n # Source images\n with OmeTiffWriter(path + \"/\" + name + \".tif\") as writer2:\n writer2.save(source_img, dimension_order='TZYX') \n with OmeTiffWriter(path + \"/\" + name +'_90.tif') as writer2:\n writer2.save(source_img_90, dimension_order='TZYX') \n with OmeTiffWriter(path + \"/\" + name +'_180.tif') as writer2:\n writer2.save(source_img_180, dimension_order='TZYX') \n with OmeTiffWriter(path + \"/\" + name +'_270.tif') as writer2:\n writer2.save(source_img_270, dimension_order='TZYX') \n # Target images\n \n if flip == True:\n with OmeTiffWriter(path + \"/\" + name + '_lr.tif') as writer2:\n writer2.save(source_img_lr, dimension_order='TZYX') \n with OmeTiffWriter(path + \"/\" + name + '_90_lr.tif') as writer2:\n writer2.save(source_img_90_lr, dimension_order='TZYX') \n with OmeTiffWriter(path + \"/\" + name + '_180_lr.tif') as writer2:\n writer2.save(source_img_180_lr, dimension_order='TZYX') \n with OmeTiffWriter(path + \"/\" + name + '_270_lr.tif') as writer2:\n writer2.save(source_img_270_lr, dimension_order='TZYX') \n\n \ndef flip(source_img, name, path):\n source_img_lr = np.fliplr(source_img)\n with OmeTiffWriter(path + \"/\" + name + \".tif\") as writer2:\n writer2.save(source_img, dimension_order='TZYX') \n with OmeTiffWriter(path + \"/\" + name + '_lr.tif') as writer2:\n writer2.save(source_img_lr, dimension_order='TZYX')\n"
] | [
[
"numpy.rot90",
"numpy.fliplr"
]
] |
GuoQuanhao/Contrib | [
"9069366559d0353c96075ed573222f3fbdfabafe"
] | [
"UGATIT-Paddle-master/utils.py"
] | [
"import os\r\nimport cv2\r\nimport numpy as np\r\nfrom paddle.fluid.layers import sigmoid_cross_entropy_with_logits, reduce_sum, reduce_mean, clip, pad2d, relu, \\\r\n leaky_relu, tanh, interpolate\r\nfrom paddle.fluid.dygraph import SpectralNorm,Layer\r\n\r\n\"\"\" tools\"\"\"\r\n\r\n\r\ndef check_folder(log_dir):\r\n if not os.path.exists(log_dir):\r\n os.makedirs(log_dir)\r\n return log_dir\r\n\r\n\r\ndef str2bool(x):\r\n return x.lower() in ('true')\r\n\r\n\r\ndef denorm(x):\r\n return x * 0.5 + 0.5\r\n\r\n\r\ndef tensor2numpy(x):\r\n return x.detach().numpy().transpose(1, 2, 0)\r\n\r\n\r\ndef RGB2BGR(x):\r\n return cv2.cvtColor(x, cv2.COLOR_RGB2BGR)\r\n\r\n\r\ndef cam(x, size=256):\r\n x = x - np.min(x)\r\n cam_img = x / np.max(x)\r\n cam_img = np.uint8(255 * cam_img)\r\n cam_img = cv2.resize(cam_img, (size, size))\r\n cam_img = cv2.applyColorMap(cam_img, cv2.COLORMAP_JET)\r\n return cam_img / 255.0\r\n\r\n\r\n\"\"\"some api\"\"\"\r\n\r\n\r\nclass BCEWithLogitsLoss():\r\n def __init__(self, weight=None, reduction='mean'):\r\n self.weight = weight\r\n self.reduction = reduction\r\n\r\n def __call__(self, x, label):\r\n out = sigmoid_cross_entropy_with_logits(x, label)\r\n if self.reduction == 'sum':\r\n return reduce_sum(out)\r\n elif self.reduction == 'mean':\r\n return reduce_mean(out)\r\n else:\r\n return out\r\n\r\n\r\nclass RhoClipper():\r\n def __init__(self, vmin=0, vmax=1):\r\n self.vmin = vmin\r\n self.vmax = vmax\r\n\r\n def __call__(self, net):\r\n for name, param in net.named_parameters():\r\n if 'rho' in name:\r\n param.set_value(clip(param, self.vmin, self.vmax))\r\n\r\n\r\nclass ReflectionPad2D(Layer):\r\n def __init__(self, paddings):\r\n super().__init__()\r\n self.padding = [paddings] * 4\r\n\r\n def forward(self, x):\r\n return pad2d(x, self.padding, mode='reflect')\r\n\r\n\r\nclass ReLU(Layer):\r\n def __init__(self, inplace=True):\r\n super(ReLU, self).__init__()\r\n self.inplace = inplace\r\n\r\n def forward(self, x):\r\n if self.inplace:\r\n x.set_value(relu(x))\r\n return x\r\n else:\r\n y = relu(x)\r\n return y\r\n\r\n\r\nclass LeakyReLU(Layer):\r\n def __init__(self, alpha=0.02, inplce=False):\r\n super(LeakyReLU, self).__init__()\r\n self.inplce = inplce\r\n self.alpha = alpha\r\n\r\n def forward(self, x):\r\n if self.inplce:\r\n x.set_value(leaky_relu(x, self.alpha))\r\n return x\r\n else:\r\n y = leaky_relu(x, self.alpha)\r\n return y\r\n\r\n\r\nclass Tanh(Layer):\r\n def __init__(self):\r\n super(Tanh, self).__init__()\r\n\r\n def forward(self, x):\r\n return tanh(x)\r\n\r\n\r\nclass Upsample(Layer):\r\n def __init__(self, scales, resamples):\r\n super(Upsample, self).__init__()\r\n self.scale = scales\r\n self.resample = resamples\r\n\r\n def forward(self, x):\r\n return interpolate(x, scale=self.scale, resample=self.resample)\r\n\r\n\r\ndef var(input, dim=None, keep_dim=True, unbiased=True, name=None):\r\n rank = len(input.shape)\r\n dims = dim if dim is not None and dim != [] else range(rank)\r\n dims = [e if e >= 0 else e + rank for e in dims]\r\n inp_shape = input.shape\r\n mean = reduce_mean(input, dim=dim, keep_dim=True, name=name)\r\n tmp = reduce_mean((input - mean) ** 2, dim=dim, keep_dim=True, name=name)\r\n if unbiased:\r\n n = 1\r\n for i in dims:\r\n n *= inp_shape[i]\r\n factor = n / (n - 1.0) if n > 1.0 else 0.0\r\n tmp *= factor\r\n return tmp\r\n\r\n\r\nclass spectral_norm(Layer):\r\n\r\n def __init__(self, layer, dim=0, power_iters=1, eps=1e-12, dtype='float32'):\r\n super(spectral_norm, self).__init__()\r\n self.dim = dim\r\n 
self.power_iters = power_iters\r\n self.eps = eps\r\n self.layer = layer\r\n self.dtype = dtype\r\n weight = layer._parameters['weight']\r\n del layer._parameters['weight']\r\n self.shape = weight.shape\r\n self.weight_orig = self.create_parameter(weight.shape, dtype=weight.dtype)\r\n self.weight_orig.set_value(weight)\r\n\r\n def forward(self, x):\r\n weight = SpectralNorm(self.shape, self.dim, self.power_iters, self.eps, self.dtype)(self.weight_orig)\r\n self.layer.weight = weight\r\n out = self.layer(x)\r\n return out\r\n"
] | [
[
"numpy.max",
"numpy.uint8",
"numpy.min"
]
] |
qingziguanjun/AIStudy | [
"70ab93e9dd09f0f545936ba0eb95f21f87920d4c"
] | [
"MachineLearning/tutorials_python/introductory/customizing.py"
] | [
"\"\"\"\nCustomizing Matplotlib with style sheets and rcParams\n=====================================================\n\nTips for customizing the properties and default styles of Matplotlib.\n\nUsing style sheets\n------------------\n\nThe ``style`` package adds support for easy-to-switch plotting \"styles\" with\nthe same parameters as a matplotlibrc_ file (which is read at startup to\nconfigure matplotlib).\n\nThere are a number of pre-defined styles `provided by Matplotlib`_. For\nexample, there's a pre-defined style called \"ggplot\", which emulates the\naesthetics of ggplot_ (a popular plotting package for R_). To use this style,\njust add:\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nplt.style.use('ggplot')\ndata = np.random.randn(50)\n\n###############################################################################\n# To list all available styles, use:\n\nprint(plt.style.available)\n\n###############################################################################\n# Defining your own style\n# -----------------------\n#\n# You can create custom styles and use them by calling ``style.use`` with the\n# path or URL to the style sheet. Additionally, if you add your\n# ``<style-name>.mplstyle`` file to ``mpl_configdir/stylelib``, you can reuse\n# your custom style sheet with a call to ``style.use(<style-name>)``. By default\n# ``mpl_configdir`` should be ``~/.config/matplotlib``, but you can check where\n# yours is with ``matplotlib.get_configdir()``; you may need to create this\n# directory. You also can change the directory where matplotlib looks for\n# the stylelib/ folder by setting the MPLCONFIGDIR environment variable,\n# see :ref:`locating-matplotlib-config-dir`.\n#\n# Note that a custom style sheet in ``mpl_configdir/stylelib`` will\n# override a style sheet defined by matplotlib if the styles have the same name.\n#\n# For example, you might want to create\n# ``mpl_configdir/stylelib/presentation.mplstyle`` with the following::\n#\n# axes.titlesize : 24\n# axes.labelsize : 20\n# lines.linewidth : 3\n# lines.markersize : 10\n# xtick.labelsize : 16\n# ytick.labelsize : 16\n#\n# Then, when you want to adapt a plot designed for a paper to one that looks\n# good in a presentation, you can just add::\n#\n# >>> import matplotlib.pyplot as plt\n# >>> plt.style.use('presentation')\n#\n#\n# Composing styles\n# ----------------\n#\n# Style sheets are designed to be composed together. So you can have a style\n# sheet that customizes colors and a separate style sheet that alters element\n# sizes for presentations. These styles can easily be combined by passing\n# a list of styles::\n#\n# >>> import matplotlib.pyplot as plt\n# >>> plt.style.use(['dark_background', 'presentation'])\n#\n# Note that styles further to the right will overwrite values that are already\n# defined by styles on the left.\n#\n#\n# Temporary styling\n# -----------------\n#\n# If you only want to use a style for a specific block of code but don't want\n# to change the global styling, the style package provides a context manager\n# for limiting your changes to a specific scope. To isolate your styling\n# changes, you can write something like the following:\n\nwith plt.style.context(('dark_background')):\n plt.plot(np.sin(np.linspace(0, 2 * np.pi)), 'r-o')\nplt.show()\n\n###############################################################################\n# matplotlib rcParams\n# ===================\n#\n# .. 
_customizing-with-dynamic-rc-settings:\n#\n# Dynamic rc settings\n# -------------------\n#\n# You can also dynamically change the default rc settings in a python script or\n# interactively from the python shell. All of the rc settings are stored in a\n# dictionary-like variable called :data:`matplotlib.rcParams`, which is global to\n# the matplotlib package. rcParams can be modified directly, for example:\n\nmpl.rcParams['lines.linewidth'] = 2\nmpl.rcParams['lines.color'] = 'r'\nplt.plot(data)\n\n###############################################################################\n# Matplotlib also provides a couple of convenience functions for modifying rc\n# settings. The :func:`matplotlib.rc` command can be used to modify multiple\n# settings in a single group at once, using keyword arguments:\n\nmpl.rc('lines', linewidth=4, color='g')\nplt.plot(data)\n\n###############################################################################\n# The :func:`matplotlib.rcdefaults` command will restore the standard matplotlib\n# default settings.\n#\n# There is some degree of validation when setting the values of rcParams, see\n# :mod:`matplotlib.rcsetup` for details.\n#\n# .. _customizing-with-matplotlibrc-files:\n#\n# The :file:`matplotlibrc` file\n# -----------------------------\n#\n# matplotlib uses :file:`matplotlibrc` configuration files to customize all kinds\n# of properties, which we call `rc settings` or `rc parameters`. You can control\n# the defaults of almost every property in matplotlib: figure size and dpi, line\n# width, color and style, axes, axis and grid properties, text and font\n# properties and so on. matplotlib looks for :file:`matplotlibrc` in four\n# locations, in the following order:\n#\n# 1. :file:`matplotlibrc` in the current working directory, usually used for\n# specific customizations that you do not want to apply elsewhere.\n#\n# 2. :file:`$MATPLOTLIBRC` if it is a file, else :file:`$MATPLOTLIBRC/matplotlibrc`.\n#\n# 3. It next looks in a user-specific place, depending on your platform:\n#\n# - On Linux and FreeBSD, it looks in :file:`.config/matplotlib/matplotlibrc`\n# (or `$XDG_CONFIG_HOME/matplotlib/matplotlibrc`) if you've customized\n# your environment.\n#\n# - On other platforms, it looks in :file:`.matplotlib/matplotlibrc`.\n#\n# See :ref:`locating-matplotlib-config-dir`.\n#\n# 4. :file:`{INSTALL}/matplotlib/mpl-data/matplotlibrc`, where\n# :file:`{INSTALL}` is something like\n# :file:`/usr/lib/python3.5/site-packages` on Linux, and maybe\n# :file:`C:\\\\Python35\\\\Lib\\\\site-packages` on Windows. 
Every time you\n# install matplotlib, this file will be overwritten, so if you want\n# your customizations to be saved, please move this file to your\n# user-specific matplotlib directory.\n#\n# Once a :file:`matplotlibrc` file has been found, it will *not* search any of\n# the other paths.\n#\n# To display where the currently active :file:`matplotlibrc` file was\n# loaded from, one can do the following::\n#\n# >>> import matplotlib\n# >>> matplotlib.matplotlib_fname()\n# '/home/foo/.config/matplotlib/matplotlibrc'\n#\n# See below for a sample :ref:`matplotlibrc file<matplotlibrc-sample>`.\n# Although all parameters are optional, you should almost always set the\n# `backend` or else matplotlib will choose `Agg`, a *non-interactive* backend.\n# This can lead to unexpected behavior, since if you do not have a\n# :file:`matplotlibrc` file, it would normally fall back to\n# :file:`{INSTALL}/matplotlib/mpl-data/matplotlibrc`, which is often set to an\n# interactive backend by the package maintainer.\n#\n# .. _matplotlibrc-sample:\n#\n# A sample matplotlibrc file\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# .. literalinclude:: ../../../matplotlibrc.template\n#\n#\n# .. _matplotlibrc: http://matplotlib.org/users/customizing.html\n# .. _ggplot: http://ggplot2.org/\n# .. _R: https://www.r-project.org/\n# .. _provided by Matplotlib: https://github.com/matplotlib/matplotlib/tree/master/lib/matplotlib/mpl-data/stylelib\n"
] | [
[
"matplotlib.pyplot.style.context",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"matplotlib.rc",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
howardpen9/eth_analytics | [
"9ec2583b864fb41519c654a754218530c944ccf6"
] | [
"defi_historical/lending/liquidations.py"
] | [
"import pandas as pd \nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\n#import plotly.express as px\n\n# Users over time \naave_liquidations = pd.read_csv('data/aave_liquidations.csv') \ncompound_liquidations = pd.read_csv('data/compound_liquidations.csv') \n\naave_liquidations = aave_liquidations.head(7)\ncompound_liquidations = compound_liquidations.head(7)\nprint(aave_liquidations)\n\ncompound_sum_liquidations = compound_liquidations['number_of_liquidations'].sum()\naave_sum_liquidations = aave_liquidations['number_of_liquidations'].sum()\naave_sum_collateral = aave_liquidations['collateral_liquidated_usd'].sum()\nprint(aave_sum_collateral)\n#fig = px.line(aave_liquidations, x=\"day\", y=\"number_of_liquidations\")\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n\nfig.add_trace(go.Bar(x=aave_liquidations['day'], y=aave_liquidations['collateral_liquidated_usd']))\nfig.add_trace(go.Scatter(x=aave_liquidations['day'], y=aave_liquidations['number_of_liquidations'],\n mode='lines',\n name='lines'),secondary_y=True)\nfig.update_layout(\n xaxis=dict(\n showline=True,\n showgrid=False,\n showticklabels=True,\n linewidth=2,\n zeroline=True,\n linecolor='#F4F4F4',\n ticks='outside',\n tickfont=dict(\n family='Arial',\n size=21,\n color='rgb(82, 82, 82)',\n ),\n ),\n yaxis=dict(\n showgrid=True,\n zeroline=True,\n showline=True,\n showticklabels=True,\n gridcolor='#F4F4F4',\n tickfont=dict(\n family='Arial',\n size=21,\n color='grey',\n ),\n ),\n legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n ),\n autosize=True,\n\n plot_bgcolor='white'\n)\n\n'''\nfig.add_layout_image(\n dict(\n source=\"https://images.plot.ly/language-icons/api-home/python-logo.png\",\n xref=\"x\",\n yref=\"y\",\n x=0,\n y=3,\n sizex=2,\n sizey=2,\n sizing=\"stretch\",\n opacity=0.5,\n layer=\"below\")\n)\n'''\nfig.show()\n"
] | [
[
"pandas.read_csv"
]
] |
zzong2006/space-filling-curve-with-RF-learning | [
"30823745dae91240c0977185fb1831c9b4771a40"
] | [
"deprecated/main(ActorCritic).py"
] | [
"import sys\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import combinations\nfrom utils import *\nfrom torch.autograd import Variable\nfrom multiprocessing import Process, Pipe\n\n'''\n * 08-22 : 기존 방식은 locality가 조금이라도 떨어지면 바로 다음 episode로 넘어갔기 때문에 충분히 학습할 여유가 되지않음\n 그렇기 때문에 목숨 개념을 추가해서, 최대 x 번까지 locality가 떨어지게 하는 action을 해도 감점만 하고 지나감\n 그리고 Agent가 가진 기회가 끝났는지에 대한 내용도 정보에 추가함 \n ( 현재는 True, False로 하는데 이후 그냥 목숨인 양의 정수 값으로 추가할 것인지는 고려 중 에 있음)\n\n * 08-26 : - 점수 부여 방식을 조금 수정했는데, 최소 locality가 action을 통해서 나오지 않더라도, 이전의 locality 보다 났다면,\n 조금의 점수를 부여하는 방향으로 바꿨음. 다만 이전의 locality 보다 같으면 감점을 부여함\n - 초기 curve를 zig-zag 뿐만 아니라 hilbert 또는 z-curve로 시작하게끔 수정\n\n * 08-27 : 훈련된 모델을 테스트해볼 수 있도록 test 함수를 Environment 클래스에 추가\n\n * 08-28 : actor-critic 방식 추가 , 신경망 구성을 바꿔서 action 방식을 변환 (하나의 action 분포를 이용해서 두번 선택함)\n Reward Normalization 삭제\n step 을 짧게 (5 ~ 10 이 안정적) 하여서 지속적으로 업데이트하면 학습 정도가 좋다는 사실을 확인함\n => 이를 이용하여 일정 주기에서 curve를 reset하고 고정된 길이 만큼의 reward 내역을 update 하는것이 이상적일 것이라 예측\n \n * 08-29 : (하나의 action 분포를 이용해서 수행할 action을 두번 선택하는 방식은 좋지 않음을 확인함)\n CNN 을 이용하여 입력데이터를 바꾸기\n'''\n\nNOTEBOOK = True\nTEST = False\nCUDA = torch.cuda.is_available()\nDEVICE = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n# ------------ Curve ------------------------- #\nDIM = 2\nORDER = 2\nside = np.sqrt(2 ** (ORDER * DIM)).astype('int')\nINDEX_TO_COORDINATE = np.array(list(map(lambda x: list([x // side, x % side]), np.arange(0, 2 ** (ORDER * DIM)))))\nDATA_SIZE = 20\nMAX_STEP = 200\nCAPACITY = 10000\nINIT_CURVE = 'hilbert'\nNUM_ADVANCED_STEP = 5 # 총 보상을 계산할 때 Advantage 학습을 할 단계 수\n# -------- Hyper Parameter --------------- #\nLEARNING_RATE = 1e-4 # 학습률\nGAMMA = 0.99 # 시간 할인율\nENTROPY_COEFF = 0.01\nVALUE_COEFF = 0.5\nMAX_GRAD_NORM = 0.5\nOFFSET = 0 # 기존 state 좌표 값 외에 신경망에 추가로 들어갈 정보의 갯수\nNUM_PROCESSES = 32 # 동시 실행 환경 수\n\n'''\nGrid (회색 선) 을 그릴 좌표를 써주는 함수\nArg : pmax 값\n'''\n\ndef changeIndexOrder(indexD, a, b):\n a = a.cpu().numpy().astype(int).item()\n b = b.cpu().numpy().astype(int).item()\n\n indexD[[a, b]] = indexD[[b, a]]\n return indexD\n\n\ndef discount_rewards(r):\n \"\"\" take 1D float array of rewards and compute discounted reward \"\"\"\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n running_add = running_add * GAMMA + r[t]\n discounted_r[t] = running_add\n\n # Normalize reward to avoid a big variability in rewards\n mean = np.mean(discounted_r)\n std = np.std(discounted_r)\n if std == 0: std = 1\n normalized_discounted_r = (discounted_r - mean) / std\n return discounted_r\n\n\nclass RolloutStorage(object):\n '''Advantage 학습에 사용할 메모리 클래스'''\n\n def __init__(self, num_steps, num_processes, obs_size):\n self.observations = torch.zeros(num_steps + 1, num_processes, obs_size).to(DEVICE)\n self.masks = torch.ones(num_steps + 1, num_processes, 1).to(DEVICE)\n self.rewards = torch.zeros(num_steps, num_processes, 1).to(DEVICE)\n self.actions = torch.zeros(num_steps, num_processes, 2).long().to(DEVICE)\n\n # 할인 총보상 저장\n self.returns = torch.zeros(num_steps + 1, num_processes, 1).to(DEVICE)\n self.index = 0 # insert할 인덱스\n\n def insert(self, current_obs, action, reward, mask):\n '''현재 인덱스 위치에 transition을 저장'''\n self.observations[self.index + 1].copy_(current_obs)\n self.masks[self.index + 1].copy_(mask)\n self.rewards[self.index].copy_(reward)\n self.actions[self.index].copy_(action)\n\n self.index = (self.index + 1) % NUM_ADVANCED_STEP # 인덱스 값 업데이트\n\n def 
after_update(self):\n '''Advantage학습 단계만큼 단계가 진행되면 가장 새로운 transition을 index0에 저장'''\n self.observations[0].copy_(self.observations[-1])\n self.masks[0].copy_(self.masks[-1])\n\n def compute_returns(self, next_value):\n '''Advantage 학습 범위 안의 각 단계에 대해 할인 총보상을 계산'''\n\n # 주의 : 5번째 단계부터 거슬러 올라오며 계산\n # 주의 : 5번째 단계가 Advantage1, 4번째 단계는 Advantage2가 됨\n self.returns[-1] = next_value\n for ad_step in reversed(range(self.rewards.size(0))):\n self.returns[ad_step] = self.returns[ad_step + 1] * GAMMA * self.masks[ad_step + 1] + self.rewards[ad_step]\n\n\n'''\nSFC를 만드는 모델\n\nNotice\n1. dropout은 쓸지 말지 고민중임\n2. embedding vector를 사용할지 말지 고민하고 있음 (각 데이터의 좌표로 유사성을 파악하기)\n'''\n\n\nclass SFCNet(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(SFCNet, self).__init__()\n self.hidden_size = hidden_size or ((input_size + output_size) // 2)\n\n self.input_nn = nn.Linear(input_size, self.hidden_size)\n self.hidden_nn = nn.Linear(self.hidden_size, self.hidden_size)\n self.first_actor_nn = nn.Linear(self.hidden_size, output_size)\n self.second_actor_nn = nn.Linear(self.hidden_size, output_size)\n self.critic_nn = nn.Linear(self.hidden_size, 1)\n\n def forward(self, input):\n output = torch.relu(self.input_nn(input))\n output = torch.relu(self.hidden_nn(output))\n first_action = self.first_actor_nn(output)\n second_action = self.second_actor_nn(output)\n value = self.critic_nn(output)\n\n return [first_action, second_action], value\n\n\nclass Brain:\n def __init__(self, num_states, num_actions, hidden_size=None):\n self.num_actions = num_actions\n self.num_states = num_states\n\n self.model = SFCNet(num_states, hidden_size, num_actions)\n if CUDA:\n self.model.cuda()\n self.optimizer = optim.Adam(self.model.parameters(), lr=LEARNING_RATE)\n print(self.model)\n\n '''\n Policy Gradient 알고리즘으로 신경망의 결합 가중치 학습\n '''\n\n def update(self, rollouts):\n # Advantage학습의 대상이 되는 5단계 모두를 사용하여 수정\n self.model.eval()\n\n # 상태 x로부터 상태가치, 실제 행동 actions의 로그 확률, 엔트로피를 계산\n actor_output, values = self.model(rollouts.observations[:-1].view(-1, self.num_states))\n log_probs_1 = F.log_softmax(actor_output[0], dim=1)\n log_probs_2 = F.log_softmax(actor_output[1], dim=1)\n\n action_log_probs_1 = log_probs_1.gather(1, rollouts.actions[:, :, 0].view(-1, 1))\n action_log_probs_2 = log_probs_2.gather(1, rollouts.actions[:, :, 1].view(-1, 1))\n\n probs_1 = F.softmax(actor_output[0], dim=1)\n probs_2 = F.softmax(actor_output[1], dim=1)\n\n # 엔트로피 H : action이 확률적으로 얼마나 퍼져 있는가? 
(비슷한 확률의 다중 액션 -> high, 단일 액션 -> low)\n entropy = -((log_probs_1 * probs_1 + log_probs_2 * probs_2)).sum(-1).mean()\n\n values = values.view(NUM_ADVANCED_STEP, NUM_PROCESSES, 1)\n action_log_probs_1 = action_log_probs_1.view(NUM_ADVANCED_STEP, NUM_PROCESSES, 1)\n action_log_probs_2 = action_log_probs_2.view(NUM_ADVANCED_STEP, NUM_PROCESSES, 1)\n\n # advantage(행동가치(할인 총 보상, discounted reward)-상태가치(critic value)) 계산\n advantages = rollouts.returns[:-1] - values\n\n # Critic의 loss 계산\n value_loss = advantages.pow(2).mean()\n\n # Actor의 gain 계산, 나중에 -1을 곱하면 loss가 된다\n action_gain = ((action_log_probs_1 + action_log_probs_2) * advantages.detach()).mean()\n # detach 메서드를 호출하여 advantages를 상수로 취급\n\n # 오차함수의 총합\n total_loss = (value_loss * VALUE_COEFF - action_gain - entropy * ENTROPY_COEFF)\n self.model.train()\n self.optimizer.zero_grad()\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), MAX_GRAD_NORM)\n # 결합 가중치가 한번에 너무 크게 변화하지 않도록, 경사를 0.5 이하로 제한함(클리핑)\n\n self.optimizer.step() # 결합 가중치 수정\n\n def decide_action(self, state, episode):\n with torch.no_grad():\n action, _ = self.model(state)\n\n a = torch.softmax(action[0], dim=1)\n b = torch.softmax(action[1], dim=1)\n\n a = a.multinomial(1).data\n b = b.multinomial(1).data\n\n # equivalent with ...\n # a, b = np.random.choice(dist, size=2, replace=False, p=dist)\n # a = np.argmax(dist == a)\n # b = np.argmax(dist == b)\n return torch.cat((a, b), 1)\n\n def compute_value(self, state):\n _, value = self.model(state)\n return value\n\n\nclass Agent():\n def __init__(self, num_states, num_actions):\n self.brain = Brain(num_states, num_actions)\n\n def update_policy_function(self, history):\n self.brain.update(history)\n\n def get_action(self, state, step):\n action = self.brain.decide_action(state, step)\n return action\n\n def get_value(self, state):\n value = self.brain.compute_value(state)\n return value\n\n\nclass Env:\n def __init__(self, data_index, order, max_episode, max_step, init_curve, dimension=2):\n self.DIM = dimension\n self.iteration = order\n self.MAX_STEP = max_step\n self.MAX_EPISODE = max_episode\n self.data_index = data_index\n self.initial_curve = init_curve\n\n self.num_action_space = 2 ** (dimension * order)\n self.num_observation_space = 2 ** (dimension * order) * 3 + OFFSET\n\n # Reward 설정용\n self.init_coords = build_init_coords(order, dimension, init_curve)\n self.agent = Agent(self.num_observation_space, self.num_action_space)\n self.analyzer = Analyzer(data_index, self.init_coords.copy(), order=order, dim=dimension)\n self.hilbert = HilbertCurve(dimension=dimension)\n self.z = ZCurve(dimension=dimension)\n\n '''\n 초기 state를 생성하는 함수; \n 1. 활성화된 데이터의 binary 표현\n\n 3. 
전체 area에서 curve를 따라서 모든 활성화된 데이터를 지날 수 있는 curve의 최소 길이 (또는 query area에서만 구할 수 있는 길이) : (미구현) \n '''\n\n def reset(self, data_index):\n avail = np.zeros((2 ** (self.iteration * self.DIM), 1))\n avail[data_index] = 1\n observation = np.concatenate((avail, self.init_coords), axis=1)\n self.observation = observation\n return observation\n\n '''\n Agent로 부터 행동을 선택하고 그 행동에 맞춰서 이후 상태 관찰,\n 관찰에 기반하여 보상을 계산하고 이들을 버퍼에 저장\n 버퍼에 충분히 Transition을 저장했다고 생각하면 신경망(Q 함수) 업데이트\n '''\n\n def run(self):\n span = 100\n prev_o_num = -1\n total_sum = 0\n total_reward = 0\n\n current_obs = torch.zeros(NUM_PROCESSES, self.num_observation_space)\n episode_rewards = torch.zeros([NUM_PROCESSES, 1]) # 현재 에피소드의 보상\n obs_np = np.zeros([NUM_PROCESSES, self.num_observation_space]) # Numpy 배열\n reward_np = np.zeros([NUM_PROCESSES, 1]) # Numpy 배열\n o_num = np.zeros(NUM_PROCESSES) # 각 환경의 단계 수를 기록\n\n locality_average_list = []\n locality_per_episode_list = []\n locality_per_step_list = []\n episode_list = np.zeros(span)\n reward_list = np.zeros(span)\n\n avail = np.zeros((2 ** (self.iteration * self.DIM), 1))\n avail[self.data_index] = 1\n\n h_state = np.concatenate((avail, self.hilbert.getCoords(self.iteration)), axis=1)\n z_state = np.concatenate((avail, self.z.getCoords(self.iteration)), axis=1)\n h_num = self.analyzer.l2NormLocality(h_state)\n z_num = self.analyzer.l2NormLocality(z_state)\n\n observation = np.array([self.reset(self.data_index) for i in range(NUM_PROCESSES)])\n min_o_num = np.array([self.analyzer.l2NormLocality(x) for x in observation])\n print(f'hilbert : {h_num} , Z : {z_num}, Initial ({self.initial_curve}) : {min(min_o_num)}')\n\n global_min_o_num = min(min_o_num) # 최소 locality 의 역(reverse) 값\n global_min_state = np.empty(1)\n\n rollouts = RolloutStorage(NUM_ADVANCED_STEP, NUM_PROCESSES, self.num_observation_space) # rollouts 객체\n # advanced 학습에 사용되는 객체 rollouts 첫번째 상태에 현재 상태를 저장\n obs = torch.from_numpy(observation).float().to(DEVICE).view(NUM_PROCESSES, -1)\n current_obs = obs\n rollouts.observations[0].copy_(current_obs)\n\n for episode in range(self.MAX_EPISODE): # 최대 에피소드 수만큼 반복\n for step in range(NUM_ADVANCED_STEP):\n with torch.no_grad():\n action = self.agent.get_action(rollouts.observations[step], episode)\n\n # 한 단계를 실행\n for i in range(NUM_PROCESSES):\n obs_next = self.step(observation[i], action[i].unsqueeze(0))\n obs_np[i] = obs_next.reshape([-1])\n o_num[i] = self.analyzer.l2NormLocality(obs_next)\n if i == 0:\n # print(action[i][0].data, action[i][1].data)\n locality_per_step_list.append(o_num[i])\n\n # Update Minimum reverse of the locality\n if global_min_o_num > o_num[i]:\n global_min_o_num = o_num[i]\n global_min_state = obs_next.copy()\n\n # Reward Part\n # if ((prev_action[0][0].data == action[0][0].data) and (\n # prev_action[0][1].data == action[0][1].data)):\n # reward_np[i] = -10.0\n # else:\n if min_o_num[i] < o_num[i]:\n reward_np[i] = -1.0\n elif min_o_num[i] == o_num[i]:\n reward_np[i] = 0\n else:\n reward_np[i] = 10\n min_o_num[i] = o_num[i]\n\n prev_o_num = o_num[i]\n # prev_action = action.clone()\n print(f'reward : {reward_np.sum()}, locality : {o_num.mean()}')\n\n reward = torch.from_numpy(reward_np).float().to(DEVICE)\n masks = torch.from_numpy(np.ones([NUM_PROCESSES, 1])).float().to(DEVICE)\n current_obs = torch.from_numpy(obs_np).float().to(DEVICE)\n\n rollouts.insert(current_obs, action.data, reward, masks)\n\n # advanced 학습 for문 끝\n # advanced 학습 대상 중 마지막 단계의 상태로 예측하는 상태가치를 계산\n with torch.no_grad():\n next_value = 
self.agent.get_value(rollouts.observations[-1]).detach()\n rollouts.compute_returns(next_value)\n\n # 신경망 및 rollout 업데이트\n self.agent.update_policy_function(rollouts)\n rollouts.after_update()\n\n # metrics 측정 부분 여기는 학습 방식이 정리되면 수정할 것\n if episode % 5 == 0:\n observation = np.array([self.reset(self.data_index) for i in range(NUM_PROCESSES)])\n min_o_num = np.array([self.analyzer.l2NormLocality(x) for x in observation])\n locality_per_episode_list.append(total_sum / (step + 1))\n\n print(f'{episode} : {global_min_o_num}')\n\n if NOTEBOOK:\n f = plt.figure(figsize=(30, 8))\n\n ax2 = f.add_subplot(111)\n plt.plot(locality_per_step_list, 'b-')\n plt.xlabel('step')\n plt.ylabel('Reverse of the locality')\n\n plt.tight_layout()\n plt.show(block=True)\n\n return global_min_o_num, global_min_state\n\n def test(self, data_index, max_episode, max_step):\n avail = np.zeros((2 ** (self.iteration * self.DIM), 1))\n avail[data_index] = 1\n\n TestAnalyzer = Analyzer(data_index, self.init_coords.copy(), order=self.iteration, dim=self.DIM)\n\n h_state = np.concatenate((avail, self.hilbert.getCoords(self.iteration)), axis=1)\n z_state = np.concatenate((avail, self.z.getCoords(self.iteration)), axis=1)\n h_num = TestAnalyzer.l2NormLocality(h_state)\n z_num = TestAnalyzer.l2NormLocality(z_state)\n\n self.observation = self.reset(data_index)\n min_o_num = TestAnalyzer.l2NormLocality(self.observation)\n global_min_o_num = min_o_num # 최소 locality 의 역(reverse) 값\n global_min_state = np.empty(1)\n\n print(f'hilbert : {h_num} , Z : {z_num}, Initial : {min_o_num}')\n\n for episode in range(max_episode):\n self.observation = self.reset(data_index)\n min_o_num = TestAnalyzer.l2NormLocality(self.observation)\n observation = self.observation\n state = torch.from_numpy(observation).type(torch.cuda.FloatTensor)\n state = state.view(1, -1)\n\n for step in range(max_step):\n action = self.agent.get_action(state, episode)\n self.observation_next = self.step(action)\n o_num = TestAnalyzer.l2NormLocality(self.observation_next)\n\n # Update Minimum reverse of the locality\n if global_min_o_num > o_num:\n global_min_o_num = o_num\n global_min_state = self.observation_next.copy()\n\n state_next = self.observation_next\n state_next = torch.from_numpy(state_next).type(torch.cuda.FloatTensor)\n state_next = state_next.view(1, -1)\n state = state_next\n\n print(f'episode {episode} is over within step {step + 1}. 
\\n'\n f'Recorded the minimum reverse of the locality so far: {global_min_o_num}')\n\n return global_min_o_num, global_min_state\n\n '''\n 주어진 action 을 수행하고 난 뒤의 state를 반환\n '''\n\n def step(self, state, choosenAction):\n next_state = changeIndexOrder(state, choosenAction[:, 0], choosenAction[:, 1])\n return next_state\n\n'''\nindex (n) 은 다음과 같이 좌표로 표시됨\nn 의 최댓값은 DIM * ORDER - 1 \n좌표 값은 ( n // (DIM * ORDER), n % (DIM * ORDER) ) \n'''\n\n\ndef main():\n np.random.seed(210)\n\n side = np.sqrt(2 ** (ORDER * DIM))\n scan_index = np.random.choice(2 ** (DIM * ORDER), size=DATA_SIZE, replace=False)\n sample_data = np.array(list(map(lambda x: list([x // side, x % side]), scan_index)))\n if NOTEBOOK:\n fig, ax = plt.subplots(1, figsize=(10, 10))\n show_points(sample_data, ax, index=False)\n\n if INIT_CURVE == 'hilbert':\n show_line_by_index_order(np.array(HilbertCurve(DIM).getCoords(ORDER)), ax, index=False)\n elif INIT_CURVE == 'zig-zag':\n grid_index = np.arange(2 ** (ORDER * DIM))\n show_line_by_index_order(grid_index, ax)\n plt.show(block=True)\n\n env = Env(data_index=scan_index, order=ORDER, max_episode=3000, max_step=10,\n init_curve=INIT_CURVE, dimension=DIM)\n result_value, result_state = env.run()\n\n print(f'Recorded the minimum reverse of the locality :{result_value}')\n if NOTEBOOK:\n fig, ax = plt.subplots(1, figsize=(10, 10))\n show_points(sample_data, ax, index=False)\n show_line_by_index_order(result_state[:, 1:3].reshape([-1, 2]), ax, index=False)\n plt.show(block=True)\n\n # Test trained model\n if TEST:\n np.random.seed(175)\n\n print(f'Start testing trained model ... ')\n test_index = np.random.choice(2 ** (DIM * ORDER), size=DATA_SIZE, replace=False)\n result_value, result_state = env.test(test_index, max_episode=1000, max_step=100)\n\n print(f'[TEST] Recorded the minimum reverse of the locality :{result_value}')\n\n if NOTEBOOK:\n fig, ax = plt.subplots(1, figsize=(10, 10))\n show_points(sample_data, ax, index=False)\n show_line_by_index_order(result_state[:, 1:3].reshape([-1, 2]), ax, index=False)\n plt.show(block=True)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"numpy.random.choice",
"numpy.mean",
"torch.ones",
"torch.cuda.is_available",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.empty",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt",
"torch.zeros",
"numpy.zeros",
"torch.nn.functional.log_softmax",
"matplotlib.pyplot.figure",
"numpy.std",
"torch.nn.functional.softmax",
"matplotlib.pyplot.show",
"numpy.random.seed",
"matplotlib.pyplot.xlabel",
"torch.no_grad",
"matplotlib.pyplot.plot",
"numpy.ones",
"torch.softmax",
"torch.from_numpy",
"matplotlib.pyplot.ylabel"
]
] |
rohithredd94/Computer-Vision-using-OpenCV | [
"9238a3284158f566461782e1410a5bf3ef88c03b"
] | [
"Harris-Corners/harris_corners.py"
] | [
"import cv2\nimport numpy as np\nimport sys\nimport random\nfrom collections import OrderedDict\n\nimgs = ['transA.jpg', 'transB.jpg', 'simA.jpg', 'simB.jpg']\n\ndef calc_grad(img, k_sobel, norm,k):\n if k == 'x':\n grad = cv2.Sobel(img, cv2.CV_64F, 1, 0, k_sobel)\n elif k == 'y':\n grad = cv2.Sobel(img, cv2.CV_64F, 0, 1, k_sobel)\n\n if norm:\n grad = cv2.normalize(grad, grad, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\n #grad = grad\n return grad\n\ndef harris_values(img, window_size, harris_scoring, norm):\n #Calculate X and Y gradients\n grad_x = calc_grad(img, 3, False, 'x')\n grad_y = calc_grad(img, 3, False, 'y')\n grad_xx = grad_x ** 2\n grad_xy = grad_x * grad_y\n grad_yx = grad_y * grad_x\n grad_yy = grad_y ** 2\n\n #Calculate the weight window matrix\n c = np.zeros((window_size,)*2, dtype=np.float32); \n c[int (window_size / 2), int (window_size / 2)] = 1.0\n w = cv2.GaussianBlur(c, (window_size,)*2, 0)\n\n #Calculating the harris window values for the given image\n har_val = np.zeros(img.shape, dtype=np.float32)\n for r in range(int (w.shape[0]/2), int (img.shape[0] - w.shape[0]/2)): #Iterating over the window size\n print(r)\n minr = int (max(0, r - w.shape[0]/2))\n maxr = int (min(img.shape[0], minr + w.shape[0]))\n for c in range(int (w.shape[1]/2), int (img.shape[1] - w.shape[1]/2)):\n minc = int (max(0, c - w.shape[1]/2))\n maxc = int (min(img.shape[1], minc + w.shape[1]))\n wgrad_xx = grad_xx[minr:maxr, minc:maxc]\n wgrad_xy = grad_xy[minr:maxr, minc:maxc]\n wgrad_yx = grad_yx[minr:maxr, minc:maxc]\n wgrad_yy = grad_yy[minr:maxr, minc:maxc]\n m_xx = (w * wgrad_xx).sum()\n m_xy = (w * wgrad_xy).sum()\n m_yx = (w * wgrad_yx).sum()\n m_yy = (w * wgrad_yy).sum()\n M = np.array([m_xx, m_xy, m_yx, m_yy]).reshape((2,2))\n har_val[r,c] = np.linalg.det(M)- harris_scoring * (M.trace() ** 2)\n #Scaling the images\n if norm:\n har_val = cv2.normalize(har_val, har_val, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\n return har_val\n\ndef harris_corners(img, window_size, harris_scoring, threshold, nms_size):\n if len(img.shape) > 2:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # calculate harris values for all valid pixels\n corners = harris_values(img, window_size, harris_scoring, False)\n # apply thresholding\n corners = corners * (corners > (threshold * corners.max())) * (corners > 0)\n # apply non maximal suppression\n rows, columns = np.nonzero(corners)\n new_corners = np.zeros(corners.shape)\n for r,c in zip(rows,columns):\n minr = int (max(0, r - nms_size / 2))\n maxr = int (min(img.shape[0], minr + nms_size))\n minc = int (max(0, c - nms_size / 2))\n maxc = int (min(img.shape[1], minc + nms_size))\n if corners[r,c] == corners[minr:maxr,minc:maxc].max():\n new_corners[r,c] = corners[r,c]\n # corners[minr:r, minc:c] = 0\n # corners[r+1:maxr, c+1:maxc] = 0\n return new_corners\n\ndef harris():\n images = imgs[0:3:2];\n for i, img in enumerate(images):\n img = cv2.imread('resources/'+img, cv2.IMREAD_GRAYSCALE)#Read the image\n #Calculate X and Y gradients\n print(\"Calculating X & Y Gradients\")\n img_grad_x = calc_grad(img, 3, True, 'x')\n img_grad_y = calc_grad(img, 3, True, 'y')\n #Save the calculated gradients\n cv2.imwrite('results/gradients-'+images[i]+'.png', np.hstack((img_grad_x, img_grad_y)))\n print(\"X & Y Gradients are saved to images in results folder\")\n\n #Read the images and calculate harris values for all the images\n for i, img in enumerate(imgs):\n img = cv2.imread('resources/'+img, cv2.IMREAD_GRAYSCALE)#Read the image\n 
n = i\n har_val = harris_values(img, 3, 0.04, True)\n cv2.imwrite('results/harris-values'+imgs[i]+'.png', har_val)\n print(\"Harris Values are saved to images in results folder\")\n\n img = np.float32(img)\n corners = harris_corners(img, 3, 0.04,1e-3, 5)\n x = img.shape[0]\n y = img.shape[1]\n for i in range(x):\n for j in range(y):\n if(corners[i][j] > 0):\n img[i][j] = 255\n cv2.imwrite('results/harris-corners'+imgs[n]+'.png', img)\n print(\"Harris Corners are saved to images in results folder\")\n\nif __name__ == '__main__':\n print(\"Executing Harris Corners\")\n harris()\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.linalg.det",
"numpy.nonzero",
"numpy.float32",
"numpy.hstack"
]
] |
codeforfrankfurt/PolBotCheck | [
"cef5136008301dcb16eb06dc5f18d00510c77f35"
] | [
"polbotcheck/plots/front_back_link.py"
] | [
"import sys\nfrom os import path\nsys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )\n\nimport db\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\n\n# takes data (that should come from the backend) and creates the output we would like to have\n# on the front end.\n\ndef follower_botness(username):\n#given a username, it creates the histogram of the botness of the followers \n#and saves it in plots (for now) it also returns the probable percentage of follower bots\n#(cutoff needs to be defined, for now it is 0.7)\"\"\" \n cutoff = 0.7\n scorelist = []\n followers = db.getFollowers(toName=username)\n for f in followers:\n follower = f['_from'].split('/')[1]\n score = db.getUser(follower)['botness']['score']\n scorelist.append(score)\n\n if scorelist:\n scores = pd.Series(scorelist, name='probability of follower bot') \n ax = sns.distplot(scores) \n fig = ax.get_figure()\n fig.savefig('testfig.png')\n botpercent = sum(np.array(scorelist)>cutoff) / len(scorelist)\n return botpercent\n else:\n return None\n"
] | [
[
"numpy.array",
"pandas.Series"
]
] |
ACTCollaboration/moby2 | [
"b0f6bd6add7170999eb964d18f16d795520426e9"
] | [
"python/tod/flags.py"
] | [
"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom past.builtins import basestring\nimport moby2\nimport numpy as np\n\nfrom moby2.tod import TODCuts, CutsVector\nfrom moby2.util import HDFArchive\n\nclass TODFlags:\n \"\"\"\n Manages the association of a set of flags to each sample of each\n detector in a TOD. Helps with I/O of such information, and the\n conversion of different combinations of flags to TODCuts (which\n are effectively are carry a single flag bit).\n \"\"\"\n\n def __init__(self, ndets=None, nsamps=None, det_uid=None,\n sample_offset=None, flag_names=None):\n \"\"\"To initialize the class, pass at least the det_uid vector or ndets\n (from which a simple det_uid vector will be generated).\n Parameters nsamps and sample_offset have the same meaning as\n in TODCuts. flag_names can be used to initialize a set of\n base flags.\n \"\"\"\n if ndets is None:\n ndets = len(det_uid)\n if det_uid is None:\n det_uid = np.arange(ndets)\n self.det_uid = det_uid\n self.nsamps = nsamps\n self.sample_offset = sample_offset\n self.flag_names = []\n self.base_flags = []\n self.flag_defs = {}\n if flag_names is not None:\n for n in flag_names:\n self.add_base_flag(n)\n\n @classmethod\n def for_cuts(cls, cuts):\n \"\"\"Returns an empty TODFlags instance based on the TODCuts instance\n passed in. This just sets det_uid, sample_offset, nsamps.\n The cuts themselves are not copied in.\n \"\"\"\n return cls(nsamps=cuts.nsamps, det_uid=cuts.det_uid,\n sample_offset=cuts.sample_offset)\n\n def add_base_flag(self, name, data=None, make_copy=True):\n \"\"\"Create a new base_flag, with the given name. If data is passed in,\n it should be a TODCuts object, and it will be used to\n initialize the flag.\n \"\"\"\n assert name not in self.flag_names\n self.flag_defs[name] = ('base', len(self.flag_names))\n self.flag_names.append(name)\n\n if not make_copy:\n if data is None:\n make_copy = True\n else:\n # Check that cuts are compatible...\n if data.nsamps != self.nsamps or data.sample_offset != self.sample_offset:\n print(' ...overriding no-copy request (\"%s\").' % name)\n make_copy = True\n if make_copy:\n self.base_flags.append(TODCuts(\n det_uid=self.det_uid, nsamps=self.nsamps,\n sample_offset=self.sample_offset))\n if data is not None:\n self.base_flags[-1].merge_tod_cuts(data)\n else:\n self.base_flags.append(data)\n \n def add_derived_flag(self, name, cut_components, uncut_components=[]):\n \"\"\"Create a new derived flag, called `name`. 
The flag is derived by\n or-ing together the flags listed in cut_components, \"\"\"\n assert name not in self.flag_names\n comp_i0 = [self.flag_names.index(c) for c in cut_components]\n comp_i1 = [self.flag_names.index(c) for c in uncut_components]\n self.flag_defs[name] = ('derived', comp_i0, comp_i1)\n self.flag_names.append(name)\n\n def get_cuts(self, name):\n if not name in self.flag_defs:\n raise ValueError(\"Flag '%s' not found in flag_defs: %s\" % (\n name, list(self.flag_defs.keys())))\n flag_def = self.flag_defs[name]\n if flag_def[0] == 'base':\n return self.base_flags[self.flag_names.index(name)].copy()\n elif flag_def[0] == 'derived':\n i_pos, i_neg = flag_def[1:]\n cuts = TODCuts(det_uid=self.det_uid, nsamps=self.nsamps,\n sample_offset=self.sample_offset)\n for i in i_pos:\n cuts.merge_tod_cuts(self.base_flags[i])\n for i in i_neg:\n cuts.merge_tod_cuts(self.base_flags[i].get_complement())\n return cuts\n\n \"\"\"\n I/O.\n \"\"\"\n\n @staticmethod\n def _explode_bits(bits, total_bits):\n \"\"\"Create an array of uint8 of sufficient size to express `total_bits`\n bits. Set bits to 1 according to the list of bits in `bits`.\n For example, if bits=[0,1,5,15] and total_bits=24, the\n function returns array([35,128,0]).\n \"\"\"\n out = np.zeros((total_bits + 7) // 8, 'uint8')\n for b in bits:\n out[b >> 8] |= (1 << (b % 8))\n return out\n\n def _prepare_output(self):\n \"\"\"Service function for output routines; recode internal state for\n easy storage.\n \"\"\"\n # Use C routine to convert raw cuts vectors to packed form.\n X = moby2.libactpol.pack_flags(\n [x.cuts for x in self.base_flags], self.nsamps)\n stack_bounds, flag_stack, index_stack = X\n base_names = [HDFArchive.encode_23(n) for n in self.flag_names\n if self.flag_defs[n][0] == 'base']\n output = [\n ('!sample_offset', self.sample_offset),\n ('!sample_count', self.nsamps),\n ('det_uid', self.det_uid),\n ('stack_bounds', stack_bounds),\n ('index_stack', index_stack),\n ('flag_stack', flag_stack),\n ('flag_names', np.array(base_names)),\n ]\n n_words = flag_stack.shape[1]\n dt, word_size = np.uint8, 8\n\n # Add on the derived field information.\n derived = []\n for n in self.flag_names:\n if self.flag_defs[n][0] != 'derived':\n continue\n flagval = np.zeros((2, n_words), dtype=dt)\n i_pos, i_neg = self.flag_defs[n][1:]\n for i in i_pos:\n flagval[0, i >> word_size] |= (1 << i)\n for i in i_neg:\n flagval[1, i >> word_size] |= (1 << i)\n derived.append((HDFArchive.encode_23(n), flagval))\n if len(derived) > 0:\n dnames, dmasks = list(map(np.array, list(zip(*derived))))\n else:\n dnames = np.zeros((0,), dtype='S0')\n dmasks = np.zeros((0,2,n_words), dtype=dt) # if empty, give right shape.\n output.append(('derived_names', dnames))\n output.append(('derived_masks', dmasks))\n return output\n \n def write_hdf(self, target):\n \"\"\"Encode this object into an HDF5 file. The target must be an empty\n h5py HDF5 group.\n\n By design, this function does not support a string address\n (file + group name); use TODFlagsArchive for container\n management.\n \"\"\"\n output = self._prepare_output()\n for k, v in output:\n if k[0] == '!':\n target.attrs[HDFArchive.encode_23(k[1:])] = v\n else:\n target.create_dataset(HDFArchive.encode_23(k), data=v,\n compression='gzip')\n HDFArchive.set_class(target, 'tod_flags', 1)\n\n @classmethod\n def from_hdf(cls, target):\n \"\"\"Load an instance of this class from the HDF5 object pointed to by\n target. 
This could be an open h5py.File, or a group within\n one.\n\n By design, this function does not support a string address\n (file + group name). For such container management, use a\n TODFlagsArchive.\n \"\"\"\n HDFArchive.check_class(target, 'tod_flags', 1)\n\n self = cls(det_uid=np.array(target['det_uid']),\n nsamps=target.attrs['sample_count'],\n sample_offset=target.attrs['sample_offset'])\n for name in target['flag_names']:\n name = HDFArchive.decode_23(name) # py2/3\n self.add_base_flag(name)\n bit_masks = [self._explode_bits([b], len(self.base_flags))\n for b in range(len(self.base_flags))]\n\n # Call C layer to get lists\n cvecs = moby2.libactpol.unpack_flags(\n np.array(target[b'stack_bounds'], 'uint32'),\n np.array(target[b'flag_stack'], 'uint8'),\n np.array(target[b'index_stack'], 'uint32'),\n target.attrs[b'sample_count'],\n len(target[b'flag_names']))\n\n for i in range(len(self.det_uid)):\n for b, m in enumerate(bit_masks):\n self.base_flags[b].cuts[i] = CutsVector(\n cuts_in=cvecs[b][i], nsamps=self.nsamps)\n\n # The derived fields.\n for i,n in enumerate(target['derived_names']):\n pos = [HDFArchive.decode_23(_n) for _n,m in\n zip(target[b'flag_names'], bit_masks)\n if np.any(target[b'derived_masks'][i][0] & m)]\n neg = [HDFArchive.decode_23(_n) for _n,m in\n zip(target[b'flag_names'], bit_masks)\n if np.any(target[b'derived_masks'][i][1] & m)]\n self.add_derived_flag(HDFArchive.decode_23(n), pos, neg)\n\n return self\n \n def write_hdf_deprecated(self, hdf_file, group_name=None, clobber=False,\n compression='gzip'):\n \"\"\"Store the TODFlags flags object to the given hdf_file, which can be\n a filename, or a node in an open h5py.File instance. Data are\n written to group `group_name`. If group exists, function will\n fail unless clobber=True, in which case the existing group\n will be deleted.\n \"\"\"\n import h5py\n\n open_file_in = not isinstance(hdf_file, basestring)\n if not open_file_in:\n hdf_file = h5py.File(hdf_file, 'a')\n if group_name is None:\n group_name = '/'\n if group_name in hdf_file:\n print('Found existing group %s' % group_name)\n group = hdf_file[group_name]\n if clobber:\n for k,v in output:\n if k[0] == '!' and k[1:] in group.attrs:\n del group.attrs[k[1:]]\n elif k in group:\n del group[k]\n else:\n group = hdf_file.create_group(group_name)\n\n self.write_hdf(group)\n\n if not open_file_in:\n # This is not reliable... safer to pass in an open h5py.File.\n hdf_file.close()\n\n @classmethod\n def from_hdf_deprecated(cls, hdf_file, group_name='/'):\n \"\"\"Initialize a TODFlags flags object from the specified hdf_file\n filename. Data are loaded from HDF5 group `group_name`.\n \"\"\"\n import h5py\n open_file_in = not isinstance(hdf_file, basestring)\n if not open_file_in:\n hdf_file = h5py.File(hdf_file, 'r')\n group = hdf_file[group_name]\n\n self = cls.from_hdf(group)\n\n if not open_file_in:\n hdf_file.close()\n return self\n\n\nclass TODFlagsArchive(HDFArchive):\n _moby2_class_name = 'tod_flags_archive'\n _moby2_class_version = 0\n _moby2_read_func = TODFlags.from_hdf\n #_moby2_write_func = TODFlags.write_hdf\n\n def _moby2_write_func(self, item, dest):\n return item.write_hdf(dest)\n"
] | [
[
"numpy.any",
"numpy.array",
"numpy.arange",
"numpy.zeros"
]
] |
WenRichard/Customer-Chatbot | [
"48508c40574ffac8ced414a5bea799e2c85341ca"
] | [
"smart-chatbot-zero/Recall/recall_model.py"
] | [
"# -*- coding: utf-8 -*-\n# @Time : 2019/4/3 16:48\n# @Author : Alan\n# @Email : [email protected]\n# @File : tmodel.py\n# @Software: PyCharm\n\nimport pandas as pd\nimport matplotlib as mpl\nimport numpy as np\nfrom nltk.probability import FreqDist\nimport time\n\nfrom .jiebaSegment import *\nfrom .sentenceSimilarity import SentenceSimilarity\n\nmpl.rcParams['font.sans-serif'] = ['Microsoft YaHei'] # enable chinese\n\n# 设置外部词\nseg = Seg()\nseg.load_userdict('./userdict/userdict.txt')\n\n\ndef read_corpus1():\n qList = []\n # 问题的关键词列表\n qList_kw = []\n aList = []\n data = pd.read_csv('./data/corpus1/faq/qa_.csv', header=None)\n data_ls = np.array(data).tolist()\n for t in data_ls:\n qList.append(t[0])\n qList_kw.append(seg.cut(t[0]))\n aList.append(t[1])\n return qList_kw, qList, aList\n\n\ndef read_corpus2():\n qList = []\n # 问题的关键词列表\n qList_kw = []\n aList = []\n with open('./data/corpus1/chat/chat-small2.txt', 'r', encoding='utf-8') as f2:\n for i in f2:\n t = i.split('\\t')\n s1 = ''.join(t[0].split(' '))\n s2 = ''.join(t[1].strip('\\n'))\n qList.append(s1)\n qList_kw.append(seg.cut(s1))\n aList.append(s2)\n\n return qList_kw, qList, aList\n\n\ndef plot_words(wordList):\n fDist = FreqDist(wordList)\n #print(fDist.most_common())\n print(\"单词总数: \",fDist.N())\n print(\"不同单词数: \",fDist.B())\n fDist.plot(10)\n\n\ndef main(question, top_k, task='faq'):\n # 读取数据\n if task == 'chat':\n List_kw, questionList, answerList = read_corpus2()\n else:\n List_kw, questionList, answerList = read_corpus1()\n # 初始化模型\n ss = SentenceSimilarity(seg)\n ss.set_sentences(questionList)\n ss.TfidfModel() # tfidf模型\n # ss.LsiModel() # lsi模型\n # ss.LdaModel() # lda模型\n question_k = ss.similarity_k(question, top_k)\n return question_k, questionList, answerList\n\n\nif __name__ == '__main__':\n # 设置外部词\n seg = Seg()\n seg.load_userdict('./userdict/userdict.txt')\n # 读取数据\n List_kw, questionList, answerList = read_corpus1()\n # 初始化模型\n ss = SentenceSimilarity(seg)\n ss.set_sentences(questionList)\n ss.TfidfModel() # tfidf模型\n # ss.LsiModel() # lsi模型\n # ss.LdaModel() # lda模型\n\n while True:\n question = input(\"请输入问题(q退出): \")\n if question == 'q':\n break\n time1 = time.time()\n question_k = ss.similarity_k(question, 5)\n print(\"亲,我们给您找到的答案是: {}\".format(answerList[question_k[0][0]]))\n for idx, score in zip(*question_k):\n print(\"same questions: {}, score: {}\".format(questionList[idx], score))\n time2 = time.time()\n cost = time2 - time1\n print('Time cost: {} s'.format(cost))\n\n\n\n\n\n\n\n\n"
] | [
[
"numpy.array",
"pandas.read_csv"
]
] |
uyhcire/hexit | [
"d35a712e34e751cb754397240d284bb0a48b6b5b"
] | [
"neural/train.py"
] | [
"import os\nimport random\n\nimport tensorflow as tf\nimport training_game_pb2\n\n\nRESIDUAL_FILTERS = 16\nRESIDUAL_BLOCKS = 2\nLEARNING_RATE = 0.1\n\n\ndef get_data():\n inputs = []\n policy_targets = []\n value_targets = []\n\n i = 0\n while os.path.exists('training_games/{}'.format(i)):\n print('{} games loaded'.format(i))\n with open('training_games/{}'.format(i), 'rb') as f:\n data = f.read()\n training_game = training_game_pb2.TrainingGame()\n training_game.ParseFromString(data)\n move_snapshot = random.choice(training_game.moveSnapshots)\n inputs.append(\n list(move_snapshot.squaresOccupiedByMyself) + \\\n list(move_snapshot.squaresOccupiedByOtherPlayer))\n policy_targets.append(list(move_snapshot.normalizedVisitCounts))\n value_targets.append([+1] if move_snapshot.winner == training_game_pb2.TrainingGame.MYSELF else [-1])\n i += 1\n\n return inputs, policy_targets, value_targets\n\n\ndef main():\n session = tf.Session()\n tf.keras.backend.set_session(session)\n\n # 25 inputs for the player to move, 25 for the other player\n board_input = tf.keras.layers.Input(shape=(5*5*2,), dtype='float32', name='boardInput')\n policy_output = tf.keras.layers.Dense(5*5, activation='softmax', name='policyOutput')(board_input)\n value_output = tf.keras.layers.Dense(1, activation='tanh', name='valueOutput')(board_input)\n\n model = tf.keras.models.Model(inputs=[board_input], outputs=[policy_output, value_output])\n sgd = tf.keras.optimizers.SGD(lr=LEARNING_RATE, momentum=0.9, nesterov=True)\n model.compile(\n optimizer=sgd, \n loss=['categorical_crossentropy', 'mean_squared_error'],\n loss_weights=[1.0, 1.0],\n )\n\n print('Loading data...')\n inputs, policy_targets, value_targets = get_data()\n print('...done')\n\n model.fit(\n [inputs], [policy_targets, value_targets], \n epochs=10, batch_size=100, validation_split=0.1)\n\n builder = tf.saved_model.builder.SavedModelBuilder('hexit_saved_model')\n builder.add_meta_graph_and_variables(session, ['serve'])\n builder.save()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.layers.Input",
"tensorflow.Session",
"tensorflow.keras.backend.set_session",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.saved_model.builder.SavedModelBuilder"
]
] |
LonelyTItor/TransAnomaly | [
"dd33ef87cbf786a73d5af320c57fb3cc285ca1ea"
] | [
"myTest.py"
] | [
"from utils import load_cifar10, load_cats_vs_dogs, load_fashion_mnist, load_cifar100\nimport numpy as np\nimport os\n\ndest_dir = './res/cifar10/'\n\nfile_list = os.listdir(dest_dir)\nfile_lists = [file for file in file_list if file[-3:] =='npz']\nprint(file_lists)\n\nfor file in file_lists:\n a = np.load(dest_dir + file)\n for elem in a:\n print(elem)\n print(elem.shape)\n print(a.shape)\n"
] | [
[
"numpy.load"
]
] |
AbdelkaderMH/sarcasm_wanlp | [
"76850cea42b72b6a0da74532dd8eaa90a926630b"
] | [
"losses.py"
] | [
"import numpy as np\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass ModelMultitaskLoss(nn.Module):\n def __init__(self):\n super(ModelMultitaskLoss, self).__init__()\n self.eta = nn.Parameter(torch.tensor([0.0,0.0]))\n\n def forward(self, loss_1, loss_2,):\n total_loss_1 = loss_1 * torch.exp(-self.eta[0]) + self.eta[0]\n total_loss_2 = loss_2 * torch.exp(-self.eta[1]) + self.eta[1]\n\n total_loss = total_loss_1 + total_loss_2\n return total_loss\n\n\nclass F1_Loss(nn.Module):\n\n def __init__(self, epsilon=1e-7, num_class=3):\n super().__init__()\n self.epsilon = epsilon\n self.num_class = num_class\n self.ce = nn.CrossEntropyLoss().to(device)\n\n def forward(self, y_pred, y_true ):\n assert y_pred.ndim == 2\n assert y_true.ndim == 1\n loss = self.ce(y_pred, y_true)\n y_true = F.one_hot(y_true, self.num_class).float()\n y_pred = F.softmax(y_pred, dim=1)\n\n tp = (y_true * y_pred).sum(dim=0).float()\n tn = ((1 - y_true) * (1 - y_pred)).sum(dim=0).float()\n fp = ((1 - y_true) * y_pred).sum(dim=0).float()\n fn = (y_true * (1 - y_pred)).sum(dim=0).float()\n\n precision = tp / (tp + fp + self.epsilon)\n recall = tp / (tp + fn + self.epsilon)\n\n f1 = 2 * (precision * recall) / (precision + recall + self.epsilon)\n f1 = f1.clamp(min=self.epsilon, max=1 - self.epsilon)\n return loss - f1.mean()\n"
] | [
[
"torch.nn.functional.one_hot",
"torch.cuda.is_available",
"torch.tensor",
"torch.nn.functional.softmax",
"torch.exp",
"torch.nn.CrossEntropyLoss"
]
] |
chuyj/saliency | [
"878680dd326f983b051fc33dd6212f28f1d9a7a7"
] | [
"saliency/integrated_gradients_test.py"
] | [
"# Copyright 2019 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\nimport tensorflow as tf\nfrom . import integrated_gradients\nfrom tensorflow.python.platform import googletest\n\n\nclass IntegratedGradientsTest(googletest.TestCase):\n \"\"\"\n To run:\n \"python -m saliency.integrated_gradients_test\" from the PAIR-code/saliency\n directory.\n \"\"\"\n\n def testIntegratedGradientsGetMask(self):\n with tf.Graph().as_default() as graph:\n x = tf.placeholder(shape=[None, 3], dtype=tf.float32)\n y = 5 * x[:, 0] + x[:, 0] * x[:, 1] + tf.sin(x[:, 2])\n with tf.Session() as sess:\n # Calculate the value of `y` at the baseline.\n x_baseline_val = np.array([[0.5, 0.8, 1.0]], dtype=np.float)\n y_baseline_val = sess.run(y, feed_dict={x: x_baseline_val})\n\n # Calculate the value of `y` at the input.\n x_input_val = np.array([[1.0, 2.0, 3.0]], dtype=np.float)\n y_input_val = sess.run(y, feed_dict={x: x_input_val})\n\n # Due to mathematical properties of the integrated gradients,\n # the expected IG value is equal to the difference between\n # the `y` value at the input and the `y` value at the baseline.\n expected_val = y_input_val[0] - y_baseline_val[0]\n\n # Calculate the integrated gradients attribution of the input.\n ig = integrated_gradients.IntegratedGradients(graph, sess, y[0], x)\n mask = ig.GetMask(x_value=x_input_val[0], feed_dict={},\n x_baseline=x_baseline_val[0], x_steps=1000)\n\n # Verify the result.\n self.assertAlmostEqual(expected_val, mask.sum(), places=3)\n\n\nif __name__ == '__main__':\n googletest.main()\n"
] | [
[
"numpy.array",
"tensorflow.python.platform.googletest.main",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.sin"
]
] |
sargas/scipy | [
"3dcb7b2d5da7fcd9529137aa96b7a771cd47f111"
] | [
"scipy/optimize/tests/test_tnc.py"
] | [
"\"\"\"\nUnit tests for TNC optimization routine from tnc.py\n\"\"\"\n\nfrom numpy.testing import (assert_allclose, assert_equal, TestCase,\n run_module_suite)\n\nfrom scipy import optimize\nimport numpy as np\nfrom math import pow\n\nclass TestTnc(TestCase):\n \"\"\"TNC non-linear optimization.\n\n These tests are taken from Prof. K. Schittkowski's test examples\n for constrained non-linear programming.\n\n http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm\n\n \"\"\"\n def setUp(self):\n # options for minimize\n self.opts = {'disp': False, 'maxiter': 200}\n\n # objective functions and jacobian for each test\n def f1(self, x, a=100.0):\n return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2)\n\n def g1(self, x, a=100.0):\n dif = [0, 0]\n dif[1] = 2 * a * (x[1] - pow(x[0], 2))\n dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0)\n return dif\n\n def fg1(self, x, a=100.0):\n return self.f1(x, a), self.g1(x, a)\n\n def f3(self, x):\n return x[1] + pow(x[1] - x[0], 2) * 1.0e-5\n\n def g3(self, x):\n dif = [0, 0]\n dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5\n dif[1] = 1.0 - dif[0]\n return dif\n\n def fg3(self, x):\n return self.f3(x), self.g3(x)\n\n def f4(self, x):\n return pow(x[0] + 1.0, 3) / 3.0 + x[1]\n\n def g4(self, x):\n dif = [0, 0]\n dif[0] = pow(x[0] + 1.0, 2)\n dif[1] = 1.0\n return dif\n\n def fg4(self, x):\n return self.f4(x), self.g4(x)\n\n def f5(self, x):\n return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \\\n 1.5 * x[0] + 2.5 * x[1] + 1.0\n\n def g5(self, x):\n dif = [0, 0]\n v1 = np.cos(x[0] + x[1])\n v2 = 2.0*(x[0] - x[1])\n\n dif[0] = v1 + v2 - 1.5\n dif[1] = v1 - v2 + 2.5\n return dif\n\n def fg5(self, x):\n return self.f5(x), self.g5(x)\n\n def f38(self, x):\n return (100.0 * pow(x[1] - pow(x[0], 2), 2) +\n pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) +\n pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) +\n pow(x[3] - 1.0, 2)) +\n 19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5\n\n def g38(self, x):\n dif = [0, 0, 0, 0]\n dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) -\n 2.0 * (1.0 - x[0])) * 1.0e-5\n dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) +\n 19.8 * (x[3] - 1.0)) * 1.0e-5\n dif[2] = ( - 360.0 * x[2] * (x[3] - pow(x[2], 2)) -\n 2.0 * (1.0 - x[2])) * 1.0e-5\n dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) +\n 19.8 * (x[1] - 1.0)) * 1.0e-5\n return dif\n\n def fg38(self, x):\n return self.f38(x), self.g38(x)\n\n def f45(self, x):\n return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0\n\n def g45(self, x):\n dif = [0] * 5\n dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0\n dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0\n dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0\n dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0\n dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0\n return dif\n\n def fg45(self, x):\n return self.f45(x), self.g45(x)\n\n # tests\n # minimize with method=TNC\n def test_minimize_tnc1(self):\n x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None])\n xopt = [1, 1]\n iterx = [] # to test callback\n\n res = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1,\n bounds=bnds, options=self.opts,\n callback=iterx.append)\n assert_allclose(res.fun, self.f1(xopt), atol=1e-8)\n assert_equal(len(iterx), res.nit)\n\n def test_minimize_tnc1b(self):\n x0, bnds = np.matrix([-2, 1]), ([-np.inf, None],[-1.5, None])\n xopt = [1, 1]\n x = optimize.minimize(self.f1, x0, method='TNC',\n bounds=bnds, options=self.opts).x\n assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4)\n\n def test_minimize_tnc1c(self):\n x0, bnds = [-2, 
1], ([-np.inf, None],[-1.5, None])\n xopt = [1, 1]\n x = optimize.minimize(self.fg1, x0, method='TNC',\n jac=True, bounds=bnds,\n options=self.opts).x\n assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)\n\n def test_minimize_tnc2(self):\n x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None])\n xopt = [-1.2210262419616387, 1.5]\n x = optimize.minimize(self.f1, x0, method='TNC',\n jac=self.g1, bounds=bnds,\n options=self.opts).x\n assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)\n\n def test_minimize_tnc3(self):\n x0, bnds = [10, 1], ([-np.inf, None], [0.0, None])\n xopt = [0, 0]\n x = optimize.minimize(self.f3, x0, method='TNC',\n jac=self.g3, bounds=bnds,\n options=self.opts).x\n assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8)\n\n def test_minimize_tnc4(self):\n x0 ,bnds = [1.125, 0.125], [(1, None), (0, None)]\n xopt = [1, 0]\n x = optimize.minimize(self.f4, x0, method='TNC',\n jac=self.g4, bounds=bnds,\n options=self.opts).x\n assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8)\n\n def test_minimize_tnc5(self):\n x0, bnds = [0, 0], [(-1.5, 4),(-3, 3)]\n xopt = [-0.54719755119659763, -1.5471975511965976]\n x = optimize.minimize(self.f5, x0, method='TNC',\n jac=self.g5, bounds=bnds,\n options=self.opts).x\n assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8)\n\n def test_minimize_tnc38(self):\n x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4\n xopt = [1]*4\n x = optimize.minimize(self.f38, x0, method='TNC',\n jac=self.g38, bounds=bnds,\n options=self.opts).x\n assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8)\n\n def test_minimize_tnc45(self):\n x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]\n xopt = [1, 2, 3, 4, 5]\n x = optimize.minimize(self.f45, x0, method='TNC',\n jac=self.g45, bounds=bnds,\n options=self.opts).x\n assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8)\n\n # fmin_tnc\n def test_tnc1(self):\n fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None])\n xopt = [1, 1]\n\n x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ),\n messages=optimize.tnc.MSG_NONE,\n maxfun=200)\n\n assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,\n err_msg=\"TNC failed with status: \" +\n optimize.tnc.RCSTRINGS[rc])\n\n def test_tnc1b(self):\n x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])\n xopt = [1, 1]\n\n x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True,\n bounds=bounds,\n messages=optimize.tnc.MSG_NONE,\n maxfun=200)\n\n assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4,\n err_msg=\"TNC failed with status: \" +\n optimize.tnc.RCSTRINGS[rc])\n\n def test_tnc1c(self):\n x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])\n xopt = [1, 1]\n\n x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1,\n bounds=bounds,\n messages=optimize.tnc.MSG_NONE,\n maxfun=200)\n\n assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,\n err_msg=\"TNC failed with status: \" +\n optimize.tnc.RCSTRINGS[rc])\n\n def test_tnc2(self):\n fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None])\n xopt = [-1.2210262419616387, 1.5]\n\n x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,\n messages=optimize.tnc.MSG_NONE,\n maxfun=200)\n\n assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,\n err_msg=\"TNC failed with status: \" +\n optimize.tnc.RCSTRINGS[rc])\n\n def test_tnc3(self):\n fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None])\n xopt = [0, 0]\n\n x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,\n messages=optimize.tnc.MSG_NONE,\n maxfun=200)\n\n assert_allclose(self.f3(x), self.f3(xopt), 
atol=1e-8,\n err_msg=\"TNC failed with status: \" +\n optimize.tnc.RCSTRINGS[rc])\n\n def test_tnc4(self):\n fg, x, bounds = self.fg4, [1.125, 0.125], [(1, None), (0, None)]\n xopt = [1, 0]\n\n x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,\n messages=optimize.tnc.MSG_NONE,\n maxfun=200)\n\n assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8,\n err_msg=\"TNC failed with status: \" +\n optimize.tnc.RCSTRINGS[rc])\n\n def test_tnc5(self):\n fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)]\n xopt = [-0.54719755119659763, -1.5471975511965976]\n\n x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,\n messages=optimize.tnc.MSG_NONE,\n maxfun=200)\n\n assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8,\n err_msg=\"TNC failed with status: \" +\n optimize.tnc.RCSTRINGS[rc])\n\n def test_tnc38(self):\n fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4\n xopt = [1]*4\n\n x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,\n messages=optimize.tnc.MSG_NONE,\n maxfun=200)\n\n assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8,\n err_msg=\"TNC failed with status: \" +\n optimize.tnc.RCSTRINGS[rc])\n\n def test_tnc45(self):\n fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3),\n (0, 4), (0, 5)]\n xopt = [1, 2, 3, 4, 5]\n\n x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,\n messages=optimize.tnc.MSG_NONE,\n maxfun=200)\n\n assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8,\n err_msg=\"TNC failed with status: \" +\n optimize.tnc.RCSTRINGS[rc])\n\nif __name__ == \"__main__\":\n run_module_suite()\n"
] | [
[
"numpy.matrix",
"numpy.testing.run_module_suite",
"numpy.array",
"numpy.sin",
"numpy.cos",
"scipy.optimize.fmin_tnc",
"scipy.optimize.minimize"
]
] |
ivangarrera/MachineLearning | [
"c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75"
] | [
"src/features/basic_data_cleaning.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\n\nclass BasicCleaning:\n @classmethod\n def CleanData(cls, path_to_data, var):\n data_set = pd.read_csv(path_to_data, sep=',')\n \n # Convert string to datetime\n data_set['TimeStemp'] = pd.to_datetime(data_set['TimeStemp'])\n \n # filter data by date\n data_set = data_set[(data_set['TimeStemp'] > '2016-04-30 00:00:00') & \\\n (data_set['TimeStemp'] <= '2016-05-01 23:59:59')]\n \n data_set = data_set [[column for column in data_set if var in column and column.endswith(\"MEAN\")]]\n\n\n # Remove strings columns\n # for column in data_set:\n # if type(data_set[column][0]) is str:\n # data_set = data_set.drop(column, axis=1)\n \n # Remove columns with all null values\n for column in data_set:\n if data_set[column].isnull().all():\n data_set = data_set.drop(column, axis=1)\n \n # Remove duplicate rows\n data_set = data_set.drop_duplicates()\n \n # Remove rows with any null value\n data_set = data_set.dropna()\n \n # Remove version, because 2.3.3 is not a valid float value and PCA could fail\n # data_set = data_set.drop('Version', axis=1)\n \n return data_set"
] | [
[
"pandas.to_datetime",
"pandas.read_csv"
]
] |
mskcc/ACCESS-Pipeline | [
"3441040dfaecba58150c13a95a6a93657b00778a"
] | [
"python_tools/pipeline_kickoff/create_title_file_from_samplesheet.py"
] | [
"#!/usr/bin/env python\nimport xlrd\nimport argparse\nimport pandas as pd\n\nfrom python_tools.constants import *\n\n# Suppress pandas copy warning\npd.options.mode.chained_assignment = None\n\n##################################\n# Pipeline Kickoff Step #1\n#\n# This module is used to create a title file with the information needed for a pipeline run\n# It is derived from the manually-curated sample samplesheet\n#\n# Usage example:\n#\n# create_title_file_from_samplesheet \\\n# -i ./SampleSheet.csv \\\n# -o ./title_file.txt\n#\n# Note: The following requirements will be imposed on the input samplesheet file:\n#\n# 1. The fields that are found in the sample samplesheet should matched with the examples in test/test_data\n# 2. The sample ID's in the samplesheet must be matched somewhere in the fastq file names fom the -d data folder\n# 3. The sample ID's in the samplesheet must be matched somewhere in the path to the SampleSheet.csv files\n# 4. The SAMPLE_CLASS column of the samplesheet must consist of the values either \"Tumor\" or \"Normal\"\n# 5. Each \"Tumor\" sample must have at least one associated \"Normal\" sample\n# 6. Each sample folder in the -d data folder must have these three files:\n#\n# '_R1_001.fastq.gz'\n# '_R2_001.fastq.gz'\n# 'SampleSheet.csv'\n\n\ndef create_title_file(samplesheet_file_path, output_filename):\n \"\"\"\n Main function to read sample sheet, perform checks \n \"\"\"\n ### Read samplesheet as either csv or Excel file ###\n try:\n samplesheet = pd.read_csv(samplesheet_file_path, sep=\",\", header=0, dtype=str)\n except (xlrd.biffh.XLRDError, pd.io.common.CParserError):\n samplesheet = pd.read_excel(samplesheet_file_path, sep=\",\")\n\n # Remove rows where all elements are missing\n samplesheet = samplesheet.dropna(axis=0, how=\"all\")\n samplesheet = samplesheet.replace(\"\\n\", \"\", regex=True)\n\n ### resolve columns values ###\n # Check for duplicate columns\n if not samplesheet.equals(samplesheet.loc[:, ~samplesheet.columns.duplicated()]):\n raise Exception(\"Duplicated column headers in samplesheet.\")\n\n # Check for required columns\n if not set(SAMPLE_SHEET_REQUIRED_COLUMNS) <= set(samplesheet.columns.tolist()):\n missing_columns = set(SAMPLE_SHEET_REQUIRED_COLUMNS) ^ set(\n samplesheet.columns.tolist()\n )\n raise Exception(\n \"SampleSheet is missing the following required columns: {}.\".format(\n \",\".join(missing_columns)\n )\n )\n\n # Check for optional columns\n if set(SAMPLE_SHEET_REQUIRED_COLUMNS + SAMPLE_SHEET_OPTIONAL_COLUMNS) < set(\n samplesheet.columns.tolist()\n ):\n unrecognized_columns = set(\n SAMPLE_SHEET_REQUIRED_COLUMNS + SAMPLE_SHEET_OPTIONAL_COLUMNS\n ) ^ set(samplesheet.columns.tolist())\n print(\"WARNING: SampleSheet has additional unrecognized columns: {}\").format(\n \",\".join(unrecognized_columns)\n )\n elif set(SAMPLE_SHEET_REQUIRED_COLUMNS + SAMPLE_SHEET_OPTIONAL_COLUMNS) > set(\n samplesheet.columns.tolist()\n ):\n missing_columns = set(\n SAMPLE_SHEET_REQUIRED_COLUMNS + SAMPLE_SHEET_OPTIONAL_COLUMNS\n ) ^ set(samplesheet.columns.tolist())\n print(\n \"WARNING: SampleSheet is missing the following optional columns: {}\"\n ).format(\",\".join(missing_columns))\n\n ### resolve row values ###\n # Check if required column values are populated for all rows\n if not samplesheet.equals(samplesheet.dropna(subset=SAMPLE_SHEET_REQUIRED_COLUMNS)):\n raise Exception(\"Missing values in require columns.\")\n\n # Select the explicitly defined columns we want from the samplesheet & rename them\n try:\n title_file = 
samplesheet[columns_map_samplesheet.keys()]\n except KeyError:\n raise Exception(\"Cannot map sample sheet columns to title file.\")\n\n title_file.columns = columns_map_samplesheet.values()\n\n # populate title file barcode column\n try:\n title_file[TITLE_FILE__BARCODE_ID_COLUMN] = [\n barcode_x if barcode_x == barcode_y else barcode_x + \"_\" + barcode_y\n for barcode_x, barcode_y in zip(\n samplesheet[SAMPLE_SHEET__BARCODE_ID1_COLUMN],\n samplesheet[SAMPLE_SHEET__BARCODE_ID2_COLUMN],\n )\n ]\n except (KeyError, ValueError):\n raise Exception(\"Error while populating barcode values in the title file.\")\n\n # check for projectID and bait version\n def projectid_format(id):\n \"\"\"\n helper function to check project ID and extract bait version.\n \"\"\"\n if PROJECT_NAME.match(id):\n try:\n return BAIT_SEARCH.findall(id).pop().replace(ASSAY_NAME, \"\")\n except IndexError:\n raise Exception(\n \"Bait version cannot be identified from project/run ID.\"\n )\n else:\n raise Exception(\"Project ID is not in the required format.\")\n\n # Get bait version from project ID and perform check\n title_file[TITLE_FILE__BAIT_VERSION_COLUMN] = title_file[\n TITLE_FILE__POOL_COLUMN\n ].apply(projectid_format)\n if len(set(title_file[TITLE_FILE__BAIT_VERSION_COLUMN])) > 1:\n raise Exception(\"Samplesheet contains samples with mutliple bait version.\")\n if (\n not set(title_file[TITLE_FILE__BAIT_VERSION_COLUMN]).pop()\n == EXPECTED_BAIT_VERSION\n ):\n raise Exception(\"Samplesheet bait version does not match the expected value.\")\n\n # sample description/class check\n if not set(title_file[TITLE_FILE__SAMPLE_CLASS_COLUMN]) <= set(\n ALLOWED_SAMPLE_DESCRIPTION\n ):\n raise Exception(\n \"Unexpected sample description. Only the following sample descritpions are allowed: {}.\".format(\n \",\".join(ALLOWED_SAMPLE_DESCRIPTION)\n )\n )\n\n # split metadata column\n try:\n title_file[\n [\n TITLE_FILE__PATIENT_NAME_COLUMN,\n TITLE_FILE__ACCESSION_COLUMN,\n TITLE_FILE__SEX_COLUMN,\n TITLE_FILE__SEQUENCER_COLUMN,\n ]\n ] = samplesheet[SAMPLE_SHEET__METADATA_COLUMN].str.split(\n METADATA_COLUMN_DELIMETER, expand=True\n )[\n METADATA_REQUIRED_COLUMNS\n ]\n except (ValueError, KeyError):\n raise Exception(\n \"Operator column values are improperly defined. There should be at least 5 '|' delimited fields in this order: OperatorName|PatientName|Accession|Sex|Sequencer\"\n )\n\n # SEX column makes sense?\n title_file.loc[\n title_file[TITLE_FILE__SEX_COLUMN].isin(CONTROL_SAMPLE_SEX),\n TITLE_FILE__SEX_COLUMN,\n ] = FEMALE\n if not set(title_file[TITLE_FILE__SEX_COLUMN]) <= set(ALLOWED_SEX):\n raise Exception(\n \"Unrecognized SEX type. Should be one of: {}.\".format(\n \",\".join(ALLOWED_SEX + CONTROL_SAMPLE_SEX)\n )\n )\n\n # Check sequencer columns\n if not set(title_file[TITLE_FILE__SEQUENCER_COLUMN]) <= set(ALLOWED_SEQUENCERS):\n unrecognized_values = set(title_file[TITLE_FILE__SEQUENCER_COLUMN]) ^ set(\n ALLOWED_SEQUENCERS\n )\n raise Exception(\n \"Unrecognized sequencer names: {}\".format(\",\".join(unrecognized_values))\n )\n if len(set(title_file[TITLE_FILE__SEQUENCER_COLUMN])) > 1:\n raise Exception(\n \"Only one unique sequencer name is allowerd per title file. 
There are: {}\".format(\n \",\".join(set(title_file[TITLE_FILE__SEQUENCER_COLUMN]))\n )\n )\n\n # check sample id and sample name format\n def name_check(sampleid):\n \"\"\"\n helper function to validate sample IDs and names.\n \"\"\"\n if any([s1 in sampleid for s1 in DISALLOWED_SAMPLE_ID_CHARACTERS]):\n raise Exception(\n \"Disallowed characters in {}. Ensure that none of the following characters exist: {}\".format(\n sampleid, DISALLOWED_SAMPLE_ID_CHARACTERS\n )\n )\n\n title_file[TITLE_FILE__SAMPLE_ID_COLUMN].apply(name_check)\n title_file[TITLE_FILE__PATIENT_ID_COLUMN].apply(name_check)\n\n # infer sample type from sample id\n try:\n title_file[TITLE_FILE__SAMPLE_TYPE_COLUMN] = title_file[\n TITLE_FILE__SAMPLE_ID_COLUMN\n ].str.split(SAMPLE_ID_ALLOWED_DELIMETER).str[SELECT_SPLIT_COLUMN]\n except KeyError:\n raise Exception(\n \"Error when interpreting sample type from sample_id. Ensure the sample-id are in the 00000000-X format.\"\n )\n\n # inferred sample type check\n def sample_type_check(sample):\n if not ALLOWED_SAMPLE_TYPE.match(sample):\n raise Exception(\n \"Unknown sample type {}. Sample type should start with one of: {}\".format(\n sample, \",\".join(ALLOWED_SAMPLE_TYPE_LIST)\n )\n )\n\n title_file[TITLE_FILE__SAMPLE_TYPE_COLUMN].apply(sample_type_check)\n # if not set(title_file[TITLE_FILE__SAMPLE_TYPE_COLUMN]) <= set(ALLOWED_SAMPLE_TYPE):\n # raise Exception(\n # \"Unexpected sample type. Only the following sample types are allowed: {}.\".format(\n # \",\".join(ALLOWED_SAMPLE_TYPE)\n # )\n # )\n\n # Assign sample type\n title_file[TITLE_FILE__SAMPLE_TYPE_COLUMN] = [\n PLASMA if PLASMA_SAMPLE_TYPE.match(x) else BUFFY\n for x in title_file[TITLE_FILE__SAMPLE_TYPE_COLUMN]\n ]\n\n # constant columns\n title_file[TITLE_FILE__COLLAB_ID_COLUMN] = COLLAB_ID\n\n # Samplesheet does not include this information at the moment\n # TODO: DMS can work out a way to fill this info if required.\n title_file[TITLE_FILE__POOL_INPUT_COLUMN] = \"\"\n\n # Trim whitespace\n title_file = title_file.apply(lambda x: x.str.strip() if x.dtype == \"object\" else x)\n\n # Optionally split by lanes\n if len(title_file[TITLE_FILE__LANE_COLUMN].unique()) > 1:\n duplicate_samples = []\n for lane in title_file[TITLE_FILE__LANE_COLUMN].unique():\n duplicate_samples.extend(\n title_file[title_file[TITLE_FILE__LANE_COLUMN] == lane][\n TITLE_FILE__SAMPLE_ID_COLUMN\n ].tolist()\n )\n duplicate_samples = list(\n filter(lambda x: duplicate_samples.count(x) > 1, duplicate_samples)\n )\n columns_to_consider = title_file.columns.tolist()\n columns_to_consider.remove(TITLE_FILE__LANE_COLUMN)\n title_file = title_file.drop_duplicates(subset=columns_to_consider)\n title_file[TITLE_FILE__LANE_COLUMN].loc[\n title_file[TITLE_FILE__SAMPLE_ID_COLUMN].isin(duplicate_samples)\n ] = MERGED_LANE_VALUE\n title_file = title_file[TITLE_FILE__COLUMN_ORDER]\n title_file.to_csv(output_filename, sep=\"\\t\", index=False)\n else:\n title_file = title_file[TITLE_FILE__COLUMN_ORDER]\n title_file.to_csv(output_filename, sep=\"\\t\", index=False)\n\n\n########\n# Main #\n########\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\",\n \"--samplesheet_file_path\",\n help=\"Sample Manifest File (e.g. 
test_samplesheet.xlsx)\",\n required=True,\n )\n parser.add_argument(\n \"-o\",\n \"--output_filename\",\n help=\"Desired output title location and name\",\n required=True,\n )\n\n args = parser.parse_args()\n create_title_file(args.samplesheet_file_path, args.output_filename)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.read_csv",
"pandas.read_excel"
]
] |
davmre/autoreparam | [
"c25340f272209278d336627cca42e139c0e4c961"
] | [
"util.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tools.\"\"\"\n# pylint: disable=missing-docstring,g-doc-args,g-doc-return-or-yield\n# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary\n# pylint: disable=invalid-name,broad-except\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport time\n\nfrom absl import flags\nfrom absl import logging\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability import bijectors as tfb\nfrom tensorflow_probability import distributions as tfd\nfrom tensorflow_probability import edward2 as ed\n\nfrom tensorflow_probability.python.experimental.edward2.generated_random_variables import Normal\nfrom tensorflow_probability.python.experimental.edward2.interceptor import tape\nfrom tensorflow_probability.python.experimental.edward2.program_transformations import make_log_joint_fn\n\nfrom tensorflow.python.ops.parallel_for import pfor\n\nimport program_transformations as program_transformations\n\nFLAGS = flags.FLAGS\n\n# pylint: disable=g-import-not-at-top\ntry:\n import __builtin__\nexcept ImportError:\n # Python 3\n import builtins as __builtin__\n# pylint: enable=g-import-not-at-top\n\n__all__ = [\n 'condition_number_cp',\n 'condition_number_ncp',\n 'compute_V_cp',\n 'compute_V_ncp',\n 'mean_field_variational_inference',\n 'approximate_mcmc_step_size',\n]\n\n\ndef compute_V_cp(q, v):\n r = (v * q + q + 1.)\n return np.array([[1. 
+ v, 1.], [1., q*v + 1.]]) / r\n\n\ndef compute_V_ncp(q, v):\n r = 1 / (v * q + q + 1)\n return r * np.array([[q + 1, -np.sqrt(v)*q], [-np.sqrt(v)*q, v*q + 1]])\n\n\ndef condition_number_cp(q, v):\n sqrt_det = 2 * np.sqrt((v * q + 1) * (v * q + 1) - v * (v * q + q + 1) *\n (v * q + 1) / (v + 1))\n lambda1 = 2*(v*q + 1) - sqrt_det\n lambda2 = 2*(v*q + 1) + sqrt_det\n return lambda2 / lambda1\n\n\ndef condition_number_ncp(q, v):\n sqrt_det = 2 * np.sqrt((v * q + 1) * (v * q + 1) - (v * q + q + 1) *\n (v * q + 1) / (q + 1))\n lambda1 = 2*(v*q + 1) - sqrt_det\n lambda2 = 2*(v*q + 1) + sqrt_det\n return lambda2 / lambda1\n\n\ndef mean_field_variational_inference(model, *args, **kwargs):\n num_optimization_steps = kwargs.get('num_optimization_steps', 2000)\n del kwargs['num_optimization_steps']\n\n (variational_model,\n variational_parameters) = program_transformations.make_variational_model(\n model, *args, **kwargs)\n\n log_joint = make_log_joint_fn(model)\n def target(**parameters):\n full_kwargs = dict(parameters, **kwargs)\n return log_joint(*args, **full_kwargs)\n\n log_joint_q = make_log_joint_fn(variational_model)\n def target_q(**parameters):\n return log_joint_q(*args, **parameters)\n\n elbo_sum = 0.\n for _ in range(16):\n with tape() as variational_tape:\n _ = variational_model(*args)\n\n params = variational_tape\n elbo_sum = elbo_sum + target(**params) - target_q(**params)\n\n elbo = elbo_sum / 16.\n best_elbo = None\n\n learning_rate_ph = tf.compat.v1.placeholder(shape=[], dtype=tf.float32)\n learning_rate = tf.Variable(learning_rate_ph, trainable=False)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n train = optimizer.minimize(-elbo)\n init = tf.compat.v1.global_variables_initializer()\n\n start_time = time.time()\n for learning_rate_val in [0.01, 0.1, 0.01, 0.1, 0.01, 0.1]:\n feed_dict = {learning_rate_ph: learning_rate_val}\n with tf.compat.v1.Session() as sess:\n sess.run(init, feed_dict=feed_dict)\n\n this_timeline = []\n print('VI with {} optimization steps'.format(num_optimization_steps))\n for _ in range(num_optimization_steps):\n _, e = sess.run([train, elbo], feed_dict=feed_dict)\n this_timeline.append(e)\n\n this_elbo = np.mean(this_timeline[-100:])\n if best_elbo is None or best_elbo < this_elbo:\n timeline = this_timeline\n best_elbo = this_elbo\n\n vals = sess.run(\n list(variational_parameters.values()), feed_dict=feed_dict)\n learned_variational_params = collections.OrderedDict(\n zip(variational_parameters.keys(), vals))\n\n vi_time = time.time() - start_time\n\n results = collections.OrderedDict()\n results['vp'] = learned_variational_params\n print('ELBO: {}'.format(best_elbo))\n\n return results, best_elbo, timeline, vi_time\n\n\ndef _marshal(*rvs):\n \"\"\"Args: a list of ed.RandomVariables each with vector or scalar event shape\n (which must be staticly known), and all having the same batch shape.\n\n Returns: a Tensor from concatenating their values along a single vector\n dimension.\n \"\"\"\n vector_rvs = []\n for rv in rvs:\n v = rv.value\n if v.shape.ndims == 0:\n vector_rvs.append([v])\n else:\n vector_rvs.append(v)\n print(vector_rvs)\n return tf.concat(vector_rvs, axis=-1)\n\n\ndef _to_vector_shape(tensor_shape):\n if tensor_shape.ndims > 1:\n raise Exception('cannot convert {} to vector shape!'.format(tensor_shape))\n elif tensor_shape.ndims == 0:\n return tf.TensorShape([1])\n return tensor_shape\n\n\ndef _tensorshape_size(tensor_shape):\n if tensor_shape.ndims > 1:\n raise Exception(\n 'shapes of ndims >1 are bad! 
(saw: {})!'.format(tensor_shape))\n elif tensor_shape.ndims == 0:\n return 1\n return tensor_shape[0].value\n\n\ndef get_iaf_elbo(target, num_mc_samples, param_shapes):\n shape_sizes = [_tensorshape_size(pshape) for pshape in param_shapes.values()]\n overall_shape = [sum(shape_sizes)]\n\n def unmarshal(variational_sample):\n results = []\n n_dimensions_used = 0\n for (n_to_add, result_shape) in zip(shape_sizes, param_shapes.values()):\n result = variational_sample[Ellipsis, n_dimensions_used:\n n_dimensions_used + n_to_add]\n results.append(tf.reshape(result, result_shape))\n n_dimensions_used += n_to_add\n return tuple(results)\n\n variational_dist = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=0., scale=1.),\n bijector=tfb.Invert(\n tfb.MaskedAutoregressiveFlow(\n shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(\n hidden_layers=[256, 256]))),\n event_shape=overall_shape,\n name='q_iaf')\n\n variational_samples = variational_dist.sample(num_mc_samples)\n target_q_sum = tf.reduce_sum(input_tensor=variational_dist.log_prob(variational_samples))\n target_sum = 0.\n for s in range(num_mc_samples):\n params = unmarshal(variational_samples[s, Ellipsis])\n target_sum = target_sum + target(*params)\n\n energy = target_sum / float(num_mc_samples)\n entropy = -target_q_sum / float(num_mc_samples)\n elbo = energy + entropy\n\n tf.compat.v1.summary.scalar('energy', energy)\n tf.compat.v1.summary.scalar('entropy', entropy)\n tf.compat.v1.summary.scalar('elbo', elbo)\n\n return elbo\n\n\ndef get_mean_field_elbo(model, target, num_mc_samples, model_args,\n model_obs_kwargs, vi_kwargs):\n if FLAGS.reparameterise_variational and 'cVIP' in FLAGS.method:\n combined_kwargs = model_obs_kwargs.copy()\n combined_kwargs.update(vi_kwargs)\n variational_model, variational_parameters = make_variational_model_special(\n model, *model_args, **combined_kwargs)\n else:\n variational_model, variational_parameters = program_transformations.make_variational_model(\n model, *model_args, **model_obs_kwargs)\n\n log_joint_q = make_log_joint_fn(variational_model)\n\n def target_q(**parameters):\n return log_joint_q(*model_args, **parameters)\n\n #beta = tf.get_variable(\"beta\", trainable=False, initializer=0.)\n #beta_incr = tf.assign(beta, tf.clip_by_value(beta + 0.1*beta + 0.0000001, 0., 1.))\n\n #with tf.control_dependencies([beta_incr]):\n\n def loop_body(mc_sample):\n with tape() as variational_tape:\n _ = variational_model(*model_args)\n\n params = variational_tape.values()\n\n energy = target(*params)\n entropy = tf.negative(target_q(**variational_tape))\n return energy + entropy\n\n if num_mc_samples == 1:\n elbo = tf.reduce_sum(loop_body(0))\n else:\n elbo = tf.reduce_sum(input_tensor=pfor(loop_body, num_mc_samples)) / num_mc_samples\n tf.compat.v1.summary.scalar('elbo', elbo)\n return elbo, variational_parameters\n\n\ndef get_approximate_step_size(variational_parameters, num_leapfrog_steps):\n return [\n variational_parameters[key] / num_leapfrog_steps**2\n for key in variational_parameters.keys()\n if key.endswith('_scale')\n ]\n\n\n# FIXME: need to make this nicer than with all these weird kwargs\ndef approximate_mcmc_step_size(model, *args, **kwargs):\n\n with tf.compat.v1.variable_scope('approx_step_size_{}'.format(model.__name__)):\n if 'diagnostics' in kwargs.keys():\n diagnostics = kwargs.pop('diagnostics')\n else:\n diagnostics = False\n\n if 'num_leapfrog_steps' in kwargs.keys():\n num_leapfrog_steps = kwargs.pop('num_leapfrog_steps')\n else:\n num_leapfrog_steps = 4\n\n 
results, final_elbo_val, _, vi_time = mean_field_variational_inference(\n model, *args, **kwargs)\n stepsize = [(np.array(np.array(results['vp'][key], dtype=np.float32)) /\n (float(num_leapfrog_steps)**2))\n for key in results['vp'].keys()\n if key.endswith('_scale')]\n\n if diagnostics:\n print('Estimated goodness of {}: {}'.format(model.__name__,\n final_elbo_val))\n print('Estimated stepsize of {}: {}'.format(model.__name__, stepsize))\n\n return stepsize, final_elbo_val, vi_time\n\n\ndef stddvs_to_mcmc_step_sizes(results, num_leapfrog_steps):\n stepsize = [(np.sqrt(2 * np.mean(results[key])) / float(num_leapfrog_steps))\n for key in results.keys()\n if key.endswith('_scale')]\n\n return stepsize\n\n\ndef estimate_true_mean(sample_groups, esss):\n\n true_mean = [0 for group in range(len(sample_groups))]\n\n r = float(sum(esss))\n\n for group in range(len(sample_groups)):\n\n samples = sample_groups[group]\n mean = [np.mean(v) for v in samples]\n\n true_mean[group] = [\n (true_mean[group] + esss[group] * var_mean / r) for var_mean in mean\n ]\n\n return true_mean\n\n\ndef make_variational_model_special(model, *args, **kwargs):\n\n variational_parameters = collections.OrderedDict()\n param_params = kwargs['parameterisation']\n\n def get_or_init(name, a, b, L=None, std_mean=None,\n prior_mean=None, prior_scale=None, shape=None):\n\n loc_name = name + '_loc'\n scale_name = name + '_scale'\n\n if loc_name in variational_parameters.keys() and \\\n scale_name in variational_parameters.keys():\n return (variational_parameters[loc_name],\n variational_parameters[scale_name])\n else:\n # shape must not be None\n pre_loc = tf.compat.v1.get_variable(\n name=loc_name, initializer=1e-2 * tf.random.normal(shape, dtype=tf.float32))\n pre_scale = tf.nn.softplus(\n tf.compat.v1.get_variable(\n name=scale_name,\n initializer=-2 * tf.ones(shape, dtype=tf.float32)))\n variational_parameters[loc_name] = (a + 0.1) * pre_loc\n variational_parameters[scale_name] = pre_scale**(b + 0.1)\n\n return (variational_parameters[loc_name],\n variational_parameters[scale_name])\n\n def mean_field(rv_constructor, *rv_args, **rv_kwargs):\n\n name = rv_kwargs['name']\n if name not in kwargs.keys():\n rv = rv_constructor(*rv_args, **rv_kwargs)\n\n try:\n a, b = param_params[name + '_a'], param_params[name + '_b']\n except Exception as err:\n print('couldn\\'t get centering params for variable {}: {}'.format(\n name, err))\n a, b = 1., 1.\n loc, scale = get_or_init(name, a=a, b=b, shape=rv.shape)\n\n # NB: name must be the same as original variable,\n # in order to be able to do black-box VI (setting\n # parameters to variational values obtained via trace).\n return Normal(loc=loc, scale=scale, name=name)\n else:\n rv_kwargs['value'] = kwargs[name]\n return rv_constructor(*rv_args, **rv_kwargs)\n\n def variational_model(*args):\n with ed.interception(mean_field):\n return model(*args)\n\n _ = variational_model(*args)\n\n return variational_model, variational_parameters\n\n\ndef variational_inits_from_params(learned_variational_params, param_names,\n num_inits):\n \"\"\"Sample from a normal variational dist, given saved parameters.\"\"\"\n locs = collections.OrderedDict()\n stddevs = collections.OrderedDict()\n samples = collections.OrderedDict()\n for k, v in learned_variational_params.items():\n if k.endswith('_loc'):\n locs[k[:-4]] = v\n elif k.endswith('_scale'):\n stddevs[k[:-6]] = v\n\n for k in param_names:\n shape = (num_inits,) + np.asarray(locs[k]).shape\n samples[k] = (np.random.randn(*shape) * stddevs[k] + 
locs[k]).astype(\n np.float32)\n return samples\n\n\ndef print(*args): # pylint: disable=redefined-builtin\n __builtin__.print(*args)\n logging.info(' '.join(args))\n\n\ndef reject_outliers(data, m=1.5):\n ret = data[abs(data - np.mean(data)) < m * np.std(data)]\n if len(ret) > 0:\n return ret\n else:\n return data\n\n\ndef get_min_ess_other(ess):\n\n ess = [[np.nan_to_num(e) for e in es] for es in ess]\n\n min_ess = []\n for c in range(FLAGS.num_chains):\n min_ess_c = min([np.array(e).min() for e in ess[c]])\n #print(\" Min ess of chain {} is {}.\".format(c, min_ess_c))\n min_ess.append(min_ess_c)\n\n min_ess = reject_outliers(np.array(min_ess))\n print(' Filtred {} outliers.'.format(FLAGS.num_chains - len(min_ess)))\n\n mean_ess = np.mean(min_ess)\n sem_ess = np.std(min_ess) / np.sqrt(len(min_ess))\n\n return mean_ess, sem_ess\n\n\ndef get_min_ess(ess):\n\n ess = [np.nan_to_num(e) for e in ess]\n\n min_ess = []\n for c in range(FLAGS.num_chains):\n min_ess_c = min([np.array(e[c]).min() for e in ess])\n min_ess.append(min_ess_c)\n\n # min_ess = reject_outliers(np.array(min_ess))\n # print(\" Filtred {} outliers.\".format(FLAGS.num_chains - len(min_ess)))\n\n mean_ess = np.mean(min_ess)\n sem_ess = np.std(min_ess) / np.sqrt(len(min_ess))\n\n return mean_ess, sem_ess\n"
] | [
[
"tensorflow.ones",
"numpy.mean",
"tensorflow.reshape",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.python.ops.parallel_for.pfor",
"numpy.nan_to_num",
"tensorflow.concat",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.TensorShape",
"tensorflow.Variable",
"numpy.sqrt",
"numpy.array",
"numpy.random.randn",
"tensorflow.compat.v1.Session",
"numpy.std",
"tensorflow.compat.v1.summary.scalar",
"numpy.asarray",
"tensorflow.random.normal"
]
] |
sfvnDTU/deep_detektor | [
"3413b805b1d108480358a3f50ec5bb18b1d6845b"
] | [
"run_files/loo_cv.py"
] | [
"from pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\nfrom sklearn.model_selection import LeaveOneOut\n\nfrom models.model_base import DetektorModel\nfrom project_paths import ProjectPaths\nfrom evaluations import Accuracy, F1, TruePositives, TrueNegatives, FalsePositives, FalseNegatives, Samples, \\\n AreaUnderROC\nfrom evaluations.area_roc import plot_roc, ROC, plot_multiple_rocs, mean_rocs\nfrom models.baselines import MLP, LogisticRegression, LogisticRegressionSK\nfrom models.recurrent.basic_recurrent import BasicRecurrent\nfrom models.PositiveLearningElkan.pu_learning import PULogisticRegressionSK\nfrom util.tensor_provider import TensorProvider\nfrom util.utilities import ensure_folder, save_fig\n\n\ndef leave_one_program_out_cv(tensor_provider, model_list, path,\n eval_functions=None, limit=None, return_predictions=False,\n save_ranked_sentences=True, save_full_predictions=True,\n save_model_weights=True):\n \"\"\"\n :param TensorProvider tensor_provider: Class providing all data to models.\n :param list[DetektorModel] model_list: List of model-classes for testing.\n :param list[Evaluation] eval_functions: List of evaluation functions used to test models.\n :param bool return_predictions: If True, the method stores all model test-predictions and returns them as well.\n Can be used to determine whether errors are the same across models.\n :param int | None limit: Only perform analysis on some programs (for testing)\n If None - run on all programs.\n :param Path path: Path for storing results\n :return:\n \"\"\"\n ensure_folder(path)\n\n # TODO: Consider also looping over loss-functions: classic ones and weighed ones\n n_models = len(model_list)\n\n # Default evaluation score\n if eval_functions is None:\n eval_functions = [Accuracy(), F1(), TruePositives(), TrueNegatives(), FalsePositives(), FalseNegatives(),\n Samples(), AreaUnderROC(), ROC()]\n\n # Elements keys\n keys = list(sorted(tensor_provider.accessible_annotated_keys))\n\n # Get program ids and number of programs\n program_ids = np.array(list(zip(*keys))[0])\n unique_programs = np.array(sorted(set(program_ids)))\n n_programs = len(unique_programs)\n program_names = [\"P{:02d}\".format(val + 1) for val in range(n_programs)]\n\n # Dictionary for holding actual predictions (they vary in length which discourages an array)\n test_predictions = dict()\n\n # Initialize array for holding results\n special_results = dict()\n evaluation_names = [val.name() for val in eval_functions if val.is_single_value]\n classification_results = np.full((n_programs, n_models, len(evaluation_names)), np.nan)\n classification_results = xr.DataArray(classification_results,\n name=\"Loo Results\",\n dims=[\"Program\", \"Model\", \"Evaluation\"],\n coords=dict(Program=program_names,\n Model=[model.name for model in model_list],\n Evaluation=evaluation_names))\n\n # Initialize file for storing ranked sentences\n if save_ranked_sentences:\n rank_file = Path(path, \"ranked_sentences.txt\").open(\"w\")\n\n # Loop over programs\n loo = LeaveOneOut()\n limit = len(unique_programs) if limit is None else limit\n print(\"\\n\\nRunning Leave-One-Out Tests.\\n\" + \"-\" * 75)\n for program_nr, (train, test) in enumerate(list(loo.split(unique_programs))[:limit]):\n program_name = program_names[program_nr]\n\n # Get split indices\n train_idx = np.where(program_ids != unique_programs[test])[0]\n test_idx = np.where(program_ids == unique_programs[test])[0]\n\n # Convert to keys\n train_idx = [keys[val] for val in 
train_idx]\n test_idx = [keys[val] for val in test_idx]\n\n # Report\n print(\"Program {}, using {} training samples and {} test samples.\".format(program_nr + 1,\n len(train_idx),\n len(test_idx)))\n\n # Make and set BoW-vocabulary\n bow_vocabulary = tensor_provider.extract_programs_vocabulary(train_idx)\n tensor_provider.set_bow_vocabulary(bow_vocabulary)\n\n # Get truth of test-set\n y_true = tensor_provider.load_labels(data_keys_or_idx=test_idx)\n\n # Go through models\n for model_nr, model in enumerate(model_list):\n model_name = model.name\n\n # Initialize model\n model.initialize_model(tensor_provider=tensor_provider)\n\n # Fit model\n model.fit(tensor_provider=tensor_provider,\n train_idx=train_idx,\n verbose=2)\n\n # Predict on test-data for performance\n y_pred, y_pred_binary = model.predict(tensor_provider=tensor_provider,\n predict_idx=test_idx)\n y_pred = np.squeeze(y_pred)\n y_pred_binary = np.squeeze(y_pred_binary)\n\n # Store predictions\n if return_predictions:\n test_predictions.setdefault(model_name, dict())[program_name] = y_pred\n\n # Save the best ranked senteces (in terms of claim)\n if save_ranked_sentences:\n rank_file.write(\"Test program: %s \\n\" % program_names[program_nr])\n rank_file.write(model.summary_to_string())\n ranked_sentences, rank_score, rank_indices \\\n = tensor_provider.get_ranked_predictions(y_pred, test_idx)\n rank_file.write(\"Sentence, Proability of claim, Truth \\n\")\n ranked_labels = tensor_provider.load_labels(rank_indices)\n for r in range(len(ranked_sentences)):\n rank_file.write(\"%s , %.5f, %i \\n\" % (ranked_sentences[r], rank_score[r], ranked_labels[r]))\n rank_file.write(\"\\n\")\n\n # Save predictions on full test set\n if save_full_predictions:\n with Path(path, \"%s_predictions.txt\" % program_names[program_nr]).open(\"w\") as file:\n all_sentences = tensor_provider.load_original_sentences(test_idx)\n for r in range(len(all_sentences)):\n file.write(\"%i;%.5f;%s\\n\" % (y_true[r], y_pred[r], all_sentences[r]))\n\n # Save model weights in case of logistic regression\n if save_model_weights and model_name == \"LogisticRegressionSKLEARN\":\n # TODO: Save most important weights in classification\n print(' ')\n\n # Evaluate with eval_functions\n evaluation_nr = 0\n for evalf in eval_functions:\n assert y_pred.shape == y_true.shape, \"y_pred ({}) and y_true ({}) \" \\\n \"do not have same shape\".format(y_pred.shape, y_true.shape)\n\n if evalf.is_single_value:\n evaluation_result = evalf(y_true=y_true,\n y_pred=y_pred,\n y_pred_binary=y_pred_binary)\n classification_results[program_nr, model_nr, evaluation_nr] = evaluation_result\n evaluation_nr += 1\n else:\n special_results[(model.name, evalf.name(), program_nr)] = evalf(y_true=y_true,\n y_pred=y_pred,\n y_pred_binary=y_pred_binary)\n ###\n # Plot ROC curves if wanted\n\n # Go through models\n models_mean_rocs = []\n for model in model_list:\n rocs = []\n labels = []\n\n # Go through programs\n for program_nr in range(len(unique_programs)):\n key = (model.name, \"ROC\", program_nr)\n if key in special_results:\n rocs.append(special_results[key])\n labels.append(\"Program {}\".format(program_nr))\n\n # Plot ROCs for each program for this model\n plot_multiple_rocs(rocs=rocs, labels=labels, center_line=False)\n mean = mean_rocs(rocs)\n models_mean_rocs.append(mean)\n plot_roc(*mean, title=model.name, label=\"Mean\",\n color=\"black\", linestyle=\"--\")\n plt.legend()\n\n # Store figure\n file_name = \"ROC_{}\".format(model.name)\n save_fig(Path(path, file_name))\n plt.close()\n\n # 
Plot mean-ROCs for models\n names = [model.name for model in model_list]\n plot_multiple_rocs(rocs=models_mean_rocs, labels=names, center_line=True,\n title=\"Models Mean-ROC\")\n plt.legend()\n save_fig(Path(path, \"Models_ROC\"))\n plt.close()\n\n if save_ranked_sentences:\n rank_file.close()\n\n if return_predictions:\n return classification_results, special_results, test_predictions\n return classification_results, special_results\n\n\nif __name__ == \"__main__\":\n # Initialize tensor-provider (data-source)\n the_tensor_provider = TensorProvider(verbose=True)\n\n # Choose number of programs to run though (None for all)\n program_limit = None\n\n # Choose models\n models = [\n #PULogisticRegressionSK(tensor_provider=the_tensor_provider)\n MLP(tensor_provider=the_tensor_provider)\n ]\n\n # Run LOO-program\n loo_path = Path(ProjectPaths.results, \"LOO_CV\")\n results, s_results = leave_one_program_out_cv(\n tensor_provider=the_tensor_provider,\n model_list=models,\n limit=program_limit,\n path=loo_path\n ) # type: xr.DataArray\n\n # Get mean-results over programs\n mean_results = results.mean(\"Program\")\n mean_results.name = \"Mean Loo Results\"\n mean_results = mean_results._to_dataset_split(\"Model\").to_dataframe()\n\n # Print mean results\n print(\"\\nMean LOO Results\\n\" + \"-\" * 75)\n with Path(loo_path, \"mean_results.txt\").open(\"w\") as file:\n file.write(str(mean_results))\n print(mean_results)\n"
] | [
[
"sklearn.model_selection.LeaveOneOut",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"numpy.where",
"numpy.squeeze"
]
] |
marsggbo/hyperbox | [
"91dcd04ad30164bcb12209d818df18961fa3f347"
] | [
"hyperbox/networks/mobilenet/mobile_net.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport math\r\n\r\nfrom hyperbox.mutables.spaces import OperationSpace\r\nfrom hyperbox.utils.utils import load_json\r\n\r\nfrom hyperbox.networks.base_nas_network import BaseNASNetwork\r\nfrom hyperbox.networks.mobilenet.mobile_ops import *\r\nfrom hyperbox.networks.mobilenet.mobile_utils import *\r\n\r\n\r\nclass MobileNet(BaseNASNetwork):\r\n def __init__(self,\r\n width_stages=[24,40,80,96,192,320],\r\n n_cell_stages=[4,4,4,4,4,1],\r\n stride_stages=[2,2,2,1,2,1],\r\n width_mult=1, classes=1000,\r\n dropout_rate=0, bn_param=(0.1, 1e-3), mask=''):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n width_stages: str\r\n width (output channels) of each cell stage in the block\r\n n_cell_stages: str\r\n number of cells in each cell stage\r\n stride_strages: str\r\n stride of each cell stage in the block\r\n width_mult : int\r\n the scale factor of width\r\n \"\"\"\r\n super(MobileNet, self).__init__(mask)\r\n input_channel = make_divisible(32 * width_mult, 8)\r\n first_cell_width = make_divisible(16 * width_mult, 8)\r\n for i in range(len(width_stages)):\r\n width_stages[i] = make_divisible(width_stages[i] * width_mult, 8)\r\n # first conv\r\n first_conv = ConvLayer(3, input_channel, kernel_size=3, stride=2, use_bn=True, act_func='relu6', ops_order='weight_bn_act')\r\n # first block\r\n first_block_conv = OPS['3x3_MBConv1'](input_channel, first_cell_width, 1)\r\n first_block = first_block_conv\r\n\r\n input_channel = first_cell_width\r\n\r\n blocks = [first_block]\r\n\r\n stage_cnt = 0\r\n for width, n_cell, s in zip(width_stages, n_cell_stages, stride_stages):\r\n for i in range(n_cell):\r\n if i == 0:\r\n stride = s\r\n else:\r\n stride = 1\r\n calibrate_op = CalibrationLayer(input_channel, width, stride)\r\n blocks.append(calibrate_op)\r\n op_candidates = [OPS['3x3_MBConv3'](width, width, 1),\r\n OPS['3x3_MBConv6'](width, width, 1),\r\n OPS['5x5_MBConv3'](width, width, 1),\r\n OPS['5x5_MBConv6'](width, width, 1),\r\n OPS['7x7_MBConv3'](width, width, 1),\r\n OPS['7x7_MBConv6'](width, width, 1),\r\n OPS['Identity'](width, width, 1),\r\n OPS['Zero'](width, width, 1),\r\n ]\r\n if stride == 1 and input_channel == width:\r\n # if it is not the first one\r\n op_candidates += [OPS['Zero'](input_channel, width, stride)]\r\n if self.mask: op_candidates = [op_candidates[self.mask[f\"s{stage_cnt}_c{i}\"].argmax()]]\r\n conv_op = OperationSpace(op_candidates, return_mask=True, key=\"s{}_c{}\".format(stage_cnt, i))\r\n # shortcut\r\n if stride == 1 and input_channel == width:\r\n # if not first cell\r\n shortcut = IdentityLayer(input_channel, input_channel)\r\n else:\r\n shortcut = None\r\n inverted_residual_block = MobileInvertedResidualBlock(conv_op, shortcut, op_candidates)\r\n blocks.append(inverted_residual_block)\r\n input_channel = width\r\n stage_cnt += 1\r\n\r\n # feature mix layer\r\n last_channel = make_devisible(1280 * width_mult, 8) if width_mult > 1.0 else 1280\r\n feature_mix_layer = ConvLayer(input_channel, last_channel, kernel_size=1, use_bn=True, act_func='relu6', ops_order='weight_bn_act', )\r\n classifier = LinearLayer(last_channel, classes, dropout_rate=dropout_rate)\r\n\r\n self.first_conv = first_conv\r\n self.blocks = nn.ModuleList(blocks)\r\n self.feature_mix_layer = feature_mix_layer\r\n self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)\r\n self.classifier = classifier\r\n\r\n # set bn param\r\n self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])\r\n\r\n def forward(self, x):\r\n x = self.first_conv(x)\r\n for block in 
self.blocks:\r\n x = block(x)\r\n x = self.feature_mix_layer(x)\r\n x = self.global_avg_pooling(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.classifier(x)\r\n return x\r\n\r\n def set_bn_param(self, momentum, eps):\r\n for m in self.modules():\r\n if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\r\n m.momentum = momentum\r\n m.eps = eps\r\n return\r\n\r\n def init_model(self, model_init='he_fout', init_div_groups=False):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n if model_init == 'he_fout':\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\r\n if init_div_groups:\r\n n /= m.groups\r\n m.weight.data.normal_(0, math.sqrt(2. / n))\r\n elif model_init == 'he_fin':\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\r\n if init_div_groups:\r\n n /= m.groups\r\n m.weight.data.normal_(0, math.sqrt(2. / n))\r\n else:\r\n raise NotImplementedError\r\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n elif isinstance(m, nn.Linear):\r\n stdv = 1. / math.sqrt(m.weight.size(1))\r\n m.weight.data.uniform_(-stdv, stdv)\r\n if m.bias is not None:\r\n m.bias.data.zero_()\r\n\r\n @property\r\n def arch(self):\r\n arch = ''\r\n for module in self.blocks:\r\n if isinstance(module, MobileInvertedResidualBlock):\r\n index = module.mobile_inverted_conv.mask.cpu().detach().numpy().argmax()\r\n arch +=f'{index}-'\r\n return arch\r\n"
] | [
[
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ModuleList"
]
] |
shatadru99/archai | [
"8501080f8ecc73327979c02387e02011efb4c335"
] | [
"archai/common/trainer.py"
] | [
"# Copyright (c) Microsoft Corporation.\r\n# Licensed under the MIT license.\r\n\r\nfrom typing import Callable, Tuple, Optional\r\n\r\nimport torch\r\nfrom torch import nn, Tensor\r\nfrom torch.optim.optimizer import Optimizer\r\nfrom torch.optim.lr_scheduler import _LRScheduler\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom overrides import EnforceOverrides\r\n\r\nfrom archai.common.metrics import Metrics\r\nfrom archai.common.tester import Tester\r\nfrom archai.common.config import Config\r\nfrom archai.common import utils, ml_utils\r\nfrom archai.common.common import logger\r\nfrom archai.common.checkpoint import CheckPoint\r\nfrom archai.common.apex_utils import ApexUtils\r\nfrom archai.common.multi_optim import MultiOptim, OptimSched\r\n\r\n\r\nclass Trainer(EnforceOverrides):\r\n def __init__(self, conf_train:Config, model:nn.Module,\r\n checkpoint:Optional[CheckPoint])->None:\r\n # region config vars\r\n self.conf_train = conf_train\r\n conf_lossfn = conf_train['lossfn']\r\n self._aux_weight = conf_train['aux_weight']\r\n self._grad_clip = conf_train['grad_clip']\r\n self._drop_path_prob = conf_train['drop_path_prob']\r\n self._logger_freq = conf_train['logger_freq']\r\n self._title = conf_train['title']\r\n self._epochs = conf_train['epochs']\r\n self.conf_optim = conf_train['optimizer']\r\n self.conf_sched = conf_train['lr_schedule']\r\n self.batch_chunks = conf_train['batch_chunks']\r\n conf_validation = conf_train['validation']\r\n conf_apex = conf_train['apex']\r\n self._validation_freq = 0 if conf_validation is None else conf_validation['freq']\r\n # endregion\r\n\r\n self._apex = ApexUtils(conf_apex, logger)\r\n\r\n self._checkpoint = checkpoint\r\n self.model = model\r\n\r\n self._lossfn = ml_utils.get_lossfn(conf_lossfn)\r\n # using separate apex for Tester is not possible because we must use\r\n # same distributed model as Trainer and hence they must share apex\r\n self._tester = Tester(conf_validation, model, self._apex) \\\r\n if conf_validation else None\r\n self._metrics:Optional[Metrics] = None\r\n\r\n self._droppath_module = self._get_droppath_module()\r\n if self._droppath_module is None and self._drop_path_prob > 0.0:\r\n logger.warn({'droppath_module': None})\r\n\r\n self._start_epoch = -1 # nothing is started yet\r\n\r\n def fit(self, train_dl:DataLoader, val_dl:Optional[DataLoader])->Metrics:\r\n logger.pushd(self._title)\r\n\r\n self._metrics = Metrics(self._title, self._apex, logger_freq=self._logger_freq)\r\n\r\n # create optimizers and schedulers\r\n self._multi_optim = self.create_multi_optim(len(train_dl))\r\n # before checkpoint restore, convert to amp\r\n self.model = self._apex.to_amp(self.model, self._multi_optim,\r\n batch_size=train_dl.batch_size)\r\n\r\n self._lossfn = self._lossfn.to(self.get_device())\r\n\r\n self.pre_fit(train_dl, val_dl)\r\n\r\n # we need to restore checkpoint after all objects are created because\r\n # restoring checkpoint requires load_state_dict calls on these objects\r\n self._start_epoch = 0\r\n # do we have a checkpoint\r\n checkpoint_avail = self._checkpoint is not None\r\n checkpoint_val = checkpoint_avail and 'trainer' in self._checkpoint\r\n resumed = False\r\n if checkpoint_val:\r\n # restore checkpoint\r\n resumed = True\r\n self.restore_checkpoint()\r\n elif checkpoint_avail: # TODO: bad checkpoint?\r\n self._checkpoint.clear()\r\n logger.warn({'resumed': resumed, 'checkpoint_avail': checkpoint_avail,\r\n 'checkpoint_val': checkpoint_val,\r\n 'start_epoch': self._start_epoch,\r\n 'total_epochs': 
self._epochs})\r\n logger.info({'aux_weight': self._aux_weight,\r\n 'grad_clip': self._grad_clip,\r\n 'drop_path_prob': self._drop_path_prob,\r\n 'validation_freq': self._validation_freq,\r\n 'batch_chunks': self.batch_chunks})\r\n\r\n if self._start_epoch >= self._epochs:\r\n logger.warn(f'fit done because start_epoch {self._start_epoch}>={self._epochs}')\r\n return self.get_metrics() # we already finished the run, we might be checkpointed\r\n\r\n logger.pushd('epochs')\r\n for epoch in range(self._start_epoch, self._epochs):\r\n logger.pushd(epoch)\r\n self._set_epoch(epoch, train_dl, val_dl)\r\n self.pre_epoch(train_dl, val_dl)\r\n self._train_epoch(train_dl)\r\n self.post_epoch(train_dl, val_dl)\r\n logger.popd()\r\n logger.popd()\r\n self.post_fit(train_dl, val_dl)\r\n\r\n # make sure we don't keep references to the graph\r\n del self._multi_optim\r\n\r\n logger.popd()\r\n return self.get_metrics()\r\n\r\n def create_multi_optim(self, train_len:int)->MultiOptim:\r\n logger.info({'steps_per_epoch': train_len,\r\n 'conf_sched': self.conf_sched.to_dict()})\r\n logger.info({'conf_optim': self.conf_optim.to_dict()})\r\n\r\n # optimizers, schedulers needs to be recreated for each fit call\r\n # as they have state specific to each run\r\n optim = self.create_optimizer(self.conf_optim, self.model.parameters())\r\n # create scheduler for optim before applying amp\r\n sched, sched_on_epoch = self.create_scheduler(self.conf_sched, optim, train_len)\r\n\r\n multi_optim = MultiOptim()\r\n multi_optim.append(OptimSched(optim, sched, sched_on_epoch))\r\n\r\n logger.info({'multi_optim_len': len(multi_optim)})\r\n\r\n return multi_optim\r\n\r\n def create_optimizer(self, conf_optim:Config, params)->Optimizer:\r\n optim = ml_utils.create_optimizer(conf_optim, params)\r\n return optim\r\n\r\n def create_scheduler(self, conf_sched:Config, optim:Optimizer, steps_per_epoch:int) \\\r\n ->Tuple[Optional[_LRScheduler],bool]:\r\n return ml_utils.create_lr_scheduler(conf_sched, self._epochs,\r\n optim, steps_per_epoch)\r\n\r\n def get_optimizer(self, index=0)->Optimizer:\r\n return self._multi_optim[index].optim\r\n def get_scheduler(self, index=0)->Optional[_LRScheduler]:\r\n return self._multi_optim[index].sched\r\n\r\n def get_metrics(self)->Metrics:\r\n return self._metrics\r\n\r\n def _set_epoch(self, epoch:int, train_dl:DataLoader, val_dl:Optional[DataLoader])->None:\r\n # optimizers such as bi-level may use val set for its own use\r\n # which causes reshuffling due to automatic epoch counting\r\n # here we make sure that val_dl has same epoch as train_dl\r\n if hasattr(train_dl.sampler, 'set_epoch'):\r\n train_dl.sampler.set_epoch(epoch)\r\n if val_dl is not None and hasattr(val_dl.sampler, 'set_epoch'):\r\n val_dl.sampler.set_epoch(epoch)\r\n\r\n # apply droppath\r\n self._set_drop_path(epoch, self._epochs)\r\n\r\n assert self._metrics.epochs() == epoch\r\n\r\n ######################### hooks #########################\r\n def pre_fit(self, train_dl:DataLoader, val_dl:Optional[DataLoader])->None:\r\n self._metrics.pre_run()\r\n\r\n def post_fit(self, train_dl:DataLoader, val_dl:Optional[DataLoader])->None:\r\n self._metrics.post_run()\r\n\r\n def pre_epoch(self, train_dl:DataLoader, val_dl:Optional[DataLoader])->None:\r\n self._metrics.pre_epoch(lr=self._multi_optim.get_lr(0, 0))\r\n\r\n def post_epoch(self, train_dl:DataLoader, val_dl:Optional[DataLoader])->None:\r\n val_metrics = None\r\n # first run test before checkpointing, otherwise we won't have val metrics\r\n if val_dl and self._tester and 
self._validation_freq > 0:\r\n if self._metrics.epochs() % self._validation_freq == 0 or \\\r\n self._metrics.epochs() >= self._epochs:\r\n\r\n # these asserts makes sure train and val are not ovrlapiing\r\n # assert train_dl.sampler.epoch == val_dl.sampler.epoch\r\n # tidx = list(train_dl.sampler)\r\n # vidx = list(val_dl.sampler)\r\n # assert all(ti not in vidx for ti in tidx)\r\n\r\n val_metrics = self._tester.test(val_dl)\r\n\r\n # update val metrics\r\n self._metrics.post_epoch(val_metrics, lr=self._multi_optim.get_lr(0, 0))\r\n\r\n # checkpoint if enabled with given freq or if this is the last epoch\r\n if self._checkpoint is not None and self._apex.is_master() and \\\r\n self._checkpoint.freq > 0 and (self._metrics.epochs() % self._checkpoint.freq == 0 or \\\r\n self._metrics.epochs() >= self._epochs):\r\n self._checkpoint.new()\r\n self.update_checkpoint(self._checkpoint)\r\n self._checkpoint.commit()\r\n\r\n def pre_step(self, x:Tensor, y:Tensor)->None:\r\n self._metrics.pre_step(x, y)\r\n\r\n def post_step(self, x:Tensor, y:Tensor, logits:Tensor, loss:Tensor,\r\n steps:int)->None:\r\n self._metrics.post_step(x, y, logits, loss, steps)\r\n ######################### hooks #########################\r\n\r\n def get_device(self):\r\n return self._apex.device\r\n\r\n def restore_checkpoint(self)->None:\r\n state = self._checkpoint['trainer']\r\n last_epoch = state['last_epoch']\r\n assert last_epoch >= 0 and last_epoch < self._epochs\r\n\r\n self._metrics.load_state_dict(state['metrics'])\r\n assert self._metrics.epochs() == last_epoch+1\r\n self._apex.load_state_dict(state['amp'])\r\n self.model.load_state_dict(state['model'])\r\n self._multi_optim.load_state_dict(state['multi_optim'])\r\n\r\n self._start_epoch = last_epoch + 1\r\n\r\n def update_checkpoint(self, checkpoint:CheckPoint)->None:\r\n # save all necessory state\r\n state = {\r\n 'last_epoch': self._metrics.epochs()-1,\r\n 'metrics': self._metrics.state_dict(),\r\n 'model': self.model.state_dict(),\r\n 'multi_optim': self._multi_optim.state_dict(),\r\n 'amp': self._apex.state_dict()\r\n }\r\n self._checkpoint['trainer'] = state\r\n\r\n def _train_epoch(self, train_dl: DataLoader)->None:\r\n steps = len(train_dl)\r\n self.model.train()\r\n\r\n logger.pushd('steps')\r\n for step, (x, y) in enumerate(train_dl):\r\n logger.pushd(step)\r\n assert self.model.training # derived class might alter the mode\r\n\r\n self.pre_step(x, y)\r\n\r\n self._multi_optim.zero_grad()\r\n\r\n # divide batch in to chunks if needed so it fits in GPU RAM\r\n if self.batch_chunks > 1:\r\n x_chunks, y_chunks = torch.chunk(x, self.batch_chunks), torch.chunk(y, self.batch_chunks)\r\n else:\r\n x_chunks, y_chunks = (x,), (y,)\r\n\r\n logits_chunks = []\r\n loss_sum, loss_count = 0.0, 0\r\n for xc, yc in zip(x_chunks, y_chunks):\r\n xc, yc = xc.to(self.get_device(), non_blocking=True), yc.to(self.get_device(), non_blocking=True)\r\n\r\n logits_c, aux_logits = self.model(xc), None\r\n tupled_out = isinstance(logits_c, Tuple) and len(logits_c) >=2\r\n # if self._aux_weight: # TODO: some other way to validate?\r\n # assert tupled_out, \"aux_logits cannot be None unless aux tower is disabled\"\r\n if tupled_out: # then we are using model created by desc\r\n logits_c, aux_logits = logits_c[0], logits_c[1]\r\n loss_c = self.compute_loss(self._lossfn, yc, logits_c,\r\n self._aux_weight, aux_logits)\r\n\r\n self._apex.backward(loss_c, self._multi_optim)\r\n\r\n loss_sum += loss_c.item() * len(logits_c)\r\n loss_count += len(logits_c)\r\n 
logits_chunks.append(logits_c.detach().cpu())\r\n\r\n # TODO: original darts clips alphas as well but pt.darts doesn't\r\n self._apex.clip_grad(self._grad_clip, self.model, self._multi_optim)\r\n\r\n self._multi_optim.step()\r\n\r\n # TODO: we possibly need to sync so all replicas are upto date\r\n self._apex.sync_devices()\r\n\r\n self.post_step(x, y,\r\n ml_utils.join_chunks(logits_chunks),\r\n torch.tensor(loss_sum/loss_count),\r\n steps)\r\n logger.popd()\r\n\r\n # end of step\r\n\r\n self._multi_optim.epoch()\r\n logger.popd()\r\n\r\n def compute_loss(self, lossfn:Callable, y:Tensor, logits:Tensor,\r\n aux_weight:float, aux_logits:Optional[Tensor])->Tensor:\r\n loss = lossfn(logits, y)\r\n if aux_weight > 0.0 and aux_logits is not None:\r\n loss += aux_weight * lossfn(aux_logits, y)\r\n return loss\r\n\r\n def _get_droppath_module(self)->Optional[nn.Module]:\r\n m = self.model\r\n if hasattr(self.model, 'module'): # for data parallel model\r\n m = self.model.module\r\n if hasattr(m, 'drop_path_prob'):\r\n return m\r\n return None\r\n\r\n def _set_drop_path(self, epoch:int, epochs:int)->None:\r\n if self._drop_path_prob and self._droppath_module is not None:\r\n drop_prob = self._drop_path_prob * epoch / epochs\r\n # set value as property in model (it will be used by forward())\r\n # this is necessory when using DataParallel(model)\r\n # https://github.com/pytorch/pytorch/issues/16885\r\n m = self.model\r\n if hasattr(self.model, 'module'): # for data parallel model\r\n m = self.model.module\r\n if hasattr(m, 'drop_path_prob'):\r\n m.drop_path_prob(drop_prob)\r\n else:\r\n raise RuntimeError('Drop path value {} was specified but model'\r\n ' does not have drop_path_prob() method'\\\r\n .format(self._drop_path_prob))\r\n"
] | [
[
"torch.tensor",
"torch.chunk"
]
] |
PaNOSC-ViNYL/wofrysrw | [
"0b69374d1820b13f553dc53cd957f8727c127f41"
] | [
"wofrysrw/beamline/optical_elements/gratings/srw_grating.py"
] | [
"import numpy\n\nfrom syned.beamline.optical_elements.gratings.grating import Grating\nfrom syned.beamline.shape import Ellipse, Rectangle, Circle\n\nfrom wofrysrw.beamline.optical_elements.srw_optical_element import SRWOpticalElementWithAcceptanceSlit\nfrom wofrysrw.propagator.wavefront2D.srw_wavefront import WavefrontPropagationParameters\nfrom wofrysrw.beamline.optical_elements.absorbers.srw_aperture import SRWAperture\n\nfrom vinyl_srw.srwlib import SRWLOptC, SRWLOptMir, SRWLOptG\nfrom vinyl_srw.srwlib import srwl, srwl_opt_setup_surf_height_1d, srwl_opt_setup_surf_height_2d, srwl_uti_read_data_cols\n\nfrom wofrysrw.beamline.optical_elements.mirrors.srw_mirror import Orientation, TreatInputOutput, ApertureShape, SimulationMethod\n\nclass SRWGrating(Grating, SRWOpticalElementWithAcceptanceSlit):\n def __init__(self,\n name = \"Undefined\",\n optical_element_displacement = None,\n tangential_size = 1.2,\n sagittal_size = 0.01,\n grazing_angle = 0.003,\n vertical_position_of_mirror_center = 0.0,\n horizontal_position_of_mirror_center = 0.0,\n orientation_of_reflection_plane = Orientation.UP,\n invert_tangent_component = False,\n add_acceptance_slit=False,\n height_profile_data_file = \"mirror.dat\",\n height_profile_data_file_dimension = 1,\n height_amplification_coefficient = 1.0,\n diffraction_order = 1,\n grooving_density_0 =800, # groove density [lines/mm] (coefficient a0 in the polynomial groove density: a0 + a1*y + a2*y^2 + a3*y^3 + a4*y^4)\n grooving_density_1 =0.0, # groove density polynomial coefficient a1 [lines/mm^2]\n grooving_density_2 =0.0, # groove density polynomial coefficient a2 [lines/mm^3]\n grooving_density_3 =0.0, # groove density polynomial coefficient a3 [lines/mm^4]\n grooving_density_4 =0.0, # groove density polynomial coefficient a4 [lines/mm^5]\n grooving_angle = 0.0 # angle between the grove direction and the sagittal direction of the substrate\n ):\n\n SRWOpticalElementWithAcceptanceSlit.__init__(self,\n optical_element_displacement=optical_element_displacement,\n tangential_size = tangential_size,\n sagittal_size = sagittal_size,\n grazing_angle = grazing_angle,\n vertical_position_of_mirror_center = vertical_position_of_mirror_center,\n horizontal_position_of_mirror_center = horizontal_position_of_mirror_center,\n orientation_of_reflection_plane = orientation_of_reflection_plane,\n invert_tangent_component = invert_tangent_component,\n add_acceptance_slit = add_acceptance_slit)\n\n Grating.__init__(self,\n name=name,\n boundary_shape=Rectangle(x_left=horizontal_position_of_mirror_center - 0.5*sagittal_size,\n x_right=horizontal_position_of_mirror_center + 0.5*sagittal_size,\n y_bottom=vertical_position_of_mirror_center - 0.5*tangential_size,\n y_top=vertical_position_of_mirror_center + 0.5*tangential_size),\n surface_shape=self.get_shape(),\n ruling=grooving_density_0*1e3)\n\n self.height_profile_data_file = height_profile_data_file\n self.height_profile_data_file_dimension = height_profile_data_file_dimension\n self.height_amplification_coefficient = height_amplification_coefficient\n\n self.diffraction_order = diffraction_order\n\n self.grooving_density_0 = grooving_density_0\n self.grooving_density_1 = grooving_density_1\n self.grooving_density_2 = grooving_density_2\n self.grooving_density_3 = grooving_density_3\n self.grooving_density_4 = grooving_density_4\n self.grooving_angle = grooving_angle\n\n def get_alpha_angle(self):\n return self.grazing_angle\n\n def get_beta_angle(self, photon_energy):\n wavelength = 1.239842e-3/photon_energy # in mm\n\n 
return numpy.arcsin(wavelength*self.grooving_density_0 - numpy.cos(self.get_alpha_angle())) # Grating Output Angle\n\n def get_deflection_angle(self, photon_energy):\n return self.get_alpha_angle() + self.get_beta_angle(photon_energy) + 1.57079632679 # Grating Deflection Angle\n\n def get_output_orientation_vectors(self, photon_energy):\n deflection_angle = self.get_deflection_angle(photon_energy)\n tangent = 1.0 if not self.invert_tangent_component else -1.0\n\n if self.orientation_of_reflection_plane == Orientation.UP:\n return 0, numpy.sin(deflection_angle), numpy.cos(deflection_angle), tangent, 0.0\n elif self.orientation_of_reflection_plane == Orientation.DOWN:\n return 0, -numpy.sin(deflection_angle), numpy.cos(deflection_angle), tangent, 0.0\n elif self.orientation_of_reflection_plane == Orientation.LEFT:\n return numpy.sin(deflection_angle), 0, numpy.cos(deflection_angle), 0.0, tangent\n elif self.orientation_of_reflection_plane == Orientation.RIGHT:\n return -numpy.sin(deflection_angle), 0, numpy.cos(deflection_angle), 0.0, tangent\n\n def get_shape(self):\n raise NotImplementedError()\n\n def applyOpticalElement(self, wavefront=None, parameters=None, element_index=None):\n optical_elements, propagation_parameters = super(SRWGrating, self).create_propagation_elements()\n\n if not self.height_profile_data_file is None:\n optical_elements.append(self.get_optTrEr(wavefront))\n propagation_parameters.append(WavefrontPropagationParameters().to_SRW_array())\n\n optBL = SRWLOptC(optical_elements, propagation_parameters)\n\n srwl.PropagElecField(wavefront, optBL)\n\n return wavefront\n\n def add_to_srw_native_array(self, oe_array = [], pp_array=[], parameters=None, wavefront=None):\n super(SRWGrating, self).add_to_srw_native_array(oe_array, pp_array, parameters)\n\n if not self.height_profile_data_file is None:\n oe_array.append(self.get_optTrEr(wavefront))\n pp_array.append(WavefrontPropagationParameters().to_SRW_array())\n\n def get_substrate_mirror(self):\n nvx, nvy, nvz, tvx, tvy = self.get_orientation_vectors()\n x, y = self.getXY()\n\n if isinstance(self.get_boundary_shape(), Rectangle):\n ap_shape = ApertureShape.RECTANGULAR\n elif isinstance(self.get_boundary_shape(), Ellipse) or isinstance(self.get_boundary_shape(), Circle):\n ap_shape = ApertureShape.ELLIPTIC\n\n return self.get_SRWLOptMir(nvx, nvy, nvz, tvx, tvy, x, y, ap_shape)\n\n\n def toSRWLOpt(self):\n substrate_mirror = self.get_substrate_mirror()\n\n grating = SRWLOptG(_mirSub=substrate_mirror,\n _m=self.diffraction_order,\n _grDen =self.grooving_density_0,\n _grDen1=self.grooving_density_1,\n _grDen2=self.grooving_density_2,\n _grDen3=self.grooving_density_3,\n _grDen4=self.grooving_density_4,\n _grAng=self.grooving_angle)\n\n return grating\n\n\n def get_SRWLOptMir(self, nvx, nvy, nvz, tvx, tvy, x, y, ap_shape):\n mirror = SRWLOptMir()\n\n mirror.set_dim_sim_meth(_size_tang=self.tangential_size,\n _size_sag=self.sagittal_size,\n _ap_shape=ap_shape,\n _sim_meth=SimulationMethod.THICK,\n _treat_in_out=TreatInputOutput.WAVEFRONT_INPUT_CENTER_OUTPUT_CENTER)\n mirror.set_orient(_nvx=nvx,\n _nvy=nvy,\n _nvz=nvz,\n _tvx=tvx,\n _tvy=tvy,\n _x = x,\n _y = y)\n\n return mirror\n\n def get_optTrEr(self, wavefront):\n if self.orientation_of_reflection_plane == Orientation.LEFT or self.orientation_of_reflection_plane == Orientation.RIGHT:\n dim = 'x'\n elif self.orientation_of_reflection_plane == Orientation.UP or self.orientation_of_reflection_plane == Orientation.DOWN:\n dim = 'y'\n\n if self.height_profile_data_file_dimension == 
1:\n height_profile_data = srwl_uti_read_data_cols(self.height_profile_data_file,\n _str_sep='\\t',\n _i_col_start=0,\n _i_col_end=1)\n\n optTrEr = srwl_opt_setup_surf_height_1d(_height_prof_data=height_profile_data,\n _ang=self.grazing_angle,\n _ang_r=self.get_deflection_angle(wavefront.get_photon_energy()),\n _dim=dim,\n _amp_coef=self.height_amplification_coefficient)\n\n elif self.height_profile_data_file_dimension == 2:\n height_profile_data = srwl_uti_read_data_cols(self.height_profile_data_file,\n _str_sep='\\t')\n\n optTrEr = srwl_opt_setup_surf_height_2d(_height_prof_data=height_profile_data,\n _ang=self.grazing_angle,\n _ang_r=self.get_deflection_angle(wavefront.get_photon_energy()),\n _dim=dim,\n _amp_coef=self.height_amplification_coefficient)\n\n return optTrEr\n\n\n\n def to_python_code(self, data=None):\n oe_name = data[0]\n wavefront = data[1]\n\n nvx, nvy, nvz, tvx, tvy = self.get_orientation_vectors()\n x, y = self.getXY()\n\n if isinstance(self.get_boundary_shape(), Rectangle):\n ap_shape = ApertureShape.RECTANGULAR\n elif isinstance(self.get_boundary_shape(), Ellipse) or isinstance(self.get_boundary_shape(), Circle):\n ap_shape = ApertureShape.ELLIPTIC\n\n if self.add_acceptance_slit:\n slit = SRWAperture()\n slit.fromSRWLOpt(self.get_acceptance_slit())\n\n text_code = slit.to_python_code(data=[\"acceptance_slits_\" + oe_name])\n text_code += \"\\n\"\n else:\n text_code = \"\"\n\n text_code += self.to_python_code_aux(nvx, nvy, nvz, tvx, tvy, x, y, ap_shape)\n\n text_code += \"substrate_mirror.set_dim_sim_meth(_size_tang=\" + str(self.tangential_size) + \",\" + \"\\n\"\n text_code += \" _size_sag=\" + str(self.sagittal_size) + \",\" + \"\\n\"\n text_code += \" _ap_shape='\" + str(ap_shape) + \"',\" + \"\\n\"\n text_code += \" _sim_meth=\" + str(SimulationMethod.THICK) + \",\" + \"\\n\"\n text_code += \" _treat_in_out=\" + str(TreatInputOutput.WAVEFRONT_INPUT_CENTER_OUTPUT_CENTER) + \")\" + \"\\n\"\n\n text_code += \"substrate_mirror.set_orient(_nvx=\" + str(nvx) + \",\" + \"\\n\"\n text_code += \" _nvy=\" + str(nvy) + \",\" + \"\\n\"\n text_code += \" _nvz=\" + str(nvz) + \",\" + \"\\n\"\n text_code += \" _tvx=\" + str(tvx) + \",\" + \"\\n\"\n text_code += \" _tvy=\" + str(tvy) + \",\" + \"\\n\"\n text_code += \" _x=\" + str(x) + \",\" + \"\\n\"\n text_code += \" _y=\" + str(y) + \")\" + \"\\n\"\n\n text_code += \"\\n\"\n\n text_code += oe_name + \"=\"+ \"SRWLOptG(_mirSub=substrate_mirror\" + \",\" + \"\\n\"\n text_code += \" _m=\" + str(self.diffraction_order) + \",\" + \"\\n\"\n text_code += \" _grDen=\" + str(self.grooving_density_0) + \",\" + \"\\n\"\n text_code += \" _grDen1=\"+ str(self.grooving_density_1) + \",\" + \"\\n\"\n text_code += \" _grDen2=\"+ str(self.grooving_density_2) + \",\" + \"\\n\"\n text_code += \" _grDen3=\"+ str(self.grooving_density_3) + \",\" + \"\\n\"\n text_code += \" _grDen4=\"+ str(self.grooving_density_4) + \",\" + \"\\n\"\n text_code += \" _grAng= \"+ str(self.grooving_angle) + \")\" + \"\\n\"\n\n if not self.height_profile_data_file is None:\n text_code += \"\\n\"\n\n if self.orientation_of_reflection_plane == Orientation.LEFT or self.orientation_of_reflection_plane == Orientation.RIGHT:\n dim = 'x'\n elif self.orientation_of_reflection_plane == Orientation.UP or self.orientation_of_reflection_plane == Orientation.DOWN:\n dim = 'y'\n\n if self.height_profile_data_file_dimension == 1:\n text_code += \"height_profile_data = srwl_uti_read_data_cols('\" + self.height_profile_data_file + \"',\" + \"\\n\"\n text_code += \" 
_str_sep='\\\\t',\" + \"\\n\"\n text_code += \" _i_col_start=0,\" + \"\\n\"\n text_code += \" _i_col_end=1)\" + \"\\n\"\n\n text_code += \"optTrEr_\" + oe_name + \" = srwl_opt_setup_surf_height_1d(_height_prof_data=height_profile_data,\" + \"\\n\"\n text_code += \" _ang=\"+ str(self.grazing_angle) + \",\" + \"\\n\"\n text_code += \" _ang_r=\"+ str(self.get_deflection_angle(wavefront.get_photon_energy())) + \",\" + \"\\n\"\n text_code += \" _dim='\"+ dim + \"',\" + \"\\n\"\n text_code += \" _amp_coef=\"+ str(self.height_amplification_coefficient) + \")\" + \"\\n\"\n\n elif self.height_profile_data_file_dimension == 2:\n text_code += \"height_profile_data = srwl_uti_read_data_cols('\" + self.height_profile_data_file + \"',\" + \"\\n\"\n text_code += \" _str_sep=\\'\\\\t\\')\" + \"\\n\"\n\n text_code += \"optTrEr_\" + oe_name + \" = srwl_opt_setup_surf_height_2d(_height_prof_data=height_profile_data,\" + \"\\n\"\n text_code += \" _ang=\"+ str(self.grazing_angle) + \",\" + \"\\n\"\n text_code += \" _ang_r=\"+ str(self.get_deflection_angle(wavefront.get_photon_energy())) + \",\" + \"\\n\"\n text_code += \" _dim='\"+ dim + \"',\" + \"\\n\"\n text_code += \" _amp_coef=\"+ str(self.height_amplification_coefficient) + \")\" + \"\\n\"\n\n return text_code\n\n def to_python_code_aux(self, nvx, nvy, nvz, tvx, tvy, x, y, ap_shape):\n raise NotImplementedError(\"This method is abstract\")\n"
] | [
[
"numpy.sin",
"numpy.cos"
]
] |
avilay/utils | [
"8c8d5f61173622e4d1c76094ad877c6d097f0972"
] | [
"ml/bindata.py"
] | [
"import os\nfrom collections import namedtuple\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\nfrom urllib.parse import urlsplit, urlunsplit\n\nimport numpy as np\nimport pandas as pd\nimport sklearn.datasets as skdata\nfrom google.cloud import storage\nfrom google.cloud.exceptions import NotFound\nfrom torch.utils.data import Dataset\n\nTrainValTest = namedtuple(\"TrainValTest\", [\"trainset\", \"valset\", \"testset\"])\n\n\ndef build_url(scheme, netloc, path):\n return urlunsplit((scheme, netloc, str(path), \"\", \"\"))\n\n\nclass DataNotFoundError(Exception):\n def __init__(self, path):\n super().__init__(f\"Data not found at {path}\")\n self.path = path\n\n def __repr__(self):\n return f\"Data not found at {self.path}\"\n\n\nclass BinDataset(Dataset):\n def __init__(self, X, Y):\n super().__init__()\n if X.shape[0] != Y.shape[0]:\n raise ValueError(\"X and Y must have the same number of samples.\")\n self._X = X\n self._Y = Y\n\n def __getitem__(self, idx):\n return (self._X[idx], self._Y[idx])\n\n def __len__(self):\n return self._X.shape[0]\n\n @classmethod\n def generate(\n cls,\n n_samples,\n train_split=0.7,\n val_split=0.2,\n test_split=0.1,\n n_features=20,\n n_informative=10,\n n_redundant=7,\n n_repeated=3,\n flip_y=0.05, # larger values will make classification hard\n class_sep=0.5, # larger values will make classification easy\n random_state=10,\n ):\n X, Y = skdata.make_classification(\n n_samples=n_samples,\n n_features=n_features,\n n_informative=n_informative,\n n_redundant=n_redundant,\n n_repeated=n_repeated,\n n_classes=2,\n flip_y=flip_y,\n class_sep=class_sep,\n random_state=random_state,\n )\n X = X.astype(np.float32)\n Y = Y.astype(np.int8)\n\n splits = (train_split, val_split, test_split)\n test_size = np.floor(n_samples * (splits[2] / sum(splits))).astype(np.int)\n val_size = np.floor(n_samples * (splits[1] / sum(splits))).astype(np.int)\n train_size = n_samples - val_size - test_size\n\n if test_size:\n test_slice = slice(0, test_size)\n testset = cls(X[test_slice], Y[test_slice])\n else:\n testset = None\n\n if val_size:\n val_slice = slice(test_size, test_size + val_size)\n valset = cls(X[val_slice], Y[val_slice])\n else:\n valset = None\n\n if train_size:\n train_slice = slice(test_size + val_size, n_samples)\n trainset = cls(X[train_slice], Y[train_slice])\n else:\n trainset = None\n\n return TrainValTest(trainset=trainset, valset=valset, testset=testset)\n\n @classmethod\n def _load(cls, dataroot, filenames):\n datasets = []\n\n # dataroot is in the form scheme://netloc/path (path includes the leading /)\n flds = urlsplit(dataroot)\n if flds.scheme == \"file\":\n # file:///path/to/directory\n # ignore netloc\n for filename in filenames:\n path = Path(flds.path) / filename\n try:\n dataset = cls._tensorify(path)\n datasets.append(dataset)\n except FileNotFoundError:\n raise DataNotFoundError(path)\n elif flds.scheme == \"gs\":\n # gs://bucket/prefix\n bucket = flds.netloc\n prefix = Path(flds.path).relative_to(\"/\") # get rid of the leading /\n for filename in filenames:\n blobname = str(prefix / filename)\n try:\n dataset = cls._load_from_gcp(bucket, blobname)\n datasets.append(dataset)\n except NotFound:\n raise DataNotFoundError(f\"gs://{bucket}/{blobname}\")\n else:\n raise ValueError(\n f\"Unsupported scheme {flds.scheme}! 
Use either file:// or gcp://.\"\n )\n\n return datasets\n\n @classmethod\n def load_train_val_single(cls, dataroot):\n return cls._load(dataroot, [\"train.csv\", \"val.csv\"])\n\n @classmethod\n def load_test_single(cls, dataroot):\n return cls._load(dataroot, [\"test.csv\"])[0]\n\n @classmethod\n def load_train_val_partitioned(cls, dataroot, part_nums):\n train_filenames = [f\"train_part_{i:02d}.csv\" for i in part_nums]\n val_filenames = [f\"val_part_{i:02d}.csv\" for i in part_nums]\n return cls._load(dataroot, train_filenames), cls._load(dataroot, val_filenames)\n\n @classmethod\n def load_test_partitioned(cls, dataroot, part_nums):\n filenames = [f\"test_{i:02d}.csv\" for i in part_nums]\n return cls._load(dataroot, filenames)\n\n @classmethod\n def _load_from_gcp(cls, bucket, blobname):\n client = storage.Client()\n bucket = client.bucket(bucket)\n blob = bucket.blob(blobname)\n with NamedTemporaryFile(\"wb\", delete=False) as f:\n blob.download_to_file(f)\n dataset = cls._tensorify(f.name)\n os.remove(f.name)\n return dataset\n\n @classmethod\n def _tensorify(cls, path):\n data = pd.read_csv(path).values\n X = data[:, :-1].astype(np.float32)\n Y = data[:, -1].astype(np.int8)\n return cls(X, Y)\n\n def _to_csv(self, fd):\n n_features = self[0][0].shape[0]\n feature_names = [f\"f{i}\" for i in range(1, n_features + 1)] + [\"y\"]\n header = \",\".join(feature_names)\n print(header, file=fd)\n for x, y in self:\n row = \",\".join([str(x_i.item()) for x_i in x])\n row += f\",{y}\"\n print(row, file=fd)\n\n def _save_to_file(self, path):\n if path.exists():\n raise ValueError(f\"{path} already exists! Delete before reusing this path.\")\n os.makedirs(path.parent, exist_ok=True)\n with open(path, \"wt\") as f:\n self._to_csv(f)\n\n def _save_to_gcp(self, bucket, blobname):\n client = storage.Client()\n bucket = client.bucket(bucket)\n blob = bucket.blob(blobname)\n with NamedTemporaryFile(\"wt\", delete=False) as f:\n self._to_csv(f)\n blob.upload_from_filename(f.name)\n os.remove(f.name)\n\n def save(self, path):\n path = urlsplit(path)\n if path.scheme == \"file\":\n self._save_to_file(Path(path.path))\n elif path.scheme == \"gs\":\n self._save_to_gcp(\n bucket=path.netloc, blobname=str(Path(path.path).relative_to(\"/\"))\n )\n else:\n raise ValueError(\n f\"Unsupported scheme {path.scheme}! Use either file:// or gcp://.\"\n )\n return self\n\n def partition(self, n_parts, drop_last=False):\n part_len = len(self) // n_parts\n parts_indexes = _partition(list(range(len(self))), part_len, drop_last)\n parted_datasets = []\n for part_indexes in parts_indexes:\n part_X = self._X[part_indexes]\n part_Y = self._Y[part_indexes]\n parted_ds = BinDataset(part_X, part_Y)\n parted_datasets.append(parted_ds)\n return parted_datasets\n\n @classmethod\n def remove(cls, *paths):\n for orig_path in paths:\n path = urlsplit(orig_path)\n if path.scheme == \"file\":\n cls._remove_file(Path(path.path))\n elif path.scheme == \"gs\":\n cls._remove_gcp(\n bucket=path.netloc, blobname=str(Path(path.path).relative_to(\"/\")),\n )\n else:\n raise ValueError(\n f\"Unsupported scheme {path.scheme}! 
Use either file:// or gcp://.\"\n )\n\n @classmethod\n def _remove_file(cls, path):\n if path.exists():\n os.remove(path)\n\n @classmethod\n def _remove_gcp(cls, bucket, blobname):\n client = storage.Client()\n bucket = client.bucket(bucket)\n blob = bucket.blob(blobname)\n if blob.exists():\n blob.delete()\n\n\ndef _partition(xs, size, drop_last):\n if len(xs) < size:\n return [] if drop_last else [xs]\n\n if len(xs) == size:\n return [xs]\n\n return [xs[:size]] + _partition(xs[size:], size, drop_last)\n"
] | [
[
"pandas.read_csv",
"sklearn.datasets.make_classification"
]
] |
ethanfuerst/nba_vis | [
"9996eb72a451565dddcbed76cadec9229d55fcbd"
] | [
"nba_data.py"
] | [
"import numpy as np\nimport pandas as pd\nimport requests\nimport re\nimport time\nimport os\nfrom bs4 import BeautifulSoup\nfrom nba_api.stats.endpoints import leaguestandings\n\n\ndef get_colors(teamname):\n if teamname == 'New Orleans Pelicans':\n teamname = 'New Orleans Pelicans Team'\n URL = f\"https://teamcolorcodes.com/{teamname.replace(' ', '-').lower()}-color-codes/\"\n page = requests.get(URL)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n colors = []\n for i in soup.find_all(class_='colorblock'):\n hex = re.compile(r'#(?:[0-9A-Fa-f]{6}|[0-9A-Fa-f]{3})(?=;|[^(]*\\))').findall(i.text)\n if len(hex) > 0:\n colors.append(hex[0])\n\n if teamname == 'San Antonio Spurs':\n colors[0] = '#000000'\n colors[1] = '#C4CED4'\n return colors\n\nteam_colors = {\n \"Atlanta Hawks\": [\"#e03a3e\", \"#C1D32F\", \"#26282A\", \"#C8102E\", \"#FFCD00\", \"#87674F\", \"#000000\"], \n \"Boston Celtics\": [\"#007A33\", \"#BA9653\", \"#963821\", \"#E59E6D\", \"#000000\"], \n \"Brooklyn Nets\": [\"#000000\", \"#FFFFFF\", \"#002A60\", \"#CD1041\", \"#777D84\", \"#C6CFD4\", \"#FFFFFF\"], \n \"Charlotte Bobcats\": [\"#2e5a8b\", \"#f26432\", \"#959da0\", \"#232020\"], \n \"Charlotte Hornets\": [\"#1d1160\", \"#00788C\", \"#A1A1A4\", \"#00778B\", \"#280071\", \"#F9423A\"], \n \"Chicago Bulls\": [\"#CE1141\", \"#000000\"], \n \"Cleveland Cavaliers\": [\"#860038\", \"#041E42\", \"#FDBB30\", \"#000000\", \"#E35205\", \"#5C88DA\", \"#27251F\", \"#DC3B34\", \"#04225C\", \"#FFFFFF\"], \n \"Dallas Mavericks\": [\"#00538C\", \"#B8C4CA\", \"#002B5E\", \"#000000\", \"#002855\", \"#00843D\"], \n \"Denver Nuggets\": [\"#0E2240\", \"#FEC524\", \"#8B2131\", \"#1D428A\", \"#00285E\", \"#418FDE\", \"#FDB927\", \"#041E42\", \"#9D2235\", \"#8B6F4E\"], \n \"Detroit Pistons\": [\"#C8102E\", \"#1d42ba\", \"#bec0c2\", \"#002D62\", \"#ED174C\", \"#006BB6\", \"#bec0c2\", \"#002D62\", \"#D50032\", \"#003DA5\", \"#041E42\", \"#9D2235\", \"#FFA300\", \"#006272\", \"#8A8D8F\", \"#000000\"], \n \"Golden State Warriors\": [\"#1D428A\", \"#ffc72c\", \"#006BB6\", \"#FDB927\", \"#26282A\", \"#041E42\", \"#BE3A34\", \"#FFA300\", \"#00A9E0\", \"#FFCD00\"], \n \"Houston Rockets\": [\"#CE1141\", \"#000000\", \"#C4CED4\", \"#041E42\", \"#2C7AA1\", \"#BA0C2F\", \"#8A8D8F\", \"#BA0C2F\", \"#000000\", \"#FFC72C\"], \n \"Indiana Pacers\": [\"#002D62\", \"#FDBB30\", \"#BEC0C2\"], \n \"Los Angeles Clippers\": [\"#c8102E\", \"#1d428a\", \"#BEC0C2\", \"#000000\"], \n \"Los Angeles Lakers\": [\"#552583\", \"#FDB927\", \"#000000\"], \n \"Memphis Grizzlies\": [\"#5D76A9\", \"#12173F\", \"#F5B112\", \"#707271\", \"#6189B9\", \"#00285E\", \"#FDB927\", \"#BED4E9\", \"#00B2A9\", \"#E43C40\", \"#BC7844\", \"#040204\", \"#FFFFFF\"], \n \"Miami Heat\": [\"#98002E\", \"#F9A01B\", \"#000000\", \"#41B6E6\", \"#db3eb1\", \"#000000\", \"#FFFFFF\", \"#BA0C2F\", \"#FEDD00\", \"#000000\"], \n \"Milwaukee Bucks\": [\"#00471B\", \"#D5C8AD\", \"#0077c0\", \"#000000\", \"#AC1a2f\", \"#274e37\", \"#95999d\", \"#FFFFFF\", \"#702F8A\", \"#2C5234\", \"#8A8D8F\", \"#009429\", \"#f7a500\", \"#FFFFFF\", \"#000000\"], \n \"Minnesota Timberwolves\": [\"#0C2340\", \"#236192\", \"#9ea2a2\", \"#78BE20\", \"#221C35\", \"#981D97\", \"#FFFFFF\", \"#236192\", \"#00843D\", \"#8A8D8F\", \"#000000\", \"#FFD700\", \"#C8102E\", \"#24429C\", \"#1CA64C\", \"#848A8C\", \"#FFFFFF\"], \n \"New Orleans Pelicans\": [\"#0C2340\", \"#C8102E\", \"#85714D\"], \n \"New York Knicks\": [\"#006BB6\", \"#F58426\", \"#BEC0C2\", \"#000000\", \"#0072CE\", \"#FE5000\", \"#8A8D8F\", \"#000000\"], \n \"Oklahoma 
City Thunder\": [\"#007ac1\", \"#ef3b24\", \"#002D62\", \"#fdbb30\"], \n \"Orlando Magic\": [\"#0077c0\", \"#C4ced4\", \"#000000\"], \n \"Philadelphia 76ers\": [\"#006bb6\", \"#ed174c\", \"#002B5C\", \"#c4ced4\", \"#006bb6\", \"#D50032\", \"#BB9754\", \"#040204\", \"#002F6C\", \"#D50032\"], \n \"Phoenix Suns\": [\"#1d1160\", \"#e56020\", \"#000000\", \"#63727A\", \"#F9AD1B\", \"#B95915\", \"#Bec0c2\", \"#FF6900\", \"#FE5000\", \"#EF3340\", \"#5F259F\", \"#000000\"], \n \"Portland Trail Blazers\": [\"#E03A3E\", \"#000000\"], \n \"Sacramento Kings\": [\"#5a2d81\", \"#63727A\", \"#000000\", \"#542e91\", \"#c4ced4\", \"#000000\"], \n \"San Antonio Spurs\": [\"#000000\", \"#C4CED4\", \"#8a8d8f\", \"#000000\", \"#EF426F\", \"#00B2A9\", \"#FF8200\"], \n \"Toronto Raptors\": [\"#ce1141\", \"#000000\", \"#A1A1A4\", \"#B4975A\", \"#753BBD\", \"#BA0C2F\", \"#8A8D8F\", \"#000000\"], \n \"Utah Jazz\": [\"#002B5C\", \"#F9A01B\", \"#F9A01B\", \"#00471B\", \"#3E2680\", \"#6CAEDF\", \"#753BBD\", \"#00A9E0\", \"#006272\", \"#954E4C\"], \n \"Washington Wizards\": [\"#002B5C\", \"#e31837\", \"#C4CED4\"]\n }\n\ntable_cols = ['Rank', 'Team', 'Record', 'Win %', 'Games Back', 'at Home', 'Away', 'vs. Division', \n 'PPG', 'Opponent PPG', 'Difference', 'Current Streak', 'Last 10 Games']\n\ndef conf_table_cols(conference):\n if conference == 'League':\n conference = 'Conference'\n\n cols = table_cols[:]\n cols.insert(8, f'vs. {conference}')\n \n return cols\n\ndef conf_table_data(season, conference):\n #! add in playoff string reading for previous years after this works for current year\n url = f'https://www.espn.com/nba/standings/_/season/{int(season) + 1}'\n\n if conference == 'League':\n url += '/group/league'\n \n dfs = pd.read_html(url)\n time.sleep(1)\n\n flatten = lambda t: [item for sublist in t for item in sublist]\n start_cols = ['Rank', 'Team', 'Record', 'PCT', 'GB', 'HOME', 'AWAY', 'DIV', 'CONF', 'PPG', 'OPP PPG',\n 'DIFF', 'STRK', 'L10']\n \n if conference == 'West':\n val = 3\n else:\n val = 1\n\n conf = dfs[val]\n\n teams = pd.DataFrame([dfs[val - 1].columns.values.tolist()[0]] + flatten(dfs[val - 1].values.tolist()))\n \n def playoff_str(x):\n if str(x)[5].isdigit() and str(x)[6].islower():\n return str(x)[6:8]\n elif str(x)[5].islower():\n return str(x)[5:7]\n else:\n return ''\n\n playoff_str_vals = teams.apply(playoff_str, axis=1)\n teams = pd.DataFrame([item.split(' ')[-1] for sublist in teams.values for item in sublist])\n\n teams = teams.replace({0:{i.split(' ')[-1]: i for i in list(team_colors.keys())}})\n teams['t'] = playoff_str_vals\n teams = teams.apply(lambda row: row[0] + ' -' + row['t'] if row['t'] != '' else row[0], axis=1)\n\n conf['Team'] = teams.apply(lambda x: x[:-1] if x.endswith(' ') else x)\n conf['PCT'] = round(conf['PCT'] * 100, 2).astype(str) + '%'\n conf['Record'] = conf['W'].astype(str) + '-' + conf['L'].astype(str)\n conf['Rank'] = range(1, len(conf) + 1)\n\n for j in ['PPG', 'OPP PPG', 'DIFF']:\n conf[j] = round(conf[j], 1)\n conf[j] = conf[j].astype(str)\n \n conf = conf.reindex(columns=start_cols).copy()\n conf.columns = conf_table_cols(conference)\n\n return conf.copy()\n\nscatter_vals = ['Team', 'Average Age', 'Wins', 'Losses', 'Pythagorean Wins', 'Pythagorean Losses', \n 'Margin of Victory', 'Strength of Schedule', 'Simple Rating System', 'Offensive Rating', \n 'Defensive Rating', 'Net Rating', 'Pace', 'Free Throw Attempt Rate', '3 Point Attempt Rate', \n 'True Shooting Percentage', 'Effective Field Goal Percentage', 'Turnover Percentage', \n 'Offensive Rebound 
Percentage', 'Free Throws Per Field Goal Attempt', \n 'Effective Field Goal Percentage Allowed', 'Opponent Turnover Percentage', \n 'Defensive Rebound Pecentage', 'Opponent Free Throws Per Field Goal Attempt', 'Attendance', \n 'Attendance Per Game']\n\ndef scatter_data(season):\n html = requests.get(f'http://www.basketball-reference.com/leagues/NBA_{int(season) + 1}.html').content\n time.sleep(1)\n cleaned_soup = BeautifulSoup(re.sub(rb\"<!--|-->\",rb\"\", html), features='lxml')\n misc_table = cleaned_soup.find('table', {'id':'misc_stats'})\n\n df = pd.read_html(str(misc_table))[0]\n df.columns = df.columns.get_level_values(1)\n df['Team'] = df['Team'].apply(lambda x: x if x[-1] != '*' else x[:-1])\n\n df = df.drop(['Rk', 'Arena'], axis=1).copy()\n\n df.columns = scatter_vals\n \n df = df[df['Team'] != 'League Average']\n df[['Wins', 'Losses']] = df[['Wins', 'Losses']].astype(int)\n\n return df\n\n\n#%%\n\n"
] | [
[
"pandas.read_html"
]
] |
pygeo/geoval | [
"48a21abb8de974328af6a16a70710a85713fda64"
] | [
"tests/test_data.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nThis file is part of GEOVAL.\n(c) 2012- Alexander Loew\nFor COPYING and LICENSE details, please refer to the LICENSE file\n\"\"\"\n\nimport sys\nsys.path.append('..')\n\nimport unittest\n\nfrom geoval.core import GeoData\nfrom geoval.region import RegionPolygon\n\nimport os\nimport matplotlib.pylab as pl\nimport numpy as np\nfrom scipy import stats\nfrom dateutil.rrule import rrule\nfrom dateutil.rrule import MONTHLY\nimport datetime\n\nimport tempfile\n\n\nclass TestData(unittest.TestCase):\n\n def setUp(self):\n self.D = GeoData(None, None)\n self.D._init_sample_object(nt=1000, ny=1, nx=1)\n self._tmpdir = tempfile.mkdtemp()\n\n def test_log_warning_Standard(self):\n x = self.D.copy()\n logfile = tempfile.mktemp(suffix='.log')\n os.environ['DATA_WARNING_FILE'] = logfile\n if os.path.exists(logfile):\n os.remove(logfile)\n x._log_warning('testlog', write_log=True)\n print(logfile)\n self.assertTrue(os.path.exists(logfile))\n\n def test_log_warning_WithEnvironmentVariable(self):\n x = self.D.copy()\n logfile = tempfile.mktemp(suffix='.log') # './tmpdir/data_warningXXX.log'\n os.environ.update({'DATA_WARNING_FILE' : logfile})\n if os.path.exists(logfile):\n os.remove(logfile)\n x._log_warning('testlog', write_log=True)\n self.assertTrue(os.path.exists(logfile))\n os.remove(logfile)\n\n def test_DataInitLabelNotNone(self):\n d = GeoData(None,None, label='testlabel')\n self.assertEqual(d.label, 'testlabel')\n\n def test_DataInitUnitNotNone(self):\n d = GeoData(None,None, unit='myfunkyunit')\n self.assertEqual(d.unit, 'myfunkyunit')\n\n def test_DataInitTimeCycleNotNone(self):\n d = GeoData(None,None, time_cycle=24)\n self.assertEqual(d.time_cycle, 24)\n\n #~ def test_get_time_indices(self):\n #~ d1 = pl.num2date(pl.datestr2num('2001-01-05'))\n #~ d2 = pl.num2date(pl.datestr2num('2001-05-05'))\n #~ self.D._oldtime = True\n #~ i1,i2 = self.D._get_time_indices(d1,d2)\n #~ s1 = str(pl.num2date(self.D.time[i1]))\n #~ s2 = str(pl.num2date(self.D.time[i2]))\n #~ self.assertEqual(s1,'2001-01-05 00:00:00+00:00')\n #~ self.assertEqual(s2,'2001-05-05 00:00:00+00:00')\n\n #~ def test_get_time_indices_startNone(self):\n #~ d2 = pl.num2date(pl.datestr2num('2001-05-05'))\n #~ self.D._oldtime = True\n #~ i1, i2 = self.D._get_time_indices(None, d2)\n #~ s1 = str(pl.num2date(self.D.time[i1]))\n #~ ref1 = str(pl.num2date(self.D.time[0]))\n #~ s2 = str(pl.num2date(self.D.time[i2]))\n #~ self.assertEqual(s1,ref1)\n #~ self.assertEqual(s2,'2001-05-05 00:00:00+00:00')\n\n #~ def test_get_time_indices_stopNone(self):\n #~ d1 = pl.num2date(pl.datestr2num('2001-01-05'))\n #~ self.D._oldtime = True\n #~ i1, i2 = self.D._get_time_indices(d1, None)\n #~ s1 = str(pl.num2date(self.D.time[i1]))\n #~ s2 = str(pl.num2date(self.D.time[i2]))\n #~ ref2 = str(pl.num2date(self.D.time[-1]))\n #~ self.assertEqual(s1,'2001-01-05 00:00:00+00:00')\n #~ self.assertEqual(s2,ref2)\n\n #~ def test_get_time_indices_InvalidSwappedDates(self):\n #~ d1 = pl.num2date(pl.datestr2num('2001-01-05'))\n #~ d2 = pl.num2date(pl.datestr2num('2001-05-05'))\n #~ self.D._oldtime = True\n #~ with self.assertRaises(ValueError):\n #~ i1,i2 = self.D._get_time_indices(d2,d1) # not that this is swapped\n\n def test_get_time_indices_InvalidDates(self):\n i1, i2 = self.D._get_time_indices(None, None)\n self.assertEqual(i1, 0)\n self.assertEqual(i2, len(self.D.time)-1)\n\n def test_sub_sample_InvalidGeometry(self):\n x = self.D.copy()\n x.data = np.random.random((3,4,5,6))\n with self.assertRaises(ValueError):\n 
x._sub_sample(3)\n\n def test_squeeze(self):\n self.D.squeezed = False\n self.D._squeeze()\n self.assertTrue(self.D.squeezed)\n\n def test_sub_sample(self):\n x = self.D.copy()\n\n # 3D data\n nt_org = self.D.nt\n tmp = np.random.random((nt_org, 50, 80))\n x.data = np.ma.array(tmp, mask=tmp != tmp)\n x._sub_sample(10)\n nt,ny,nx = x.shape\n # check geometry of results first\n self.assertEqual(nt, nt_org)\n self.assertEqual(ny, 5)\n self.assertEqual(nx, 8)\n # now check values\n self.assertEqual(x.data[0,0,0], tmp[0,0,0])\n self.assertEqual(x.data[172,0,0], tmp[172,0,0])\n\n #todo continue here with checks of values!\n #~ print x.data[10,0,0]\n #~ print tmp[10,8:12,8:12]\n #~ self.assertEqual(x.data[10,0,0], tmp[10,11,11])\n\n # 2D\n tmp = np.random.random((73, 92))\n x.data = np.ma.array(tmp, mask=tmp != tmp)\n x._sub_sample(5)\n # check geometry of results first\n ny,nx = x.shape\n self.assertEqual(ny, 14+1)\n self.assertEqual(nx, 18+1)\n\n def test_timeshift(self):\n d = self.D.copy()\n r = d.timeshift(1, return_data=True)\n\n self.assertEqual(d.data[0,0,0], r.data[-1,0,0])\n self.assertEqual(d.data[1,0,0], r.data[0,0,0])\n self.assertEqual(d.data[2,0,0], r.data[1,0,0])\n # time remains unchanged ?\n self.assertTrue(np.all(np.abs(1.- d.time / r.time) < 1.E-6))\n\n\n def test_timeshift_WithTimeShift(self):\n d = self.D.copy()\n r = d.timeshift(1, return_data=True, shift_time=True)\n\n self.assertEqual(d.data[0,0,0], r.data[-1,0,0])\n self.assertEqual(d.data[1,0,0], r.data[0,0,0])\n self.assertEqual(d.data[2,0,0], r.data[1,0,0])\n # time also shifted ?\n self.assertEqual(d.time[0], r.time[-1])\n self.assertEqual(d.time[1], r.time[0])\n self.assertEqual(d.time[2], r.time[1])\n\n def test_timeshift_ManipulateSelf(self):\n d = self.D.copy()\n ref = d.copy()\n d.timeshift(1, return_data=False)\n\n self.assertEqual(ref.data[0,0,0], d.data[-1,0,0])\n self.assertEqual(ref.data[1,0,0], d.data[0,0,0])\n self.assertEqual(ref.data[2,0,0], d.data[1,0,0])\n\n def test_shift_time_start_firstdate(self):\n d = self.D.copy()\n\n # shift data first to produce an invalid dataset\n d.timeshift(2, shift_time=True)\n\n # apply automatic correction\n d._shift_time_start_firstdate()\n\n self.assertTrue(np.all(np.abs(1. - d.time / self.D.time) < 1.E-6))\n self.assertTrue(np.all(np.abs(1. 
- d.data / self.D.data) < 1.E-6))\n\n def test_timeshiftN0(self):\n d = self.D.copy()\n r = d.timeshift(0)\n self.assertTrue(np.all(np.abs(1.-r/d.data) < 1.E-6))\n\n def test_timeshiftN0(self):\n d = self.D.copy()\n with self.assertRaises(ValueError):\n r = d.timeshift(-1)\n\n def test_timeshiftInvalidGeometry(self):\n d = self.D.copy()\n d.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n r = d.timeshift(2)\n\n #~ def test_oldtimeoffset_Invalid(self):\n #~ d = self.D.copy()\n #~ del d.time_str\n #~ with self.assertRaises(ValueError):\n #~ d._oldtimeoffset()\n\n #~ def test_oldtimeoffset_InvalidTimeStr(self):\n #~ d = self.D.copy()\n #~ d.time_str = 'no_time_str'\n #~ with self.assertRaises(ValueError):\n #~ d._oldtimeoffset()\n\n #~ def test_oldtimeoffset_Invalid(self):\n #~ d = self.D.copy()\n #~ d.time_str = 'hours'\n #~ self.assertEqual(d._oldtimeoffset(), 24.)\n #~ d.time_str = 'seconds'\n #~ self.assertEqual(d._oldtimeoffset(), 86400.)\n #~ d.time_str = 'days'\n #~ self.assertEqual(d._oldtimeoffset(), 1.)\n\n\n def test_get_temporal_mask(self):\n x = self.D.copy()\n\n with self.assertRaises(ValueError):\n xx = x.get_temporal_mask([1,5,11],mtype='invalidtype')\n\n # test monthly mask\n mm = x.get_temporal_mask([1,5,11],mtype='monthly')\n d = x.date[mm]\n for t in d:\n self.assertTrue(t.month in [1,5,11])\n\n # yearly mask\n ym = x.get_temporal_mask([2002],mtype='yearly')\n d = x.date[ym]\n for t in d:\n self.assertTrue(t.year in [2002])\n ym = x.get_temporal_mask([2003],mtype='yearly')\n d = x.date[ym]\n for t in d:\n self.assertTrue(t.year in [2003])\n\n def test_get_temporal_mask_InvalidOption(self):\n x = self.D.copy()\n with self.assertRaises(ValueError):\n mm = x.get_temporal_mask([1,5,11], mtype='nixtype')\n\n\n def test__get_date_from_month(self):\n x = self.D.copy()\n x.time_str = 'months since 1983-05-01 00:00:00'\n d1 = x._get_date_from_month(2)\n self.assertEqual(d1.year,1983)\n self.assertEqual(d1.month,7)\n self.assertEqual(d1.day,1)\n\n x.time_str = 'months since 1987-07-13 00:00:00'\n d1 = x._get_date_from_month(5)\n self.assertEqual(d1.year,1987)\n self.assertEqual(d1.month,12)\n self.assertEqual(d1.day,13)\n\n x.time_str = 'months since 1987-08-22 00:00:00'\n d1 = x._get_date_from_month(9)\n self.assertEqual(d1.year,1988)\n self.assertEqual(d1.month,5)\n self.assertEqual(d1.day,22)\n\n def test_get_climatology_InvalidGeometry(self):\n x = self.D.copy()\n x.data = np.random.random((2,3,4,5))\n x.time_cycle = 1\n with self.assertRaises(ValueError):\n c = x.get_climatology()\n\n\n\n def test_get_climatology(self):\n x = self.D.copy()\n\n # timecycle = 1\n x.time_cycle = 1\n r = x.data.mean(axis=0)\n c = x.get_climatology()\n d = np.abs(1.-r/c)\n self.assertTrue(np.all(d < 1.E-6))\n\n # ... 
same, but with object returned\n c = x.get_climatology(return_object=True, ensure_start_first=False)\n d = np.abs(1.-r/c.data)\n self.assertTrue(np.all(d < 1.E-6))\n\n # varying timecycles\n for time_cycle in [1,5,12,23]:\n x.time_cycle=time_cycle\n c = x.get_climatology(ensure_start_first=False)\n nt,ny,nx = x.shape\n r = np.zeros((time_cycle,ny,nx))\n n = np.zeros((time_cycle,ny,nx))\n cnt = 0\n for i in range(nt):\n if cnt % time_cycle == 0:\n cnt = 0\n r[cnt,:,:] = r[cnt,:,:] + x.data[i,:,:]\n n[cnt,:,:] = n[cnt,:,:] + (~x.data.mask[i,:,:]).astype('int')\n cnt +=1\n res = r / n # reference mean\n d = np.abs(1.-res/c)\n self.assertTrue(np.all(d < 1.E-6))\n\n def test_get_climatology_InvalidTimecycle(self):\n d = self.D.copy()\n if hasattr(d, 'time_cycle'):\n del d.time_cycle\n with self.assertRaises(ValueError):\n d.get_climatology()\n\n def test_get_deseasonalized_anomalyCurrent(self):\n # TODO check not only that it runs but also results\n d = self.D.copy()\n d.time_cycle = 1\n d.get_deseasonalized_anomaly(base='current')\n\n def test_get_deseasonalized_anomalyAll(self):\n # TODO check not only that it runs but also results\n d = self.D.copy()\n d.time_cycle = 1\n d.get_deseasonalized_anomaly(base='all')\n\n def test_get_deseasonalized_anomalyInvalidBase(self):\n d = self.D.copy()\n with self.assertRaises(ValueError):\n d.get_deseasonalized_anomaly(base='nixbase')\n\n def test_get_deseasonalized_anomalyInvalidTimeCycle(self):\n d = self.D.copy()\n if hasattr(d, 'time_cycle'):\n del d.time_cycle\n with self.assertRaises(ValueError):\n d.get_deseasonalized_anomaly(base='all')\n\n\n def test_set_valid_range(self):\n x = self.D.copy()\n tmp = np.random.random((100,200,300)) * 10. - 5.\n x.data = np.ma.array(tmp, mask=tmp != tmp)\n x._set_valid_range(-2., 2.)\n self.assertTrue(np.all(x.data >=-2.))\n self.assertTrue(np.all(x.data <=2.))\n\n x._set_valid_range(-0.5, 1.)\n self.assertTrue(np.all(x.data >=-0.5))\n self.assertTrue(np.all(x.data <=1.))\n\n def test_is_monthly(self):\n a = self.D.copy()\n b = self.D.copy()\n t=[]\n x = pl.datestr2num('2001-01-15')\n for i in range(20):\n t.append(x)\n x += 30\n t = np.asarray(t)\n b.time = t\n self.assertEqual(a._is_monthly(), False)\n self.assertEqual(b._is_monthly(), True)\n\n def test_is_monthly_MissingTime(self):\n x = self.D.copy()\n del x.time\n self.assertFalse(x._is_monthly())\n\n\n def test_detrend_InvalidGeometry(self):\n x = self.D.copy()\n x.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n x.detrend()\n\n\n def test_cut_bounding_box_InvalidGeometry(self):\n d = self.D.copy()\n d.data = np.random.random((2,3,4,5))\n with self.assertRaises(ValueError):\n y = d.cut_bounding_box(return_object=True)\n\n def test_cut_bounding_box(self):\n x = self.D.copy()\n # sample data with invalid boundaries\n t = np.random.random((6,6,5))\n #... left border 1 pix\n t[:,:,0] = np.nan\n #... top border 2pix\n t[:,0,:] = np.nan\n t[:,1,:] = np.nan\n #... 
right border only some pixels invalid\n t[:,0:4,-1] = np.nan\n\n x.data = np.ma.array(t, mask = np.isnan(t))\n y = x.cut_bounding_box(return_object=True)\n\n # left border\n self.assertEqual(y.data[0,0,0], x.data[0,2,1])\n self.assertEqual(y.data[2,0,0], x.data[2,2,1])\n\n # right border\n #todo\n #~ print x.data[0,2,:]\n #~ print y.data[0,0,:]\n #~ self.assertEqual(y.data[0,0,-2], x.data[0,2,-2])\n\n\n def test_add(self):\n x = self.D.copy()\n y = self.D.copy()\n y.data += 3.\n c = x.add(y)\n self.assertTrue( np.all(np.abs(1.- c.data[0,0,0] / (x.data[0,0,0]*2.+3.)) < 1.E-6))\n self.assertTrue(np.all(np.abs(1. - c.data[100,0,0] / (x.data[100,0,0]*2.+3.)) < 1.E-6))\n\n def test_sub(self):\n x = self.D.copy()\n y = self.D.copy()\n y.data += 3.\n c = x.sub(y)\n self.assertTrue(np.abs(1.-c.data[0,0,0]/-3.) < 1.E-6)\n self.assertTrue(np.abs(1.-c.data[100,0,0]/-3.) < 1.E-6)\n\n def test_addc(self):\n r1 = self.D.addc(5.,copy=True)\n self.assertAlmostEqual(r1.data[4,0,0]-5.,self.D.data[4,0,0], 8)\n\n def testAddcWithoutDataCopy(self):\n ref = self.D.data[5,0,0]\n self.D.addc(666.,copy=False)\n self.assertEqual(ref+666.,self.D.data[5,0,0])\n\n def test_get_percentile(self):\n for p in [0.05, 0.5, 0.95]:\n r = self.D.get_percentile(p, return_object = False)[0,0]\n res = stats.mstats.scoreatpercentile(self.D.data[:,0,0], p * 100.)\n self.assertAlmostEqual(r, res)\n\n r = self.D.get_percentile(p, return_object = True)\n self.assertAlmostEqual(r.data[0,0], res)\n\n def test_timn(self):\n A = self.D.copy()\n B = self.D.copy()\n me = A.timmean(return_object=False)\n su = B.timsum(return_object=False)\n an = A.timn(return_object=False) # ndarray\n bn = B.timn(return_object=True) # Data object\n r = su/me\n self.assertEqual(r[0,0], an[0,0])\n self.assertEqual(r[0,0], bn.data[0,0])\n\n def test_flipud(self):\n x = self.D.copy()\n y = x.copy()\n y._flipud()\n self.assertEqual(x.data[0,0,0], y.data[0,-1,0])\n self.assertEqual(x.data[0,-1,0], y.data[0,0,0])\n\n\n def test_flipud_InvalidGeometry(self):\n x = self.D.copy()\n x.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n x._flipud()\n x.data = np.random.random((10,))\n with self.assertRaises(ValueError):\n x._flipud()\n\n\n @unittest.skip('wait for bugfree scipy')\n def test_correlate1(self):\n #test for correlation calculations\n r,p = self.D.correlate(self.D, pthres=1.01) #1) correlation with itself (returns data objects)\n self.assertEqual(r.data[0,0], 1.)\n self.assertEqual(p.data[0,0], 0.)\n\n def test_correlate_normalize(self):\n # TODO check validity\n r,p = self.D.correlate(self.D, pthres=1.01, detrend=True)\n\n def test_correlate_spearman(self):\n # TODO check validity\n r,p = self.D.correlate(self.D, pthres=1.01, spearman=True)\n\n def test_correlate_WithInvalidGeometries(self):\n x = self.D.copy()\n y = self.D.copy()\n y.data = np.random.random((10, 200, 273))\n with self.assertRaises(ValueError):\n x.correlate(y)\n\n def get_date_from_months_WithInvalidTimestr(self):\n d = self.D\n d.time_str = 'some_invalid_str'\n with self.assertRaises(ValueError):\n d._get_date_from_month(10)\n\n def test_set_time(self):\n # NB: num2date gives number of days PLUS one (see num2date docstring)\n self.D.time_str = \"days since 0001-01-01 00:00:00\"\n self.D.time = np.array([1.])\n self.D.set_time()\n self.assertEqual(self.D.time[0],1.)\n\n\n def test_mesh_latlon_vector(self):\n d = self.D.copy()\n lon = np.arange(-180., 180.).astype('float')\n lat = np.arange(-90., 90.).astype('float')\n d.lon = lon\n d.lat = lat\n 
d._mesh_lat_lon()\n\n self.assertEqual(d.lon.shape, (180, 360))\n self.assertTrue(np.all(d.lon[5,:] - lon == 0.))\n self.assertTrue(np.all(d.lat[:,5] - lat == 0.))\n\n\n def testTemporalTrendNoTimeNormalization(self):\n y = np.arange(len(self.D.time))*2.+8.\n self.D.data[:, 0, 0] = y\n\n # reference solution\n slope, intercept, r_value, p_value, std_err = stats.linregress(self.D.time,y)\n\n # calculate temporal correlation WITHOUT normalization of time\n R, S, I, P = self.D.temporal_trend(return_object=False) # no object is returned (default)\n self.assertEqual(R[0,0], r_value)\n self.assertEqual(S[0,0], slope)\n\n R, S, I, P = self.D.temporal_trend(return_object=True) # TODO further tests for slope and significance\n self.assertEqual(R.data[0, 0], r_value)\n self.assertEqual(S.data[0,0], slope)\n\n def test_timmean_InvalidDimension(self):\n with self.assertRaises(ValueError):\n d = self.D.copy()\n d.data = np.random.random((10, 20, 30, 40))\n d.timmean()\n\n def test_timstd_InvalidDimension(self):\n with self.assertRaises(ValueError):\n d = self.D.copy()\n d.data = np.random.random((10, 20, 30, 40))\n d.timstd()\n\n def test_timstd_2D(self):\n d = self.D.copy()\n d.data = np.random.random((10, 20))\n r = d.timstd()\n self.assertEqual(r, None)\n\n def test_timvar_2D(self):\n d = self.D.copy()\n d.data = np.random.random((10, 20))\n r = d.timvar()\n self.assertEqual(r, None)\n\n def test_timstd_2D(self):\n d = self.D.copy()\n d.data = np.random.random((10, 20))\n r = d.timstd()\n self.assertEqual(r, None)\n\n def test_timmean_timvar_consistency(self):\n d = self.D.copy()\n s = d.timstd(return_object=False)\n v = d.timvar(return_object=False)\n r = np.abs(1.- v / (s*s))\n self.assertTrue(np.all(r < 1.E-6))\n\n def test_timmin_InvalidDimension(self):\n with self.assertRaises(ValueError):\n d = self.D.copy()\n d.data = np.random.random((10, 20, 30, 40))\n d.timmin()\n\n def test_timmin_2D(self):\n d = self.D.copy()\n d.data = d.data[0,:,:]\n r= d.timmin(return_object=False)\n self.assertEqual(self.D.data[0,0,0], r[0,0])\n\n def test_timmax_InvalidDimension(self):\n with self.assertRaises(ValueError):\n d = self.D.copy()\n d.data = np.random.random((10, 20, 30, 40))\n d.timmax()\n\n def test_timmax_2D(self):\n d = self.D.copy()\n d.data = d.data[0,:,:]\n r= d.timmax(return_object=False)\n self.assertEqual(self.D.data[0,0,0], r[0,0])\n\n\n\n #~ def test_get_yearmean(self):\n #~ #check get_yeartime\n #~ D = self.D.copy()\n #~ t1 = pl.datestr2num('2001-01-01') + np.arange(4)\n #~ t2 = pl.datestr2num('2005-05-15') + np.arange(4)\n #~ t3 = pl.datestr2num('2010-07-15') + np.arange(4)\n #~ D.time = np.asarray([t1,t2,t3]).flatten()\n #~ D._oldtime = True\n #~ data = pl.rand(len(D.time), 1, 1)\n #~ data[8:, 0, 0] = np.nan\n #~ D.data = np.ma.array(data,mask=np.isnan(data))\n #~ r1 = np.mean(D.data[0:4])\n #~ r2 = np.mean(D.data[4:8])\n #~ r3=np.mean(D.data[8:])\n#~\n #~ years, res = D.get_yearmean()\n#~\n #~ self.assertEqual(years[0],2001)\n #~ self.assertEqual(years[1],2005)\n #~ self.assertEqual(res[0,0,0],r1)\n #~ self.assertEqual(res[1,0,0],r2)\n #~ self.assertEqual(res[2,0,0].mask,r3.mask)\n#~\n #~ R = D.get_yearmean(return_data=True)\n #~ self.assertEqual(R.date[0].year, 2001)\n #~ self.assertEqual(R.date[1].year, 2005)\n #~ self.assertEqual(R.data[0,0,0], r1)\n #~ self.assertEqual(R.data[1,0,0], r2)\n #~ self.assertEqual(R.data[2,0,0].mask, r3.mask)\n\n #years, res = D.get_yearmean()\n\n #~ def test_get_yearsum(self):\n #~ #check get_yeartime\n #~ D = self.D.copy()\n #~ t1 = 
pl.datestr2num('2001-01-01') + np.arange(4) #year 2001\n #~ t2 = pl.datestr2num('2005-05-15') + np.arange(4) #year 2005\n #~ t3 = pl.datestr2num('2010-07-15') + np.arange(4) #year 2010\n #~ D.time = np.asarray([t1,t2,t3]).flatten()\n #~ D._oldtime = True #use old python pylab time definition to be compliant with the test results here\n #~ data = pl.rand(len(D.time), 1, 1)\n #~ data[8:, 0, 0] = np.nan\n #~ D.data = np.ma.array(data,mask=np.isnan(data)) #generate random data\n #~ r1 = np.sum(D.data[0:4])\n #~ r2 = np.sum(D.data[4:8])\n #~ r3 = np.sum(D.data[8:])\n #~ years, res = D.get_yearsum()\n #~ resobj = D.get_yearsum(return_data=True)\n#~\n #~ self.assertEqual(years[0],2001)\n #~ self.assertEqual(resobj.date[0].year,2001)\n#~\n #~ self.assertEqual(years[1],2005)\n #~ self.assertEqual(resobj.date[1].year,2005)\n #~ self.assertEqual(res[0,0,0],r1)\n #~ self.assertEqual(resobj.data[0,0,0],r1)\n #~ self.assertEqual(res[1,0,0],r2)\n #~ self.assertEqual(resobj.data[1,0,0],r2)\n\n #~ R = D.get_yearmean(return_data=True)\n #~ self.assertEqual(R.date[0].year, 2001)\n #~ self.assertEqual(R.date[1].year, 2005)\n #~ self.assertEqual(R.data[0,0,0], r1)\n #~ self.assertEqual(R.data[1,0,0], r2)\n #~ self.assertEqual(R.data[2,0,0].mask, r3.mask)\n\n\n #~ def test_get_yearsum(self):\n #~ #check get_yeartime\n #~ D = self.D.copy()\n #~ t1 = pl.datestr2num('2001-01-01') + np.arange(4) #year 2001\n #~ t2 = pl.datestr2num('2005-05-15') + np.arange(4) #year 2005\n #~ t3 = pl.datestr2num('2010-07-15') + np.arange(4) #year 2010\n #~ D.time = np.asarray([t1,t2,t3]).flatten()\n #~ D._oldtime = True #use old python pylab time definition to be compliant with the test results here\n #~ data = pl.rand(len(D.time), 1, 1)\n #~ data[8:, 0, 0] = np.nan\n #~ D.data = np.ma.array(data,mask=np.isnan(data)) #generate random data\n #~ r1 = np.sum(D.data[0:4])\n #~ r2 = np.sum(D.data[4:8])\n #~ r3 = np.sum(D.data[8:])\n #~ years, res = D.get_yearsum()\n #~ resobj = D.get_yearsum(return_data=True)\n#~\n #~ self.assertEqual(years[0],2001)\n #~ self.assertEqual(resobj.date[0].year,2001)\n#~\n #~ self.assertEqual(years[1],2005)\n #~ self.assertEqual(resobj.date[1].year,2005)\n #~ self.assertEqual(res[0,0,0],r1)\n #~ self.assertEqual(resobj.data[0,0,0],r1)\n #~ self.assertEqual(res[1,0,0],r2)\n #~ self.assertEqual(resobj.data[1,0,0],r2)\n #~ self.assertEqual(res[2,0,0].mask,r3)\n\n\n\n\n\n# def test_diagnostic__get_valid_timeseries(self):\n# #test _get_valid_timeseries() of diagnostic tool\n# D = self.D.copy()\n#\n# S = Diagnostic(D,D)\n# d,m = S._get_valid_timeseries(S.x)\n# print d\n# print m\n# stop\n\n\n def test_weighting_matrix_InvalidType(self):\n d = self.D.copy()\n d.weighting_type = 'invalid_value'\n with self.assertRaises(ValueError):\n d._get_weighting_matrix()\n\n def test_weighting_matrix(self):\n D = self.D.copy() # single pixel\n\n x = np.ones((10,2,1))\n D.data=np.ma.array(x,mask=x == 0.)\n\n # case 1: valid data for all timestep\n D.cell_area = np.ones(D.data[0,:,:].shape)\n D.cell_area[0,0] = 75.; D.cell_area[1,0] = 25. 
#3/4 ; 1/4\n r = D._get_weighting_matrix()\n self.assertFalse(np.any(r[:,0,0] != 0.75))\n self.assertFalse(np.any(r[:,1,0] != 0.25))\n\n # case 2: invalid data for some timesteps\n D.data.mask[0,0,0] = True #mask one data as invalid\n r = D._get_weighting_matrix()\n self.assertFalse(np.any(r[1:,0,0] != 0.75))\n self.assertFalse(np.any(r[1:,1,0] != 0.25))\n self.assertFalse(r[0,1,0] != 1.)\n self.assertFalse(r.mask[0,0,0] == False)\n\n #case 3: invalid data, but normalization for whole area!\n D.weighting_type = 'all'\n r = D._get_weighting_matrix()\n self.assertFalse(r[0,1,0] != 0.25)\n\n def test_adjust_time(self):\n D = self.D.copy()\n #D._oldtime = True #use old time convention to be compliant with test routines here\n D.adjust_time(day=17)\n for i in range(len(D.time)):\n self.assertEqual(D.num2date(D.time[i]).day, 17)\n D.adjust_time(month=10)\n for i in range(len(D.time)):\n self.assertEqual(D.num2date(D.time[i]).month, 10)\n D.adjust_time(year=2025)\n for i in range(len(D.time)):\n self.assertEqual(D.num2date(D.time[i]).year, 2025)\n\n D.adjust_time(hour=0)\n for i in range(len(D.time)):\n self.assertEqual(D.num2date(D.time[i]).hour, 0)\n\n D.adjust_time(hour=22)\n for i in range(len(D.time)):\n self.assertEqual(D.num2date(D.time[i]).hour, 22)\n\n def test_timstat(self):\n \"\"\"\n test temporal statistic functions\n @return:\n \"\"\"\n D = self.D.copy()\n\n me = D.data.mean(axis=0)\n ME = D.timmean(return_object=True)\n self.assertEquals(me[0],ME.data[0])\n\n su = D.data.sum(axis=0)\n SU = D.timsum(return_object=True)\n self.assertEquals(su[0],SU.data[0])\n\n st = D.data.std(axis=0)\n ST = D.timstd(return_object=True)\n self.assertEquals(st[0],ST.data[0])\n\n cv = st/me\n CV = D.timcv(return_object=True)\n self.assertEquals(cv[0],CV.data[0])\n\n cv = st/me\n CV = D.timcv(return_object=False)\n self.assertEquals(cv[0],CV[0])\n\n va = D.data.var(axis=0)\n VA = D.timvar(return_object=True)\n self.assertEquals(va[0],VA.data[0])\n\n mi = D.data.min(axis=0)\n MI = D.timmin(return_object=True)\n self.assertEquals(mi[0],MI.data[0])\n\n ma = D.data.max(axis=0)\n MA = D.timmax(return_object=True)\n self.assertEquals(ma[0],MA.data[0])\n\n\n def test_get_years(self):\n d = self.D.date\n y = self.D._get_years()\n for i in range(self.D.nt):\n self.assertEqual(d[i].year, y[i])\n\n def test_get_months(self):\n d = self.D.date\n y = self.D._get_months()\n for i in range(self.D.nt):\n self.assertEqual(d[i].month, y[i])\n\n\n def test_days_per_month(self):\n ref = {1:[31],2:[28,29],3:31,4:30,5:31,6:30,7:31,8:31,9:30,10:31,11:30,12:31}\n x = self.D.copy()\n days = x._days_per_month()\n for i in range(x.nt):\n d = x.date[i]\n if d.month == 2:\n if d.year % 4 == 0:\n self.assertEqual(days[i],29)\n else:\n self.assertEqual(days[i],28)\n else:\n self.assertEqual(ref[d.month], days[i])\n\n def test_get_dateboundaries(self):\n # check mindate/maxdate functions\n x = self.D.copy()\n ma_date = x.date.max()\n mi_date = x.date.min()\n\n self.assertEqual(x._get_maxdate(), ma_date)\n self.assertEqual(x._get_mindate(), mi_date)\n\n self.assertEqual(x._get_maxdate(base='day').hour, 23)\n self.assertEqual(x._get_maxdate(base='day').minute, 59)\n self.assertEqual(x._get_maxdate(base='day').second, 59)\n\n self.assertEqual(x._get_mindate(base='day').hour, 0)\n self.assertEqual(x._get_mindate(base='day').minute, 0)\n self.assertEqual(x._get_mindate(base='day').second, 0)\n\n self.assertEqual(x._get_maxdate(base='month').hour, 23)\n self.assertEqual(x._get_maxdate(base='month').minute, 59)\n 
self.assertEqual(x._get_maxdate(base='month').second, 59)\n #~ self.assertEqual(x._get_maxdate(base='month').day, 1)\n\n self.assertEqual(x._get_mindate(base='month').hour, 0)\n self.assertEqual(x._get_mindate(base='month').minute, 0)\n self.assertEqual(x._get_mindate(base='month').second, 0)\n self.assertEqual(x._get_mindate(base='month').day, 1)\n\n self.assertEqual(x._get_maxdate(base='year').hour, 23)\n self.assertEqual(x._get_maxdate(base='year').minute, 59)\n self.assertEqual(x._get_maxdate(base='year').second, 59)\n self.assertEqual(x._get_maxdate(base='year').day, 31)\n self.assertEqual(x._get_maxdate(base='year').month, 12)\n\n self.assertEqual(x._get_mindate(base='year').hour, 0)\n self.assertEqual(x._get_mindate(base='year').minute, 0)\n self.assertEqual(x._get_mindate(base='year').second, 0)\n self.assertEqual(x._get_mindate(base='year').day, 1)\n self.assertEqual(x._get_mindate(base='year').month, 1)\n\n\n def test_timsort_InvalidTime(self):\n d = self.D.copy()\n d.time = None\n with self.assertRaises(ValueError):\n d.timsort()\n\n def test_timsort_InvalidGeometry(self):\n d = self.D.copy()\n d.data = np.random.random((2,3,4,5))\n with self.assertRaises(ValueError):\n d.timsort()\n\n def test_timsort(self):\n D=self.D.copy()\n D.adjust_time(day=15)\n\n # generate some sample data\n D.time = pl.datestr2num('2001-05-03') + np.arange(5)\n D.data = D.data[0:5, :, :]\n D.data[:, 0, 0] = np.arange(5)\n D.std = D.data.copy() + 2.2\n\n # reshuffle the data\n t1 = D.time[1] * 1.\n t2 = D.time[3] * 1.\n D.time[3] = t1\n D.time[1] = t2\n\n # save reference solutions before sorting\n y = D.data[:, 0, 0] * 1.\n t = D.time * 1.\n s = np.argsort(t)\n y1 = y[s]\n\n # sort data\n D.timsort(return_object=False)\n R = D.timsort()\n\n # a) check if time is sorted\n self.assertTrue(np.all(np.diff(D.time) > 0))\n self.assertTrue(np.all(np.diff(R.time) > 0))\n\n # b) check if data was sorted also appropriately\n self.assertTrue(np.all(y1-D.data[:, 0, 0]) == 0.)\n self.assertTrue(np.all(y1-R.data[:, 0, 0]) == 0.)\n\n self.assertTrue(np.all(y1+2.2-D.std [:, 0, 0]) == 0.)\n self.assertTrue(np.all(y1+2.2-R.std [:, 0, 0]) == 0.)\n\n\n @unittest.skip('wait for bugfree scipy')\n def test_diff(self):\n D = self.D.copy()\n A=D.copy()\n\n\n x=D.data[:,0,0]\n y=A.data[:,0,0]\n x1=D.data[:,0,1]\n y1=A.data[:,0,1]\n t,p = stats.ttest_ind(x,y,axis=0)\n t1,p1 = stats.ttest_ind(x1,y1,axis=0)\n\n s = A.diff(D, pthres=0.05)\n s1 = D.diff(D, pthres=0.05) # test with the same data\n\n #checks\n self.assertAlmostEqual(s.p_value[0,0], 1.-p, places=8)\n self.assertAlmostEqual(s.p_value[0,1], 1.-p1, places=8)\n if p <= 0.05:\n self.assertEqual(s.p_mask[0,0], True)\n else:\n self.assertEqual(s.p_mask[0,0], False)\n\n #test for same data\n self.assertEqual(s1.p_value[0,0], 0.)\n self.assertEqual(s1.p_value[0,1], 0.)\n\n\n #another test of the t-test, taken from http://web.mst.edu/~psyworld/texample.htm\n x = np.asarray([5.,7.,5.,3.,5.,3.,3.,9.])\n y = np.asarray([8.,1.,4.,6.,6.,4.,1.,2.])\n\n A=self.D.copy(); B=self.D.copy()\n X = np.zeros((len(x),1,1)); Y = np.zeros((len(y),1,1))\n X[:,0,0] = x; Y[:,0,0] = y\n A.data = np.ma.array(X,mask=X!=X); B.data = np.ma.array(Y,mask=Y!=Y)\n\n u = A.diff(B,pthres=0.05)\n self.assertAlmostEqual(u.t_value[0,0],0.847,places=3)\n self.assertEqual(u.data[0,0],1.)\n\n def test_read_FileNotExisting(self):\n d = GeoData(None, None)\n d.filename = 'nothing.nc'\n with self.assertRaises(ValueError):\n d.read(False)\n\n def test_save_InvalidOption(self):\n testfile = self._tmpdir + os.sep + 
'mytestfile.nc'\n\n # invalid mean combination\n with self.assertRaises(ValueError):\n self.D.save(testfile, varname='testvar', format='nc', delete=True, mean=True, timmean=True)\n if os.path.exists(testfile):\n os.remove(testfile)\n\n # invalid format\n with self.assertRaises(ValueError):\n self.D.save(testfile, varname='testvar', format='abc', delete=True)\n if os.path.exists(testfile):\n os.remove(testfile)\n\n\n def test_save_netCDF(self):\n \"\"\"\n test netCDF save routine\n \"\"\"\n testfile = self._tmpdir + os.sep + 'mytestfile.nc'\n self.D.save(testfile, varname='testvar', format='nc', delete=True)\n self.D.save(tempfile.mktemp(suffix='.nc'), varname='testvar', format='nc', delete=True, mean=True)\n\n # read data again\n F = GeoData(testfile, 'testvar', read=True, verbose=False)\n\n self.assertEqual(len(F.time),len(self.D.time))\n self.assertFalse(np.any(self.D.data-F.data) != 0. )\n self.assertFalse(np.any(self.D.time-F.time) != 0. )\n del F\n\n # read data from default, this should then have the same variable name as self.D\n self.D.save(testfile, format='nc', delete=True)\n F = GeoData(testfile, 'testvarname', read=True, verbose=False)\n\n self.assertEqual(len(F.time), len(self.D.time))\n self.assertFalse(np.any(self.D.data-F.data) != 0. )\n\n os.remove(testfile)\n\n def test_interp_time_InvalidMethod(self):\n tref = self.D.num2date(pl.datestr2num('2001-05-05') + np.arange(200)*0.5+0.25)\n with self.assertRaises(ValueError):\n self.D.interp_time(tref, method='invalid_method')\n\n def test_interp_time_InvalidGeometry(self):\n d = self.D.copy()\n d.data = np.random.random((10,20,30,40))\n tref = d.num2date(pl.datestr2num('2001-05-05') + np.arange(200)*0.5+0.25)\n with self.assertRaises(ValueError):\n d.interp_time(tref)\n\n def test_interp_time_TimeNotAscending(self):\n d = self.D.copy()\n d.time = np.random.random(d.nt)\n tref = d.num2date(pl.datestr2num('2001-05-05') + np.arange(200)*0.5+0.25)\n with self.assertRaises(ValueError):\n d.interp_time(tref)\n\n def test_interp_time_TrefNotAscending(self):\n d = self.D.copy()\n tref = d.num2date(np.random.random(200))\n with self.assertRaises(ValueError):\n d.interp_time(tref)\n\n def test_interp_time(self):\n D = self.D.copy()\n import datetime\n\n start_date = datetime.datetime(2001,6,5)\n stop_date = datetime.datetime(2001,7,31)\n D.apply_temporal_subsetting(start_date, stop_date)\n\n #time is from 2001-01-01 for 1000 days as default\n\n #case 1: interpolate to half daily values for a small timeperiod\n tref = D.num2date(pl.datestr2num('2001-07-05') + np.arange(20)*0.5+0.25)\n # 5.July ... 14.July\n #~ print tref\n\n #... interpolate data object for time period specified by tref\n I = D.interp_time(tref)\n\n #... original data\n y = D.data[:, 0, 0]\n #... generate reference solution using numpy\n yy = np.interp(D.date2num(tref), D.time, y)\n\n #... 
optional: plotting (good for validation of test routine)\n if False:\n pl.figure()\n pl.plot(D.date, y, color='blue', label='original data')\n pl.plot(I.date, I.data[:,0,0], color='red', label='interpolated')\n pl.plot(tref, yy, color='green',label='reference interp',linestyle='--')\n pl.legend()\n pl.show()\n\n d = yy - I.data[:, 0, 0]\n self.assertFalse(np.any(np.abs(d[0:-1]) > 1.E-10 ) ) # boundary effects at end of period, therefore last value not used\n\n\n def test_date2num_NoTimeStr(self):\n del self.D.time_str\n t = np.arange(10).astype('float')\n with self.assertRaises(ValueError):\n self.D.date2num(t)\n\n def test_date2num_InvalidTimeStr(self):\n self.D.time_str=None\n t = np.arange(10).astype('float')\n with self.assertRaises(ValueError):\n self.D.date2num(t)\n\n def test_num2date_NoTimeStr(self):\n del self.D.time_str\n t = np.arange(10).astype('float')\n with self.assertRaises(ValueError):\n self.D.num2date(t)\n\n def test_num2date_InvalidTimeStr(self):\n self.D.time_str=None\n t = np.arange(10).astype('float')\n with self.assertRaises(ValueError):\n self.D.num2date(t)\n\n def test_save_ascii(self):\n self.D = GeoData(None, None)\n self.D._init_sample_object(nt=10, ny=1, nx=1)\n self.D._save_ascii(self._tmpdir + os.sep + 'testexport.txt', delete=True)\n self.assertTrue(os.path.exists(self._tmpdir + os.sep + 'testexport.txt'))\n os.remove(self._tmpdir + os.sep + 'testexport.txt')\n\n def test_save_ascii_not_time(self):\n self.D = GeoData(None, None)\n self.D._init_sample_object(nt=10, ny=1, nx=1)\n self.D.time = None\n with self.assertRaises(ValueError):\n self.D._save_ascii(self._tmpdir + os.sep + 'testexport.txt', delete=True)\n\n def test_save_ascii_invalid_geometry(self):\n self.D = GeoData(None, None)\n self.D._init_sample_object(nt=10, ny=1, nx=1)\n self.D.data = np.random.random((self.D.nt,5,6,7))\n with self.assertRaises(ValueError):\n self.D._save_ascii(self._tmpdir + os.sep + 'testexport.txt', delete=True)\n\n def test_arr2string(self):\n\n x = GeoData(None, None)\n x._init_sample_object(nt=3, ny=1, nx=2)\n\n # save string in ASCII file and then reload this\n s = x._arr2string(x.data[1,:,:], prefix='')\n fname = tempfile.mktemp(suffix='.txt')\n F = open(fname, 'w')\n F.write(s)\n F.close()\n d = np.loadtxt(fname, delimiter='\\t')\n\n self.assertEqual(d[0,0], x.lon[0,0])\n self.assertEqual(d[1,0], x.lon[0,1])\n self.assertEqual(d[0,1], x.lat[0,0])\n self.assertEqual(d[1,1], x.lat[0,1])\n\n self.assertAlmostEqual(d[0,2], x.data[1,0,0], 5)\n self.assertAlmostEqual(d[1,2], x.data[1,0,1], 5)\n\n def test_save_ascii_FileExistingAlreadyDelete(self):\n if not os.path.exists(self._tmpdir + os.sep + 'testexport.txt'):\n os.system('touch ' + self._tmpdir + os.sep + 'testexport.txt')\n self.D._save_ascii(self._tmpdir + os.sep + 'testexport.txt', delete=True)\n self.assertTrue(os.path.exists(self._tmpdir + os.sep + 'testexport.txt'))\n os.remove(self._tmpdir + os.sep + 'testexport.txt')\n\n def test_save_ascii_FileExistingAlreadyNoDelete(self):\n if not os.path.exists('testexport.txt'):\n os.system('touch ' + self._tmpdir + os.sep + 'testexport.txt')\n with self.assertRaises(ValueError):\n self.D._save_ascii(self._tmpdir + os.sep + 'testexport.txt', delete=False)\n os.remove(self._tmpdir + os.sep + 'testexport.txt')\n\n def test_div_Default(self):\n D = self.D.copy()\n R = D.div(D)\n self.assertTrue(np.all(R.data == 1.))\n\n def test_div_InvalidGeometry(self):\n D = self.D.copy()\n B = D.copy()\n B.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n R = 
D.div(B)\n\n def test_mul_InvalidGeometry(self):\n D = self.D.copy()\n B = D.copy()\n B.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n R = D.mul(B)\n\n def test_add_InvalidGeometry(self):\n D = self.D.copy()\n B = D.copy()\n B.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n R = D.add(B)\n\n def test_sub_InvalidGeometry(self):\n D = self.D.copy()\n B = D.copy()\n B.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n R = D.sub(B)\n\n def test_mul_CopyFalse(self):\n D = self.D.copy()\n ref = D.data*D.data\n D.mul(D, copy=False)\n self.assertTrue(np.all(ref-D.data < 1.E-6))\n\n def test_mul_CopyTrue(self):\n D = self.D.copy()\n ref = D.data*D.data\n R = D.mul(D)\n self.assertTrue(np.all(ref-R.data < 1.E-6))\n\n def test_divc_Default(self):\n D = self.D.copy()\n R = D.divc(2.)\n d = D.data[:,0,0] *0.5\n self.assertTrue(np.all(d-R.data[:,0,0]) == 0.)\n\n def test_divc_Default_copyFalse(self):\n D = self.D.copy()\n R = D.divc(2., copy=False)\n d = self.D.data[:,0,0] *0.5\n self.assertTrue(np.all(d-R.data[:,0,0]) == 0.)\n\n def test_subc(self):\n D = self.D.copy()\n R = D.subc(10.)\n d = D.data - 10.\n self.assertTrue(np.all(d-R.data) == 0.)\n\n\n\n def test_mulc(self):\n D = self.D.copy()\n R = D.mulc(2.)\n d = D.data[:,0,0] * 2.\n self.assertTrue(np.all(d-R.data[:,0,0]) == 0.)\n\n def test_ConvertMonthlyTimeSeries_RaisesValueErrorForInvalidCalendar(self):\n data_object= self.D\n data_object.calendar = 'nothing_calendar'\n with self.assertRaises(ValueError):\n data_object._convert_monthly_timeseries()\n\n def test_apply_temporal_mask_WithInvalidGeometryForMask(self):\n data_object= self.D\n with self.assertRaises(ValueError):\n data_object._apply_temporal_mask(np.random.random((10,20)))\n\n def test_apply_temporal_mask_WithInvalidMaskTimes(self):\n data_object= self.D\n with self.assertRaises(ValueError):\n data_object._apply_temporal_mask(np.random.random(data_object.nt + 1))\n\n\n def test_getunit_ForEmptyUnit(self):\n d = self.D\n d.unit = None\n self.assertEqual(d._get_unit(), '')\n\n def test_getunit_ForValidUnit(self):\n d = self.D\n d.unit = 'mm/h'\n self.assertEqual(d._get_unit(), '[mm/h]')\n\n\n def test_get_percentile_ForInvalidGeometry(self):\n d = self.D\n d.data = np.random.random((10, 20))\n with self.assertRaises(ValueError):\n d.get_percentile(0.5, return_object=True)\n\n def test_getmindate_ForInvalidBase(self):\n data_object= self.D\n with self.assertRaises(ValueError):\n data_object._get_mindate(base='something')\n\n def test_getmaxdate_ForInvalidBase(self):\n data_object= self.D\n with self.assertRaises(ValueError):\n data_object._get_maxdate(base='something')\n\n def test_partial_correlation(self):\n x = self.D\n nt,ny,nx = x.data.shape\n y = x.copy(); y.data = y.data + pl.rand(nt,ny,nx)*1000.\n z = x.copy(); z.data = z.data * pl.rand(nt,ny,nx)*100.\n\n res = x.partial_correlation(y, z)\n resarr = x.partial_correlation(y, z, return_object=False)\n res1 = x.partial_correlation(y, z, ZY=z) # test with second condition\n\n #generate reference solution\n slope, intercept, rxy, p_value, std_err = stats.linregress(x.data[:,0,0],y.data[:,0,0])\n slope, intercept, rxz, p_value, std_err = stats.linregress(x.data[:,0,0],z.data[:,0,0])\n slope, intercept, rzy, p_value, std_err = stats.linregress(z.data[:,0,0],y.data[:,0,0])\n\n ref = (rxy - rxz*rzy) / (np.sqrt(1.-rxz*rxz)*np.sqrt(1.-rzy*rzy))\n\n self.assertAlmostEqual(ref,res.data[0, 0], places=5)\n self.assertAlmostEqual(ref,resarr[0, 0], 
places=5)\n self.assertAlmostEqual(ref,res1.data[0, 0], places=5)\n self.assertAlmostEqual(ref,resarr[0, 0], places=5)\n\n def test_equal_lon(self):\n D=self.D\n\n #1) not equal longitudes\n D.lon = pl.rand(100,200)\n self.assertFalse(D._equal_lon())\n\n #2) equal longitudes\n x=np.arange(100)\n D.lon = np.zeros((2,100))\n D.lon[0,:] = x\n D.lon[1,:] = x\n self.assertTrue(D._equal_lon())\n\n def test_equal_lon_1D(self):\n D = self.D.copy()\n D.lon=np.arange(10)\n self.assertTrue(D._equal_lon())\n\n def test_equal_lon_InvalidGeometry(self):\n D = self.D.copy()\n D.lon=np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n self.assertTrue(D._equal_lon())\n\n def test__get_unique_lon_1D(self):\n D = self.D.copy()\n D.lon=np.arange(10).astype('float')\n r = D._get_unique_lon()\n\n print(r - D.lon)\n self.assertTrue(np.all( r - D.lon == 0.))\n\n def test__get_unique_lon(self):\n D = self.D.copy()\n # equal longitudes\n x=np.arange(100)\n D.lon = np.zeros((2,100))\n D.lon[0,:] = x\n D.lon[1,:] = x\n\n r = D._get_unique_lon()\n self.assertTrue(np.all((x-r) == 0.))\n\n def test_get_unique_lon_Invalid(self):\n D = self.D.copy()\n D.lon = None\n with self.assertRaises(ValueError):\n r = D._get_unique_lon()\n\n def test_get_unique_lon_InvalidDimension(self):\n D = self.D.copy()\n D.lon = np.random.random((10,20,30))\n with self.assertRaises(ValueError):\n r = D._get_unique_lon()\n\n def test_get_unique_lon_InvalidLons(self):\n D = self.D.copy()\n D.lon = np.random.random((10,20))\n with self.assertRaises(ValueError):\n r = D._get_unique_lon()\n\n\n\n def generate_tuple(self,n=None,mask=True):\n #generate perturbed tuple of data\n x = self.D.copy(); y = self.D.copy()\n nt,ny,nx = x.data.shape\n z = pl.randn(nt,ny,nx)\n y.data = y.data*z\n if mask:\n y.data = np.ma.array(y.data,mask=z>0.5) #mask some data so we have data with different masks\n else:\n y.data = np.ma.array(y.data,mask=y.data != y.data) #mask some data so we have data with different masks\n\n if n != None:\n if n < len(x.data)-1:\n x._temporal_subsetting(0, n)\n y._temporal_subsetting(0, n)\n\n return x,y\n\n def test_corr_single(self):\n x = self.D.copy()\n y = x.mulc(2.) 
#data[:, 0, 0].copy()*2.\n y = y.data[:,0,0]\n y += np.random.random(len(y))\n\n #--- pearson\n slope, intercept, r, prob, sterrest = stats.mstats.linregress(y, x.data[:,0,0])\n Rout, Sout, Iout, Pout, Cout = x.corr_single(y)\n\n self.assertAlmostEqual(r,Rout.data[0,0], 8)\n self.assertAlmostEqual(slope,Sout.data[0,0],8)\n self.assertAlmostEqual(intercept,Iout.data[0,0], 8)\n self.assertAlmostEqual(prob,Pout.data[0,0], 8)\n\n #--- spearman\n y = x.data[:,0,0].copy()*5.\n y += np.random.random(len(y))*3.\n\n rho, prob = stats.mstats.spearmanr(y, x.data[:,0,0])\n #~ Rout, Sout, Iout, Pout, Cout = x.corr_single(y, method='spearman')\n #~ self.assertAlmostEqual(r, Rout.data[0,0], 5) # todo activate tests again!\n #~ self.assertAlmostEqual(prob, Pout.data[0,0], 8)\n\n def test_corr_single_InvalidGeometry(self):\n x = self.D.copy()\n y = x.data[:,0,0].copy()*2.\n y += np.random.random(len(y))\n x.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n x.corr_single(y)\n\n def test_corr_single_InvalidMethod(self):\n x = self.D.copy()\n y = x.data[:,0,0].copy()*2.\n y += np.random.random(len(y))\n with self.assertRaises(ValueError):\n x.corr_single(y, method='some_funky_method')\n\n @unittest.skip('wait for bugfree scipy')\n def test_correlate(self):\n for n in [None,100,10,5]: # different size\n x,y = self.generate_tuple(n=n,mask=True)\n x1=x.data[:, 0, 0]\n y1=y.data[:, 0, 0]\n msk = (x1.mask == False) & (y1.mask == False)\n x2 = x1[msk]\n y2 = y1[msk] # this is only the valid data\n\n #print 'Number of masked pixels: ', sum(y.data.mask), n\n\n ##################################################################\n # PEARSON CORRELATION\n ##################################################################\n slope, intercept, r_value1, p_value1, std_err = stats.mstats.linregress(x1,y1) #masked\n slope, intercept, r_value2, p_value2, std_err = stats.linregress(x2,y2) #not masked\n r,p = x.correlate(y)\n\n #1) test if scipy functions return similar results\n self.assertAlmostEqual(r_value1,r_value2,places=10)\n\n #2) test data.correlate() results\n self.assertAlmostEqual(r.data[0,0],r_value2,places=10) #results from stats.linregress are used, as mstats is BUGGY!!\n self.assertAlmostEqual(p.data[0,0],p_value2,places=10)\n\n\n ##################################################################\n # SPEARMAN RANK CORRELATION\n ##################################################################\n\n # 1) test if scipy functions return similar results for masked/not masked arrays\n r_value1, p_value1 = stats.mstats.spearmanr(x1,y1) #masked\n r_value2, p_value2 = stats.spearmanr(x2,y2) #not masked\n\n self.assertAlmostEqual(r_value1,r_value2,places=10)\n self.assertAlmostEqual(p_value1,p_value2,places=10)\n\n #2) test data.correlate() function\n r,p = x.correlate(y,spearman=True)\n self.assertAlmostEqual(r.data[0,0],r_value1,places=10)\n self.assertAlmostEqual(p.data[0,0],p_value1,places=10)\n self.assertAlmostEqual(r.data[0,0],r_value2,places=10)\n self.assertAlmostEqual(p.data[0,0],p_value2,places=10)\n\n #/// linear detrending of data ///\n x = self.D.copy()\n tmp = np.arange(len(x.time))\n tmp = np.ma.array(tmp, mask = tmp != tmp)\n x.data[:,0,0] = np.ma.array(tmp, mask=tmp!=tmp)\n y = x.copy()\n y.data = y.data * 1.2 + 3.\n #~ y.data = np.ma.array(y.data, mask = y.data != y.data)\n\n r,p = x.correlate(y)\n self.assertAlmostEqual(r.data[0,0], 1., 10)\n\n #--- detrending ---\n r,p = x.correlate(y, detrend=True)\n self.assertEquals(r.data[0,0], 0.)\n\n @unittest.skip('wait for bugfree 
scipy')\n def test_detrend(self):\n x = self.D.copy()\n t = np.arange(len(x.time))\n r = np.random.random(len(x.time))\n y = t * 10.73 + 5.39 + r\n x.data[:,0,0] = np.ma.array(y, mask=y!=y)\n\n # return object\n xd = x.detrend()\n slope, intercept, r_value, p_value, std_err = stats.linregress(t,y)\n ref = y - (slope*t+intercept)\n d = np.abs(1.-xd.data[:,0,0]/ref)\n self.assertTrue(np.all(d < 1.E-10))\n\n # no object\n x = self.D.copy()\n x.data[:,0,0] = np.ma.array(y, mask=y!=y)\n x.detrend(return_object=False)\n slope, intercept, r_value, p_value, std_err = stats.linregress(t,y)\n ref = y - (slope*t+intercept)\n d = np.abs(1.-x.data[:,0,0]/ref)\n self.assertTrue(np.all(d < 1.E-10))\n\n def test_normalize(self):\n x = self.D.copy()\n d = x.data[:, 0, 0].copy()\n\n r = (d - d.mean()) / d.std()\n x.normalize(return_object=False)\n dif = np.abs(1.-x.data[:,0,0]/r)\n self.assertTrue(np.all(dif < 1.E-6))\n\n x = self.D.copy()\n y=x.normalize(return_object=True)\n dif = np.abs(1.-y.data[:, 0, 0]/r)\n self.assertTrue(np.all(dif < 1.E-6))\n\n def test_normalize_InvalidGeometry(self):\n x = self.D.copy()\n x.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n x.normalize(return_object=False)\n\n @unittest.skip('needs revision')\n def test_condstat(self):\n \"\"\"\n conditional statistics unittest\n \"\"\"\n\n #sample data\n D = GeoData(None, None)\n #~ D.data = pl.randn(100,3,1) # some sample data\n D._init_sample_object(nt=100, ny=3, nx=1)\n #~ D.cell_area = np.ones((3,1))\n D.cell_area[0,0] = 2.\n print(D.cell_area)\n D.cell_area[0,1] = 1.\n D.cell_area[0,1] = 3.\n msk = np.asarray([[1,2,3],]).T # sample mask\n\n # calculate conditional statistics\n res = D.condstat(msk)\n\n # test for mask value == 1 (2 pixels)\n rm = 0.5*(D.data[:,0,0] + D.data[:,1,0])\n rs = (D.data[:,0,0] + D.data[:,1,0])\n\n self.assertTrue(np.all((res[1]['mean']-rm) == 0. ))\n self.assertTrue(np.all((res[1]['sum']-rs) == 0. ))\n\n # test for mask value == 3 (1 pixel)\n rm = rs = D.data[:,2,0]\n self.assertTrue(np.all( (res[3]['mean']-rm) == 0. ))\n self.assertTrue(np.all( (res[3]['sum']-rs) == 0. 
))\n\n # now test weighted statistics\n #~ res1 = D.condstat(msk, weight=True)\n #~ rm = (2.*D.data[:,0,0] + 1.*D.data[:,1,0]) / 3.\n\n\n\n def test_condstat_InvalidGeometry(self):\n D = self.D.copy()\n D.data = np.random.random((10,20,30,40))\n msk = np.asarray([[1,1,3],]).T\n with self.assertRaises(ValueError):\n res = D.condstat(msk)\n\n def test_condstat_InvalidGeometryMask(self):\n D = self.D.copy()\n D.data = pl.randn(100,3,1)\n msk = np.asarray([[1,2,4,5],]).T\n with self.assertRaises(ValueError):\n res = D.condstat(msk)\n\n def test_temporal_subsettingInvalidGeometry(self):\n x = self.D.copy()\n x.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n x._temporal_subsetting(2, 5)\n\n def test_temporal_subsettingInvalidIndices(self):\n x = self.D.copy()\n with self.assertRaises(ValueError):\n x._temporal_subsetting(5, 2)\n\n def test_apply_temporal_subsetting(self):\n # checks only if the right time is subsetted\n import datetime\n x = self.D.copy()\n\n start_date = datetime.datetime(2003,3,1)\n stop_date = datetime.datetime(2003,5,28)\n x.apply_temporal_subsetting(start_date, stop_date)\n\n d = x.date\n self.assertEqual(d[0].year,2003)\n self.assertEqual(d[0].month,3)\n self.assertEqual(d[0].day,1)\n self.assertEqual(d[-1].year,2003)\n self.assertEqual(d[-1].month,5)\n self.assertEqual(d[-1].day,28)\n\n def test_apply_temporal_mask(self):\n D=self.D.copy()\n D.data[:,:,:]=1.\n m = np.zeros(len(D.data)).astype('bool')\n m[1] = True; m[5]=True\n D._apply_temporal_mask(m)\n\n def test_apply_temporal_mask_InvalidGeometry(self):\n D=self.D.copy()\n D.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n m = np.zeros(len(D.data)).astype('bool')\n m[1] = True; m[5]=True\n D._apply_temporal_mask(m)\n\n def test_bounding_box(self):\n D = self.D.copy()\n D.data = np.ma.array(pl.rand(10,5,8),mask=np.zeros((10,5,8)).astype('bool'))\n\n #generate some sample data with known bounding box\n D.data.mask[:,:,0] = True\n D.data.mask[:,:,7] = True\n D.data.mask[:,0,:] = True\n D.data.mask[:,4,:] = True\n\n #validate function\n i1,i2,j1,j2 = D.get_bounding_box()\n self.assertEqual(i1,1)\n self.assertEqual(i2,3)\n self.assertEqual(j1,1)\n self.assertEqual(j2,6)\n\n def test_fldmean_InvalidGeometry(self):\n d = self.D.copy()\n d.data = np.random.random((2,3,4,5))\n with self.assertRaises(ValueError):\n d.fldmean()\n\n def test_fldstd_InvalidGeometry(self):\n d = self.D.copy()\n d.data = np.random.random((2,3,4,5))\n with self.assertRaises(ValueError):\n d.fldstd()\n\n def test_fldstd_Invalid_ddof(self):\n x = self.D.copy()\n with self.assertRaises(ValueError):\n x.fldstd(ddof=1)\n with self.assertRaises(ValueError):\n x.fldstd(ddof=10)\n\n @unittest.skip('wait for bug free scipy')\n def test_fldmean(self):\n \"\"\"\n unittest for fldmean() function\n @return:\n \"\"\"\n\n # define testdata\n D = self.D\n x = np.ones((1,3,1))\n for i in [0]:\n x [i,0,0] = 5.\n x [i,1,0] = 10.\n x [i,2,0] = 20.\n D.data = np.ma.array(x,mask=x!=x)\n y = np.ones((3,1))\n y[0,0] = 75.\n y[1,0] = 25.\n y[2,0] = 25.\n D.cell_area = y\n\n D1=D.copy() # 2D version\n xx = np.ones((3,1))\n xx[0,0]=5.\n xx[1,0]=10.\n xx[2,0]=20.\n D1.data = np.ma.array(xx,mask=xx!=xx)\n\n # do test\n r1 = D.fldmean()[0] # with weights\n r1a = D1.fldmean()[0]\n\n self.assertEqual(r1, 9.)\n self.assertEqual(r1a, 9.)\n\n r2 = D.fldmean(apply_weights=False) # without weights\n r2a = D1.fldmean(apply_weights=False)\n self.assertEqual(r2[0],x.mean())\n self.assertEqual(r2a[0],xx.mean())\n\n # 2D case\n 
D=self.D.copy()\n x=np.ones((1,4))\n x[0,1] = 1.\n x[0,2] = 5.\n D.data = np.ma.array(x,mask=x==0.)\n ny,nx = x.shape\n ca = np.ones((ny,nx))\n D.cell_area = np.ma.array(ca,mask=ca < 0.)\n r = D.fldmean()[0]\n self.assertEquals(r, 2.)\n\n # now test against results from CDO\n D.save('tmp_data.nc', delete=True, varname='test')\n cmd = 'cdo -f nc fldmean tmp_data.nc tmp_fldmean.nc'\n os.system(cmd)\n T = GeoData('tmp_fldmean.nc', 'test', read=True)\n self.assertEquals(r, T.data[0,0])\n self.assertEquals(2., T.data[0,0])\n os.remove('tmp_fldmean.nc')\n os.remove('tmp_data.nc')\n\n # testcase where some of data is not valid and different weighting approaches are applied\n D = self.D.copy()\n\n x=np.ones((1,1,4))\n D.data = np.ma.array(x,mask=x==0.)\n nt,ny,nx = x.shape\n ca = np.ones((ny,nx))\n D.cell_area = np.ma.array(ca,mask=ca < 0.)\n\n D.weighting_type='valid'\n r = D.fldmean()[0]\n self.assertEquals(r,1.)\n x[:,0,0] = np.nan\n D.data = np.ma.array(x,mask=np.isnan(x))\n r = D.fldmean()[0]\n self.assertEquals(r, 1.)\n\n #... now check what happens if normalization factor is for ALL pixels and not only the valid ones! --> should give 0.75\n D.weighting_type='all'\n r = D.fldmean()[0]\n self.assertEquals(r, 0.75)\n\n\n def test_fldstd(self):\n #define testdata\n D = self.D\n x = np.ones((1,3,1))\n for i in [0]:\n x [i,0,0] = 5.; x [i,1,0] = 10.; x [i,2,0] = 20.\n D.data = np.ma.array(x,mask=x!=x)\n y = np.ones((3,1))\n y[0,0] = 75.; y[1,0] = 25.; y[2,0] = 25.\n D.cell_area = y\n\n # define testcase described under http://en.wikipedia.org/wiki/Weighted_mean#Weighted_sample_variance\n # For example, if values \\{2, 2, 4, 5, 5, 5\\} are drawn from\n # the same distribution, then we can treat this set as an\n # unweighted sample, or we can treat it as the weighted\n # sample \\{2, 4, 5\\} with corresponding weights\n # \\{2, 1, 3\\}, and we should get the same results\n\n xdat = np.asarray([2., 2., 4., 5., 5., 5.])\n\n ### 2D data ###\n\n # 1) no weighting\n A = self.D.copy()\n x = np.ones((1, len(xdat)))\n x[0,:] = xdat*1.\n y = np.ones_like(x)*3. # cell area dummy\n A.cell_area = y*1.\n A.data = np.ma.array(x, mask=x != x)\n\n # ddof = 0\n r = A.fldstd(apply_weights=False, ddof=0)\n self.assertEqual(r,xdat.std(ddof=0))\n # ddof = 1\n #~ r = A.fldstd(apply_weights=False, ddof=1)\n #~ self.assertEqual(r,xdat.std(ddof=1))\n\n # 2) weighting\n\n # a) same cell size\n r = A.fldstd(apply_weights=True, ddof=0)\n self.assertAlmostEqual(r[0], xdat.std(ddof=0), 10)\n\n #~ r = A.fldstd(apply_weights=True, ddof=1)\n #~ self.assertAlmostEqual(r, xdat.std(ddof=1),10)\n\n # b) different cell size\n refdat = np.asarray([2.,4.,5.])\n x = np.ones((1,3))\n x[0,0] = 2.\n x[0,1] = 4.\n x[0,2] = 5.\n A.data = np.ma.array(x, mask=x != x)\n y = np.ones_like(x)\n y[0,0] = 2. # weight in acordance with the number of\n y[0,1] = 1. # occurences in xdat (se above)\n y[0,2] = 3.\n y = y * 10. 
# scale cell sizes still a bit\n A.cell_area = y*1.\n\n # ddof = 0\n r = A.fldstd(apply_weights=True, ddof=0)\n self.assertAlmostEqual(r, xdat.std(ddof=0), 10)\n\n # ddof = 1\n #~ r = A.fldstd(apply_weights=True, ddof=1)\n #~ self.assertAlmostEqual(r, xdat.std(ddof=1), 10)\n\n\n\n\n ### 3D data ###\n del A\n A = self.D.copy()\n x = np.ones((3,6,1))\n y = np.ones((6,1))\n x[0,:,0] = xdat*1.\n x[1,:,0] = xdat*1.\n x[2,:,0] = xdat*1.\n A.data = np.ma.array(x, mask= x!=x)\n A.cell_area = y*1.\n\n #1) no weighting\n\n # ddof = 0\n r = A.fldstd(apply_weights=False, ddof=0)\n self.assertEqual(r[0],xdat.std(ddof=0))\n self.assertEqual(r[1],xdat.std(ddof=0))\n self.assertEqual(r[2],xdat.std(ddof=0))\n # ddof = 1\n #~ r = A.fldstd(apply_weights=False, ddof=1)\n #~ self.assertEqual(r[0],xdat.std(ddof=1))\n #~ self.assertEqual(r[1],xdat.std(ddof=1))\n #~ self.assertEqual(r[2],xdat.std(ddof=1))\n\n #2) weighting\n\n # a) same size\n r = A.fldstd(apply_weights=True, ddof=0)\n #ddof = 0\n self.assertAlmostEqual(r[0], xdat.std(ddof=0),10)\n self.assertAlmostEqual(r[1], xdat.std(ddof=0),10)\n self.assertAlmostEqual(r[2], xdat.std(ddof=0),10)\n #ddof = 1\n #~ r = A.fldstd(apply_weights=True, ddof=1)\n #~ self.assertAlmostEqual(r[0], xdat.std(ddof=1),10) todo does not work, but not sure it std(ddof=1) is the proper reference!\n #~ self.assertAlmostEqual(r[1], xdat.std(ddof=1),10)\n #~ self.assertAlmostEqual(r[2], xdat.std(ddof=1),10)\n\n\n # b) different cell size\n B = self.D.copy()\n refdat = np.asarray([2.,4.,5.])\n x = np.ones((3,3,1))\n x[0,:,0] = refdat*1.\n x[1,:,0] = refdat*1.\n x[2,:,0] = refdat*1.\n\n y = np.ones((3,1))\n y[0,0] = 2.\n y[1,0] = 1.\n y[2,0] = 3.\n B.data = np.ma.array(x, mask= x!=x)\n B.cell_area = y*1.\n\n # ddof = 0\n r = B.fldstd(apply_weights=True, ddof=0)\n self.assertAlmostEqual(r[0], xdat.std(ddof=0),10)\n self.assertAlmostEqual(r[1], xdat.std(ddof=0),10)\n self.assertAlmostEqual(r[2], xdat.std(ddof=0),10)\n\n #ddof = 1\n #~ r = B.fldstd(apply_weights=True, ddof=1)\n #~ self.assertAlmostEqual(r[0], xdat.std(ddof=1),10) todo does not work, but not sure it std(ddof=1) is the proper reference!\n #~ self.assertAlmostEqual(r[1], xdat.std(ddof=1),10)\n #~ self.assertAlmostEqual(r[2], xdat.std(ddof=1),10)\n\n # now test against results from CDO\n #~ D1.save('tmp_data.nc', delete=True, varname='test')\n #~ cmd = 'cdo -f nc fldstd tmp_data.nc tmp_fldstd.nc'\n #~ os.system(cmd)\n #~ T = Data('tmp_fldstd.nc', 'test', read=True)\n #~ print T.data, r1, r1a, ref\n #~ #stop\n #~ self.assertEquals(r1a, T.data[0,0])\n\n\n def test_areasum(self):\n \"\"\"\n unittest for areasum() function\n \"\"\"\n\n # define testdata\n D = self.D\n x = np.ones((1,3,1))\n for i in [0]:\n x [i,0,0] = 5.\n x [i,1,0] = 10.\n x [i,2,0] = 20.\n D.data = np.ma.array(x, mask=x!=x)\n y = np.ones((3,1))\n y[0,0] = 75.\n y[1,0] = 25.\n y[2,0] = 25. # total area = 125.\n D.cell_area = y\n\n D1=D.copy() # 2D version\n xx = np.ones((3,1))\n xx[0,0]=5.\n xx[1,0]=10.\n xx[2,0]=20.\n D1.data = np.ma.array(xx, mask=xx!=xx)\n\n # do test\n r1 = D .areasum()[0] #result should be 5.*75. + 10.*25. + 20.*25.\n r1a = D1.areasum()[0]\n r1d = D .areasum(return_data=True).data[0]\n r1ad = D1.areasum(return_data=True).data[0]\n\n self.assertEqual(r1, 5.*75. + 10.*25. + 20.*25.)\n self.assertEqual(r1a, 5.*75. + 10.*25. + 20.*25.)\n self.assertEqual(r1d, 5.*75. + 10.*25. + 20.*25.)\n self.assertEqual(r1ad, 5.*75. + 10.*25. 
+ 20.*25.)\n\n r2 = D.areasum(apply_weights=False) #without weights\n r2a = D1.areasum(apply_weights=False)\n self.assertEqual(r2[0], x.sum())\n self.assertEqual(r2a[0], xx.sum())\n\n\n # 2D case\n D=self.D.copy()\n x=np.ones((1,4))\n x[0,1]=1.; x[0,2] = 5.\n D.data = np.ma.array(x,mask=x==0.)\n ny,nx = x.shape\n ca = np.ones((ny,nx))\n D.cell_area = np.ma.array(ca,mask=ca < 0.)\n r = D.areasum()[0]\n self.assertEquals(r,8.)\n\n # testcase where some of data is not valid and different weighting approaches are applied\n D = self.D.copy()\n\n x=np.ones((1,1,4))\n D.data = np.ma.array(x, mask=x==0.)\n nt,ny,nx = x.shape\n ca = np.ones((ny, nx))\n D.cell_area = np.ma.array(ca, mask=ca < 0.)\n\n D.weighting_type='valid'\n r = D.areasum()[0]\n self.assertEquals(r, 4.)\n x[:,0,0] = np.nan\n D.data = np.ma.array(x,mask=np.isnan(x))\n r = D.areasum()[0]\n self.assertEquals(r, 3.)\n\n # ... now check what happens if normalization factor is for ALL pixels and not only the valid ones! --> should give 0.75\n D.weighting_type='all'\n r = D.areasum()[0]\n self.assertEquals(r, 3.)\n\n def test_set_timecycle(self):\n D = self.D\n\n # set some monthly timeseries\n s_start_time = '2003-01-01'\n s_stop_time = '2005-12-31'\n start_time = pl.num2date(pl.datestr2num(s_start_time))\n stop_time = pl.num2date(pl.datestr2num(s_stop_time ))\n tref = rrule(MONTHLY, dtstart = start_time).between(start_time, stop_time, inc=True) #monthly timeseries\n D.time = pl.date2num(tref)\n\n #1) a perfect monthly timeseries\n #check that that timeseries is based on monthly data\n self.assertTrue(D._is_monthly())\n D._set_timecycle()\n self.assertEquals(D.time_cycle,12)\n\n #2) some timeseries that is not monthly\n D.time_cycle=None\n D.time[2]=pl.datestr2num('2010-05-01')\n D._set_timecycle()\n self.assertFalse(D._is_monthly())\n self.assertEquals(D.time_cycle,None)\n\n #3) some timeseries that is has increasing months, but wrong years!\n D.time_cycle=None\n D.time = pl.date2num(tref)\n t = pl.num2date(D.time[2])\n D.time[2]=pl.datestr2num('2010-' + str(t.month).zfill(2) + '-' + str(t.day).zfill(2))\n D._set_timecycle()\n self.assertFalse(D._is_monthly())\n self.assertEquals(D.time_cycle,None)\n\n def test_get_valid_mask_InvalidFrac(self):\n with self.assertRaises(ValueError):\n self.D.get_valid_mask(frac=-0.1)\n with self.assertRaises(ValueError):\n self.D.get_valid_mask(frac=2.)\n\n def test_get_valid_mask_InvalidGeometry(self):\n d = self.D.copy()\n d.data=np.random.random((2,3,4,5))\n with self.assertRaises(ValueError):\n d.get_valid_mask()\n\n def test_get_valid_data_InvalidMode(self):\n with self.assertRaises(ValueError):\n self.D.get_valid_data(mode='nothing')\n\n def test_apply_mask_InvalidGeometry(self):\n d = self.D.copy()\n d.data=np.random.random((2,3,4,5))\n d.data = np.ma.array(d.data, mask=d.data != d.data)\n m=np.ones_like(d.data[0,:,:])\n m=np.ma.array(m, mask=m != m)\n with self.assertRaises(ValueError):\n d._apply_mask(m)\n\n\n def test_get_valid_mask(self):\n D = self.D.copy()\n\n #case 1: 2D data\n x = np.ones((1,2))\n D.data = np.ma.array(x,mask=x == 0)\n m = D.get_valid_mask()\n self.assertTrue(m[0,0]==True)\n self.assertTrue(m[0,1]==True)\n\n #case 2: 3D with all valid data\n D = self.D.copy()\n x = np.ones((50,1,2))\n D.data = np.ma.array(x,mask=x == 0)\n m = D.get_valid_mask()\n self.assertTrue(m[0,0]==True)\n self.assertTrue(m[0,1]==True)\n\n #case 3: some invalid data at one pixel (frac=1=default)\n D = self.D.copy()\n x = np.ones((50,1,2))\n x[0:25,0,0] = 0.\n D.data = np.ma.array(x,mask=x == 0)\n m = 
D.get_valid_mask()\n self.assertTrue(m[0,0]==False)\n self.assertTrue(m[0,1]==True)\n\n #case 4 exactly 50% invalid\n D = self.D.copy()\n x = np.ones((50,1,2))\n x[0:25,0,0] = 0.\n D.data = np.ma.array(x,mask=x == 0)\n m = D.get_valid_mask(frac=0.5)\n self.assertTrue(m[0,0]==True)\n self.assertTrue(m[0,1]==True)\n\n #case 5: <50% valid\n D = self.D.copy()\n x = np.ones((50,1,2))\n x[0:26,0,0] = 0.\n D.data = np.ma.array(x,mask=x == 0)\n m = D.get_valid_mask(frac=0.5)\n self.assertTrue(m[0,0]==False)\n self.assertTrue(m[0,1]==True)\n\n #case 6: 1D data (all valid)\n x = np.ones(100)\n D.data = np.ma.array(x,mask=x == 0)\n m = D.get_valid_mask()\n self.assertTrue(m[0,0]==True)\n\n #case 7: 1D data (51% invalid)\n x = np.ones(100)\n x[0:51] = 0.\n D.data = np.ma.array(x,mask=x == 0)\n m = D.get_valid_mask(frac=0.5)\n self.assertTrue(m[0,0]==False)\n\n #case 7: 1D data (50% invalid)\n x = np.ones(100)\n x[0:50] = 0.\n D.data = np.ma.array(x,mask=x == 0)\n m = D.get_valid_mask(frac=0.5)\n self.assertTrue(m[0,0]==True)\n\n def test_time_conversion(self):\n x = self.D.copy()\n t = x.time\n dref = pl.num2date(t)\n t2=pl.date2num(dref)\n d1 = x.num2date(t) # convert time to datetime object\n t1 = x.date2num(d1) # convert back\n\n d = t-t1\n self.assertTrue(np.all(d == 0.))\n d = t-t2\n self.assertTrue(np.all(d == 0.))\n\n def test_align(self):\n \"\"\"test temporal alignment of two datasets\"\"\"\n x = self.D.copy()\n y = self.D.copy()\n y.subc(2.5, copy=False)\n y._temporal_subsetting(500, 750) # generate a subset dataset\n\n x1, y1 = x.align(y, base='day') # do temporal alignment\n d = x1.sub(y1).divc(2.5).subc(1.) # should give small diff.\n\n # check dates\n self.assertEqual(x1.date[0], y1.date[0])\n self.assertEqual(x1.date[-1], y1.date[-1])\n\n # check that really the same data is used\n self.assertTrue(np.all(np.abs(d.data) < 0.00000001))\n\n #... 
and the other way round\n y1, x1 = y.align(x, base='day')\n d = x1.sub(y1).divc(2.5).subc(1.)\n self.assertEqual(x1.date[0], y1.date[0])\n self.assertEqual(x1.date[-1], y1.date[-1])\n self.assertTrue(np.all(np.abs(d.data) < 0.00000001))\n\n # test monthly with invalid data\n with self.assertRaises(ValueError):\n y1, x1 = y.align(x, base='month')\n\n def test_align_unsorted_X(self):\n x = self.D.copy()\n y = self.D.copy()\n y.subc(2.5, copy=False)\n y._temporal_subsetting(500, 750) # generate a subset dataset\n x.time = np.random.random(x.nt) # generate unsorted time\n with self.assertRaises(ValueError):\n x1, y1 = x.align(y, base='day') # do temporal alignment\n\n def test_align_unsorted_X(self):\n x = self.D.copy()\n y = self.D.copy()\n y.subc(2.5, copy=False)\n y._temporal_subsetting(500, 750) # generate a subset dataset\n y.time = np.random.random(y.nt) # generate unsorted time\n with self.assertRaises(ValueError):\n x1, y1 = x.align(y, base='day') # do temporal alignment\n\n def test_align_InvalidBase(self):\n \"\"\"test temporal alignment of two datasets\"\"\"\n x = self.D.copy()\n y = self.D.copy()\n y.subc(2.5, copy=False)\n y._temporal_subsetting(500, 750) # generate a subset dataset\n with self.assertRaises(ValueError):\n x1, y1 = x.align(y, base='some_invalid_base')\n with self.assertRaises(ValueError):\n x1, y1 = x.align(y, base=None)\n\n\n\n\n def test_is_daily(self):\n x = self.D.copy() # is already daily\n self.assertTrue(x._is_daily())\n\n x = self.D.copy()\n x.time[2] += 4.\n self.assertFalse(x._is_daily())\n\n def test_is_daily_WithOutTime(self):\n x = self.D.copy() # is already daily\n del x.time\n self.assertFalse(x._is_daily())\n\n def test_is_sorted(self):\n x = self.D.copy()\n self.assertTrue(x._is_sorted())\n\n x.time[5] += 10.\n self.assertFalse(x._is_sorted())\n\n def test_days_per_month(self):\n x = self.D.copy()\n x.time[0] = pl.datestr2num('1999-03-15')\n x.time[1] = pl.datestr2num('2000-03-15')\n x.time[2] = pl.datestr2num('2002-09-15')\n x.time[3] = pl.datestr2num('2000-02-15')\n x.time[4] = pl.datestr2num('2003-02-15')\n\n d = x._days_per_month()\n self.assertEqual(d[0], 31)\n self.assertEqual(d[1], 31)\n self.assertEqual(d[2], 30)\n self.assertEqual(d[3], 29)\n self.assertEqual(d[4], 28)\n\n\n def test_temporal_smooth_InvalidGeometry(self):\n d = self.D.copy()\n tmp = np.random.random((2,3))\n d.data = np.ma.array(tmp, mask=tmp != tmp)\n with self.assertRaises(ValueError):\n y3 = d.temporal_smooth(3)\n\n @unittest.skip('skipping at the moment')\n def test_temporal_smooth(self):\n \"\"\"\n test smooth routine\n \"\"\"\n x = self.D.copy()\n\n #--- TEST for 1D data ---\n tmp = np.random.random(1000)\n x.data = np.ma.array(tmp, mask=tmp!=tmp)\n\n\n # windowsize 2\n with self.assertRaises(ValueError):\n xxx = x.temporal_smooth(2)\n\n # windowsize 3\n y3a = x.temporal_smooth(3)\n y3b = x.temporal_smooth(3, return_object=False)\n self.assertEqual(y3a.data[10], y3b[10])\n self.assertAlmostEqual(tmp[10:13].sum()/3., y3a.data[11], 8)\n\n # windowsize 5\n y5a = x.temporal_smooth(5)\n y5b = x.temporal_smooth(5, return_object=False)\n self.assertEqual(y5a.data[20], y5b[20])\n self.assertAlmostEqual(tmp[30:35].sum()/5., y5a.data[32], 8)\n\n #--- TEST FOR 3D data ---\n tmp = np.random.random((100, 2, 3))\n x.data = np.ma.array(tmp, mask=tmp!=tmp)\n y3a = x.temporal_smooth(3)\n y3b = x.temporal_smooth(3, return_object=False)\n self.assertEqual(y3a.data[10,0,0], y3b[10,0,0])\n self.assertAlmostEqual(tmp[10:13,0,0].sum()/3., y3a.data[11,0,0], 8)\n 
self.assertAlmostEqual(tmp[10:13,1,1].sum()/3., y3a.data[11,1,1], 8)\n self.assertAlmostEqual(tmp[10:13,1,0].sum()/3., y3a.data[11,1,0], 8)\n\n\n def test_hp_filter_InvalidLambda(self):\n with self.assertRaises(ValueError):\n self.D.hp_filter(-10., return_object=True)\n\n def test_hp_filter_Invalid2D(self):\n x = self.D.copy()\n x.data = np.random.random((20, 30))\n with self.assertRaises(ValueError):\n x.hp_filter(100, return_object=True)\n\n def test_hp_filter(self):\n # TODO check also validity\n x = self.D.copy()\n x.hp_filter(100, return_object=True)\n\n\n def test_areasum_InvalidGeometry(self):\n x = self.D.copy()\n x.data = np.random.random((10,20,30,40))\n with self.assertRaises(ValueError):\n x.areasum()\n\n def test_get_label_Empty(self):\n d = self.D.copy()\n del d.label\n s = d._get_label()\n self.assertTrue(s == '')\n\n\n def test_shift_lon(self):\n d = self.D.copy()\n\n d.lon = np.asarray([170.])\n d._shift_lon()\n self.assertEqual(d.lon[0], 170.)\n\n d.lon = np.asarray([0.])\n d._shift_lon()\n self.assertEqual(d.lon[0], 0.)\n\n d.lon = np.asarray([180.])\n d._shift_lon()\n self.assertEqual(d.lon[0], -180.)\n\n d.lon = np.asarray([190.])\n d._shift_lon()\n self.assertEqual(d.lon[0], -170.)\n\n d.lon = np.asarray([360.])\n d._shift_lon()\n self.assertEqual(d.lon[0], 0.)\n\n self.assertFalse(d._lon360)\n\n\n def test_shift_lon_360(self):\n d = self.D.copy()\n\n d.lon = np.asarray([170.])\n d._shift_lon_360()\n self.assertEqual(d.lon[0], 170.)\n\n d.lon = np.asarray([0.])\n d._shift_lon_360()\n self.assertEqual(d.lon[0], 0.)\n\n d.lon = np.asarray([-180.])\n d._shift_lon_360()\n self.assertEqual(d.lon[0], 180.)\n\n d.lon = np.asarray([-170.])\n d._shift_lon_360()\n self.assertEqual(d.lon[0], 190.)\n\n self.assertTrue(d._lon360)\n\n def test_convert_time(self):\n d = self.D.copy()\n d.time = np.asarray([20010303., 20231224.5])\n d._convert_time()\n\n self.assertEqual(d.date[0].year, 2001)\n self.assertEqual(d.date[0].month, 3)\n self.assertEqual(d.date[0].day, 3)\n self.assertEqual(d.date[0].hour, 0)\n self.assertEqual(d.date[0].minute, 0)\n self.assertEqual(d.date[0].second, 0)\n\n self.assertEqual(d.date[1].year, 2023)\n self.assertEqual(d.date[1].month, 12)\n self.assertEqual(d.date[1].day, 24)\n self.assertEqual(d.date[1].hour, 12)\n self.assertEqual(d.date[1].minute, 0)\n self.assertEqual(d.date[1].second, 0)\n\n #~ def test_split_time_float(self):\n #~ d = self.D.copy()\n#~\n #~ Y,M,D,h,m,s = d._split_time_float(2.)\n #~ self.assertEqual(Y, 2)\n #~ self.assertEqual(M, 0)\n #~ self.assertEqual(D, 0)\n #~ self.assertEqual(h, 0)\n #~ self.assertEqual(m, 0)\n #~ self.assertEqual(s, 0)\n#~\n #~ Y,M,D,h,m,s = d._split_time_float(4.5)\n #~ self.assertEqual(Y, 4)\n #~ self.assertEqual(M, 6)\n #~ self.assertEqual(D, 0)\n #~ self.assertEqual(h, 0)\n #~ self.assertEqual(m, 0)\n #~ self.assertEqual(s, 0)\n\n\n #~ how to treat leap years ???\n\n\n\n\n\n def test_convert_timeYYYY(self):\n d = self.D.copy()\n\n # no leap year\n d.time = np.asarray([1999., 1999.0 + 23./(365.*24.), 1999.0 + 2./365. + 20./(365.*24.), 1999.0 + 59./365., 1999.+1./(365.*24.) 
]) # 23:00\n d._convert_timeYYYY()\n k = 0\n self.assertEqual(d.date[k].year, 1999)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 1)\n self.assertEqual(d.date[k].hour, 0)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n k = 1\n self.assertEqual(d.date[k].year, 1999)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 1)\n self.assertEqual(d.date[k].hour, 23)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n k = 2\n self.assertEqual(d.date[k].year, 1999)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 3)\n self.assertEqual(d.date[k].hour, 20)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n k = 3\n self.assertEqual(d.date[k].year, 1999)\n self.assertEqual(d.date[k].month, 3)\n self.assertEqual(d.date[k].day, 1)\n self.assertEqual(d.date[k].hour, 0)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n k = 4\n self.assertEqual(d.date[k].year, 1999)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 1)\n self.assertEqual(d.date[k].hour, 1)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n\n\n\n\n\n # no leap year\n d.time = np.asarray([1900., 1900.0 + 23./(365.*24.), 1900.0 + 2./365. + 20./(365.*24.)]) # 23:00\n d._convert_timeYYYY()\n k = 0\n self.assertEqual(d.date[k].year, 1900)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 1)\n self.assertEqual(d.date[k].hour, 0)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n k = 1\n self.assertEqual(d.date[k].year, 1900)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 1)\n self.assertEqual(d.date[k].hour, 23)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n k = 2\n self.assertEqual(d.date[k].year, 1900)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 3)\n self.assertEqual(d.date[k].hour, 20)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n\n\n # special leap year\n d.time = np.asarray([2000., 2000.0 + 23./(366.*24.), 2000.0 + 2./366. + 20./(366.*24.), 2000.0 + 61./366. + 19./(366.*24.) 
]) # 23:00\n d._convert_timeYYYY()\n k = 0\n self.assertEqual(d.date[k].year, 2000)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 1)\n self.assertEqual(d.date[k].hour, 0)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n k = 1\n self.assertEqual(d.date[k].year, 2000)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 1)\n self.assertEqual(d.date[k].hour, 23)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n k = 2\n self.assertEqual(d.date[k].year, 2000)\n self.assertEqual(d.date[k].month, 1)\n self.assertEqual(d.date[k].day, 3)\n self.assertEqual(d.date[k].hour, 20)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n k = 3\n self.assertEqual(d.date[k].year, 2000)\n self.assertEqual(d.date[k].month, 3)\n self.assertEqual(d.date[k].day, 2)\n self.assertEqual(d.date[k].hour, 19)\n self.assertEqual(d.date[k].minute, 0)\n self.assertEqual(d.date[k].second, 0)\n\n\n def test_convert_timeYYYYMM(self):\n d = self.D.copy()\n d.time = np.asarray([20010303., 20231224.5])\n d._convert_timeYYYYMM()\n\n self.assertEqual(d.date[0].year, 2001)\n self.assertEqual(d.date[0].month, 3)\n self.assertEqual(d.date[0].day, 1)\n self.assertEqual(d.date[0].hour, 0)\n self.assertEqual(d.date[0].minute, 0)\n self.assertEqual(d.date[0].second, 0)\n\n self.assertEqual(d.date[1].year, 2023)\n self.assertEqual(d.date[1].month, 12)\n self.assertEqual(d.date[1].day, 1)\n self.assertEqual(d.date[1].hour, 0)\n self.assertEqual(d.date[1].minute, 0)\n self.assertEqual(d.date[1].second, 0)\n\n def test_distance(self):\n # example distance Berlin-Tokio\n # http://de.wikipedia.org/wiki/Orthodrome\n\n lat_berlin = 52.517\n lon_berlin = 13.4\n lat_tokio = 35.70\n lon_tokio = 139.767\n\n d = self.D.copy()\n d.lat = np.asarray([[lat_berlin]])\n d.lon = np.asarray([[lon_berlin]])\n\n r = d.distance(lon_tokio, lat_tokio, earth_radius=6370.)\n self.assertTrue(abs(r[0][0]-8918000.)<1000.)\n\n # test for 2D\n d = self.D.copy()\n d.lat = np.asarray(np.ones((10,20))*lat_berlin)\n d.lon = np.asarray(np.ones((10,20))*lon_berlin)\n\n r = d.distance(lon_tokio, lat_tokio, earth_radius=6370.)\n self.assertTrue(np.all(np.abs(r-8918000.)<1000.))\n\n\n def test_ny_nx(self):\n x = self.D\n self.assertEqual(x.nx, 1)\n self.assertEqual(x.ny, 1)\n\n tmp = np.random.random((4, 5))\n x.data = np.ma.array(tmp, mask=tmp != tmp)\n self.assertEqual(x.nx, 5)\n self.assertEqual(x.ny, 4)\n\n def tests_get_center_pixel(self):\n D = self.D\n y = D.get_center_data(return_object=False)\n z = D.get_center_data(return_object=True)\n self.assertTrue(np.all(D.data[:, 0,0] - y == 0.))\n self.assertTrue(np.all(D.data[:, 0,0] - z.data[:, 0, 0] == 0.))\n\n tmp = np.random.random((4, 5))\n D.data = np.ma.array(tmp, mask=tmp != tmp)\n y = D.get_center_data()\n z = D.get_center_data(return_object=True)\n #~ self.assertTrue(y is None)\n #~ self.assertTrue(z is None)\n\n tmp = np.random.random((17, 23)) # 2D (odd all)\n D.data = np.ma.array(tmp, mask=tmp != tmp)\n y = D.get_center_data(return_object=False)\n z = D.get_center_data(return_object=True)\n self.assertEqual(D.data[8, 11], y)\n self.assertEqual(D.data[8, 11], z.data[0, 0])\n\n tmp = np.random.random((6, 8)) # 2D (equal all)\n D.data = np.ma.array(tmp, mask=tmp != tmp)\n y = D.get_center_data(return_object=False)\n z = D.get_center_data(return_object=True)\n self.assertEqual(D.data[2, 3], y)\n self.assertEqual(D.data[2, 3], z.data[0, 0])\n\n tmp = np.random.random((6, 23)) # 2D 
(equal only in Y)\n D.data = np.ma.array(tmp, mask=tmp != tmp)\n y = D.get_center_data(return_object=False)\n z = D.get_center_data(return_object=True)\n self.assertEqual(D.data[2, 11], y)\n self.assertEqual(D.data[2, 11], z.data[0, 0])\n\n tmp = np.random.random((100, 17, 23)) # 3D (odd all)\n D.data = np.ma.array(tmp, mask=tmp != tmp)\n y = D.get_center_data(return_object=False)\n z = D.get_center_data(return_object=True)\n self.assertTrue(np.all(D.data[:, 8,11]-y == 0.))\n self.assertTrue(np.all(D.data[:, 8,11]-z.data[:,0,0] == 0.))\n self.assertEqual(z.data.shape, (100,1,1))\n self.assertEqual(z.cell_area.shape, (1,1))\n\n def test_get_center_position(self):\n D = self.D\n # 1/1\n tmp = np.random.random((1, 1))\n D.data = np.ma.array(tmp, mask=tmp != tmp)\n i, j = D._get_center_position()\n self.assertEqual(i, 0)\n self.assertEqual(j, 0)\n\n tmp = np.random.random((4, 5))\n D.data = np.ma.array(tmp, mask=tmp != tmp)\n i, j = D._get_center_position()\n self.assertEqual(i, 1)\n self.assertTrue(j, 2)\n\n tmp = np.random.random((17, 23))\n D.data = np.ma.array(tmp, mask=tmp != tmp)\n i, j = D._get_center_position()\n self.assertEqual(i, 8)\n self.assertEqual(j, 11)\n\n tmp = np.random.random((500, 23))\n D.data = np.ma.array(tmp, mask=tmp != tmp)\n i, j = D._get_center_position()\n self.assertEqual(i, 249)\n self.assertEqual(j, 11)\n\n def test_init_sample_object(self):\n x = GeoData(None, None)\n x._init_sample_object(ny=200, nx=100)\n self.assertTrue(x.shape == (200,100))\n\n x._init_sample_object(ny=200, nx=100, nt=373)\n self.assertTrue(x.shape == (373, 200,100))\n\n def test_rasterize_init(self):\n x = GeoData(None, None)\n x._init_sample_object(ny=1, nx=272)\n\n def test_invalid_dimensions_xy(self):\n self.D.data = np.random.random((4,3,2,1))\n with self.assertRaises(ValueError):\n r = self.D.nx\n with self.assertRaises(ValueError):\n r = self.D.ny\n\n\n# TODO would need to implement a test which ensures that the area weights are properly calculated,\n# independent whether the input file format is supported by the CDO's or not.\n\n\n @unittest.skip('some cdo related error needs to be fixed')\n def test_set_cell_area(self):\n x = self.D.copy()\n del x.cell_area\n x._set_cell_area()\n\n def test_mask_region(self):\n rlon = [-20., -20., 50., 50.]\n rlat = [10., 20., 20., 10.]\n reg = RegionPolygon(123, rlon, rlat, label='testreg')\n\n x = GeoData(None, None)\n x._init_sample_object(nt=10, ny=100, nx=50)\n\n mfile = tempfile.mktemp(suffix='.nc')\n\n y = x.copy()\n y.mask_region(reg, return_object=False, method='full', maskfile=None, force=False)\n y1 = y.copy()\n res2 = y.mask_region(reg, return_object=True, method='full', maskfile=None, force=False)\n\n with self.assertRaises(ValueError):\n res3 = y.mask_region(reg, return_object=False, method='full', maskfile='no_valid_filename', force=False)\n\n y = x.copy()\n res4 = y.mask_region(reg, return_object=True, method='full', maskfile=mfile, force=False)\n self.assertTrue(os.path.exists(mfile))\n\n y = x.copy()\n res5 = y.mask_region(reg, return_object=True, method='full', maskfile=mfile, force=False)\n self.assertTrue(os.path.exists(mfile))\n\n y = x.copy()\n res6 = y.mask_region(reg, return_object=True, method='full', maskfile=mfile, force=True)\n self.assertTrue(os.path.exists(mfile))\n\n # now check that results are usefull\n\n #1) right mask value\n msk = GeoData(mfile, 'mask', read=True)\n self.assertTrue(np.all(msk.data == 123.))\n\n #2) results from all options above give the same\n self.assertTrue((y1.data == res2.data).all())\n 
self.assertTrue((res2.data == res4.data).all())\n self.assertTrue((res2.data == res5.data).all())\n self.assertTrue((res2.data == res6.data).all())\n\n #3) the right values have been actually masked\n\n\n def test_get_days_per_month(self):\n\n x = GeoData(None, None)\n x._init_sample_object(nt=36, ny=100, nx=50)\n tref = []\n for i in range(12): # no leap year\n tref.append(datetime.datetime(2001, i+1, 15))\n for i in range(12): # leap year\n tref.append(datetime.datetime(2004, i+1, 15))\n for i in range(12): # special leap year\n tref.append(datetime.datetime(2000, i+1, 15))\n x.time = x.date2num(tref)\n\n # reference days\n dref = [31,28, 31, 30 ,31 ,30 ,31,31,30,31,30,31]\n dref += [31,29, 31, 30 ,31 ,30 ,31,31,30,31,30,31]\n dref += [31,29, 31, 30 ,31 ,30 ,31,31,30,31,30,31]\n\n mlen = x._get_days_per_month()\n for i in range(len(mlen)):\n print(i, len(mlen), len(dref))\n self.assertEqual(mlen[i], dref[i])\n\n def test_mul_tvec(self):\n x = GeoData(None, None)\n x._init_sample_object(nt=10, ny=1, nx=2)\n xref = x.copy()\n\n t = np.arange(10)\n\n with self.assertRaises(ValueError):\n x.mul_tvec(np.arange(3), copy=True)\n with self.assertRaises(ValueError):\n x.mul_tvec(np.random.random((10,20)), copy=True)\n\n x.mul_tvec(t, copy=False)\n y = xref.mul_tvec(t, copy=True)\n for i in range(x.nt):\n self.assertEqual(x.data[i,0,1], xref.data[i,0,1]*t[i])\n self.assertEqual(y.data[i,0,1], xref.data[i,0,1]*t[i])\n\n\n def test_get_area(self):\n x = GeoData(None, None)\n x._init_sample_object(nt=3, ny=2, nx=3)\n\n x.cell_area[0,:] = 1.\n x.cell_area[1,:] = 2.\n\n # mask some data\n M = np.ones((x.ny, x.nx))\n M[1,1] = 2.\n x._apply_mask(M == 1.)\n\n # total area (all pixels)\n A1 = x.get_area(valid=False)\n self.assertEqual(A1, 6.+3.)\n\n # only valid pixels\n A2 = x.get_area()\n self.assertEqual(A2, 6.+3.-2.)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"matplotlib.pylab.date2num",
"numpy.ones_like",
"scipy.stats.linregress",
"scipy.stats.mstats.scoreatpercentile",
"matplotlib.pylab.show",
"numpy.random.random",
"numpy.arange",
"numpy.sqrt",
"scipy.stats.mstats.spearmanr",
"matplotlib.pylab.plot",
"numpy.array",
"numpy.zeros",
"matplotlib.pylab.datestr2num",
"scipy.stats.ttest_ind",
"numpy.diff",
"numpy.loadtxt",
"numpy.argsort",
"matplotlib.pylab.rand",
"scipy.stats.mstats.linregress",
"numpy.isnan",
"numpy.asarray",
"numpy.ones",
"matplotlib.pylab.legend",
"scipy.stats.spearmanr",
"matplotlib.pylab.num2date",
"matplotlib.pylab.figure",
"numpy.any",
"numpy.ma.array",
"matplotlib.pylab.randn",
"numpy.abs",
"numpy.all"
]
] |
Y-F-Acoustics/Python_DSP_Study | [
"2e0232d6875c6109321c8b3a90d3b62ce3fc200a"
] | [
"03_Chapter3/01_Section1/Script3-1-1.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 29 00:44:57 2021\r\n\r\n@author: yuki1\r\n\"\"\"\r\nimport math\r\nimport numpy as np\r\nfrom scipy import signal\r\nimport matplotlib.pyplot as plt\r\n\r\n# Generate the Signal\r\nx = np.array([1, 1, 1, 1, 0, 0, 0, 0])\r\nn = np.arange(0, len(x))\r\n\r\n# Plot the Signal\r\nfig1 = plt.figure()\r\nax1 = fig1.add_subplot(1, 1, 1)\r\nax1.stem(n, x, use_line_collection=True, basefmt = \" \")\r\nax1.set_xlim(0, len(x))\r\nax1.set_ylim(np.min(x), np.max(x))\r\nax1.grid()\r\nax1.set_xlabel('Time $n$')\r\nax1.set_ylabel('$x[n]$')\r\n\r\n\r\n# Discrete Time Fourier Transform (DTFT)\r\nw = np.linspace(-np.pi, np.pi, 1024, endpoint=False)\r\n_, Xejw = signal.freqz(x, 1, w)\r\nfig2 = plt.figure()\r\nax2 = fig2.add_subplot(1, 1, 1)\r\nmaxX = np.max(np.abs(Xejw))\r\nax2.plot(w, np.abs(Xejw))\r\nax2.set_xlim(-np.pi, np.pi)\r\nax2.set_ylim(0, maxX)\r\nax2.grid()\r\nax2.set_xlabel('Frequency $\\omega$ [rad]')\r\nax2.set_ylabel('$|X(e^{j\\omega})|$')\r\n\r\n\r\n# Fast Fourier Transform (FFT)\r\nk = n\r\nprint('k = \\n', k)\r\nX = np.fft.fft(x)\r\nprint('X = \\n', X)\r\nmagX = np.abs(X)\r\nprint('magX = \\n', magX)\r\nfig3 = plt.figure()\r\nax3 = fig3.add_subplot(1, 1, 1)\r\nax3.stem(k, magX, use_line_collection=True, basefmt = \" \")\r\nax3.set_xlim(0, len(k))\r\nax3.set_ylim(0, maxX)\r\nax3.grid()\r\nax3.set_xlabel('Frequency $k$')\r\nax3.set_ylabel('$|X[k]|$')\r\n\r\n# Shift Show\r\nkshift = k - math.floor(len(k) / 2)\r\nXshift = np.fft.fftshift(X)\r\nmagXshift = np.abs(Xshift)\r\nfig4 = plt.figure()\r\nax4 = fig4.add_subplot(1, 1, 1)\r\nax4.stem(kshift, magXshift, use_line_collection=True, basefmt = \" \")\r\nax4.set_xlim(-len(k)/2, len(k)/2)\r\nax4.set_ylim(0, maxX)\r\nax4.grid()\r\nax4.set_xlabel('Frequency $k$')\r\nax4.set_ylabel('$|X[k]|$')\r\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.fft.fftshift",
"numpy.min",
"matplotlib.pyplot.figure",
"numpy.fft.fft",
"numpy.abs",
"scipy.signal.freqz",
"numpy.linspace"
]
] |
rjhanes/biosteam | [
"ee345ac0b14ce4de9b38ac5a467588d2f854da71"
] | [
"build/lib/biosteam/units/_multi_effect_evaporator.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 23 21:43:13 2018\n\n@author: yoelr\n\"\"\"\nimport numpy as np\nimport biosteam as bst\nfrom .. import Unit, Stream\nfrom scipy.optimize import brentq\nfrom . import Mixer, HXutility\nfrom ._flash import Evaporator_PV, Evaporator_PQ\nfrom .designtools import vacuum_system\nfrom warnings import warn\nimport ht\nlog = np.log\nexp = np.exp\n\n# Table 22.32 Product process and design (pg 592)\n# Name: ('Area range (m2)', 'Cost(A) (USD)', 'U (kJ/(hr*m2*K)))', 'Material')\nevaporators = {'Horizontal tube': \n ((9.29, 743.224),\n lambda A, CE: CE*2.304*A**0.53,\n 4906.02,\n 'Carbon steel'),\n 'Long-tube vertical':\n ((9.29, 743.224),\n lambda A, CE: CE*3.086*A**0.55,\n 8176.699,\n 'Carbon steel'),\n 'Forced circulation': \n ((13.935, 8000),\n lambda A, CE: CE/500*exp(8.2986 + 0.5329*log(A*0.0929)-0.000196*log(A*0.0929)**2),\n 10731.918,\n 'Carbon steel'),\n 'Falling film': \n ((13.935, 371.612),\n lambda A, CE: CE*7.416*A**0.55,\n 10220.874,\n 'Stainless steel tubes/Carbon steel shell')}\n\n\nclass MultiEffectEvaporator(Unit):\n \"\"\"Creates evaporatorators with pressures given by P (a list of pressures). Adjusts first evaporator vapor fraction to satisfy an overall fraction evaporated. All evaporators after the first have zero duty. Condenses the vapor coming out of the last evaporator. Pumps all liquid streams to prevent back flow in later parts. All liquid evaporated is ultimately recondensed. Cost is based on required heat transfer area. Vacuum system is based on air leakage. Air leakage is based on volume, as given by residence time `tau` and flow rate to each evaporator.\n\n **Parameters**\n\n **component:** *[str]* Component being evaporated\n \n **P:** *[tuple]* Pressures describing each evaporator (Pa)\n \n **V:** *[float]* Overall molar fraction of component evaporated\n \n **P_liq:** *[tuple]* Liquid pressure after pumping (Pa)\n \n \"\"\"\n _units = {'Area': 'm^2',\n 'Volume': 'm^3'}\n _has_power_utility = True\n _N_heat_utilities = 2\n BM = 2.45\n line = 'Multi-Effect Evaporator'\n\n #: Residence time (hr)\n tau = 0.30\n\n # Evaporator type\n _Type = 'Forced circulation'\n \n # Data for simmulation and costing\n _evap_data = evaporators[_Type]\n\n @property\n def Type(self):\n \"\"\"Evaporation type.\"\"\"\n return self._Type\n @Type.setter\n def Type(self, evap_type):\n try:\n self._evap_data = evaporators[evap_type]\n except KeyError:\n dummy = str(evaporators.keys())[11:-2]\n raise ValueError(f\"Type must be one of the following: {dummy}\")\n self._Type = evap_type\n\n def __init__(self, ID='', ins=None, outs=(), *,\n component='Water', P, V):\n Unit.__init__(self, ID, ins, outs)\n # Unpack\n out_wt_solids, liq = self.outs\n \n # Create components\n self._N_evap = n = len(P) # Number of evaporators\n Stream.species = liq.species\n evap0 = Evaporator_PV(None, outs=(None, None),\n component=component, P=P[0])\n \n evaporators = [evap0]\n for i in range(1, n):\n evap = Evaporator_PQ(None,\n outs=(None, None, None),\n component=component, P=P[i], Q=0)\n evaporators.append(evap)\n condenser = HXutility(None, outs=Stream(None), V=0)\n evap0._heat_utilities[0], condenser._heat_utilities[0] = self._heat_utilities\n mixer = Mixer(None, outs=Stream(None))\n \n def V_error(v1):\n # Run first evaporator\n v_test = v1\n evap0.V = v1\n evap0._run()\n # Put liquid first, then vapor side stream\n ins = [evap0.outs[1], evap0.outs[0]]\n for i in range(1, n):\n evap = evaporators[i]\n evap._ins[:] = ins\n evap._run()\n v_test += (1-v_test) * 
evap._V\n # Put liquid first, then vapor side stream\n ins = [evap.outs[1], evap.outs[0]]\n return V - v_test\n self._V_error = V_error\n self.components = {'evaporators': evaporators,\n 'condenser': condenser,\n 'mixer': mixer}\n \n\n def _run(self):\n out_wt_solids, liq = self.outs\n ins = self.ins\n\n n = self._N_evap # Number of evaporators\n\n # Set-up components\n components = self.components\n evaporators = components['evaporators']\n evaporators[0].ins[:] = [Stream.like(i, None) for i in ins]\n condenser = components['condenser']\n mixer = components['mixer']\n brentq(self._V_error, 0.0001, 0.9909, xtol=0.0001)\n \n # Condensing vapor from last effector\n \n outs_vap = evaporators[-1].outs[0]\n condenser.ins[:] = [outs_vap]\n condenser._run()\n outs_liq = [condenser.outs[0]] # list containing all output liquids\n\n # Unpack other output streams\n out_wt_solids.copylike(evaporators[-1].outs[1])\n for i in range(1, n):\n evap = evaporators[i]\n outs_liq.append(evap.outs[2])\n\n # Mix liquid streams\n mixer.ins[:] = outs_liq\n mixer._run()\n liq.copylike(mixer.outs[0])\n \n def _design(self):\n # This functions also finds the cost\n A_range, C_func, U, _ = self._evap_data\n components = self.components\n evaporators = components['evaporators']\n Design = self._Design\n Cost = self._Cost\n CE = bst.CE\n \n evap0 = evaporators[0]\n hu = evap0._heat_utilities[0]\n duty = evap0._H_out - evap0._H_in\n hu(duty, evap0.ins[0].T, evap0.outs[0].T)\n Q = abs(duty)\n Tci = evap0.ins[0].T\n Tco = evap0.outs[0].T\n Th = evap0._heat_utilities[0]._fresh.T\n LMTD = ht.LMTD(Th, Th, Tci, Tco)\n ft = 1\n A = HXutility._calc_area(LMTD, U, Q, ft)\n self._evap_costs = evap_costs = [C_func(A, CE)]\n \n # Find condenser requirements\n condenser = components['condenser']\n condenser._design()\n condenser._cost()\n Cost['Condenser'] = condenser._Cost['Heat exchanger']\n \n # Find area and cost of evaporators\n As = [A]\n for evap in evaporators[1:]:\n Q = evap._Q\n Tc = evap.outs[0].T\n Th = evap.outs[2].T\n LMTD = Th - Tc\n A = HXutility._calc_area(LMTD, U, Q, ft)\n As.append(A)\n if not A_range[0] < A < A_range[1]:\n warn('area requirement ({A}) is out of range, {A_range}')\n evap_costs.append(C_func(A, CE))\n self._As = As\n Design['Area'] = A = sum(As)\n Design['Volume'] = vol = self._N_evap * self.tau * self.ins[0].volnet\n Cost['Evaporators'] = sum(evap_costs)\n \n # Calculate power\n power, cost = vacuum_system(massflow=0, volflow=0,\n P_suction=evap.outs[0].P, vol=vol,\n vacuum_system_preference='Liquid-ring pump')\n Cost['Vacuum liquid-ring pump'] = cost\n self._power_utility(power)\n \n \n"
] | [
[
"scipy.optimize.brentq"
]
] |
vzhuang/rlpyt | [
"3abf873e4bae0cd9cccbb9e5c9cc4c875710f6b6"
] | [
"rlpyt/distributions/gaussian.py"
] | [
"\nimport torch\nimport math\n\nfrom rlpyt.distributions.base import Distribution\nfrom rlpyt.utils.collections import namedarraytuple\nfrom rlpyt.utils.tensor import valid_mean\n\nEPS = 1e-8\n\nDistInfo = namedarraytuple(\"DistInfo\", [\"mean\"])\nDistInfoStd = namedarraytuple(\"DistInfoStd\", [\"mean\", \"log_std\"])\n\n\nclass Gaussian(Distribution):\n \"\"\"Multivariate Gaussian with independent variables (diagonal covariance).\n Standard deviation can be provided, as scalar or value per dimension, or it\n will be drawn from the dist_info (possibly learnable), where it is expected\n to have a value per each dimension.\n Noise clipping or sample clipping optional during sampling, but not\n accounted for in formulas (e.g. entropy).\n Clipping of standard deviation optional and accounted in formulas.\n Squashing of samples to squash * tanh(sample) is optional and accounted for\n in log_likelihood formula but not entropy.\n \"\"\"\n\n def __init__(\n self,\n dim,\n std=None,\n clip=None,\n noise_clip=None,\n min_std=None,\n max_std=None,\n squash=None, # None or > 0\n ):\n \"\"\"Saves input arguments.\"\"\"\n self._dim = dim\n self.set_std(std)\n self.clip = clip\n self.noise_clip = noise_clip\n self.min_std = min_std\n self.max_std = max_std\n self.min_log_std = math.log(min_std) if min_std is not None else None\n self.max_log_std = math.log(max_std) if max_std is not None else None\n self.squash = squash\n assert (clip is None or squash is None), \"Choose one.\"\n\n @property\n def dim(self):\n return self._dim\n\n def kl(self, old_dist_info, new_dist_info):\n if self.squash is not None:\n raise NotImplementedError\n old_mean = old_dist_info.mean\n new_mean = new_dist_info.mean\n # Formula: {[(m1 - m2)^2 + (s1^2 - s2^2)] / (2*s2^2)} + ln(s1/s2)\n num = (old_mean - new_mean) ** 2\n if self.std is None:\n old_log_std = old_dist_info.log_std\n new_log_std = new_dist_info.log_std\n if self.min_std is not None or self.max_std is not None:\n old_log_std = torch.clamp(old_log_std, min=self.min_log_std,\n max=self.max_log_std)\n new_log_std = torch.clamp(new_log_std, min=self.min_log_std,\n max=self.max_log_std)\n old_std = torch.exp(old_log_std)\n new_std = torch.exp(new_log_std)\n num += old_std ** 2 - new_std ** 2\n den = 2 * new_std ** 2 + EPS\n vals = num / den + new_log_std - old_log_std\n else:\n den = 2 * self.std ** 2 + EPS\n vals = num / den\n return torch.sum(vals, dim=-1)\n\n def mean_kl(self, old_dist_info, new_dist_info, valid=None):\n return valid_mean(self.kl(old_dist_info, new_dist_info), valid)\n\n def entropy(self, dist_info):\n \"\"\"Uses ``self.std`` unless that is None, then will get log_std from dist_info. 
Not\n implemented for squashing.\n \"\"\"\n if self.squash is not None:\n raise NotImplementedError\n if self.std is None:\n log_std = dist_info.log_std\n if self.min_log_std is not None or self.max_log_std is not None:\n log_std = torch.clamp(log_std, min=self.min_log_std,\n max=self.max_log_std)\n else:\n # shape = dist_info.mean.shape[:-1]\n # log_std = torch.log(self.std).repeat(*shape, 1)\n log_std = torch.log(self.std) # Shape broadcast in following formula.\n return torch.sum(log_std + math.log(math.sqrt(2 * math.pi * math.e)),\n dim=-1)\n\n def perplexity(self, dist_info):\n return torch.exp(self.entropy(dist_info))\n\n def mean_entropy(self, dist_info, valid=None):\n return valid_mean(self.entropy(dist_info), valid)\n\n def mean_perplexity(self, dist_info, valid=None):\n return valid_mean(self.perplexity(dist_info), valid)\n\n def log_likelihood(self, x, dist_info):\n \"\"\"\n Uses ``self.std`` unless that is None, then uses log_std from dist_info.\n When squashing: instead of numerically risky arctanh, assume param\n 'x' is pre-squash action, see ``sample_loglikelihood()`` below.\n \"\"\"\n mean = dist_info.mean\n if self.std is None:\n log_std = dist_info.log_std\n if self.min_log_std is not None or self.max_log_std is not None:\n log_std = torch.clamp(log_std, min=self.min_log_std,\n max=self.max_log_std)\n std = torch.exp(log_std)\n else:\n std, log_std = self.std, torch.log(self.std)\n # When squashing: instead of numerically risky arctanh, assume param\n # 'x' is pre-squash action, see sample_loglikelihood() below.\n # if self.squash is not None:\n # x = torch.atanh(x / self.squash) # No torch implementation.\n z = (x - mean) / (std + EPS)\n logli = -(torch.sum(log_std + 0.5 * z ** 2, dim=-1) +\n 0.5 * self.dim * math.log(2 * math.pi))\n if self.squash is not None:\n logli -= torch.sum(\n torch.log(self.squash * (1 - torch.tanh(x) ** 2) + EPS),\n dim=-1)\n return logli\n\n def likelihood_ratio(self, x, old_dist_info, new_dist_info):\n logli_old = self.log_likelihood(x, old_dist_info)\n logli_new = self.log_likelihood(x, new_dist_info)\n return torch.exp(logli_new - logli_old)\n\n def sample_loglikelihood(self, dist_info):\n \"\"\"\n Special method for use with SAC algorithm, which returns a new sampled \n action and its log-likelihood for training use. 
Temporarily turns OFF\n squashing, so that log_likelihood can be computed on non-squashed sample,\n and then restores squashing and applies it to the sample before output.\n \"\"\"\n squash = self.squash\n self.squash = None # Temporarily turn OFF, raw sample into log_likelihood.\n sample = self.sample(dist_info)\n self.squash = squash # Turn it back ON, squash correction in log_likelihood.\n logli = self.log_likelihood(sample, dist_info)\n if squash is not None:\n sample = squash * torch.tanh(sample)\n return sample, logli\n\n # def sample_loglikelihood(self, dist_info):\n # \"\"\"Use in SAC with squash correction, since log_likelihood() expects raw_action.\"\"\"\n # mean = dist_info.mean\n # log_std = dist_info.log_std\n # if self.min_log_std is not None or self.max_log_std is not None:\n # log_std = torch.clamp(log_std, min=self.min_log_std,\n # max=self.max_log_std)\n # std = torch.exp(log_std)\n # normal = torch.distributions.Normal(mean, std)\n # sample = normal.rsample()\n # logli = normal.log_prob(sample)\n # if self.squash is not None:\n # sample = self.squash * torch.tanh(sample)\n # logli -= torch.sum(\n # torch.log(self.squash * (1 - torch.tanh(sample) ** 2) + EPS),\n # dim=-1)\n # return sample, logli\n\n\n # squash = self.squash\n # self.squash = None # Temporarily turn OFF.\n # sample = self.sample(dist_info)\n # self.squash = squash # Turn it back ON, raw_sample into squash correction.\n # logli = self.log_likelihood(sample, dist_info)\n # if squash is not None:\n # sample = squash * torch.tanh(sample)\n # return sample, logli\n\n def sample(self, dist_info):\n \"\"\"\n Generate random samples using ``torch.normal``, from\n ``dist_info.mean``. Uses ``self.std`` unless it is ``None``, then uses\n ``dist_info.log_std``.\n \"\"\"\n mean = dist_info.mean\n if self.std is None:\n log_std = dist_info.log_std\n if self.min_log_std is not None or self.max_log_std is not None:\n log_std = torch.clamp(log_std, min=self.min_log_std,\n max=self.max_log_std)\n std = torch.exp(log_std)\n else:\n # shape = mean.shape[:-1]\n # std = self.std.repeat(*shape, 1).to(mean.device)\n std = self.std.to(mean.device)\n # For reparameterization trick: mean + std * N(0, 1)\n # (Also this gets noise on same device as mean.)\n noise = std * torch.normal(torch.zeros_like(mean), torch.ones_like(mean))\n # noise = torch.normal(mean=0, std=std)\n if self.noise_clip is not None:\n noise = torch.clamp(noise, -self.noise_clip, self.noise_clip)\n sample = mean + noise\n # Other way to do reparameterization trick:\n # dist = torch.distributions.Normal(mean, std)\n # sample = dist.rsample()\n if self.clip is not None:\n sample = torch.clamp(sample, -self.clip, self.clip)\n elif self.squash is not None:\n sample = self.squash * torch.tanh(sample)\n return sample\n\n def set_clip(self, clip):\n \"\"\"Input value or ``None`` to turn OFF.\"\"\"\n self.clip = clip # Can be None.\n assert self.clip is None or self.squash is None\n\n def set_squash(self, squash):\n \"\"\"Input multiplicative factor for ``squash * tanh(sample)`` (usually\n will be 1), or ``None`` to turn OFF.\"\"\"\n self.squash = squash # Can be None.\n assert self.clip is None or self.squash is None\n\n def set_noise_clip(self, noise_clip):\n \"\"\"Input value or ``None`` to turn OFF.\"\"\"\n self.noise_clip = noise_clip # Can be None.\n\n def set_std(self, std):\n \"\"\"\n Input value, which can be same shape as action space, or else broadcastable\n up to that shape, or ``None`` to turn OFF and use ``dist_info.log_std`` in\n other methods.\n \"\"\"\n 
if std is not None:\n if not isinstance(std, torch.Tensor):\n std = torch.tensor(std).float() # Can be size == 1 or dim.\n # Used to have, but shape of std should broadcast everywhere needed:\n # if std.numel() == 1:\n # std = std * torch.ones(self.dim).float() # Make it size dim.\n assert std.numel() in (self.dim, 1)\n self.std = std\n"
] | [
[
"torch.clamp",
"torch.tensor",
"torch.ones_like",
"torch.zeros_like",
"torch.tanh",
"torch.log",
"torch.exp",
"torch.sum"
]
] |
redjerdai/lazy_graph | [
"a6d32a235bc68728077971224d975bcea3f88c3a"
] | [
"graph_skeleton.py"
] | [
"# Lazy Miner v.0.1.6\n# @author: redjerdai\n# TODO: Add graphviz configuration options [10]\nimport numpy\nimport pandas\n\nimport os\nos.environ[\"PATH\"] += os.pathsep + 'E:\\\\RAMP-EXTERNAL\\\\IP-02\\\\OSTRTA\\\\graphviz-2.38\\\\release\\\\bin'\nfrom graphviz import Digraph\n\n# TODO: make nodes and edges class entities [11]\n# TODO: remove router; it should be gained automatically from weights / we should compare with zeros [13]\n# TODO: check input data [14]\n\n\nclass GraphSkeleton:\n\n def __init__(self, configuration, nodes_back_colour_base='#ffffff'):\n\n self.nodes = Nodes(back_colour_base=nodes_back_colour_base)\n self.edges = Edges()\n self.configuration = configuration\n\n # TODO: add groups and clusters [16]\n # TODO: clarify all names [17]\n\n def feed_all(self, nodes_node_frame, nodes_names, edges_names, edges_weights,\n nodes_back_colour_low=50, nodes_back_colour_up=200,\n edges_boldness_low=0.5, edges_boldness_up=4):\n\n self.nodes.feed_nodes(activity_name=self.configuration.activity_name,\n node_frame=nodes_node_frame, nodes_names=nodes_names,\n weight_column=self.configuration.nodes_weights_column,\n back_colour_column=self.configuration.nodes_back_colour_column,\n back_colour_low=nodes_back_colour_low, back_colour_up=nodes_back_colour_up)\n self.edges.feed_edges(names=edges_names, weights=edges_weights,\n boldness_low=edges_boldness_low, boldness_up=edges_boldness_up)\n\n def draw(self):\n\n # border_colouring, back_colouring, clustering, shaping, bordering,\n graph = Digraph(comment='pydge')\n\n # adding nodes\n for k in range(self.nodes.n()):\n # TODO: add such additional parameters as: border line style [12]\n graph.node(name=self.nodes.names[k], label=self.nodes.labels[k],\n color='#000000',\n fillcolor=self.nodes.back_colours[k],\n penwidth='1',\n style='filled')\n\n # adding edges\n #print(self.edges.names)\n #print(self.edges.router)\n #print(self.edges.n())\n '''\n for k in range(self.edges.m()):\n _from = self.nodes.names.tolist().index(self.edges.names[k, 0])\n _to = self.nodes.names.tolist().index(self.edges.names[k, 1])\n '''\n\n for i in range(self.edges.n()):\n for j in range(self.edges.n()):\n if self.edges.router[i, j]:\n #print(self.nodes.names[i])\n #print(self.nodes.names[j])\n #print(self.edges.boldness_values[i])\n graph.edge(tail_name=self.nodes.names[i], head_name=self.nodes.names[j],\n label=str(self.edges.weights[i, j]), penwidth=str(self.edges.boldness_values[i, j]),\n style='solid')\n\n graph.view()\n\n\ndef str_vector(x):\n x = numpy.array(x, dtype=str)\n return x\n\n\ndef str_concat(x):\n return x[0] + x[1]\n\n\ndef str_vector_concat(a, b):\n\n if isinstance(a, numpy.ndarray):\n a_dimensionality = len(a.shape)\n else:\n a_dimensionality = 0\n if isinstance(b, numpy.ndarray):\n b_dimensionality = len(b.shape)\n else:\n b_dimensionality = 0\n\n if a_dimensionality == 1 and b_dimensionality == 0:\n b = numpy.array([b] * a.shape[0])\n b_dimensionality = 1\n if a_dimensionality == 0 and b_dimensionality == 1:\n a = numpy.array([a] * b.shape[0])\n a_dimensionality = 1\n\n if a_dimensionality == 1 and b_dimensionality == 1:\n c = {'a': a, 'b': b}\n c = pandas.DataFrame(data=c)\n d = c.apply(func=str_concat, axis=1)\n d = d.values\n else:\n if a_dimensionality == 0:\n d = numpy.full(shape=(b.shape[0], b.shape[1]), fill_value='', dtype=object)\n for i in range(b.shape[0]):\n for j in range(b.shape[1]):\n d[i, j] = a + b[i, j]\n elif b_dimensionality == 0:\n d = numpy.full(shape=(a.shape[0], a.shape[1]), fill_value='', dtype=object)\n for i in 
range(a.shape[0]):\n for j in range(a.shape[1]):\n d[i, j] = a[i, j] + b\n else:\n raise Exception('I do not know how to treat that (-_-) Seriously...')\n return d\n\n\ndef cut_hex(x):\n return hex(x)[2:]\n\n\ndef hex_vector(x):\n x = numpy.array(x, dtype=int)\n x = pandas.Series(x)\n x = x.apply(func=cut_hex)\n x = x.values\n return x\n\n\n# TODO: Add check for consistency of inputs [16]\nclass Nodes:\n\n def __init__(self, back_colour_base):\n\n # TODO: select better names (uniform and intuitive) [15]\n self.names = None\n self.labels = None\n self.weights = None\n self.back_colour_base = back_colour_base\n self.back_colour_function = DefaultScaleFunction(conversion_function=hex_vector)\n self.back_colours = None\n\n def get_back_colour(self, value):\n\n scaled = self.back_colour_function.linear_scaling(value=value)\n result = str_vector_concat(self.back_colour_base, scaled)\n return result\n\n def feed_nodes(self, activity_name, node_frame, nodes_names, weight_column, back_colour_column, back_colour_low, back_colour_up):\n\n #self.names = node_frame[name_column].values\n self.names = nodes_names\n self.labels = nodes_names#node_frame[label_column].values\n self.weights = node_frame[weight_column].values\n self.back_colour_function.feed(data=node_frame[back_colour_column].values, low_bound_target=back_colour_low, up_bound_target=back_colour_up)\n #print(node_frame)\n a = node_frame.sort_values(by=activity_name)\n #a = numpy.sort()\n #print(a)\n self.back_colours = self.get_back_colour(value=a[back_colour_column].values)\n\n def n(self):\n\n return self.names.shape[0]\n\n\nclass Edges:\n\n def __init__(self):\n\n # TODO: names is a misleading name for this field, it should be changed [18]\n self.names = None\n self.router = None\n self.weights = None #numpy.zeros(shape=(dimensionality, dimensionality), dtype=int)\n self.boldness_base = ''\n self.boldness_function = DefaultScaleFunction(conversion_function=str_vector)\n self.boldness_values = None #numpy.full(shape=(dimensionality, dimensionality), fill_value=self.boldness_base, dtype='<U9')\n\n def get_boldness(self, value):\n\n scaled = self.boldness_function.linear_scaling(value=value)\n result = str_vector_concat(self.boldness_base, scaled)\n return result\n\n def feed_edges(self, names, weights, boldness_low, boldness_up):\n\n self.names = names\n self.weights = weights\n self.boldness_function.feed(data=weights, low_bound_target=boldness_low, up_bound_target=boldness_up)\n self.boldness_values = self.get_boldness(value=weights)\n self.router = numpy.array(self.weights, dtype=bool)\n\n def m(self):\n return self.names.shape[0]\n\n def n(self):\n return self.router.shape[0]\n\n\nclass DefaultScaleFunction:\n\n def __init__(self, conversion_function):\n\n self.low_bound_value = None\n self.low_bound_quantile = 0.1\n self.low_bound_target = None\n self.up_bound_value = None\n self.up_bound_quantile = 0.1\n self.up_bound_target = None\n self.scale_function = self.linear_scaling\n self.data = None\n self.conversion_function = conversion_function\n self.intercept = None\n self.coefficient = None\n\n def feed(self, data, low_bound_target, up_bound_target):\n\n self.data = data\n self.low_bound_value = numpy.quantile(a=self.data, q=self.low_bound_quantile)\n self.up_bound_value = numpy.quantile(a=self.data, q=(1 - self.up_bound_quantile))\n\n self.up_bound_target = up_bound_target\n self.low_bound_target = low_bound_target\n #print(self.up_bound_target)\n #print(self.low_bound_target)\n #print(self.up_bound_value)\n 
#print(self.low_bound_value)\n self.intercept = self.low_bound_target\n self.coefficient = (self.up_bound_target - self.low_bound_target) / (self.up_bound_value - self.low_bound_value)\n\n self.low_bound_target = low_bound_target\n self.up_bound_target = up_bound_target\n\n def linear_scaling(self, value):\n\n #print(self.intercept)\n #print(self.coefficient)\n #print(self.low_bound_value)\n #print(value)\n #print(value - self.low_bound_value)\n #print(self.coefficient * (value - self.low_bound_value))\n #print(self.intercept + self.coefficient * (value - self.low_bound_value))\n #print(self.conversion_function(self.intercept + self.coefficient * (value - self.low_bound_value)))\n\n scaled_value = numpy.where(value <= self.low_bound_value, self.conversion_function(self.low_bound_target),\n numpy.where(value >= self.up_bound_value, self.conversion_function(self.up_bound_target),\n self.conversion_function((self.intercept + self.coefficient * (value - self.low_bound_value)))))\n '''\n if value <= self.low_bound_value:\n scaled_value = self.conversion_function(value=self.low_bound_target)\n elif value >= self.up_bound_value:\n scaled_value = self.conversion_function(value=self.up_bound_target)\n else:\n scaled_value = self.conversion_function(value=(\n self.intercept + self.coefficient * (value - self.low_bound_value)))\n '''\n\n return scaled_value\n\n"
] | [
[
"numpy.full",
"numpy.array",
"numpy.quantile",
"pandas.DataFrame",
"pandas.Series"
]
] |
elton-souza/IGTI-Python | [
"2633dd70af8403a6ab52f8f97ee53063dbb0ef77"
] | [
"2 - Analise de Dados/Aula 6.py"
] | [
"#Regressão linear numpy\n\n#Visualização de dados\nfrom matplotlib import pyplot as plt\n\n#dados\nx = [1, 2 ,3 ,4 , 5, 6]\ny = [10, 50 , 100, 150, 200, 250]\n\n#plot dos dados\nplt.figure(figsize=(10,5))\nplt.plot(x,y,'o',label = 'Dados originais')\nplt.legend()\nplt.xlabel(\"x\")\nplt.ylabel(\"Y\")\nplt.grid()\nplt.show()\n\n'''\nIremos estimar uma funçao do tipo : y = ax+b, ou seja\ndevemos achar quais valores de a e b que melhor representa os dados\n\nOs valores reais de a e b são (2,1)\n'''\n#Trnsformando para numy e vetor coluna\nx, y = np.array(x).reshape(-1,1), np.array(y).reshape(-1,1)\n\n#Adicioanando bias: para estimar o termo b:\nX = np.hstack((x,np.ones(x.shape))) #np.hstack - concatena dois arrays horinzontalmente\n\n#Estimando a e b\nbeta = np.linalg.pinv(x).dot(y)\nprint(\"a estimado: \",beta[0][1])\nprint(\"b estimado: \",beta[0][1])\n\n#plot de dados\nplt.figure(figsize=(10,5))\nplt.plot(x,y,'o',label=\"Dados originais\")\nplt.plot(x,X.dot(beta),label=\"Regressao linear\")\nplt.legend()\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.title(\"Regressão linear com numpy\")\nplt.grid()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
] |
semeraro/yt | [
"aad3cfa3b4ebab7838352ab467275a27c26ff363"
] | [
"yt/frontends/gadget/simulation_handling.py"
] | [
"import glob\nimport os\n\nimport numpy as np\nfrom unyt import dimensions, unyt_array\nfrom unyt.unit_registry import UnitRegistry\n\nfrom yt.data_objects.time_series import DatasetSeries, SimulationTimeSeries\nfrom yt.funcs import only_on_root\nfrom yt.loaders import load\nfrom yt.utilities.cosmology import Cosmology\nfrom yt.utilities.exceptions import (\n InvalidSimulationTimeSeries,\n MissingParameter,\n NoStoppingCondition,\n YTUnidentifiedDataType,\n)\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects\n\n\nclass GadgetSimulation(SimulationTimeSeries):\n r\"\"\"\n Initialize an Gadget Simulation object.\n\n Upon creation, the parameter file is parsed and the time and redshift\n are calculated and stored in all_outputs. A time units dictionary is\n instantiated to allow for time outputs to be requested with physical\n time units. The get_time_series can be used to generate a\n DatasetSeries object.\n\n parameter_filename : str\n The simulation parameter file.\n find_outputs : bool\n If True, the OutputDir directory is searched for datasets.\n Time and redshift information are gathered by temporarily\n instantiating each dataset. This can be used when simulation\n data was created in a non-standard way, making it difficult\n to guess the corresponding time and redshift information.\n Default: False.\n\n Examples\n --------\n >>> import yt\n >>> gs = yt.load_simulation(\"my_simulation.par\", \"Gadget\")\n >>> gs.get_time_series()\n >>> for ds in gs:\n ... print(ds.current_time)\n\n \"\"\"\n\n def __init__(self, parameter_filename, find_outputs=False):\n self.simulation_type = \"particle\"\n self.dimensionality = 3\n SimulationTimeSeries.__init__(\n self, parameter_filename, find_outputs=find_outputs\n )\n\n def _set_units(self):\n self.unit_registry = UnitRegistry()\n self.time_unit = self.quan(1.0, \"s\")\n if self.cosmological_simulation:\n # Instantiate Cosmology object for units and time conversions.\n self.cosmology = Cosmology(\n hubble_constant=self.hubble_constant,\n omega_matter=self.omega_matter,\n omega_lambda=self.omega_lambda,\n unit_registry=self.unit_registry,\n )\n if \"h\" in self.unit_registry:\n self.unit_registry.modify(\"h\", self.hubble_constant)\n else:\n self.unit_registry.add(\n \"h\", self.hubble_constant, dimensions.dimensionless\n )\n # Comoving lengths\n for my_unit in [\"m\", \"pc\", \"AU\"]:\n new_unit = f\"{my_unit}cm\"\n # technically not true, but should be ok\n self.unit_registry.add(\n new_unit,\n self.unit_registry.lut[my_unit][0],\n dimensions.length,\n \"\\\\rm{%s}/(1+z)\" % my_unit,\n prefixable=True,\n )\n self.length_unit = self.quan(\n self.unit_base[\"UnitLength_in_cm\"],\n \"cmcm / h\",\n registry=self.unit_registry,\n )\n self.mass_unit = self.quan(\n self.unit_base[\"UnitMass_in_g\"], \"g / h\", registry=self.unit_registry\n )\n self.box_size = self.box_size * self.length_unit\n self.domain_left_edge = self.domain_left_edge * self.length_unit\n self.domain_right_edge = self.domain_right_edge * self.length_unit\n self.unit_registry.add(\n \"unitary\",\n float(self.box_size.in_base()),\n self.length_unit.units.dimensions,\n )\n else:\n # Read time from file for non-cosmological sim\n self.time_unit = self.quan(\n self.unit_base[\"UnitLength_in_cm\"]\n / self.unit_base[\"UnitVelocity_in_cm_per_s\"],\n \"s\",\n )\n self.unit_registry.add(\"code_time\", 1.0, dimensions.time)\n self.unit_registry.modify(\"code_time\", self.time_unit)\n # Length\n 
self.length_unit = self.quan(self.unit_base[\"UnitLength_in_cm\"], \"cm\")\n self.unit_registry.add(\"code_length\", 1.0, dimensions.length)\n self.unit_registry.modify(\"code_length\", self.length_unit)\n\n def get_time_series(\n self,\n initial_time=None,\n final_time=None,\n initial_redshift=None,\n final_redshift=None,\n times=None,\n redshifts=None,\n tolerance=None,\n parallel=True,\n setup_function=None,\n ):\n\n \"\"\"\n Instantiate a DatasetSeries object for a set of outputs.\n\n If no additional keywords given, a DatasetSeries object will be\n created with all potential datasets created by the simulation.\n\n Outputs can be gather by specifying a time or redshift range\n (or combination of time and redshift), with a specific list of\n times or redshifts), or by simply searching all subdirectories\n within the simulation directory.\n\n initial_time : tuple of type (float, str)\n The earliest time for outputs to be included. This should be\n given as the value and the string representation of the units.\n For example, (5.0, \"Gyr\"). If None, the initial time of the\n simulation is used. This can be used in combination with\n either final_time or final_redshift.\n Default: None.\n final_time : tuple of type (float, str)\n The latest time for outputs to be included. This should be\n given as the value and the string representation of the units.\n For example, (13.7, \"Gyr\"). If None, the final time of the\n simulation is used. This can be used in combination with either\n initial_time or initial_redshift.\n Default: None.\n times : tuple of type (float array, str)\n A list of times for which outputs will be found and the units\n of those values. For example, ([0, 1, 2, 3], \"s\").\n Default: None.\n initial_redshift : float\n The earliest redshift for outputs to be included. If None,\n the initial redshift of the simulation is used. This can be\n used in combination with either final_time or\n final_redshift.\n Default: None.\n final_redshift : float\n The latest redshift for outputs to be included. If None,\n the final redshift of the simulation is used. This can be\n used in combination with either initial_time or\n initial_redshift.\n Default: None.\n redshifts : array_like\n A list of redshifts for which outputs will be found.\n Default: None.\n tolerance : float\n Used in combination with \"times\" or \"redshifts\" keywords,\n this is the tolerance within which outputs are accepted\n given the requested times or redshifts. If None, the\n nearest output is always taken.\n Default: None.\n parallel : bool/int\n If True, the generated DatasetSeries will divide the work\n such that a single processor works on each dataset. If an\n integer is supplied, the work will be divided into that\n number of jobs.\n Default: True.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n\n Examples\n --------\n\n >>> import yt\n >>> gs = yt.load_simulation(\"my_simulation.par\", \"Gadget\")\n\n >>> gs.get_time_series(initial_redshift=10, final_time=(13.7, \"Gyr\"))\n\n >>> gs.get_time_series(redshifts=[3, 2, 1, 0])\n\n >>> # after calling get_time_series\n >>> for ds in gs.piter():\n ... p = ProjectionPlot(ds, \"x\", \"density\")\n ... p.save()\n\n >>> # An example using the setup_function keyword\n >>> def print_time(ds):\n ... print(ds.current_time)\n >>> gs.get_time_series(setup_function=print_time)\n >>> for ds in gs:\n ... 
SlicePlot(ds, \"x\", \"Density\").save()\n\n \"\"\"\n\n if (\n initial_redshift is not None or final_redshift is not None\n ) and not self.cosmological_simulation:\n raise InvalidSimulationTimeSeries(\n \"An initial or final redshift has been given for a \"\n + \"noncosmological simulation.\"\n )\n\n my_all_outputs = self.all_outputs\n if not my_all_outputs:\n DatasetSeries.__init__(\n self, outputs=[], parallel=parallel, unit_base=self.unit_base\n )\n mylog.info(\"0 outputs loaded into time series.\")\n return\n\n # Apply selection criteria to the set.\n if times is not None:\n my_outputs = self._get_outputs_by_key(\n \"time\", times, tolerance=tolerance, outputs=my_all_outputs\n )\n\n elif redshifts is not None:\n my_outputs = self._get_outputs_by_key(\n \"redshift\", redshifts, tolerance=tolerance, outputs=my_all_outputs\n )\n\n else:\n if initial_time is not None:\n if isinstance(initial_time, float):\n initial_time = self.quan(initial_time, \"code_time\")\n elif isinstance(initial_time, tuple) and len(initial_time) == 2:\n initial_time = self.quan(*initial_time)\n elif not isinstance(initial_time, unyt_array):\n raise RuntimeError(\n \"Error: initial_time must be given as a float or \"\n + \"tuple of (value, units).\"\n )\n elif initial_redshift is not None:\n my_initial_time = self.cosmology.t_from_z(initial_redshift)\n else:\n my_initial_time = self.initial_time\n\n if final_time is not None:\n if isinstance(final_time, float):\n final_time = self.quan(final_time, \"code_time\")\n elif isinstance(final_time, tuple) and len(final_time) == 2:\n final_time = self.quan(*final_time)\n elif not isinstance(final_time, unyt_array):\n raise RuntimeError(\n \"Error: final_time must be given as a float or \"\n + \"tuple of (value, units).\"\n )\n my_final_time = final_time.in_units(\"s\")\n elif final_redshift is not None:\n my_final_time = self.cosmology.t_from_z(final_redshift)\n else:\n my_final_time = self.final_time\n\n my_initial_time.convert_to_units(\"s\")\n my_final_time.convert_to_units(\"s\")\n my_times = np.array([a[\"time\"] for a in my_all_outputs])\n my_indices = np.digitize([my_initial_time, my_final_time], my_times)\n if my_initial_time == my_times[my_indices[0] - 1]:\n my_indices[0] -= 1\n my_outputs = my_all_outputs[my_indices[0] : my_indices[1]]\n\n init_outputs = []\n for output in my_outputs:\n if os.path.exists(output[\"filename\"]):\n init_outputs.append(output[\"filename\"])\n if len(init_outputs) == 0 and len(my_outputs) > 0:\n mylog.warning(\n \"Could not find any datasets. 
\"\n \"Check the value of OutputDir in your parameter file.\"\n )\n\n DatasetSeries.__init__(\n self,\n outputs=init_outputs,\n parallel=parallel,\n setup_function=setup_function,\n unit_base=self.unit_base,\n )\n mylog.info(\"%d outputs loaded into time series.\", len(init_outputs))\n\n def _parse_parameter_file(self):\n \"\"\"\n Parses the parameter file and establishes the various\n dictionaries.\n \"\"\"\n\n self.unit_base = {}\n\n # Let's read the file\n lines = open(self.parameter_filename).readlines()\n comments = [\"%\", \";\"]\n for line in (l.strip() for l in lines):\n for comment in comments:\n if comment in line:\n line = line[0 : line.find(comment)]\n if len(line) < 2:\n continue\n param, vals = (i.strip() for i in line.split(None, 1))\n # First we try to decipher what type of value it is.\n vals = vals.split()\n # Special case approaching.\n if \"(do\" in vals:\n vals = vals[:1]\n if len(vals) == 0:\n pcast = str # Assume NULL output\n else:\n v = vals[0]\n # Figure out if it's castable to floating point:\n try:\n float(v)\n except ValueError:\n pcast = str\n else:\n if any(\".\" in v or \"e\" in v for v in vals):\n pcast = float\n elif v == \"inf\":\n pcast = str\n else:\n pcast = int\n # Now we figure out what to do with it.\n if param.startswith(\"Unit\"):\n self.unit_base[param] = float(vals[0])\n if len(vals) == 0:\n vals = \"\"\n elif len(vals) == 1:\n vals = pcast(vals[0])\n else:\n vals = np.array([pcast(i) for i in vals])\n\n self.parameters[param] = vals\n\n # Domain dimensions for Gadget datasets are always 2x2x2 for octree\n self.domain_dimensions = np.array([2, 2, 2])\n\n if self.parameters[\"ComovingIntegrationOn\"]:\n cosmo_attr = {\n \"box_size\": \"BoxSize\",\n \"omega_lambda\": \"OmegaLambda\",\n \"omega_matter\": \"Omega0\",\n \"hubble_constant\": \"HubbleParam\",\n }\n self.initial_redshift = 1.0 / self.parameters[\"TimeBegin\"] - 1.0\n self.final_redshift = 1.0 / self.parameters[\"TimeMax\"] - 1.0\n self.cosmological_simulation = 1\n for a, v in cosmo_attr.items():\n if v not in self.parameters:\n raise MissingParameter(self.parameter_filename, v)\n setattr(self, a, self.parameters[v])\n self.domain_left_edge = np.array([0.0, 0.0, 0.0])\n self.domain_right_edge = (\n np.array([1.0, 1.0, 1.0]) * self.parameters[\"BoxSize\"]\n )\n else:\n self.cosmological_simulation = 0\n self.omega_lambda = self.omega_matter = self.hubble_constant = 0.0\n\n def _find_data_dir(self):\n \"\"\"\n Find proper location for datasets. First look where parameter file\n points, but if this doesn't exist then default to the current\n directory.\n \"\"\"\n if self.parameters[\"OutputDir\"].startswith(\"/\"):\n data_dir = self.parameters[\"OutputDir\"]\n else:\n data_dir = os.path.join(self.directory, self.parameters[\"OutputDir\"])\n if not os.path.exists(data_dir):\n mylog.info(\n \"OutputDir not found at %s, instead using %s.\", data_dir, self.directory\n )\n data_dir = self.directory\n self.data_dir = data_dir\n\n def _snapshot_format(self, index=None):\n \"\"\"\n The snapshot filename for a given index. 
Modify this for different\n naming conventions.\n \"\"\"\n\n if self.parameters[\"NumFilesPerSnapshot\"] > 1:\n suffix = \".0\"\n else:\n suffix = \"\"\n if self.parameters[\"SnapFormat\"] == 3:\n suffix += \".hdf5\"\n if index is None:\n count = \"*\"\n else:\n count = \"%03d\" % index\n filename = f\"{self.parameters['SnapshotFileBase']}_{count}{suffix}\"\n return os.path.join(self.data_dir, filename)\n\n def _get_all_outputs(self, find_outputs=False):\n \"\"\"\n Get all potential datasets and combine into a time-sorted list.\n \"\"\"\n\n # Find the data directory where the outputs are\n self._find_data_dir()\n\n # Create the set of outputs from which further selection will be done.\n if find_outputs:\n self._find_outputs()\n else:\n if self.parameters[\"OutputListOn\"]:\n a_values = [\n float(a)\n for a in open(\n os.path.join(\n self.data_dir, self.parameters[\"OutputListFilename\"]\n ),\n ).readlines()\n ]\n else:\n a_values = [float(self.parameters[\"TimeOfFirstSnapshot\"])]\n time_max = float(self.parameters[\"TimeMax\"])\n while a_values[-1] < time_max:\n if self.cosmological_simulation:\n a_values.append(\n a_values[-1] * self.parameters[\"TimeBetSnapshot\"]\n )\n else:\n a_values.append(\n a_values[-1] + self.parameters[\"TimeBetSnapshot\"]\n )\n if a_values[-1] > time_max:\n a_values[-1] = time_max\n\n if self.cosmological_simulation:\n self.all_outputs = [\n {\"filename\": self._snapshot_format(i), \"redshift\": (1.0 / a - 1)}\n for i, a in enumerate(a_values)\n ]\n\n # Calculate times for redshift outputs.\n for output in self.all_outputs:\n output[\"time\"] = self.cosmology.t_from_z(output[\"redshift\"])\n else:\n self.all_outputs = [\n {\n \"filename\": self._snapshot_format(i),\n \"time\": self.quan(a, \"code_time\"),\n }\n for i, a in enumerate(a_values)\n ]\n\n self.all_outputs.sort(key=lambda obj: obj[\"time\"].to_ndarray())\n\n def _calculate_simulation_bounds(self):\n \"\"\"\n Figure out the starting and stopping time and redshift for the simulation.\n \"\"\"\n\n # Convert initial/final redshifts to times.\n if self.cosmological_simulation:\n self.initial_time = self.cosmology.t_from_z(self.initial_redshift)\n self.initial_time.units.registry = self.unit_registry\n self.final_time = self.cosmology.t_from_z(self.final_redshift)\n self.final_time.units.registry = self.unit_registry\n\n # If not a cosmology simulation, figure out the stopping criteria.\n else:\n if \"TimeBegin\" in self.parameters:\n self.initial_time = self.quan(self.parameters[\"TimeBegin\"], \"code_time\")\n else:\n self.initial_time = self.quan(0.0, \"code_time\")\n\n if \"TimeMax\" in self.parameters:\n self.final_time = self.quan(self.parameters[\"TimeMax\"], \"code_time\")\n else:\n self.final_time = None\n if \"TimeMax\" not in self.parameters:\n raise NoStoppingCondition(self.parameter_filename)\n\n def _find_outputs(self):\n \"\"\"\n Search for directories matching the data dump keywords.\n If found, get dataset times py opening the ds.\n \"\"\"\n potential_outputs = glob.glob(self._snapshot_format())\n self.all_outputs = self._check_for_outputs(potential_outputs)\n self.all_outputs.sort(key=lambda obj: obj[\"time\"])\n only_on_root(mylog.info, \"Located %d total outputs.\", len(self.all_outputs))\n\n # manually set final time and redshift with last output\n if self.all_outputs:\n self.final_time = self.all_outputs[-1][\"time\"]\n if self.cosmological_simulation:\n self.final_redshift = self.all_outputs[-1][\"redshift\"]\n\n def _check_for_outputs(self, potential_outputs):\n r\"\"\"\n Check a 
list of files to see if they are valid datasets.\n \"\"\"\n\n only_on_root(\n mylog.info, \"Checking %d potential outputs.\", len(potential_outputs)\n )\n\n my_outputs = {}\n for my_storage, output in parallel_objects(\n potential_outputs, storage=my_outputs\n ):\n try:\n ds = load(output)\n except (FileNotFoundError, YTUnidentifiedDataType):\n mylog.error(\"Failed to load %s\", output)\n continue\n my_storage.result = {\n \"filename\": output,\n \"time\": ds.current_time.in_units(\"s\"),\n }\n if ds.cosmological_simulation:\n my_storage.result[\"redshift\"] = ds.current_redshift\n\n my_outputs = [\n my_output for my_output in my_outputs.values() if my_output is not None\n ]\n return my_outputs\n\n def _write_cosmology_outputs(self, filename, outputs, start_index, decimals=3):\n r\"\"\"\n Write cosmology output parameters for a cosmology splice.\n \"\"\"\n\n mylog.info(\"Writing redshift output list to %s.\", filename)\n f = open(filename, \"w\")\n for output in outputs:\n f.write(f\"{1.0 / (1.0 + output['redshift']):f}\\n\")\n f.close()\n"
] | [
[
"numpy.array",
"numpy.digitize"
]
] |
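A minimal sketch of the output-selection logic in the entry above: numpy.digitize brackets an [initial, final] time window against the sorted array of output times, and an exact match on the left edge is pulled into the window. The output_times values and the window bounds are made-up placeholders for illustration.

import numpy as np

output_times = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # hypothetical sorted snapshot times (s)
t_initial, t_final = 1.0, 3.5                         # hypothetical selection window
indices = np.digitize([t_initial, t_final], output_times)
if t_initial == output_times[indices[0] - 1]:
    indices[0] -= 1                                   # keep an output sitting exactly on the left edge
selected = output_times[indices[0]:indices[1]]
print(selected)                                       # [1. 2. 3.]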
isears/addons | [
"e18fcfc5cce8a8176189ce0e21416936fd588f35"
] | [
"tensorflow_addons/layers/spectral_normalization.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport tensorflow as tf\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass SpectralNormalization(tf.keras.layers.Wrapper):\n \"\"\"Performs spectral normalization on weights.\n\n This wrapper controls the Lipschitz constant of the layer by\n constraining its spectral norm, which can stabilize the training of GANs.\n\n See [Spectral Normalization for Generative Adversarial Networks](https://arxiv.org/abs/1802.05957).\n\n Wrap `tf.keras.layers.Conv2D`:\n\n >>> x = np.random.rand(1, 10, 10, 1)\n >>> conv2d = SpectralNormalization(tf.keras.layers.Conv2D(2, 2))\n >>> y = conv2d(x)\n >>> y.shape\n TensorShape([1, 9, 9, 2])\n\n Wrap `tf.keras.layers.Dense`:\n\n >>> x = np.random.rand(1, 10, 10, 1)\n >>> dense = SpectralNormalization(tf.keras.layers.Dense(10))\n >>> y = dense(x)\n >>> y.shape\n TensorShape([1, 10, 10, 10])\n\n Args:\n layer: A `tf.keras.layers.Layer` instance that\n has either `kernel` or `embeddings` attribute.\n power_iterations: `int`, the number of iterations during normalization.\n Raises:\n AssertionError: If not initialized with a `Layer` instance.\n ValueError: If initialized with negative `power_iterations`.\n AttributeError: If `layer` does not has `kernel` or `embeddings` attribute.\n \"\"\"\n\n @typechecked\n def __init__(self, layer: tf.keras.layers, power_iterations: int = 1, **kwargs):\n super().__init__(layer, **kwargs)\n if power_iterations <= 0:\n raise ValueError(\n \"`power_iterations` should be greater than zero, got \"\n \"`power_iterations={}`\".format(power_iterations)\n )\n self.power_iterations = power_iterations\n self._initialized = False\n\n def build(self, input_shape):\n \"\"\"Build `Layer`\"\"\"\n super().build(input_shape)\n input_shape = tf.TensorShape(input_shape)\n self.input_spec = tf.keras.layers.InputSpec(shape=[None] + input_shape[1:])\n\n if hasattr(self.layer, \"kernel\"):\n self.w = self.layer.kernel\n elif hasattr(self.layer, \"embeddings\"):\n self.w = self.layer.embeddings\n else:\n raise AttributeError(\n \"{} object has no attribute 'kernel' nor \"\n \"'embeddings'\".format(type(self.layer).__name__)\n )\n\n self.w_shape = self.w.shape.as_list()\n\n self.u = self.add_weight(\n shape=(1, self.w_shape[-1]),\n initializer=tf.initializers.TruncatedNormal(stddev=0.02),\n trainable=False,\n name=\"sn_u\",\n dtype=self.w.dtype,\n )\n\n def call(self, inputs, training=None):\n \"\"\"Call `Layer`\"\"\"\n if training is None:\n training = tf.keras.backend.learning_phase()\n\n if training:\n self.normalize_weights()\n\n output = self.layer(inputs)\n return output\n\n def compute_output_shape(self, input_shape):\n return tf.TensorShape(self.layer.compute_output_shape(input_shape).as_list())\n\n @tf.function\n def normalize_weights(self):\n \"\"\"Generate spectral normalized weights.\n\n This method will 
update the value of `self.w` with the\n spectral normalized value, so that the layer is ready for `call()`.\n \"\"\"\n\n w = tf.reshape(self.w, [-1, self.w_shape[-1]])\n u = self.u\n\n with tf.name_scope(\"spectral_normalize\"):\n for _ in range(self.power_iterations):\n v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True))\n u = tf.math.l2_normalize(tf.matmul(v, w))\n\n sigma = tf.matmul(tf.matmul(v, w), u, transpose_b=True)\n\n self.w.assign(self.w / sigma)\n self.u.assign(u)\n\n def get_config(self):\n config = {\"power_iterations\": self.power_iterations}\n base_config = super().get_config()\n return {**base_config, **config}\n"
] | [
[
"tensorflow.initializers.TruncatedNormal",
"tensorflow.keras.backend.learning_phase",
"tensorflow.TensorShape",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.keras.layers.InputSpec",
"tensorflow.name_scope",
"tensorflow.keras.utils.register_keras_serializable"
]
] |
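A NumPy sketch of the power iteration behind the SpectralNormalization wrapper above: one iteration refines a running singular-vector estimate, yields an approximation of the kernel's largest singular value sigma, and the weights are divided by it. The matrix and vector here are random stand-ins, not tensors taken from the layer.

import numpy as np

rng = np.random.default_rng(0)
w = rng.normal(size=(64, 32))            # flattened kernel, shape (-1, out_dim)
u = rng.normal(size=(1, 32))             # persistent singular-vector estimate
for _ in range(1):                       # power_iterations = 1, the wrapper's default
    v = u @ w.T
    v /= np.linalg.norm(v)
    u = v @ w
    u /= np.linalg.norm(u)
sigma = (v @ w @ u.T).item()             # approximate largest singular value of w
w_sn = w / sigma                         # spectrally normalized weights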
shadowk29/ising2d | [
"6e42efc7738b066bbfb6cc9f0d7c79a24ee86325"
] | [
"example_data.py"
] | [
"from ising2d import ising2d\nimport numpy as np\n\ndef main():\n tauneg = -np.array([10**n for n in np.linspace(-2, -0.26, 8)])\n taupos = np.array([10**n for n in np.linspace(-2, -0.12, 8)])\n tau = np.append(tauneg, taupos)\n tau = np.append(tau, 0)\n temperatures = 2/np.log(1+np.sqrt(2))*(1+tau)\n fields= [0]\n sizes = [32, 8]\n microstates = 10\n magnet = ising2d(temperatures, fields, sizes, microstates, output_folder = 'Example_Data', save_states=10)\n magnet.run()\n\nif __name__=='__main__':\n main()\n"
] | [
[
"numpy.linspace",
"numpy.sqrt",
"numpy.append"
]
] |
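A quick check of the temperature grid built in the entry above: the exact 2-D Ising critical temperature is Tc = 2 / ln(1 + sqrt(2)) ≈ 2.269 (in units of J/k_B), and the script samples T = Tc * (1 + tau) for reduced temperatures tau log-spaced on both sides of zero. numpy.logspace is used here as an equivalent of the 10**n list comprehension in the original.

import numpy as np

Tc = 2 / np.log(1 + np.sqrt(2))
print(round(Tc, 4))                                   # 2.2692
tau = np.append(-np.logspace(-2, -0.26, 8), np.logspace(-2, -0.12, 8))
tau = np.append(tau, 0)
temperatures = Tc * (1 + tau)
print(temperatures.min() < Tc < temperatures.max())   # True: the grid straddles Tc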
densechen/DIP | [
"957de1d3e4e8b663ce305e896c7a82a0bd044301"
] | [
"utils/utils.py"
] | [
"'''\nDescripttion: [email protected]\nversion: 0.0\nAuthor: Dense Chen\nDate: 1970-01-01 08:00:00\nLastEditors: Dense Chen\nLastEditTime: 2020-08-12 20:45:43\n'''\nimport math\nfrom collections import Counter, OrderedDict, namedtuple\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom pytorch3d.ops import iterative_closest_point\nfrom pytorch3d.transforms import (euler_angles_to_matrix,\n matrix_to_euler_angles, matrix_to_quaternion,\n quaternion_to_matrix, random_rotations)\nfrom torch.distributions import Normal\n\n\"\"\"\nDATA STRUCTURE BASED ON NAMEDTUPLE\n\"\"\"\nPI = math.pi\nIntrinsic = namedtuple(\"Intrinsic\", [\"fx\", \"fy\", \"cx\", \"cy\"])\nRotation = namedtuple(\"Rotation\", [\"euler\", \"quat\", \"matrix\", \"ortho6d\"])\nTranslation = namedtuple(\"Translation\", [\"translation\"])\nPose = namedtuple(\"Pose\", [\"Rotation\", \"Translation\"])\nRawData = namedtuple(\"RawData\", [\n \"image\", \"depth\", \"Intrinsic\", \"mask\", \"target_pose\", \"init_pose\",\n \"model_points\"\n])\nTestData = namedtuple(\n \"TestData\",\n [\"image\", \"depth\", \"Intrinsic\", \"mask\", \"init_pose\", \"model_points\"])\nSynRawData = namedtuple(\"SynRawData\", [\n \"data\", \"Intrinsic\", \"target_pose\", \"init_pose\", \"model_points\",\n \"interpolate_ratio\"\n])\nData = namedtuple(\"Data\", [\n \"observed_image\", \"observed_depth\", \"observed_mask\", \"rendered_image\",\n \"rendered_depth\", \"rendered_mask\"\n])\nBBOX = namedtuple(\"BBOX\", [\"left\", \"right\", \"top\", \"low\"])\n\n\ndef index_select_namedtuple(t, dim, index: torch.Tensor):\n return type(t)(*[\n torch.index_select(i, dim=dim, index=index) if isinstance(\n i, torch.Tensor) else index_select_namedtuple(i, dim, index)\n for i in t\n ])\n\n\ndef cat_namedtuple_list(t: list, dim=0):\n t_cat = type(t[0])(*[[] for _ in t[0]])\n for tt in t:\n for i, ttt in enumerate(tt):\n t_cat[i].append(ttt)\n t_catted = type(t[0])(*[\n torch.cat(i, dim=dim) if isinstance(i[0], torch.Tensor\n ) else cat_namedtuple_list(i, dim)\n for i in t_cat\n ])\n return t_catted\n\n\ndef stack_namedtuple_list(t: list, dim=0):\n t_stack = type(t[0])(*[[] for _ in t[0]])\n for tt in t:\n for i, ttt in enumerate(tt):\n t_stack[i].append(ttt)\n t_stacked = type(t[0])(*[\n torch.stack(i, dim=dim)\n if isinstance(i[0], torch.Tensor) else stack_namedtuple_list(i, dim)\n for i in t_stack\n ])\n return t_stacked\n\n\ndef repeat_namedtuple(t, shape: list):\n return type(t)(*[\n i.repeat(*shape) if isinstance(i, torch.Tensor\n ) else repeat_namedtuple(i, shape)\n for i in t\n ])\n\n\ndef squeeze_namedtuple(t, dim=0):\n return type(t)(*[\n i.squeeze(dim) if isinstance(i, torch.Tensor\n ) else squeeze_namedtuple(i, dim)\n for i in t\n ])\n\n\ndef unsqueeze_namedtuple(t, dim=0):\n return type(t)(*[\n i.unsqueeze(dim) if isinstance(i, torch.Tensor\n ) else unsqueeze_namedtuple(i, dim)\n for i in t\n ])\n\n\ndef variable_namedtuple(t, device):\n return type(t)(*[\n torch.autograd.Variable(i).to(device)\n if isinstance(i, torch.Tensor) else variable_namedtuple(i, device)\n for i in t\n ])\n\n\ndef detach_namedtuple(t):\n return type(t)(*[\n i.detach().clone() if isinstance(i, torch.Tensor\n ) else detach_namedtuple(i) for i in t\n ])\n\n\ndef numpy_namedtuple(t):\n return type(t)(*[\n i.detach().cpu().numpy(\n ) if isinstance(i, torch.Tensor) else numpy_namedtuple(i) for i in t\n ])\n\n\ndef append_namedtuple(t1, t2):\n for tt1, tt2 in zip(t1, t2):\n tt1.append(tt2)\n\n\ndef build_translation(translation) -> Translation:\n 
return Translation(translation)\n\n\ndef build_rotation(rotation, format=\"matrix\") -> Rotation:\n \"\"\" Convert roation (with format) into Rotation.\n format: matrix, ortho6d, quat, euler\n \"\"\"\n # 1. CONVERT SPECIFIED FORMAT TO MATRIX FIRST\n if format == \"matrix\":\n matrix = rotation\n elif format == \"ortho6d\":\n matrix = compute_rotation_matrix_from_ortho6d(rotation)\n elif format == \"euler\":\n matrix = euler_angles_to_matrix(rotation, convention=\"XYZ\")\n elif format == \"quat\":\n matrix = quaternion_to_matrix(rotation)\n else:\n raise TypeError\n\n # 2. BUILD ROTATION\n return Rotation(\n ortho6d=rotation if format == \"ortho6d\" else\n compute_ortho6d_from_rotation_matrix(matrix),\n quat=rotation if format == \"quat\" else matrix_to_quaternion(matrix),\n matrix=rotation if format == \"matrix\" else matrix,\n euler=rotation if format == \"euler\" else matrix_to_euler_angles(\n matrix, convention=\"XYZ\"))\n\n\ndef transpose_rotation(rotation: Rotation) -> Rotation:\n \"\"\" As for ortho6d and euler, we will first convert to matrix, then transpose.\n As others, we will use \n \"\"\"\n # 1. TRANSPOSE ORTHO6D\n matrix = compute_rotation_matrix_from_ortho6d(rotation.ortho6d).transpose(\n 2, 1)\n ortho6d = compute_ortho6d_from_rotation_matrix(matrix)\n\n # 2. TRANSPOSE EULER\n matrix = euler_angles_to_matrix(rotation.euler,\n convention=\"XYZ\").transpose(2, 1)\n euler = matrix_to_euler_angles(matrix, convention=\"XYZ\")\n\n # 3. TRANSPOSE QUAT\n quat = quaternion_invert(rotation.quat)\n\n # 4. TRANSPOSE MATRIX\n matrix = rotation.matrix.transpose(2, 1)\n\n return Rotation(ortho6d=ortho6d, quat=quat, matrix=matrix, euler=euler)\n\n\ndef normalize_vector(v):\n n = torch.norm(v, dim=-1, keepdim=True)\n n = torch.clamp_min(n, min=1e-8)\n return v / n\n\n\ndef cross_vector(u, v):\n return torch.cross(u, v, dim=-1)\n\n\ndef compute_rotation_matrix_from_ortho6d(ortho6d):\n x_raw = ortho6d[:, :3]\n y_raw = ortho6d[:, 3:]\n\n x = normalize_vector(x_raw)\n z = cross_vector(x, y_raw)\n z = normalize_vector(z)\n y = cross_vector(z, x)\n\n return torch.stack([x, y, z], dim=1)\n\n\ndef compute_ortho6d_from_rotation_matrix(rotation):\n x_raw = rotation[:, 0, :]\n y_raw = rotation[:, 1, :]\n return torch.cat([x_raw, y_raw], dim=-1)\n\n\ndef apply_action_to_pose(action, src_pose: Pose, settings) -> Pose:\n action_rotation = build_rotation(rotation=action[:, :settings.ROT_DIM],\n format=settings.ROT_FORMAT)\n action_delta_voxel = action[:, -settings.TRANS_DIM:]\n\n src_rotation = src_pose.Rotation\n src_translation = src_pose.Translation.translation\n\n # NEW\n z_tgt = src_translation[:, 2] / torch.exp(action_delta_voxel[:, 2])\n # y_tgt = (action_delta_voxel[:, 1] / settings.FY +\n # src_translation[:, 1] / src_translation[:, 2]) * z_tgt\n # x_tgt = (action_delta_voxel[:, 0] / settings.FX +\n # src_translation[:, 0] / src_translation[:, 2]) * z_tgt\n y_tgt = (action_delta_voxel[:, 1] +\n src_translation[:, 1] / src_translation[:, 2]) * z_tgt\n x_tgt = (action_delta_voxel[:, 0] +\n src_translation[:, 0] / src_translation[:, 2]) * z_tgt\n rotation = build_rotation(torch.bmm(src_rotation.matrix,\n action_rotation.matrix),\n format=\"matrix\")\n translation = build_translation(torch.stack([x_tgt, y_tgt, z_tgt], dim=-1))\n\n return Pose(Rotation=rotation, Translation=translation)\n # return Pose(Rotation=rotation,\n # Translation=build_translation(src_translation))\n\n\n# def apply_action_to_pose(action, src_pose: Pose, settings) -> Pose:\n# action_rotation = build_rotation(rotation=action[:, 
:settings.ROT_DIM],\n# format=settings.ROT_FORMAT)\n# action_translation = action[:, -settings.TRANS_DIM:]\n\n# src_rotation = src_pose.Rotation\n# src_translation = src_pose.Translation.translation\n\n# rotation = build_rotation(torch.bmm(src_rotation.matrix, action_rotation.matrix), format=\"matrix\")\n# translation = build_translation(src_translation + action_translation)\n\n# return Pose(Rotation=rotation, Translation=translation)\n\n\ndef apply_transform_to_pose(pose: Pose, transform: Pose) -> Pose:\n transform_matrix = transform.Rotation.matrix\n transform_trans = transform.Translation.translation\n\n pose_matrix = pose.Rotation.matrix.expand_as(transform_matrix)\n pose_trans = pose.Translation.translation.expand_as(transform_trans)\n\n return Pose(Rotation=build_rotation(torch.bmm(pose_matrix,\n transform_matrix),\n format=\"matrix\"),\n Translation=build_translation(pose_trans + transform_trans))\n\n\ndef distance_normalization_function(x, k=3):\n return 1 - torch.exp(-x * k)\n\n\ndef compute_geodesic_distance_from_two_matrices(m1, m2):\n # 1. MUL\n m = torch.bmm(m1, m2.transpose(1, 2))\n\n # 2. TR\n cos = (m[:, 0, 0] + m[:, 1, 1] + m[:, 2, 2] - 1) / 2\n\n # 3. CLAMP\n cos = torch.clamp(cos, min=-1, max=1)\n\n return torch.acos(cos)\n\n\ndef transform_point_cloud(points, pose: Pose):\n return torch.bmm(\n points,\n pose.Rotation.matrix) + pose.Translation.translation.unsqueeze(dim=1)\n\n\ndef translation_to_voxel_and_depth(translation, intrinsic, settings):\n pix_x = translation[:, 0] * intrinsic.fx / translation[:, 2] + intrinsic.cx\n pix_y = translation[:, 1] * intrinsic.fy / translation[:, 2] + intrinsic.cy\n\n # if settings.DATASET == \"ycb\":\n # pix_x = settings.IMAGE_WIDTH - pix_x\n return torch.stack([pix_x, pix_y], dim=-1), translation[:, 2]\n\n\ndef blend_two_images(img1, img2):\n img = Image.blend(img1, img2, 0.5)\n return img\n\n\ndef check_bbox(bbox: BBOX, image_width, image_height):\n if isinstance(bbox.left, int):\n if bbox.left < 0 or bbox.right > image_width or bbox.low < 0 or bbox.top > image_height:\n return False\n else:\n return True\n else:\n if torch.any(bbox.left < 0) or torch.any(\n bbox.right > image_width) or torch.any(\n bbox.low < 0) or torch.any(bbox.top > image_height):\n return False\n else:\n return True\n\n\ndef save_image_pair(image1, image2, filename1, filename2, pairname):\n image1 = np.asarray(image1.detach().cpu().numpy() * 255.0, dtype=np.uint8)\n image2 = np.asarray(image2.detach().cpu().numpy() * 255.0, dtype=np.uint8)\n image1 = Image.fromarray(image1).convert(\"RGBA\")\n image2 = Image.fromarray(image2).convert(\"RGBA\")\n image = blend_two_images(image1, image2)\n image.save(pairname)\n image1.save(filename1)\n image2.save(filename2)\n\n\ndef load_state_dict(model, pretrained_state_dict):\n model_dict = model.state_dict()\n\n # REMOVE USELESS PARAMETERS\n pretrained_state_dict = {\n k: v\n for k, v in pretrained_state_dict.items() if k in model_dict\n }\n\n # UPDATE STATE DICT\n model_dict.update(pretrained_state_dict)\n\n # LOAD\n model.load_state_dict(model_dict)\n\n\ndef calculate_expected_reward(last_pose: Pose, current_pose: Pose,\n target_pose: Pose):\n last_distance = F.l1_loss(\n last_pose.Translation.translation, target_pose.Translation.translation\n ) + compute_geodesic_distance_from_two_matrices(\n last_pose.Rotation.matrix, target_pose.Rotation.matrix)\n current_distance = F.l1_loss(\n current_pose.Translation.translation, target_pose.Translation.\n translation) + compute_geodesic_distance_from_two_matrices(\n 
current_pose.Rotation.matrix, target_pose.Rotation.matrix)\n\n return (last_distance - current_distance).detach() # if better, do it.\n"
] | [
[
"torch.cat",
"torch.acos",
"torch.stack",
"torch.nn.functional.l1_loss",
"torch.norm",
"torch.any",
"torch.autograd.Variable",
"torch.clamp",
"torch.bmm",
"torch.cross",
"torch.index_select",
"torch.exp",
"torch.clamp_min"
]
] |
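A small sketch of the 6-D rotation parameterization used in the entry above (compute_rotation_matrix_from_ortho6d): Gram-Schmidt on two 3-vectors yields an orthonormal, right-handed 3x3 matrix. The input below is random and the helper name is local to this sketch.

import torch

def ortho6d_to_matrix(ortho6d):                       # ortho6d: (B, 6)
    x_raw, y_raw = ortho6d[:, :3], ortho6d[:, 3:]
    x = torch.nn.functional.normalize(x_raw, dim=-1)
    z = torch.nn.functional.normalize(torch.cross(x, y_raw, dim=-1), dim=-1)
    y = torch.cross(z, x, dim=-1)
    return torch.stack([x, y, z], dim=1)              # rows x, y, z -> (B, 3, 3)

R = ortho6d_to_matrix(torch.randn(2, 6))
I = torch.eye(3).expand(2, 3, 3)
print(torch.allclose(R @ R.transpose(1, 2), I, atol=1e-5))   # True: each R is a rotation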
lamkina/OpenMDAO | [
"280bea25449ce2e64637e45b9b4f9f872796afec"
] | [
"openmdao/core/tests/test_indep_var_comp.py"
] | [
"\"\"\"IndepVarComp tests used in the IndepVarComp feature doc.\"\"\"\nimport unittest\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_warning, assert_warnings\nfrom openmdao.warnings import OMDeprecationWarning\n\n\nclass TestIndepVarComp(unittest.TestCase):\n\n def test_add_output_retval(self):\n # check basic metadata expected in return value\n expected = {\n 'value': 3,\n 'shape': (1,),\n 'size': 1,\n 'units': 'ft',\n 'desc': '',\n 'tags': {'indep_var'},\n }\n expected_discrete = {\n 'value': 3,\n 'type': int,\n 'desc': '',\n 'tags': {'indep_var'},\n }\n\n class IDVComp(om.IndepVarComp):\n def setup(self):\n meta = self.add_output('y', val=3.0, units='ft')\n for key, val in expected.items():\n assert meta[key] == val, f'Expected {key}: {val} but got {key}: {meta[key]}'\n\n meta = self.add_discrete_output('disc', val=3)\n for key, val in expected_discrete.items():\n assert meta[key] == val, f'Expected {key}: {val} but got {key}: {meta[key]}'\n\n prob = om.Problem()\n prob.model.add_subsystem('idv', IDVComp())\n prob.setup()\n\n def test_simple(self):\n \"\"\"Define one independent variable and set its value.\"\"\"\n\n comp = om.IndepVarComp('indep_var')\n prob = om.Problem(comp).setup()\n\n assert_near_equal(prob.get_val('indep_var'), 1.0)\n\n prob.set_val('indep_var', 2.0)\n assert_near_equal(prob.get_val('indep_var'), 2.0)\n\n def test_simple_default(self):\n \"\"\"Define one independent variable with a default value.\"\"\"\n\n comp = om.IndepVarComp('indep_var', val=2.0)\n prob = om.Problem(comp).setup()\n\n assert_near_equal(prob.get_val('indep_var'), 2.0)\n\n def test_simple_kwargs(self):\n \"\"\"Define one independent variable with a default value and additional options.\"\"\"\n\n comp = om.IndepVarComp('indep_var', val=2.0, units='m', lower=0, upper=10)\n prob = om.Problem(comp).setup()\n\n assert_near_equal(prob.get_val('indep_var'), 2.0)\n\n def test_simple_array(self):\n \"\"\"Define one independent array variable.\"\"\"\n\n array = np.array([\n [1., 2.],\n [3., 4.],\n ])\n\n comp = om.IndepVarComp('indep_var', val=array)\n prob = om.Problem(comp).setup()\n\n assert_near_equal(prob.get_val('indep_var'), array)\n\n def test_add_output(self):\n \"\"\"Define two independent variables using the add_output method.\"\"\"\n\n comp = om.IndepVarComp()\n comp.add_output('indep_var_1', val=1.0)\n comp.add_output('indep_var_2', val=2.0)\n\n prob = om.Problem(comp).setup()\n\n assert_near_equal(prob.get_val('indep_var_1'), 1.0)\n assert_near_equal(prob.get_val('indep_var_2'), 2.0)\n\n def test_promote_glob_no_inputs(self):\n p = om.Problem()\n p.model.add_subsystem('indep',\n om.IndepVarComp('x', 2.0),\n promotes_inputs=['*'],\n promotes_outputs=['x'])\n p.model.add_subsystem('C1', om.ExecComp('y=x'), promotes_inputs=['x'], promotes_outputs=['y'])\n p.setup()\n p.run_model()\n self.assertEqual(p.get_val('x'), p.get_val('y'))\n\n def test_invalid_tags(self):\n with self.assertRaises(TypeError) as cm:\n comp = om.IndepVarComp('indep_var', tags=99)\n\n self.assertEqual(str(cm.exception),\n \"IndepVarComp: Value (99) of option 'tags' has type 'int', \"\n \"but one of types ('str', 'list') was expected.\")\n\n def test_simple_with_tags(self):\n \"\"\"Define one independent variable and set its value. 
Try filtering with tag\"\"\"\n\n comp = om.IndepVarComp('indep_var', tags='tag1')\n prob = om.Problem(comp).setup(check=False)\n prob.run_model()\n\n # Outputs no tags\n outputs = prob.model.list_outputs(values=False, out_stream=None)\n self.assertEqual(sorted(outputs), [\n ('indep_var', {}),\n ])\n\n # Outputs with automatically added indep_var_comp tag\n outputs = prob.model.list_outputs(values=False, out_stream=None, tags=\"indep_var\")\n self.assertEqual(sorted(outputs), [\n ('indep_var', {}),\n ])\n\n # Outputs with tag\n outputs = prob.model.list_outputs(values=False, out_stream=None, tags=\"tag1\")\n self.assertEqual(sorted(outputs), [\n ('indep_var', {}),\n ])\n\n # Outputs with wrong tag\n outputs = prob.model.list_outputs(values=False, out_stream=None, tags=\"tag_wrong\")\n self.assertEqual(sorted(outputs), [])\n\n def test_add_output_with_tags(self):\n \"\"\"Define two independent variables using the add_output method.\n Add tags to them and see if we can filter them with list_outputs\"\"\"\n\n comp = om.IndepVarComp()\n comp.add_output('indep_var_1', val=1.0, tags=\"tag1\")\n comp.add_output('indep_var_2', val=2.0, tags=\"tag2\")\n\n prob = om.Problem(comp).setup(check=False)\n prob.run_model()\n\n # Outputs no tags\n outputs = prob.model.list_outputs(out_stream=None)\n self.assertEqual(sorted(outputs), [\n ('indep_var_1', {'value': [1.]}),\n ('indep_var_2', {'value': [2.]}),\n ])\n\n # Outputs with tags\n outputs = prob.model.list_outputs(out_stream=None, tags=\"tag1\")\n self.assertEqual(sorted(outputs), [\n ('indep_var_1', {'value': [1.]}),\n ])\n\n # Outputs with the indep_var tags\n outputs = prob.model.list_outputs(out_stream=None, tags=\"indep_var\")\n self.assertEqual(sorted(outputs), [\n ('indep_var_1', {'value': [1.]}),\n ('indep_var_2', {'value': [2.]}),\n ])\n\n # Outputs with multiple tags\n outputs = prob.model.list_outputs(out_stream=None, tags=[\"tag1\", \"tag2\"])\n self.assertEqual(sorted(outputs), [\n ('indep_var_1', {'value': [1.]}),\n ('indep_var_2', {'value': [2.]}),\n ])\n\n # Outputs with tag that does not match\n outputs = prob.model.list_outputs(out_stream=None, tags=\"tag3\")\n self.assertEqual(sorted(outputs), [])\n\n def test_error_novars(self):\n try:\n prob = om.Problem(om.IndepVarComp()).setup()\n except Exception as err:\n self.assertEqual(str(err),\n \"<model> <class IndepVarComp>: No outputs (independent variables) have been declared. 
They must either be declared during \"\n \"instantiation or by calling add_output or add_discrete_output afterwards.\")\n else:\n self.fail('Exception expected.')\n\n def test_error_bad_arg(self):\n try:\n comp = om.IndepVarComp(1.0)\n prob = om.Problem(comp).setup()\n except Exception as err:\n self.assertEqual(str(err),\n \"first argument to IndepVarComp init must be either of type \"\n \"`str` or an iterable of tuples of the form (name, value) or \"\n \"(name, value, keyword_dict).\")\n else:\n self.fail('Exception expected.')\n\n def test_add_output_type_bug(self):\n prob = om.Problem()\n model = prob.model\n\n ivc = om.IndepVarComp()\n ivc.add_output('x1', val=[1, 2, 3])\n\n model.add_subsystem('p', ivc)\n\n prob.setup()\n\n prob['p.x1'][0] = 0.5\n prob.run_model()\n\n assert_near_equal(prob.get_val('p.x1')[0], 0.5)\n\n def test_options(self):\n class Parameters(om.IndepVarComp):\n def initialize(self):\n self.options.declare('num_x', default=0)\n self.options.declare('val_y', default=0.)\n\n def setup(self):\n self.add_discrete_output('num_x', val = np.zeros(self.options['num_x']))\n self.add_output('val_y',val = self.options['val_y'])\n\n prob = om.Problem(model=Parameters(num_x=4, val_y=2.5))\n prob.setup()\n prob.run_model()\n\n self.assertEqual(len(prob.get_val('num_x')), 4)\n self.assertEqual(prob.get_val('val_y'), 2.5)\n\n def test_ivc_deprecations(self):\n msg = \"'p1' <class IndepVarComp>: The '{}' argument was used when adding output '{}'. \" + \\\n \"This argument has been deprecated and will be removed in a future version.\"\n\n prob = om.Problem()\n\n indep = prob.model.add_subsystem('p1', om.IndepVarComp())\n\n # ref, ref0\n with assert_warnings([(OMDeprecationWarning, msg.format('ref', 'a')),\n (OMDeprecationWarning, msg.format('ref0', 'a'))]):\n indep.add_output('a', 12., ref=0.0, ref0=1.)\n\n # res_units\n with assert_warning(OMDeprecationWarning, msg.format('res_units', 'b')):\n indep.add_output('b', 12., res_units='m')\n\n # upper\n with assert_warning(OMDeprecationWarning, msg.format('upper', 'c')):\n indep.add_output('c', 12., upper=1.)\n\n # lower\n with assert_warning(OMDeprecationWarning, msg.format('lower', 'd')):\n indep.add_output('d', 12., lower=1.)\n\n # res_ref\n with assert_warning(OMDeprecationWarning, msg.format('res_ref', 'e')):\n indep.add_output('e', 12., res_ref=1.)\n\n # res_ref\n with assert_warning(OMDeprecationWarning, msg.format('ref', 'f')):\n indep.add_output('f', 12., ref=2.)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
mjamroz/gluon-cv | [
"5966f657df51e90df04aa592890241079a835604"
] | [
"docs/tutorials/segmentation/demo_icnet.py"
] | [
"\"\"\"7. Test with ICNet Pre-trained Models for Multi-Human Parsing\n======================================\n\nThis is a quick demo of using GluonCV ICNet model for multi-human parsing on real-world images.\nPlease follow the `installation guide <../index.html>`_ to install MXNet and GluonCV if not yet.\n\"\"\"\nimport mxnet as mx\nfrom mxnet import image\nfrom mxnet.gluon.data.vision import transforms\nimport gluoncv\n# using cpu\nctx = mx.cpu(0)\n\n\n##############################################################################\n# Prepare the image\n# -----------------\n#\n# download the example image\nurl = 'https://github.com/KuangHaofei/GluonCV_Test/blob/master/' + \\\n 'mhp_v1/demo%20images/1528.jpg?raw=true'\nfilename = 'mhp_v1_example.jpg'\ngluoncv.utils.download(url, filename, True)\n\n\n##############################################################################\n# load the image\nimg = image.imread(filename)\n\nfrom matplotlib import pyplot as plt\nplt.imshow(img.asnumpy())\nplt.show()\n\n##############################################################################\n# normalize the image using dataset mean\nfrom gluoncv.data.transforms.presets.segmentation import test_transform\nimg = test_transform(img, ctx)\n\n##############################################################################\n# Load the pre-trained model and make prediction\n# ----------------------------------------------\n#\n# get pre-trained model\nmodel = gluoncv.model_zoo.get_model('icnet_resnet50_mhpv1', pretrained=True)\n\n##############################################################################\n# make prediction using single scale\noutput = model.predict(img)\npredict = mx.nd.squeeze(mx.nd.argmax(output, 1)).asnumpy()\n\n##############################################################################\n# Add color pallete for visualization\nfrom gluoncv.utils.viz import get_color_pallete\nimport matplotlib.image as mpimg\nmask = get_color_pallete(predict, 'mhpv1')\nmask.save('output.png')\n\n##############################################################################\n# show the predicted mask\nmmask = mpimg.imread('output.png')\nplt.imshow(mmask)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.image.imread",
"matplotlib.pyplot.imshow"
]
] |
JasmineSamadi/ORSuite | [
"e2b2b0a5b497ea6566e794dcef1f176081fca4ce"
] | [
"or_suite/agents/resource_allocation/equal_allocation.py"
] | [
"import numpy as np\nfrom .. import Agent\n\n\n\"\"\" Agent which implements several heuristic algorithms\"\"\"\n\n\nclass equalAllocationAgent(Agent):\n\n def __init__(self, epLen, env_config):\n \"\"\"\n Args:\n epLen: number of steps\n func: function used to decide action\n env_config: parameters used in initialization of environment\n data: all data observed so far\n \"\"\"\n self.env_config = env_config\n\n self.num_types = env_config['weight_matrix'].shape[0]\n self.num_resources = self.env_config['weight_matrix'].shape[1]\n\n self.current_budget = np.copy(self.env_config['init_budget'])\n #print('Starting Budget: ' + str(self.current_budget))\n self.epLen = epLen\n self.data = []\n self.rel_exp_endowments = self.get_expected_endowments()\n # print(\"R\")\n # print(self.rel_exp_endowments)\n\n def get_expected_endowments(self, N=1000):\n \"\"\"\n Monte Carlo Method for estimating Expectation of type distribution using N realizations\n Only need to run this once to get expectations for all locations\n\n Returns: rel_exp_endowments: matrix containing expected proportion of endowments for location t\n \"\"\"\n num_types = self.env_config['weight_matrix'].shape[0]\n exp_size = np.zeros((num_types, self.env_config['num_rounds']))\n # print(num_types)\n # print(self.env_config['num_rounds'])\n for t in range(self.env_config['num_rounds']):\n for _ in range(N):\n obs_size = self.env_config['type_dist'](t)\n exp_size[:, t] += obs_size\n exp_size[:, t] = (1/N)*exp_size[:, t]\n\n return exp_size\n\n def reset(self):\n # resets data matrix to be empty\n self.current_budget = np.copy(self.env_config['init_budget'])\n\n self.data = []\n\n def update_config(self, env, config):\n '''Updates environment configuration dictionary'''\n self.env_config = config\n return\n\n def update_obs(self, obs, action, reward, newObs, timestep, info):\n '''Add observation to records'''\n self.data.append(newObs)\n return\n\n def update_policy(self, k):\n '''Update internal policy based upon records'''\n self.current_budget = np.copy(self.env_config['init_budget'])\n self.greedy = self.greedy\n\n def greedy(self, state, timestep, epsilon=0):\n '''\n Select action according to function\n '''\n\n num_types = self.env_config['weight_matrix'].shape[0]\n sizes = state[self.num_resources:]\n action = np.zeros((num_types, self.num_resources))\n\n for typ in range(num_types):\n action[typ, :] = (self.env_config['init_budget'] / sizes[typ])*(\n self.rel_exp_endowments[typ, timestep] / np.sum(self.rel_exp_endowments))\n\n self.current_budget -= np.sum([action[typ, :] * sizes[typ]\n for typ in range(num_types)])\n #print('Allocation: ' + str(action))\n\n return action\n\n def pick_action(self, state, step):\n action = self.greedy(state, step)\n return action\n"
] | [
[
"numpy.sum",
"numpy.copy",
"numpy.zeros"
]
] |
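A minimal sketch of the Monte Carlo step in get_expected_endowments above: average N draws from the per-round type-size distribution to estimate its mean. The Poisson stand-in for type_dist is an assumption of this sketch, not the environment's actual distribution.

import numpy as np

rng = np.random.default_rng(0)

def type_dist(t):                       # stand-in for env_config['type_dist'](t)
    return rng.poisson(lam=[2.0, 5.0])  # two demand types arriving in round t

N = 1000
exp_size = np.zeros(2)
for _ in range(N):
    exp_size += type_dist(0)
exp_size /= N
print(np.round(exp_size, 1))            # close to [2. 5.]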
yradeva93/estimagic | [
"77d840ba01748314b35be8117c99460a1944062f"
] | [
"estimagic/tests/logging/test_database_utilities.py"
] | [
"import pickle\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport sqlalchemy\nfrom numpy.testing import assert_array_equal\nfrom sqlalchemy import Float\nfrom sqlalchemy import PickleType\n\nfrom estimagic.logging.database_utilities import append_row\nfrom estimagic.logging.database_utilities import load_database\nfrom estimagic.logging.database_utilities import make_optimization_iteration_table\nfrom estimagic.logging.database_utilities import make_optimization_problem_table\nfrom estimagic.logging.database_utilities import make_optimization_status_table\nfrom estimagic.logging.database_utilities import read_last_rows\nfrom estimagic.logging.database_utilities import read_new_rows\n\n\[email protected]\ndef iteration_data():\n data = {\n \"external_params\": np.ones(1),\n \"internal_params\": np.ones(1),\n \"timestamp\": datetime(year=2020, month=4, day=9, hour=12, minute=41, second=1),\n \"distance_origin\": 1.0,\n \"distance_ones\": 0.0,\n \"value\": 5.0,\n }\n return data\n\n\[email protected]\ndef problem_data():\n data = {\n \"direction\": \"maximize\",\n \"criterion\": np.sum,\n \"params\": np.arange(3),\n \"algorithm\": \"bla\",\n \"constraints\": [{\"type\": \"bla\"}],\n \"algo_options\": None,\n \"derivative\": None,\n \"derivative_kwargs\": None,\n \"criterion_and_derivative\": None,\n \"criterion_and_derivative_kwargs\": None,\n \"numdiff_options\": {},\n \"log_options\": {\"fast_logging\": False},\n }\n return data\n\n\ndef test_load_database_from_path(tmp_path):\n \"\"\"Test that database is generated because it does not exist.\"\"\"\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n assert isinstance(database, sqlalchemy.MetaData)\n assert database.bind is not None\n\n\ndef test_load_database_after_pickling(tmp_path):\n \"\"\"Pickling unsets database.bind. 
Test that load_database sets it again.\"\"\"\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n database = pickle.loads(pickle.dumps(database))\n database = load_database(metadata=database, path=path)\n assert database.bind is not None\n\n\ndef test_load_database_with_bound_metadata(tmp_path):\n \"\"\"Test that nothing happens when load_database is called with bound MetaData.\"\"\"\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n new_database = load_database(metadata=database)\n assert new_database is database\n\n\ndef test_optimization_iteration_table_scalar(tmp_path, iteration_data):\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n make_optimization_iteration_table(database, first_eval={\"output\": 0.5})\n append_row(iteration_data, \"optimization_iterations\", database, path, False)\n res = read_last_rows(database, \"optimization_iterations\", 1, \"list_of_dicts\")\n assert isinstance(res, list) and isinstance(res[0], dict)\n res = res[0]\n assert res[\"rowid\"] == 1\n assert res[\"internal_derivative\"] is None\n for key in [\"internal_params\", \"external_params\"]:\n assert_array_equal(res[key], iteration_data[key])\n\n for key in [\"distance_ones\", \"distance_origin\", \"value\", \"timestamp\"]:\n assert res[key] == iteration_data[key]\n\n\ndef test_optimization_iteration_table_vector_valued(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n make_optimization_iteration_table(\n database, first_eval={\"output\": {\"contributions\": np.ones(3), \"value\": 0.5}}\n )\n assert isinstance(\n database.tables[\"optimization_iterations\"].columns[\"contributions\"].type,\n PickleType,\n )\n\n\ndef test_optimization_iteration_table_dict_valued(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n first_eval = {\n \"output\": {\"contributions\": np.ones(3), \"value\": 5, \"bla\": pd.DataFrame()}\n }\n make_optimization_iteration_table(database, first_eval=first_eval)\n for col in [\"contributions\", \"bla\"]:\n assert isinstance(\n database.tables[\"optimization_iterations\"].columns[col].type, PickleType\n )\n assert isinstance(\n database.tables[\"optimization_iterations\"].columns[\"value\"].type, Float\n )\n\n\ndef test_optimization_status_table(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n make_optimization_status_table(database)\n for status in [\"scheduled\", \"running\", \"success\"]:\n append_row({\"status\": status}, \"optimization_status\", database, path, False)\n\n res, _ = read_new_rows(database, \"optimization_status\", 1, \"dict_of_lists\")\n\n expected = {\"rowid\": [2, 3], \"status\": [\"running\", \"success\"]}\n assert res == expected\n\n\ndef test_optimization_problem_table(tmp_path, problem_data):\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n make_optimization_problem_table(database)\n append_row(problem_data, \"optimization_problem\", database, path, False)\n res = read_last_rows(database, \"optimization_problem\", 1, \"list_of_dicts\")[0]\n assert res[\"rowid\"] == 1\n for key, expected in problem_data.items():\n if key == \"criterion\":\n assert res[key](np.ones(3)) == 3\n elif isinstance(expected, np.ndarray):\n assert_array_equal(res[key], expected)\n else:\n assert res[key] == expected\n\n\ndef test_read_new_rows_stride(tmp_path, iteration_data):\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n make_optimization_iteration_table(database, 
first_eval={\"output\": 0.5})\n for i in range(1, 11): # sqlalchemy starts counting at 1\n iteration_data[\"value\"] = i\n append_row(iteration_data, \"optimization_iterations\", database, path, False)\n\n res = read_new_rows(\n database=database,\n table_name=\"optimization_iterations\",\n last_retrieved=1,\n return_type=\"dict_of_lists\",\n stride=2,\n )[0][\"value\"]\n\n expected = [2.0, 4.0, 6.0, 8.0, 10.0]\n assert res == expected\n\n\ndef test_read_last_rows_stride(tmp_path, iteration_data):\n path = tmp_path / \"test.db\"\n database = load_database(path=path)\n make_optimization_iteration_table(database, first_eval={\"output\": 0.5})\n for i in range(1, 11): # sqlalchemy starts counting at 1\n iteration_data[\"value\"] = i\n append_row(iteration_data, \"optimization_iterations\", database, path, False)\n\n res = read_last_rows(\n database=database,\n table_name=\"optimization_iterations\",\n n_rows=3,\n return_type=\"dict_of_lists\",\n stride=2,\n )[\"value\"]\n\n expected = [10.0, 8.0, 6.0]\n assert res == expected\n"
] | [
[
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"numpy.ones",
"numpy.arange"
]
] |
Soooooda/flask-vue-crud | [
"c6e5cc87baec96911b44e37103e334819e2eedc5"
] | [
"server/utils/inference.py"
] | [
"from typing import Optional, Union\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\ndef process_dataloader(model, dataloader, input_key: Optional[str] = None,\n output_key: Optional[Union[str, int]] = None, method: Optional[str] = None,\n device: str = \"cuda\"):\n scores = []\n with torch.no_grad():\n model.eval()\n for img in tqdm(dataloader):\n if input_key is not None:\n img = img[input_key]\n\n img = img.to(device)\n if method is not None:\n output = getattr(model, method)(img)\n else:\n output = model(img)\n if output_key is not None:\n output = output[output_key]\n\n score = output.tolist()\n scores += score\n\n return np.array(scores)\n\n\ndef process_dataset(model, dataset, batch_size=8, num_workers=4, device: str = \"cuda\",\n input_key: Optional[str] = None,\n output_key: Optional[Union[str, int]] = None, method: Optional[str] = None):\n dataloader = DataLoader(dataset, batch_size, num_workers=num_workers, shuffle=False)\n scores = process_dataloader(model=model, dataloader=dataloader, input_key=input_key,\n output_key=output_key, method=method, device=device)\n paths = dataset.image_paths\n p = []\n for path in paths:\n p.append(str(path).split('assets')[1])\n file_inference_results = {\"full_path\": p, \"predicted\": list(scores.flatten())}\n # del dataloader,scores\n # print(type(dataset.image_paths))\n # print(type(scores.flatten()))\n return file_inference_results\n # return pd.DataFrame(file_inference_results)\n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"torch.no_grad"
]
] |
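A toy sketch of the batched no-grad inference pattern in process_dataloader above, with a TensorDataset and a plain linear layer standing in for the real dataset and model.

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

model = torch.nn.Linear(4, 1)                        # stand-in model
loader = DataLoader(TensorDataset(torch.randn(16, 4)), batch_size=8, shuffle=False)

scores = []
with torch.no_grad():
    model.eval()
    for (batch,) in loader:                          # TensorDataset yields 1-tuples
        scores += model(batch).tolist()
print(np.array(scores).shape)                        # (16, 1)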
DongYongfei36/End-to-end-ASR-Pytorch | [
"4c82b8fab4aa45faaa16402e615655a219398346"
] | [
"src/module.py"
] | [
"import torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\n\nclass VGGExtractor(nn.Module):\n ''' VGG extractor for ASR described in https://arxiv.org/pdf/1706.02737.pdf'''\n\n def __init__(self, input_dim):\n super(VGGExtractor, self).__init__()\n self.init_dim = 64\n self.hide_dim = 128\n in_channel, freq_dim, out_dim = self.check_dim(input_dim)\n self.in_channel = in_channel\n self.freq_dim = freq_dim\n self.out_dim = out_dim\n\n self.extractor = nn.Sequential(\n nn.Conv2d(in_channel, self.init_dim, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(self.init_dim, self.init_dim, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(2, stride=2), # Half-time dimension\n nn.Conv2d(self.init_dim, self.hide_dim, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(self.hide_dim, self.hide_dim, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(2, stride=2) # Half-time dimension\n )\n\n def check_dim(self, input_dim):\n # Check input dimension, delta feature should be stack over channel.\n if input_dim % 13 == 0:\n # MFCC feature\n return int(input_dim/13), 13, (13//4)*self.hide_dim\n elif input_dim % 40 == 0:\n # Fbank feature\n return int(input_dim/40), 40, (40//4)*self.hide_dim\n else:\n raise ValueError(\n 'Acoustic feature dimension for VGG should be 13/26/39(MFCC) or 40/80/120(Fbank) but got '+input_dim)\n\n def view_input(self, feature, feat_len):\n # downsample time\n feat_len = feat_len//4\n # crop sequence s.t. t%4==0\n if feature.shape[1] % 4 != 0:\n feature = feature[:, :-(feature.shape[1] % 4), :].contiguous()\n bs, ts, ds = feature.shape\n # stack feature according to result of check_dim\n feature = feature.view(bs, ts, self.in_channel, self.freq_dim)\n feature = feature.transpose(1, 2)\n\n return feature, feat_len\n\n def forward(self, feature, feat_len):\n # Feature shape BSxTxD -> BS x CH(num of delta) x T x D(acoustic feature dim)\n feature, feat_len = self.view_input(feature, feat_len)\n # Foward\n feature = self.extractor(feature)\n # BSx128xT/4xD/4 -> BSxT/4x128xD/4\n feature = feature.transpose(1, 2)\n # BS x T/4 x 128 x D/4 -> BS x T/4 x 32D\n feature = feature.contiguous().view(feature.shape[0], feature.shape[1], self.out_dim)\n return feature, feat_len\n\nclass CNNExtractor(nn.Module):\n ''' A simple 2-layer CNN extractor for acoustic feature down-sampling'''\n\n def __init__(self, input_dim, out_dim):\n super(CNNExtractor, self).__init__()\n\n self.out_dim = out_dim\n self.extractor = nn.Sequential(\n nn.Conv1d(input_dim, out_dim, 4, stride=2, padding=1),\n nn.Conv1d(out_dim, out_dim, 4, stride=2, padding=1),\n )\n\n def forward(self, feature, feat_len):\n # Fixed down-sample ratio\n feat_len = feat_len//4\n # Channel first\n feature = feature.transpose(1,2) \n # Foward\n feature = self.extractor(feature)\n # Channel last\n feature = feature.transpose(1, 2)\n\n return feature, feat_len\n\n\nclass RNNLayer(nn.Module):\n ''' RNN wrapper, includes time-downsampling'''\n\n def __init__(self, input_dim, module, dim, bidirection, dropout, layer_norm, sample_rate, sample_style, proj):\n super(RNNLayer, self).__init__()\n # Setup\n rnn_out_dim = 2*dim if bidirection else dim\n self.out_dim = sample_rate * \\\n rnn_out_dim if sample_rate > 1 and sample_style == 'concat' else rnn_out_dim\n self.dropout = dropout\n self.layer_norm = layer_norm\n self.sample_rate = sample_rate\n self.sample_style = sample_style\n self.proj = proj\n\n if self.sample_style not in ['drop', 'concat']:\n raise 
ValueError('Unsupported Sample Style: '+self.sample_style)\n\n # Recurrent layer\n self.layer = getattr(nn, module.upper())(\n input_dim, dim, bidirectional=bidirection, num_layers=1, batch_first=True)\n\n # Regularizations\n if self.layer_norm:\n self.ln = nn.LayerNorm(rnn_out_dim)\n if self.dropout > 0:\n self.dp = nn.Dropout(p=dropout)\n\n # Additional projection layer\n if self.proj:\n self.pj = nn.Linear(rnn_out_dim, rnn_out_dim)\n\n def forward(self, input_x, x_len):\n # Forward RNN\n if not self.training:\n self.layer.flatten_parameters()\n # ToDo: check time efficiency of pack/pad\n #input_x = pack_padded_sequence(input_x, x_len, batch_first=True, enforce_sorted=False)\n output, _ = self.layer(input_x)\n #output,x_len = pad_packed_sequence(output,batch_first=True)\n\n # Normalizations\n if self.layer_norm: \n output = self.ln(output)\n if self.dropout > 0: # 是否执行Dropout\n output = self.dp(output)\n\n # Perform Downsampling (Listen:Down Sampling)\n if self.sample_rate > 1:\n batch_size, timestep, feature_dim = output.shape\n x_len = x_len//self.sample_rate\n\n if self.sample_style == 'drop':\n # Drop the unselected timesteps\n output = output[:, ::self.sample_rate, :].contiguous()\n else:\n # Drop the redundant frames and concat the rest according to sample rate\n if timestep % self.sample_rate != 0:\n output = output[:, :-(timestep % self.sample_rate), :]\n output = output.contiguous().view(batch_size, int(\n timestep/self.sample_rate), feature_dim*self.sample_rate)\n\n if self.proj:\n output = torch.tanh(self.pj(output))\n\n return output, x_len\n\n\nclass BaseAttention(nn.Module):\n ''' Base module for attentions '''\n\n def __init__(self, temperature, num_head):\n super().__init__()\n self.temperature = temperature\n self.num_head = num_head\n self.softmax = nn.Softmax(dim=-1)\n self.reset_mem()\n\n def reset_mem(self):\n # Reset mask\n self.mask = None\n self.k_len = None\n\n def set_mem(self, prev_att):\n pass\n\n def compute_mask(self, k, k_len):\n # Make the mask for padded states\n self.k_len = k_len\n bs, ts, _ = k.shape\n self.mask = np.zeros((bs, self.num_head, ts))\n for idx, sl in enumerate(k_len):\n self.mask[idx, :, sl:] = 1 # ToDo: more elegant way?\n self.mask = torch.from_numpy(self.mask).to(\n k_len.device, dtype=torch.bool).view(-1, ts) # BNxT\n\n def _attend(self, energy, value):\n attn = energy / self.temperature\n attn = attn.masked_fill(self.mask, -np.inf)\n attn = self.softmax(attn) # BNxT\n output = torch.bmm(attn.unsqueeze(1), value).squeeze(\n 1) # BNxT x BNxTxD-> BNxD\n return output, attn\n\n\nclass ScaleDotAttention(BaseAttention):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, temperature, num_head):\n super().__init__(temperature, num_head)\n\n def forward(self, q, k, v):\n ts = k.shape[1]\n energy = torch.bmm(q.unsqueeze(1), k.transpose(\n 1, 2)).squeeze(1) # BNxD * BNxDxT = BNxT\n output, attn = self._attend(energy, v)\n\n attn = attn.view(-1, self.num_head, ts) # BNxT -> BxNxT\n\n return output, attn\n\n\nclass LocationAwareAttention(BaseAttention):\n ''' Location-Awared Attention '''\n\n def __init__(self, kernel_size, kernel_num, dim, num_head, temperature):\n super().__init__(temperature, num_head)\n self.prev_att = None\n self.loc_conv = nn.Conv1d(\n num_head, kernel_num, kernel_size=2*kernel_size+1, padding=kernel_size, bias=False)\n self.loc_proj = nn.Linear(kernel_num, dim, bias=False)\n self.gen_energy = nn.Linear(dim, 1)\n self.dim = dim\n\n def reset_mem(self):\n super().reset_mem()\n self.prev_att = None\n\n def 
set_mem(self, prev_att):\n self.prev_att = prev_att\n\n def forward(self, q, k, v):\n bs_nh, ts, _ = k.shape\n bs = bs_nh//self.num_head\n\n # Uniformly init prev_att\n if self.prev_att is None:\n self.prev_att = torch.zeros((bs, self.num_head, ts)).to(k.device)\n for idx, sl in enumerate(self.k_len):\n self.prev_att[idx, :, :sl] = 1.0/sl\n\n # Calculate location context\n loc_context = torch.tanh(self.loc_proj(self.loc_conv(\n self.prev_att).transpose(1, 2))) # BxNxT->BxTxD\n loc_context = loc_context.unsqueeze(1).repeat(\n 1, self.num_head, 1, 1).view(-1, ts, self.dim) # BxNxTxD -> BNxTxD\n q = q.unsqueeze(1) # BNx1xD\n\n # Compute energy and context\n energy = self.gen_energy(torch.tanh(\n k+q+loc_context)).squeeze(2) # BNxTxD -> BNxT\n output, attn = self._attend(energy, v)\n attn = attn.view(bs, self.num_head, ts) # BNxT -> BxNxT\n self.prev_att = attn\n\n return output, attn\n"
] | [
[
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"numpy.zeros",
"torch.nn.Softmax",
"torch.nn.Conv1d",
"torch.nn.MaxPool2d",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.tanh"
]
] |
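A shape-only sketch of the 4x time/frequency downsampling performed by the VGG front-end in the entry above, simplified here to one convolution per block: a (B, T, D) filterbank input comes out as (B, T//4, 128 * D//4), matching the module's out_dim of (40//4)*128 = 1280 for 40-dim fbank features.

import torch

B, T, D = 2, 100, 40                                  # hypothetical batch of fbank frames
feat = torch.randn(B, T, D)
feat = feat[:, : T - T % 4, :]                        # crop so the time axis is divisible by 4
feat = feat.view(B, -1, 1, D).transpose(1, 2)         # (B, 1, T, D): channel first
blocks = torch.nn.Sequential(
    torch.nn.Conv2d(1, 64, 3, stride=1, padding=1), torch.nn.ReLU(), torch.nn.MaxPool2d(2, 2),
    torch.nn.Conv2d(64, 128, 3, stride=1, padding=1), torch.nn.ReLU(), torch.nn.MaxPool2d(2, 2),
)
out = blocks(feat).transpose(1, 2)                    # (B, T//4, 128, D//4)
out = out.contiguous().view(B, out.shape[1], -1)
print(out.shape)                                      # torch.Size([2, 25, 1280])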
jdro10/ESTG-ML-Spam-Detection | [
"76bbaacf276f9218574325a4c2f82b5be9cd1da9"
] | [
"rest_api/controllers/controller.py"
] | [
"import joblib\nimport csv\nimport numpy as np\nimport wordninja as wn\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nimport unidecode\n\nstopwords = set(stopwords.words('english'))\nwordnet_lemmatizer = WordNetLemmatizer()\n\n\ndef remove_duplicates(l):\n return list(set(l))\n\n\ndef index_test():\n return \"Index test\"\n\n\ndef spam_or_not_spam(string):\n st = string.split(\" \")\n list_string = list(st)\n tmp_list = []\n new_tmp_list = []\n\n s = str(list_string)\n s = re.sub(r'[^\\w\\s]', '', s)\n\n word_tokens = word_tokenize(s)\n\n filtered_sentence = [w for w in word_tokens if not w in stopwords]\n\n for w in word_tokens:\n if w not in stopwords:\n unaccented_w = unidecode.unidecode(str(w))\n filtered_sentence.append(unaccented_w)\n\n for i in range(len(filtered_sentence)):\n tmp_list.append(filtered_sentence[i].lower())\n word = wordnet_lemmatizer.lemmatize(tmp_list[i], pos=\"v\")\n wordd = wn.split(word)\n\n for j in range(len(wordd)):\n if wordd[j].isnumeric():\n pass\n else:\n new_tmp_list.append(wordd[j])\n\n remove_duplicates_list = remove_duplicates(new_tmp_list)\n\n first_row = []\n write_list = []\n pos_list = []\n words_list = []\n\n with open(\"../../datasets/balanced_dataset_under.csv\") as csv_file:\n reader = csv.reader(csv_file)\n for row in reader:\n first_row = row\n break\n\n for i in range(len(first_row)):\n for j in range(len(new_tmp_list)):\n if first_row[i] == new_tmp_list[j]:\n pos_list.append(i)\n words_list.append(first_row[i])\n\n write_list = [0] * (len(first_row) - 1)\n\n for i in range(len(pos_list)):\n for j in range(len(write_list)):\n write_list[pos_list[i]] = 1\n\n repeated = {i: words_list.count(i) for i in words_list}\n\n for i in range(len(pos_list)):\n for j in range(len(write_list)):\n write_list[pos_list[i]] = repeated[words_list[i]]\n\n loaded_model = joblib.load(\"../../algorithm_preparation/saved_models/RandomForest_Model_Under.sav\")\n\n write_list = np.array([write_list])\n result = loaded_model.predict(write_list)\n\n res = str(result)\n\n if res == \"[False]\":\n var_to_return = \"ham\"\n else:\n var_to_return = \"spam\"\n\n ret = {\n \"spam\": var_to_return\n }\n\n return ret\n"
] | [
[
"numpy.array"
]
] |
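A tiny sketch of the hand-rolled bag-of-words step in spam_or_not_spam above: each surviving token is mapped to its column in the dataset vocabulary and its count is written into the feature vector that the saved RandomForest model scores. The vocabulary and tokens below are invented for illustration.

vocab = ["free", "win", "call", "now"]                # hypothetical feature columns from the CSV header
tokens = ["win", "free", "free", "now"]               # message after stopword removal and lemmatization

features = [0] * len(vocab)
for tok in set(tokens):
    if tok in vocab:
        features[vocab.index(tok)] = tokens.count(tok)
print(features)                                       # [2, 1, 0, 1]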
ZamyDZ/Master-Thesis-Development-of-a-Deep-RL-Model-for-simulated-Driving-2D-Vector-Space | [
"8aaecb8bd565456ce365c9865ed809deed4c6231"
] | [
"docker/src/basis_ddqn.py"
] | [
"import sys\nimport gym\nimport random\nimport numpy as np\nimport cv2\nimport skimage as skimage\nimport skimage as skimage\nfrom skimage import transform, color, exposure\nfrom skimage.transform import rotate\nfrom skimage.viewer import ImageViewer\nfrom collections import deque\nfrom keras.layers import Dense\n#from keras.optimizers import Adam TODO DID CHANGE HERE\nfrom tensorflow.keras.optimizers import Adam\nfrom keras.models import Sequential\nfrom keras.initializers import normal, identity\nfrom keras.models import model_from_json\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nimport tensorflow as tf\nfrom keras import backend as K\n\n# TODO: DID CHANGE HERE, anderes gym, hier das von karmer\n#import donkey_gym\nimport gym_donkeycar\nimport my_cv\n\n# TODO DID CHANGE HERE\nimport os\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nEPISODES = 10000\nimg_rows , img_cols = 80, 80\n# Convert image into Black and white\nimg_channels = 4 # We stack 4 frames\n\nclass DQNAgent:\n\n def __init__(self, state_size, action_size):\n self.t = 0\n self.max_Q = 0\n self.train = True\n self.lane_detection = False # Set to True to train on images with segmented lane lines\n\n # Get size of state and action\n self.state_size = state_size\n self.action_size = action_size\n\n # These are hyper parameters for the DQN\n self.discount_factor = 0.99\n self.learning_rate = 1e-4\n if (self.train):\n self.epsilon = 1.0\n self.initial_epsilon = 1.0\n else:\n self.epsilon = 1e-6\n self.initial_epsilon = 1e-6\n self.epsilon_min = 0.02\n self.batch_size = 64\n self.train_start = 100\n self.explore = 10000\n\n # Create replay memory using deque\n self.memory = deque(maxlen=10000)\n\n # Create main model and target model\n self.model = self.build_model()\n self.target_model = self.build_model()\n\n # Copy the model to target model\n # --> initialize the target model so that the parameters of model & target model to be same\n self.update_target_model()\n\n def build_model(self):\n print(\"Now we build the model\")\n model = Sequential()\n model.add(Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same',input_shape=(img_rows,img_cols,img_channels))) #80*80*4\n model.add(Activation('relu'))\n model.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same'))\n model.add(Activation('relu'))\n model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same'))\n model.add(Activation('relu'))\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n\n # 15 categorical bins for Steering angles\n model.add(Dense(15, activation=\"linear\")) \n\n adam = Adam(lr=self.learning_rate)\n model.compile(loss='mse',optimizer=adam)\n print(\"We finished building the model\")\n\n return model\n\n def process_image(self, obs):\n\n if not agent.lane_detection:\n obs = skimage.color.rgb2gray(obs)\n obs = skimage.transform.resize(obs, (img_rows, img_cols))\n return obs\n else:\n obs = cv2.cvtColor(obs, cv2.COLOR_BGR2GRAY)\n obs = cv2.resize(obs, (img_rows, img_cols))\n edges = my_cv.detect_edges(obs, low_threshold=50, high_threshold=150)\n\n rho = 0.8\n theta = np.pi/180\n threshold = 25\n min_line_len = 5\n max_line_gap = 10\n\n hough_lines = my_cv.hough_lines(edges, rho, theta, threshold, min_line_len, max_line_gap)\n\n left_lines, right_lines = my_cv.separate_lines(hough_lines)\n\n filtered_right, filtered_left = [],[]\n if len(left_lines):\n filtered_left = 
my_cv.reject_outliers(left_lines, cutoff=(-30.0, -0.1), lane='left')\n if len(right_lines):\n filtered_right = my_cv.reject_outliers(right_lines, cutoff=(0.1, 30.0), lane='right')\n\n lines = []\n if len(filtered_left) and len(filtered_right):\n lines = np.expand_dims(np.vstack((np.array(filtered_left),np.array(filtered_right))),axis=0).tolist()\n elif len(filtered_left):\n lines = np.expand_dims(np.expand_dims(np.array(filtered_left),axis=0),axis=0).tolist()\n elif len(filtered_right):\n lines = np.expand_dims(np.expand_dims(np.array(filtered_right),axis=0),axis=0).tolist()\n\n ret_img = np.zeros((80,80))\n\n if len(lines):\n try:\n my_cv.draw_lines(ret_img, lines, thickness=1)\n except:\n pass\n\n return ret_img\n \n\n def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())\n\n # Get action from model using epsilon-greedy policy\n def get_action(self, s_t):\n if np.random.rand() <= self.epsilon:\n #print(\"Return Random Value\")\n #return random.randrange(self.action_size)\n return np.random.uniform(-1,1)\n else:\n #print(\"Return Max Q Prediction\")\n q_value = self.model.predict(s_t)\n # Convert q array to steering value\n return linear_unbin(q_value[0])\n\n def replay_memory(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n if self.epsilon > self.epsilon_min:\n #self.epsilon *= self.epsilon_decay\n self.epsilon -= (self.initial_epsilon - self.epsilon_min) / self.explore\n\n\n def train_replay(self):\n if len(self.memory) < self.train_start:\n return\n \n batch_size = min(self.batch_size, len(self.memory))\n minibatch = random.sample(self.memory, batch_size)\n\n state_t, action_t, reward_t, state_t1, terminal = zip(*minibatch)\n state_t = np.concatenate(state_t)\n state_t1 = np.concatenate(state_t1)\n targets = self.model.predict(state_t)\n self.max_Q = np.max(targets[0])\n target_val = self.model.predict(state_t1)\n target_val_ = self.target_model.predict(state_t1)\n for i in range(batch_size):\n if terminal[i]:\n targets[i][action_t[i]] = reward_t[i]\n else:\n a = np.argmax(target_val[i])\n targets[i][action_t[i]] = reward_t[i] + self.discount_factor * (target_val_[i][a])\n\n self.model.train_on_batch(state_t, targets)\n\n def load_model(self, name):\n self.model.load_weights(name)\n\n # Save the model which is under training\n def save_model(self, name):\n self.model.save_weights(name)\n\n## Utils Functions ##\n\ndef linear_bin(a):\n \"\"\"\n Convert a value to a categorical array.\n\n Parameters\n ----------\n a : int or float\n A value between -1 and 1\n\n Returns\n -------\n list of int\n A list of length 15 with one item set to 1, which represents the linear value, and all other items set to 0.\n \"\"\"\n a = a + 1\n b = round(a / (2 / 14))\n arr = np.zeros(15)\n arr[int(b)] = 1\n return arr\n\n\ndef linear_unbin(arr):\n \"\"\"\n Convert a categorical array to value.\n\n See Also\n --------\n linear_bin\n \"\"\"\n if not len(arr) == 15:\n raise ValueError('Illegal array length, must be 15')\n b = np.argmax(arr)\n a = b * (2 / 14) - 1\n return a\n\n\nif __name__ == \"__main__\":\n\n config = tf.ConfigProto() #tf.compat.v1.ConfigProto TODO\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n K.set_session(sess)\n\n # TODO: DID CHANGE HERE, die beiden ersten sind von basis\n #env = gym.make(\"donkey-generated-roads-v0\")\n #env = gym.make(\"donkey-avc-sparkfun-v0\")\n\n # TODO: DID CHANGE HERE THIS IS MY CONFIG\n os.environ['DONKEY_SIM_PATH'] = 
\"/home/zamy/masterthesis/DonkeySimLinux/donkey_sim.x86_64\"\n os.environ['DONKEY_SIM_PORT'] = str(9091)\n os.environ['DONKEY_SIM_HEADLESS'] = str(0) # \"1\" is headless\n env = gym.make(\"donkey-generated-roads-v0\")\n\n # Get size of state and action from environment\n state_size = (img_rows, img_cols, img_channels)\n action_size = env.action_space.n # Steering and Throttle\n\n agent = DQNAgent(state_size, action_size)\n\n throttle = 0.3 # Set throttle as constant value\n\n episodes = []\n\n if not agent.train:\n print(\"Now we load the saved model\")\n agent.load_model(\"./save_model/save_model.h5\")\n\n for e in range(EPISODES):\n\n print(\"Episode: \", e)\n\n done = False\n obs = env.reset()\n\n episode_len = 0\n \n x_t = agent.process_image(obs)\n\n s_t = np.stack((x_t,x_t,x_t,x_t),axis=2)\n # In Keras, need to reshape\n s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2]) #1*80*80*4 \n \n while not done:\n\n # Get action for the current state and go one step in environment\n steering = agent.get_action(s_t)\n action = [steering, throttle]\n next_obs, reward, done, info = env.step(action)\n\n x_t1 = agent.process_image(next_obs)\n\n x_t1 = x_t1.reshape(1, x_t1.shape[0], x_t1.shape[1], 1) #1x80x80x1\n s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3) #1x80x80x4\n\n # Save the sample <s, a, r, s'> to the replay memory\n agent.replay_memory(s_t, np.argmax(linear_bin(steering)), reward, s_t1, done)\n\n if agent.train:\n agent.train_replay()\n\n s_t = s_t1\n agent.t = agent.t + 1\n episode_len = episode_len + 1\n if agent.t % 30 == 0:\n print(\"EPISODE\", e, \"TIMESTEP\", agent.t,\"/ ACTION\", action, \"/ REWARD\", reward, \"/ EPISODE LENGTH\", episode_len, \"/ Q_MAX \" , agent.max_Q)\n\n if done:\n\n # Every episode update the target model to be same with model\n agent.update_target_model()\n\n episodes.append(e)\n \n\n # Save model for each episode\n if agent.train:\n agent.save_model(\"./save_model/save_model.h5\")\n\n print(\"episode:\", e, \" memory length:\", len(agent.memory),\n \" epsilon:\", agent.epsilon, \" episode length:\", episode_len)\n\n"
] | [
[
"numpy.concatenate",
"numpy.max",
"numpy.array",
"numpy.random.rand",
"numpy.zeros",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.Session",
"numpy.stack",
"numpy.argmax",
"numpy.random.uniform",
"numpy.append",
"tensorflow.keras.optimizers.Adam"
]
] |
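The script in the row above discretises steering into 15 bins via its linear_bin / linear_unbin helpers. A minimal standalone sketch of that round trip (restating the utilities from the file, not a new API):

import numpy as np

def linear_bin(a):
    # map a steering value in [-1, 1] to a 15-element one-hot array (as in the script above)
    b = round((a + 1) / (2 / 14))
    arr = np.zeros(15)
    arr[int(b)] = 1
    return arr

def linear_unbin(arr):
    # inverse mapping: one-hot bin back to a steering value in [-1, 1]
    return np.argmax(arr) * (2 / 14) - 1

print(linear_unbin(linear_bin(0.5)))  # ~0.43 after the bin-and-unbin round trip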
lzkelley/zcode | [
"55a63693fe3ad744957d7ce2d74fb4c8e09ea8ba"
] | [
"zcode/astro/gws.py"
] | [
"\"\"\"Gravitational Waves related functions\n\nReferences:\n- EN07 : [Enoki & Nagashima 2007](https://ui.adsabs.harvard.edu/abs/2007PThPh.117..241E/abstract)\n- Sesana+2004 : [Sesana+2004](http://adsabs.harvard.edu/abs/2004ApJ...611..623S)\n\"\"\"\n\n\nimport numpy as np\n\nfrom zcode.constants import NWTG, SPLC\n\n__all__ = [\n 'chirp_mass',\n 'gw_hardening_rate_dadt', 'gw_dedt',\n # 'gw_strain',\n 'gw_strain_source',\n 'gw_char_strain', 'gw_freq_dist_func', '_gw_freq_dist_func_old', 'gw_lum_circ',\n # 'gw_strain_source_circ',\n 'sep_to_merge_in_time', 'time_to_merge_at_sep',\n]\n\n_SCHW_CONST = 2*NWTG/np.square(SPLC)\n\n# e.g. Sesana+2004 Eq.36\n_GW_SRC_CONST = 8 * np.power(NWTG, 5/3) * np.power(np.pi, 2/3) / np.sqrt(10) / np.power(SPLC, 4)\n_GW_DADT_SEP_CONST = - 64 * np.power(NWTG, 3) / 5 / np.power(SPLC, 5)\n_GW_DEDT_ECC_CONST = - 304 * np.power(NWTG, 3) / 15 / np.power(SPLC, 5)\n\n# EN07, Eq.2.2\n_LUM_CONST = (32.0 / 5.0) * np.power(NWTG, 7.0/3.0) * np.power(SPLC, -5.0)\n\nfrom . import astro_core\n\n\ndef chirp_mass(m1, m2=None):\n if m2 is None:\n m1, m2 = np.moveaxis(m1, -1, 0)\n return np.power(m1*m2, 3/5)/np.power(m1+m2, 1/5)\n\n\ndef gw_char_strain(hs, dur_obs, freq_orb_obs, freq_orb_rst, dfdt):\n \"\"\"\n\n See, e.g., Sesana+2004, Eq.35\n\n Arguments\n ---------\n hs : array_like scalar\n Strain amplitude (e.g. `gw_strain()`, sky- and polarization- averaged)\n dur_obs : array_like scalar\n Duration of observations, in the observer frame\n\n\n \"\"\"\n\n ncycles = freq_orb_rst**2 / dfdt\n ncycles = np.clip(ncycles, None, dur_obs * freq_orb_obs)\n hc = hs * np.sqrt(ncycles)\n return hc\n\n\ndef gw_dedt(m1, m2, sma, ecc):\n \"\"\"GW Eccentricity Evolution rate (de/dt).\n\n See Peters 1964, Eq. 5.7\n http://adsabs.harvard.edu/abs/1964PhRv..136.1224P\n \"\"\"\n cc = _GW_DEDT_ECC_CONST\n e2 = ecc**2\n dedt = cc * m1 * m2 * (m1 + m2) / np.power(sma, 4)\n dedt *= (1.0 + e2*121.0/304.0) * ecc / np.power(1 - e2, 5.0/2.0)\n return dedt\n\n\ndef gw_freq_dist_func(nn, ee=0.0):\n \"\"\"Frequency Distribution Function.\n\n See [Enoki & Nagashima 2007](astro-ph/0609377) Eq. 2.4.\n This function gives g(n,e)\n\n FIX: use recursion relation when possible,\n J_{n-1}(x) + J_{n+1}(x) = (2n/x) J_n(x)\n \"\"\"\n import scipy as sp\n import scipy.special # noqa\n\n # Calculate with non-zero eccentrictiy\n bessel = sp.special.jn\n ne = nn*ee\n n2 = np.square(nn)\n jn_m2 = bessel(nn-2, ne)\n jn_m1 = bessel(nn-1, ne)\n\n # jn = bessel(nn, ne)\n # jn_p1 = bessel(nn+1, ne)\n # jn_p2 = bessel(nn+2, ne)\n\n # Use recursion relation:\n jn = (2*(nn-1) / ne) * jn_m1 - jn_m2\n jn_p1 = (2*nn / ne) * jn - jn_m1\n jn_p2 = (2*(nn+1) / ne) * jn_p1 - jn\n\n aa = np.square(jn_m2 - 2.0*ee*jn_m1 + (2/nn)*jn + 2*ee*jn_p1 - jn_p2)\n bb = (1 - ee*ee)*np.square(jn_m2 - 2*ee*jn + jn_p2)\n cc = (4.0/(3.0*n2)) * np.square(jn)\n gg = (n2*n2/32) * (aa + bb + cc)\n return gg\n\n\ndef _gw_freq_dist_func_old(nn, ee=0.0):\n \"\"\"Frequency Distribution Function.\n\n See [Enoki & Nagashima 2007](astro-ph/0609377) Eq. 
2.4.\n This function gives g(n,e)\n\n FIX: use recursion relation when possible,\n J_{n-1}(x) + J_{n+1}(x) = (2n/x) J_n(x)\n \"\"\"\n import scipy as sp\n import scipy.special # noqa\n\n # Calculate with non-zero eccentrictiy\n bessel = sp.special.jn\n ne = nn*ee\n n2 = np.square(nn)\n jn_m2 = bessel(nn-2, ne)\n jn_m1 = bessel(nn-1, ne)\n\n jn = bessel(nn, ne)\n jn_p1 = bessel(nn+1, ne)\n jn_p2 = bessel(nn+2, ne)\n\n aa = np.square(jn_m2 - 2.0*ee*jn_m1 + (2/nn)*jn + 2*ee*jn_p1 - jn_p2)\n bb = (1 - ee*ee)*np.square(jn_m2 - 2*ee*jn + jn_p2)\n cc = (4.0/(3.0*n2)) * np.square(jn)\n gg = (n2*n2/32) * (aa + bb + cc)\n return gg\n\n\ndef gw_hardening_rate_dadt(m1, m2, sma, ecc=None):\n \"\"\"GW Hardening rate (da/dt).\n\n See Peters 1964, Eq. 5.6\n http://adsabs.harvard.edu/abs/1964PhRv..136.1224P\n \"\"\"\n cc = _GW_DADT_SEP_CONST\n dadt = cc * m1 * m2 * (m1 + m2) / np.power(sma, 3)\n if ecc is not None:\n fe = _gw_ecc_func(ecc)\n dadt *= fe\n return dadt\n\n\ndef gw_lum_circ(mchirp, freq_orb_rest):\n \"\"\"\n\n EN07: Eq. 2.2\n \"\"\"\n lgw_circ = _LUM_CONST * np.power(2.0*np.pi*freq_orb_rest*mchirp, 10.0/3.0)\n return lgw_circ\n\n\ndef gw_strain_source(mchirp, dlum, freq_orb_rest):\n \"\"\"GW Strain from a single source in a circular orbit.\n\n e.g. Sesana+2004 Eq.36\n e.g. EN07 Eq.17\n\n NOTE: THIS IS ORBITAL-FREQUENCY, NOT GW-OBSERVED [2020-06-17]\n\n \"\"\"\n #\n hs = _GW_SRC_CONST * mchirp * np.power(2*mchirp*freq_orb_rest, 2/3) / dlum\n return hs\n\n\n'''\ndef gw_strain(mchirp, dlum, freq_gw_rest):\n \"\"\"GW Strain from a single source in a circular orbit.\n\n e.g. Sesana+2004 Eq.36\n http://adsabs.harvard.edu/abs/2004ApJ...611..623S\n NOTE: THIS IS GW-FREQUENCY, NOT ORBITAL [2020-05-29]\n\n \"\"\"\n cc = _GW_SRC_CONST\n hs = cc * mchirp * np.power(mchirp*freq_gw_rest, 2/3) / dlum\n return hs\n'''\n\n'''\ndef gw_strain_source_circ(mchirp, dist_lum, freq_orb_rest):\n \"\"\"GW Strain from a single source in a circular orbit.\n \"\"\"\n cc = _GW_SRC_CONST\n hs = cc * mchirp * np.power(mchirp*freq_orb_rest, 2/3) / dist_lum\n return hs\n'''\n\n\ndef sep_to_merge_in_time(m1, m2, time):\n \"\"\"The initial separation required to merge within the given time.\n\n See: [Peters 1964].\n \"\"\"\n GW_CONST = 64*np.power(NWTG, 3.0)/(5.0*np.power(SPLC, 5.0))\n a1 = astro_core.rad_isco(m1, m2)\n return np.power(GW_CONST*m1*m2*(m1+m2)*time - np.power(a1, 4.0), 1./4.)\n\n\ndef time_to_merge_at_sep(m1, m2, sep):\n \"\"\"The time required to merge starting from the given initial separation.\n\n See: [Peters 1964].\n \"\"\"\n GW_CONST = 64*np.power(NWTG, 3.0)/(5.0*np.power(SPLC, 5.0))\n a1 = astro_core.rad_isco(m1, m2)\n delta_sep = np.power(sep, 4.0) - np.power(a1, 4.0)\n return delta_sep/(GW_CONST*m1*m2*(m1+m2))\n\n\ndef _gw_ecc_func(ecc):\n \"\"\"GW Hardening rate eccentricitiy dependence F(e).\n\n See Peters 1964, Eq. 5.6\n EN07: Eq. 2.3\n \"\"\"\n e2 = ecc*ecc\n num = 1 + (73/24)*e2 + (37/96)*e2*e2\n den = np.power(1 - e2, 7/2)\n fe = num / den\n return fe\n"
] | [
[
"numpy.square",
"numpy.power",
"numpy.sqrt",
"numpy.clip",
"numpy.moveaxis"
]
] |
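As a quick illustration of the chirp_mass helper defined in gws.py above (same formula, toy numbers):

import numpy as np

def chirp_mass(m1, m2):
    # M_chirp = (m1*m2)^(3/5) / (m1+m2)^(1/5), as in gws.py above
    return np.power(m1 * m2, 3 / 5) / np.power(m1 + m2, 1 / 5)

# for an equal-mass binary the chirp mass is m * 2**(-1/5)
print(chirp_mass(1.0, 1.0))  # ~0.8706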
ahmedezzat85/SNPX_ML | [
"7316b0d46d39d2335b3095527a3ac81be208928d"
] | [
"python/snpx/snpx_tf/arch/mlp.py"
] | [
"from __future__ import absolute_import\n\nimport tensorflow as tf\nfrom . tf_net import TFNet\n\nclass MLP(TFNet):\n \"\"\"\n \"\"\"\n def __init__(self, data, data_format, num_classes, is_train=True):\n dtype = data.dtype.base_dtype\n super(MLP, self).__init__(dtype, data_format, train=is_train)\n self.net_out = tf.identity(data, name='data')\n self.num_classes = num_classes\n\n def __call__(self, hidden=2):\n net_out = self.flatten(self.net_out)\n for k in range(hidden):\n net_out = self.fully_connected(net_out, 128, add_bn=True, act_fn='relu', name='fc_'+str(k))\n \n net_out = self.Softmax(net_out, self.num_classes)\n return net_out\n\ndef snpx_net_create(num_classes, input_data, data_format=\"NHWC\", is_training=True):\n \"\"\" \"\"\"\n net = MLP(input_data, data_format, num_classes, is_training)\n net_out = net()\n return net_out\n"
] | [
[
"tensorflow.identity"
]
] |
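The MLP class above builds on a project-specific TFNet base that is not shown here; a rough plain-Keras sketch of the same stack (flatten, two 128-unit batch-normalised ReLU layers, softmax head), with num_classes as a hypothetical placeholder:

import tensorflow as tf

num_classes = 10  # hypothetical value for illustration
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.ReLU(),
    tf.keras.layers.Dense(128),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.ReLU(),
    tf.keras.layers.Dense(num_classes, activation="softmax"),
])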
cndrip/sklearn100 | [
"bfaa54f7fe8e16ea66655159ccacbcb61086a434"
] | [
"code/019.py"
] | [
"from numpy.lib.npyio import load\nfrom sklearn.datasets import load_iris\nimport pandas as pd\n\niris=load_iris()\ndata=iris[\"data\"]\nprint(type(data.shape)) # (150,4)\ntarget=iris[\"target\"]\n\nprint(pd.Series(target).unique()) # [0,1,2]\n\n# vdata=pd.DataFrame(data=iris[\"data\"],columns=iris[\"feature_names\"])\n# print(iris[\"target_names\"])\n# print(type(iris))\n# print(iris.keys())\n# print(iris[\"filename\"])\n# print(iris[\"data\"][:5])\n# print(vdata)"
] | [
[
"sklearn.datasets.load_iris",
"pandas.Series"
]
] |
zhbli/pytorch-faster-rcnn | [
"de3f5a4db43e04a60e13578ad5bb09aa9834abd8"
] | [
"tools/test_net.py"
] | [
"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Zheqi he, Xinlei Chen, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport _init_paths\nfrom model.test import test_net\nfrom model.config import cfg, cfg_from_file, cfg_from_list\nfrom datasets.factory import get_imdb\nimport argparse\nimport pprint\nimport time, os, sys\n\nfrom nets.vgg16 import vgg16\nfrom nets.resnet_v1 import resnetv1\nfrom nets.mobilenet_v1 import mobilenetv1\n\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file', default=None, type=str)\n parser.add_argument('--model', dest='model',\n help='model to test',\n default=None, type=str)\n parser.add_argument('--imdb', dest='imdb_name',\n help='dataset to test',\n default='voc_2007_test', type=str)\n parser.add_argument('--comp', dest='comp_mode', help='competition mode',\n action='store_true')\n parser.add_argument('--num_dets', dest='max_per_image',\n help='max number of detections per image',\n default=100, type=int)\n parser.add_argument('--tag', dest='tag',\n help='tag of the model',\n default='', type=str)\n parser.add_argument('--net', dest='net',\n help='vgg16, res50, res101, res152, mobile',\n default='res50', type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n # v4.0\n if args.imdb_name != 'voc_2007_test':\n input('constomed by zhbli')\n # v4.0\n\n # if has model, get the name from it\n # if does not, then just use the initialization weights\n if args.model:\n filename = os.path.splitext(os.path.basename(args.model))[0]\n else:\n filename = os.path.splitext(os.path.basename(args.weight))[0]\n\n tag = args.tag\n tag = tag if tag else 'default'\n filename = tag + '/' + filename\n\n imdb = get_imdb(args.imdb_name)\n imdb.competition_mode(args.comp_mode)\n\n # load network\n if args.net == 'vgg16':\n net = vgg16()\n elif args.net == 'res50':\n net = resnetv1(num_layers=50)\n elif args.net == 'res101':\n net = resnetv1(num_layers=101)\n elif args.net == 'res152':\n net = resnetv1(num_layers=152)\n elif args.net == 'mobile':\n net = mobilenetv1()\n else:\n raise NotImplementedError\n\n # load model\n net.create_architecture(imdb.num_classes, tag='default',\n anchor_scales=cfg.ANCHOR_SCALES,\n anchor_ratios=cfg.ANCHOR_RATIOS)\n\n net.eval()\n net.cuda()\n\n if args.model:\n print(('Loading model check point from {:s}').format(args.model))\n net.load_state_dict(torch.load(args.model))\n print('Loaded.')\n else:\n print(('Loading initial weights from {:s}').format(args.weight))\n print('Loaded.')\n\n test_net(net, imdb, filename, max_per_image=args.max_per_image)\n"
] | [
[
"torch.load"
]
] |
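test_net.py above restores weights with net.load_state_dict(torch.load(args.model)); a minimal self-contained sketch of that save/load pattern, using a toy module and a hypothetical checkpoint path:

import torch

model = torch.nn.Linear(4, 2)               # toy stand-in for the detection network
torch.save(model.state_dict(), "ckpt.pth")  # hypothetical checkpoint path
model.load_state_dict(torch.load("ckpt.pth"))
model.eval()                                # switch to inference mode, as in the script above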
0xdarkman/RL-Bitcoin-trading-bot | [
"f2a908adfaa38cc4f5e3441de7c775e1bb2a5151"
] | [
"RL-Bitcoin-trading-bot_6/RL-Bitcoin-trading-bot_6.py"
] | [
"#================================================================\n#\n# File name : RL-Bitcoin-trading-bot_6.py\n# Author : PyLessons\n# Created date: 2021-02-08\n# Website : https://pylessons.com/\n# GitHub : https://github.com/pythonlessons/RL-Bitcoin-trading-bot\n# Description : Trading Crypto with Reinforcement Learning #6\n#\n#================================================================\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nimport copy\nimport pandas as pd\nimport numpy as np\nimport random\nfrom collections import deque\nfrom tensorboardX import SummaryWriter\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nfrom model import Actor_Model, Critic_Model, Shared_Model\nfrom utils import TradingGraph, Write_to_file\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom indicators import AddIndicators\nfrom multiprocessing_env import train_multiprocessing, test_multiprocessing\n\nclass CustomAgent:\n # A custom Bitcoin trading agent\n def __init__(self, lookback_window_size=50, lr=0.00005, epochs=1, optimizer=Adam, batch_size=32, model=\"\"):\n self.lookback_window_size = lookback_window_size\n self.model = model\n \n # Action space from 0 to 3, 0 is hold, 1 is buy, 2 is sell\n self.action_space = np.array([0, 1, 2])\n\n # folder to save models\n self.log_name = datetime.now().strftime(\"%Y_%m_%d_%H_%M\")+\"_Crypto_trader\"\n \n # State size contains Market+Orders history for the last lookback_window_size steps\n #self.state_size = (lookback_window_size, 10)\n self.state_size = (lookback_window_size, 10+9) # 10 standard information +9 indicators\n\n # Neural Networks part bellow\n self.lr = lr\n self.epochs = epochs\n self.optimizer = optimizer\n self.batch_size = batch_size\n\n # Create shared Actor-Critic network model\n self.Actor = self.Critic = Shared_Model(input_shape=self.state_size, action_space = self.action_space.shape[0], lr=self.lr, optimizer = self.optimizer, model=self.model)\n # Create Actor-Critic network model\n #self.Actor = Actor_Model(input_shape=self.state_size, action_space = self.action_space.shape[0], lr=self.lr, optimizer = self.optimizer)\n #self.Critic = Critic_Model(input_shape=self.state_size, action_space = self.action_space.shape[0], lr=self.lr, optimizer = self.optimizer)\n \n # create tensorboard writer\n def create_writer(self, initial_balance, normalize_value, train_episodes):\n self.replay_count = 0\n self.writer = SummaryWriter('runs/'+self.log_name)\n\n # Create folder to save models\n if not os.path.exists(self.log_name):\n os.makedirs(self.log_name)\n\n self.start_training_log(initial_balance, normalize_value, train_episodes)\n \n def start_training_log(self, initial_balance, normalize_value, train_episodes): \n # save training parameters to Parameters.txt file for future\n with open(self.log_name+\"/Parameters.txt\", \"w\") as params:\n current_date = datetime.now().strftime('%Y-%m-%d %H:%M')\n params.write(f\"training start: {current_date}\\n\")\n params.write(f\"initial_balance: {initial_balance}\\n\")\n params.write(f\"training episodes: {train_episodes}\\n\")\n params.write(f\"lookback_window_size: {self.lookback_window_size}\\n\")\n params.write(f\"lr: {self.lr}\\n\")\n params.write(f\"epochs: {self.epochs}\\n\")\n params.write(f\"batch size: {self.batch_size}\\n\")\n params.write(f\"normalize_value: {normalize_value}\\n\")\n params.write(f\"model: {self.model}\\n\")\n \n def end_training_log(self):\n with open(self.log_name+\"/Parameters.txt\", \"a+\") as params:\n current_date = 
datetime.now().strftime('%Y-%m-%d %H:%M')\n params.write(f\"training end: {current_date}\\n\")\n\n def get_gaes(self, rewards, dones, values, next_values, gamma = 0.99, lamda = 0.95, normalize=True):\n deltas = [r + gamma * (1 - d) * nv - v for r, d, nv, v in zip(rewards, dones, next_values, values)]\n deltas = np.stack(deltas)\n gaes = copy.deepcopy(deltas)\n for t in reversed(range(len(deltas) - 1)):\n gaes[t] = gaes[t] + (1 - dones[t]) * gamma * lamda * gaes[t + 1]\n\n target = gaes + values\n if normalize:\n gaes = (gaes - gaes.mean()) / (gaes.std() + 1e-8)\n return np.vstack(gaes), np.vstack(target)\n\n def replay(self, states, actions, rewards, predictions, dones, next_states):\n # reshape memory to appropriate shape for training\n states = np.vstack(states)\n next_states = np.vstack(next_states)\n actions = np.vstack(actions)\n predictions = np.vstack(predictions)\n\n # Get Critic network predictions \n values = self.Critic.critic_predict(states)\n next_values = self.Critic.critic_predict(next_states)\n \n # Compute advantages\n advantages, target = self.get_gaes(rewards, dones, np.squeeze(values), np.squeeze(next_values))\n '''\n plt.plot(target,'-')\n plt.plot(advantages,'.')\n ax=plt.gca()\n ax.grid(True)\n plt.show()\n '''\n # stack everything to numpy array\n y_true = np.hstack([advantages, predictions, actions])\n \n # training Actor and Critic networks\n a_loss = self.Actor.Actor.fit(states, y_true, epochs=self.epochs, verbose=0, shuffle=True, batch_size=self.batch_size)\n c_loss = self.Critic.Critic.fit(states, target, epochs=self.epochs, verbose=0, shuffle=True, batch_size=self.batch_size)\n\n self.writer.add_scalar('Data/actor_loss_per_replay', np.sum(a_loss.history['loss']), self.replay_count)\n self.writer.add_scalar('Data/critic_loss_per_replay', np.sum(c_loss.history['loss']), self.replay_count)\n self.replay_count += 1\n\n return np.sum(a_loss.history['loss']), np.sum(c_loss.history['loss'])\n\n def act(self, state):\n # Use the network to predict the next action to take, using the model\n prediction = self.Actor.actor_predict(np.expand_dims(state, axis=0))[0]\n action = np.random.choice(self.action_space, p=prediction)\n return action, prediction\n \n def save(self, name=\"Crypto_trader\", score=\"\", args=[]):\n # save keras model weights\n self.Actor.Actor.save_weights(f\"{self.log_name}/{score}_{name}_Actor.h5\")\n self.Critic.Critic.save_weights(f\"{self.log_name}/{score}_{name}_Critic.h5\")\n\n # log saved model arguments to file\n if len(args) > 0:\n with open(f\"{self.log_name}/log.txt\", \"a+\") as log:\n current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n atgumets = \"\"\n for arg in args:\n atgumets += f\", {arg}\"\n log.write(f\"{current_time}{atgumets}\\n\")\n\n def load(self, folder, name):\n # load keras model weights\n self.Actor.Actor.load_weights(os.path.join(folder, f\"{name}_Actor.h5\"))\n self.Critic.Critic.load_weights(os.path.join(folder, f\"{name}_Critic.h5\"))\n\n \nclass CustomEnv:\n # A custom Bitcoin trading environment\n def __init__(self, df, initial_balance=1000, lookback_window_size=50, Render_range=100, Show_reward=False, Show_indicators=False, normalize_value=40000):\n # Define action space and state size and other custom parameters\n self.df = df.dropna().reset_index()\n self.df_total_steps = len(self.df)-1\n self.initial_balance = initial_balance\n self.lookback_window_size = lookback_window_size\n self.Render_range = Render_range # render range in visualization\n self.Show_reward = Show_reward # show order reward in 
rendered visualization\n self.Show_indicators = Show_indicators # show main indicators in rendered visualization\n\n # Orders history contains the balance, net_worth, crypto_bought, crypto_sold, crypto_held values for the last lookback_window_size steps\n self.orders_history = deque(maxlen=self.lookback_window_size)\n \n # Market history contains the OHCL values for the last lookback_window_size prices\n self.market_history = deque(maxlen=self.lookback_window_size)\n\n self.indicators_history = deque(maxlen=self.lookback_window_size)\n\n self.normalize_value = normalize_value\n\n # Reset the state of the environment to an initial state\n def reset(self, env_steps_size = 0):\n self.visualization = TradingGraph(Render_range=self.Render_range, Show_reward=self.Show_reward, Show_indicators=self.Show_indicators) # init visualization\n self.trades = deque(maxlen=self.Render_range) # limited orders memory for visualization\n \n self.balance = self.initial_balance\n self.net_worth = self.initial_balance\n self.prev_net_worth = self.initial_balance\n self.crypto_held = 0\n self.crypto_sold = 0\n self.crypto_bought = 0\n self.episode_orders = 0 # track episode orders count\n self.prev_episode_orders = 0 # track previous episode orders count\n self.rewards = deque(maxlen=self.Render_range)\n self.env_steps_size = env_steps_size\n self.punish_value = 0\n if env_steps_size > 0: # used for training dataset\n self.start_step = random.randint(self.lookback_window_size, self.df_total_steps - env_steps_size)\n self.end_step = self.start_step + env_steps_size\n else: # used for testing dataset\n self.start_step = self.lookback_window_size\n self.end_step = self.df_total_steps\n \n self.current_step = self.start_step\n\n for i in reversed(range(self.lookback_window_size)):\n current_step = self.current_step - i\n self.orders_history.append([self.balance, self.net_worth, self.crypto_bought, self.crypto_sold, self.crypto_held])\n\n self.market_history.append([self.df.loc[current_step, 'Open'],\n self.df.loc[current_step, 'High'],\n self.df.loc[current_step, 'Low'],\n self.df.loc[current_step, 'Close'],\n self.df.loc[current_step, 'Volume'],\n ])\n\n self.indicators_history.append(\n [self.df.loc[current_step, 'sma7'] / self.normalize_value,\n self.df.loc[current_step, 'sma25'] / self.normalize_value,\n self.df.loc[current_step, 'sma99'] / self.normalize_value,\n self.df.loc[current_step, 'bb_bbm'] / self.normalize_value,\n self.df.loc[current_step, 'bb_bbh'] / self.normalize_value,\n self.df.loc[current_step, 'bb_bbl'] / self.normalize_value,\n self.df.loc[current_step, 'psar'] / self.normalize_value,\n self.df.loc[current_step, 'MACD'] / 400,\n self.df.loc[current_step, 'RSI'] / 100\n ])\n \n\n state = np.concatenate((self.market_history, self.orders_history), axis=1) / self.normalize_value\n state = np.concatenate((state, self.indicators_history), axis=1)\n\n return state\n\n # Get the data points for the given current_step\n def _next_observation(self):\n self.market_history.append([self.df.loc[self.current_step, 'Open'],\n self.df.loc[self.current_step, 'High'],\n self.df.loc[self.current_step, 'Low'],\n self.df.loc[self.current_step, 'Close'],\n self.df.loc[self.current_step, 'Volume'],\n ])\n\n self.indicators_history.append([self.df.loc[self.current_step, 'sma7'] / self.normalize_value,\n self.df.loc[self.current_step, 'sma25'] / self.normalize_value,\n self.df.loc[self.current_step, 'sma99'] / self.normalize_value,\n self.df.loc[self.current_step, 'bb_bbm'] / self.normalize_value,\n 
self.df.loc[self.current_step, 'bb_bbh'] / self.normalize_value,\n self.df.loc[self.current_step, 'bb_bbl'] / self.normalize_value,\n self.df.loc[self.current_step, 'psar'] / self.normalize_value,\n self.df.loc[self.current_step, 'MACD'] / 400,\n self.df.loc[self.current_step, 'RSI'] / 100\n ])\n \n obs = np.concatenate((self.market_history, self.orders_history), axis=1) / self.normalize_value\n obs = np.concatenate((obs, self.indicators_history), axis=1)\n \n return obs\n\n # Execute one time step within the environment\n def step(self, action):\n self.crypto_bought = 0\n self.crypto_sold = 0\n self.current_step += 1\n\n # Set the current price to a random price between open and close\n #current_price = random.uniform(\n # self.df.loc[self.current_step, 'Open'],\n # self.df.loc[self.current_step, 'Close'])\n current_price = self.df.loc[self.current_step, 'Open']\n Date = self.df.loc[self.current_step, 'Date'] # for visualization\n High = self.df.loc[self.current_step, 'High'] # for visualization\n Low = self.df.loc[self.current_step, 'Low'] # for visualization\n\n if action == 0: # Hold\n pass\n\n elif action == 1 and self.balance > self.initial_balance/100:\n # Buy with 100% of current balance\n self.crypto_bought = self.balance / current_price\n self.balance -= self.crypto_bought * current_price\n self.crypto_held += self.crypto_bought\n self.trades.append({'Date' : Date, 'High' : High, 'Low' : Low, 'total': self.crypto_bought, 'type': \"buy\", 'current_price': current_price})\n self.episode_orders += 1\n\n elif action == 2 and self.crypto_held>0:\n # Sell 100% of current crypto held\n self.crypto_sold = self.crypto_held\n self.balance += self.crypto_sold * current_price\n self.crypto_held -= self.crypto_sold\n self.trades.append({'Date' : Date, 'High' : High, 'Low' : Low, 'total': self.crypto_sold, 'type': \"sell\", 'current_price': current_price})\n self.episode_orders += 1\n\n self.prev_net_worth = self.net_worth\n self.net_worth = self.balance + self.crypto_held * current_price\n\n self.orders_history.append([self.balance, self.net_worth, self.crypto_bought, self.crypto_sold, self.crypto_held])\n\n # Receive calculated reward\n reward = self.get_reward()\n\n if self.net_worth <= self.initial_balance/2:\n done = True\n else:\n done = False\n\n obs = self._next_observation()\n \n return obs, reward, done\n\n # Calculate reward\n def get_reward(self):\n self.punish_value += self.net_worth * 0.00001\n if self.episode_orders > 1 and self.episode_orders > self.prev_episode_orders:\n self.prev_episode_orders = self.episode_orders\n if self.trades[-1]['type'] == \"buy\" and self.trades[-2]['type'] == \"sell\":\n reward = self.trades[-2]['total']*self.trades[-2]['current_price'] - self.trades[-2]['total']*self.trades[-1]['current_price']\n reward -= self.punish_value\n self.punish_value = 0\n self.trades[-1][\"Reward\"] = reward\n return reward\n elif self.trades[-1]['type'] == \"sell\" and self.trades[-2]['type'] == \"buy\":\n reward = self.trades[-1]['total']*self.trades[-1]['current_price'] - self.trades[-2]['total']*self.trades[-2]['current_price']\n reward -= self.punish_value\n self.punish_value = 0\n self.trades[-1][\"Reward\"] = reward\n return reward\n else:\n return 0 - self.punish_value\n\n # render environment\n def render(self, visualize = False):\n #print(f'Step: {self.current_step}, Net Worth: {self.net_worth}')\n if visualize:\n # Render the environment to the screen\n img = self.visualization.render(self.df.loc[self.current_step], self.net_worth, self.trades)\n return 
img\n\n \ndef Random_games(env, visualize, test_episodes = 50, comment=\"\"):\n average_net_worth = 0\n average_orders = 0\n no_profit_episodes = 0\n for episode in range(test_episodes):\n state = env.reset()\n while True:\n env.render(visualize)\n action = np.random.randint(3, size=1)[0]\n state, reward, done = env.step(action)\n if env.current_step == env.end_step:\n average_net_worth += env.net_worth\n average_orders += env.episode_orders\n if env.net_worth < env.initial_balance: no_profit_episodes += 1 # calculate episode count where we had negative profit through episode\n print(\"episode: {}, net_worth: {}, average_net_worth: {}, orders: {}\".format(episode, env.net_worth, average_net_worth/(episode+1), env.episode_orders))\n break\n\n print(\"average {} episodes random net_worth: {}, orders: {}\".format(test_episodes, average_net_worth/test_episodes, average_orders/test_episodes))\n # save test results to test_results.txt file\n with open(\"test_results.txt\", \"a+\") as results:\n current_date = datetime.now().strftime('%Y-%m-%d %H:%M')\n results.write(f'{current_date}, {\"Random games\"}, test episodes:{test_episodes}')\n results.write(f', net worth:{average_net_worth/(episode+1)}, orders per episode:{average_orders/test_episodes}')\n results.write(f', no profit episodes:{no_profit_episodes}, comment: {comment}\\n')\n\ndef train_agent(env, agent, visualize=False, train_episodes = 50, training_batch_size=500):\n agent.create_writer(env.initial_balance, env.normalize_value, train_episodes) # create TensorBoard writer\n total_average = deque(maxlen=100) # save recent 100 episodes net worth\n best_average = 0 # used to track best average net worth\n for episode in range(train_episodes):\n state = env.reset(env_steps_size = training_batch_size)\n\n states, actions, rewards, predictions, dones, next_states = [], [], [], [], [], []\n for t in range(training_batch_size):\n env.render(visualize)\n action, prediction = agent.act(state)\n next_state, reward, done = env.step(action)\n states.append(np.expand_dims(state, axis=0))\n next_states.append(np.expand_dims(next_state, axis=0))\n action_onehot = np.zeros(3)\n action_onehot[action] = 1\n actions.append(action_onehot)\n rewards.append(reward)\n dones.append(done)\n predictions.append(prediction)\n state = next_state\n\n a_loss, c_loss = agent.replay(states, actions, rewards, predictions, dones, next_states)\n total_average.append(env.net_worth)\n average = np.average(total_average)\n \n agent.writer.add_scalar('Data/average net_worth', average, episode)\n agent.writer.add_scalar('Data/episode_orders', env.episode_orders, episode)\n \n print(\"episode: {:<5} net worth {:<7.2f} average: {:<7.2f} orders: {}\".format(episode, env.net_worth, average, env.episode_orders))\n if episode > len(total_average):\n if best_average < average:\n best_average = average\n print(\"Saving model\")\n agent.save(score=\"{:.2f}\".format(best_average), args=[episode, average, env.episode_orders, a_loss, c_loss])\n agent.save()\n \n agent.end_training_log()\n \n\ndef test_agent(env, agent, visualize=True, test_episodes=10, folder=\"\", name=\"Crypto_trader\", comment=\"\"):\n agent.load(folder, name)\n average_net_worth = 0\n average_orders = 0\n no_profit_episodes = 0\n for episode in range(test_episodes):\n state = env.reset()\n while True:\n env.render(visualize)\n action, prediction = agent.act(state)\n state, reward, done = env.step(action)\n if env.current_step == env.end_step:\n average_net_worth += env.net_worth\n average_orders += env.episode_orders\n 
if env.net_worth < env.initial_balance: no_profit_episodes += 1 # calculate episode count where we had negative profit through episode\n print(\"episode: {:<5}, net_worth: {:<7.2f}, average_net_worth: {:<7.2f}, orders: {}\".format(episode, env.net_worth, average_net_worth/(episode+1), env.episode_orders))\n break\n \n print(\"average {} episodes agent net_worth: {}, orders: {}\".format(test_episodes, average_net_worth/test_episodes, average_orders/test_episodes))\n print(\"No profit episodes: {}\".format(no_profit_episodes))\n # save test results to test_results.txt file\n with open(\"test_results.txt\", \"a+\") as results:\n current_date = datetime.now().strftime('%Y-%m-%d %H:%M')\n results.write(f'{current_date}, {name}, test episodes:{test_episodes}')\n results.write(f', net worth:{average_net_worth/(episode+1)}, orders per episode:{average_orders/test_episodes}')\n results.write(f', no profit episodes:{no_profit_episodes}, model: {agent.model}, comment: {comment}\\n')\n\n\nif __name__ == \"__main__\": \n df = pd.read_csv('./BTCUSD_1h.csv')\n df = df.sort_values('Date')\n df = AddIndicators(df) # insert indicators to df\n\n lookback_window_size = 100\n test_window = 720*3 # 3 months \n train_df = df[100:-test_window-lookback_window_size] # we leave 100 to have properly calculated indicators\n test_df = df[-test_window-lookback_window_size:]\n\n # single processing training\n #agent = CustomAgent(lookback_window_size=lookback_window_size, lr=0.00001, epochs=5, optimizer=Adam, batch_size = 32, model=\"Dense\")\n #train_env = CustomEnv(train_df, lookback_window_size=lookback_window_size)\n #train_agent(train_env, agent, visualize=False, train_episodes=50000, training_batch_size=500)\n\n # multiprocessing training/testing. Note - run from cmd or terminal\n agent = CustomAgent(lookback_window_size=lookback_window_size, lr=0.00001, epochs=5, optimizer=Adam, batch_size = 32, model=\"Dense\")\n #train_multiprocessing(CustomEnv, agent, train_df, num_worker = 32, training_batch_size=500, visualize=False, EPISODES=200000)\n test_multiprocessing(CustomEnv, agent, test_df, num_worker = 16, visualize=False, test_episodes=1000, folder=\"2021_01_21_20_06_Crypto_trader\", name=\"1984.93_Crypto_trader\", comment=\"Dense\")\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.expand_dims",
"numpy.random.choice",
"numpy.zeros",
"numpy.sum",
"numpy.stack",
"numpy.random.randint",
"numpy.average",
"numpy.hstack",
"pandas.read_csv",
"numpy.squeeze",
"numpy.vstack"
]
] |
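CustomAgent.get_gaes above implements generalized advantage estimation; a compact standalone version of the same recursion on toy numbers:

import numpy as np

def gae(rewards, dones, values, next_values, gamma=0.99, lam=0.95):
    # one-step TD errors, then the discounted lambda-weighted backward sweep
    deltas = np.array([r + gamma * (1 - d) * nv - v
                       for r, d, nv, v in zip(rewards, dones, next_values, values)])
    adv = deltas.copy()
    for t in reversed(range(len(deltas) - 1)):
        adv[t] += (1 - dones[t]) * gamma * lam * adv[t + 1]
    return adv, adv + np.array(values)  # advantages and critic targets

adv, target = gae([1.0, 0.0, 1.0], [0, 0, 1], [0.5, 0.4, 0.3], [0.4, 0.3, 0.0])
print(adv, target)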
vatch123/GeneNet | [
"19ed9ddc7f1d3a883095744ba7059fd26f4e1b89"
] | [
"data.py"
] | [
"\"\"\"\nThis module deals loading and transforming data\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import normalize\n\nfrom keras.utils import to_categorical\n\n\ndef load_data(filepath):\n \"\"\"\n This function loads the train, validation and test data from a folder.\n It also normalizes the data along each histone signal (column) and returns the\n final data in numpy format.\n\n Parameters\n ----------\n filepath : `string`\n The path to the data files\n\n Returns\n -------\n `tuple`\n The normalized train, valid and test data\n \"\"\"\n\n # Read all the data\n train = pd.read_csv(filepath+'/train.csv', header=None)\n test = pd.read_csv(filepath+'/test.csv', header=None)\n valid = pd.read_csv(filepath+'/valid.csv', header=None)\n\n\n # Extract the input and output\n y_train = train[7]\n x_train = train.drop([0, 1, 7], axis=1)\n\n y_test = test[7]\n x_test = test.drop([0, 1, 7], axis=1)\n\n y_valid = valid[7]\n x_valid = valid.drop([0, 1, 7], axis=1)\n\n\n # Convert the dataframes to numpy arrays\n x_train = np.array(x_train, dtype=float)\n x_test = np.array(x_test, dtype=float)\n x_valid = np.array(x_valid, dtype=float)\n y_train = np.array(y_train, dtype=float)\n y_test = np.array(y_test, dtype=float)\n y_valid = np.array(y_valid, dtype=float)\n\n # Reshape the arrays to get all the predictions for a particular gene together\n y_train = y_train.reshape((y_train.shape[0]//100, 100))\n y_train = y_train[:, 0]\n y_test = y_test.reshape((y_test.shape[0]//100, 100))\n y_test = y_test[:, 0]\n y_valid = y_valid.reshape((y_valid.shape[0]//100, 100))\n y_valid = y_valid[:, 0]\n\n # Similarly reshape the input variables\n x_train = x_train.reshape((x_train.shape[0]//100,100,x_train.shape[1]))\n x_test = x_test.reshape((x_test.shape[0]//100,100,x_test.shape[1]))\n x_valid = x_valid.reshape((x_valid.shape[0]//100,100,x_valid.shape[1]))\n\n\n # Now for each train, valid and test normalize the data column wise and also add an\n # additional feature - the normalized sum of all modifications\n\n for i in range(x_train.shape[0]):\n s = x_train[i,:,:]\n s = normalize(s, axis=1, norm='max')\n x_train[i,:,:] = s\n\n a = x_train.sum(1)\n a = normalize(a, axis=1, norm='max')\n x_train = x_train.reshape((x_train.shape[0],-1))\n x_train = np.c_[x_train, a]\n x_train = x_train.reshape((x_train.shape[0],-1,1))\n\n\n for i in range(x_test.shape[0]):\n s = x_test[i,:,:]\n s = normalize(s, axis=1, norm='max')\n x_test[i,:,:] = s\n\n a = x_test.sum(1)\n a = normalize(a, axis=1, norm='max')\n x_test = x_test.reshape((x_test.shape[0],-1))\n x_test = np.c_[x_test, a]\n x_test = x_test.reshape((x_test.shape[0],-1,1))\n\n\n for i in range(x_valid.shape[0]):\n s = x_valid[i,:,:]\n s = normalize(s, axis=1, norm='max')\n x_valid[i,:,:] = s\n\n a = x_valid.sum(1)\n a = normalize(a, axis=1, norm='max')\n x_valid = x_valid.reshape((x_valid.shape[0],-1))\n x_valid = np.c_[x_valid, a]\n x_valid = x_valid.reshape((x_valid.shape[0],-1,1))\n\n # Convert labels to categorical values\n y_train = to_categorical(y_train)\n y_test = to_categorical(y_test)\n y_valid = to_categorical(y_valid)\n\n\n return (x_train, y_train), (x_valid, y_valid), (x_test, y_test)\n\n\ndef load_data_mix(args):\n \"\"\"\n This function also loads data for every cell but it also mixes the data for different cell types.\n It only mizes the train and validation data but the test is done individually on each cell\n\n Parameters\n ----------\n args : `dict`\n The argument 
dictionary\n\n Returns\n -------\n `tuple`\n The train, test and validation data\n \"\"\"\n\n # Placeholders for the mixed data\n X = []\n Y = []\n\n # For each cell type\n cells = os.listdir(args.data_dir)\n \n for cell in cells:\n\n # The data path\n filepath = args.data_dir+'/'+cell+'/classification'\n train = pd.read_csv(filepath+'/train.csv', header=None)\n test = pd.read_csv(filepath+'/test.csv', header=None)\n valid = pd.read_csv(filepath+'/valid.csv', header=None)\n\n\n y_train = train[7]\n x_train = train.drop([0, 1, 7], axis=1)\n\n y_test = test[7]\n x_test = test.drop([0, 1, 7], axis=1)\n\n y_valid = valid[7]\n x_valid = valid.drop([0, 1, 7], axis=1)\n\n\n x_train = np.array(x_train, dtype=float)\n x_valid = np.array(x_valid, dtype=float)\n x_test = np.array(x_test, dtype=float)\n y_train = np.array(y_train, dtype=float)\n y_valid = np.array(y_valid, dtype=float)\n y_test = np.array(y_test, dtype=float)\n\n\n y_train = y_train.reshape((y_train.shape[0]//100, 100))\n y_train = y_train[:, 0]\n y_valid = y_valid.reshape((y_valid.shape[0]//100, 100))\n y_valid = y_valid[:, 0]\n y_test = y_test.reshape((y_test.shape[0]//100, 100))\n y_test = y_test[:, 0]\n\n\n x_train = x_train.reshape((x_train.shape[0]//100,100,x_train.shape[1]))\n x_valid = x_valid.reshape((x_valid.shape[0]//100,100,x_valid.shape[1]))\n x_test = x_valid.reshape((x_test.shape[0]//100,100,x_test.shape[1]))\n\n\n for i in range(x_train.shape[0]):\n s = x_train[i,:,:]\n s = normalize(s, axis=1, norm='max')\n x_train[i,:,:] = s\n\n a = x_train.sum(1)\n a = normalize(a, axis=1, norm='max')\n x_train = x_train.reshape((x_train.shape[0],-1))\n x_train = np.c_[x_train, a]\n x_train = x_train.reshape((x_train.shape[0],-1,1))\n\n\n for i in range(x_valid.shape[0]):\n s = x_valid[i,:,:]\n s = normalize(s, axis=1, norm='max')\n x_valid[i,:,:] = s\n\n a = x_valid.sum(1)\n a = normalize(a, axis=1, norm='max')\n x_valid = x_valid.reshape((x_valid.shape[0],-1))\n x_valid = np.c_[x_valid, a]\n x_valid = x_valid.reshape((x_valid.shape[0],-1,1))\n\n for i in range(x_test.shape[0]):\n s = x_test[i,:,:]\n s = normalize(s, axis=1, norm='max')\n x_test[i,:,:] = s\n\n a = x_test.sum(1)\n a = normalize(a, axis=1, norm='max')\n x_test = x_test.reshape((x_test.shape[0],-1))\n x_test = np.c_[x_test, a]\n x_test = x_test.reshape((x_test.shape[0],-1,1))\n\n\n y_train = to_categorical(y_train)\n y_valid = to_categorical(y_valid)\n y_test = to_categorical(y_test)\n\n X.append(x_train)\n X.append(x_valid)\n Y.append(y_train)\n Y.append(y_valid)\n\n X = np.array(X)\n Y = np.array(Y)\n\n X = X.reshape((-1,505,1))\n Y = Y.reshape((-1,2))\n\n x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size = 0.3, shuffle = True, random_state =2)\n\n return (x_train, y_train), (x_val, y_val), (x_test, y_test)\n"
] | [
[
"sklearn.model_selection.train_test_split",
"numpy.array",
"pandas.read_csv",
"sklearn.preprocessing.normalize"
]
] |
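The per-gene preprocessing in data.py above relies on sklearn's normalize(..., norm='max'); a tiny example of what that scaling does to each row:

import numpy as np
from sklearn.preprocessing import normalize

x = np.array([[1.0, 2.0, 4.0],
              [0.5, 0.5, 1.0]])
# norm='max' with axis=1 divides each row by its largest absolute value,
# which is the per-window scaling used in data.py above
print(normalize(x, axis=1, norm='max'))
# [[0.25 0.5  1.  ]
#  [0.5  0.5  1.  ]]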
ofirnachum/uncertainty_gbm | [
"6596750286324e58f1d872e4634abe3fa4901bd7"
] | [
"regressor.py"
] | [
"__doc__ = \"\"\"Gradient Boosted Regression Trees for heteroscedastic data.\"\"\"\n\nimport loss\n\nfrom sklearn.ensemble import gradient_boosting\n\nclass UncertaintyGBM(gradient_boosting.BaseGradientBoosting):\n \"\"\"GBM for maximizing likelihood of an observed y(x) by predicting\n a normal distribution defined by mu_y(x) and std_y(x).\n\n Parameters\n ----------\n learning_rate : float, optional (default=0.1)\n learning rate shrinks the contribution of each tree by `learning_rate`.\n There is a trade-off between learning_rate and n_estimators.\n\n n_estimators : int (default=100)\n The number of boosting stages to perform. Gradient boosting\n is fairly robust to over-fitting so a large number usually\n results in better performance.\n\n max_depth : integer, optional (default=3)\n maximum depth of the individual regression estimators. The maximum\n depth limits the number of nodes in the tree. Tune this parameter\n for best performance; the best value depends on the interaction\n of the input variables.\n Ignored if ``max_leaf_nodes`` is not None.\n\n min_samples_split : integer, optional (default=2)\n The minimum number of samples required to split an internal node.\n\n min_samples_leaf : integer, optional (default=1)\n The minimum number of samples required to be at a leaf node.\n\n min_weight_fraction_leaf : float, optional (default=0.)\n The minimum weighted fraction of the input samples required to be at a\n leaf node.\n\n subsample : float, optional (default=1.0)\n The fraction of samples to be used for fitting the individual base\n learners. If smaller than 1.0 this results in Stochastic Gradient\n Boosting. `subsample` interacts with the parameter `n_estimators`.\n Choosing `subsample < 1.0` leads to a reduction of variance\n and an increase in bias.\n\n max_features : int, float, string or None, optional (default=None)\n The number of features to consider when looking for the best split:\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a percentage and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Choosing `max_features < n_features` leads to a reduction of variance\n and an increase in bias.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n max_leaf_nodes : int or None, optional (default=None)\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n init : BaseEstimator, None, optional (default=None)\n An estimator object that is used to compute the initial\n predictions. ``init`` has to provide ``fit`` and ``predict``.\n If None it uses ``loss.init_estimator``.\n\n verbose : int, default: 0\n Enable verbose output. If 1 then it prints progress and performance\n once in a while (the more trees the lower the frequency). 
If greater\n than 1 then it prints progress and performance for every tree.\n\n warm_start : bool, default: False\n When set to ``True``, reuse the solution of the previous call to fit\n and add more estimators to the ensemble, otherwise, just erase the\n previous solution.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n presort : bool or 'auto', optional (default='auto')\n Whether to presort the data to speed up the finding of best splits in\n fitting. Auto mode by default will use presorting on dense data and\n default to normal sorting on sparse data. Setting presort to true on\n sparse data will raise an error.\n\n Attributes\n ----------\n feature_importances_ : array, shape = [n_features]\n The feature importances (the higher, the more important the feature).\n\n oob_improvement_ : array, shape = [n_estimators]\n The improvement in loss (= deviance) on the out-of-bag samples\n relative to the previous iteration.\n ``oob_improvement_[0]`` is the improvement in\n loss of the first stage over the ``init`` estimator.\n\n train_score_ : array, shape = [n_estimators]\n The i-th score ``train_score_[i]`` is the deviance (= loss) of the\n model at iteration ``i`` on the in-bag sample.\n If ``subsample == 1`` this is the deviance on the training data.\n\n loss_ : LossFunction\n The concrete ``LossFunction`` object.\n\n `init` : BaseEstimator\n The estimator that provides the initial predictions.\n Set via the ``init`` argument or ``loss.init_estimator``.\n\n estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]\n The collection of fitted sub-estimators.\n\n References\n ----------\n J. Friedman, Greedy Function Approximation: A Gradient Boosting\n Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.\n\n J. Friedman, Stochastic Gradient Boosting, 1999\n\n T. Hastie, R. Tibshirani and J. Friedman.\n Elements of Statistical Learning Ed. 2, Springer, 2009.\n\n Q. Le, A. SMola and S. Canu. Heteroscedastic Gaussian Process\n Regression. 
ICML, 2005.\n\n \"\"\"\n\n _SUPPORTED_LOSS = ('heteroscedastic_normal')\n\n def __init__(self, learning_rate=0.1, n_estimators=100,\n subsample=1.0, min_samples_split=2,\n min_samples_leaf=1, min_weight_fraction_leaf=0.,\n max_depth=3, init=None, random_state=None,\n max_features=None, verbose=0, max_leaf_nodes=None,\n warm_start=False, presort='auto'):\n\n super(UncertaintyGBM, self).__init__(\n loss='heteroscedastic_normal',\n learning_rate=learning_rate, n_estimators=n_estimators,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_depth=max_depth, init=init, subsample=subsample,\n max_features=max_features,\n random_state=random_state, verbose=verbose,\n max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,\n presort='auto')\n\n def _validate_y(self, y):\n self.n_classes_ = 2\n if y.dtype.kind == 'O':\n y = y.astype(np.float64)\n return y\n\n def predict(self, X):\n \"\"\"Predict mu(X), std(X).\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : array of shape = [n_samples, 2]\n The predicted values.\n \"\"\"\n X = gradient_boosting.check_array(\n X, dtype=gradient_boosting.DTYPE, order=\"C\")\n return self._decision_function(X)\n\n def staged_predict(self, X):\n \"\"\"Predict mu(X), std(X) at each stage for X.\n\n This method allows monitoring (i.e. determine error on testing set)\n after each stage.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : generator of array of shape = [n_samples, 2]\n The predicted value of the input samples.\n \"\"\"\n for y in self._staged_decision_function(X):\n yield y\n\n def apply(self, X):\n \"\"\"Apply trees in the ensemble to X, return leaf indices.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape = [n_samples, n_features]\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n X_leaves : array_like, shape = [n_samples, n_estimators]\n For each datapoint x in X and for each tree in the ensemble,\n return the index of the leaf x ends up in in each estimator.\n \"\"\"\n\n leaves = super(UncertaintyGBM, self).apply(X)\n leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])\n return leaves\n"
] | [
[
"sklearn.ensemble.gradient_boosting.check_array"
]
] |
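The regressor above predicts a two-column output interpreted as mu(x) and std(x) of a normal distribution. A hedged sketch of the corresponding Gaussian negative log-likelihood, assuming (not confirmed by the source) that the second column is the positive standard deviation rather than its log:

import numpy as np

def gaussian_nll(y, pred):
    # pred[:, 0] -> mu(x), pred[:, 1] -> std(x)  (assumed convention, see note above)
    mu, std = pred[:, 0], pred[:, 1]
    return np.mean(0.5 * ((y - mu) / std) ** 2 + np.log(std) + 0.5 * np.log(2 * np.pi))

y = np.array([0.1, -0.2, 0.3])
pred = np.column_stack([np.zeros(3), np.ones(3)])  # mu = 0, std = 1
print(gaussian_nll(y, pred))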
eym55/power_grid_sim | [
"35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa"
] | [
"run_game.py"
] | [
"import gym\nfrom environments.defender_game import PowerGrid\nimport pypsa\nimport numpy as np\nfrom agents import RandomAgent,DQNAgent\nfrom environments.defender_game_v1 import PowerGridV1\nimport pickle\n\n\nnp.random.seed(10)\nnetwork = pypsa.Network('networks/sample_grid.nc')\nLINES = network.lines.shape[0]\nattack_distribution = np.random.dirichlet(np.ones(LINES),size= 1)[0]\n\n#Make random attacker\nattacker_agent_config = {\n 'action_distribution':attack_distribution\n}\n#Make environment\nenv_config = {\n 'network':network,\n 'agent_config':attacker_agent_config,\n 'agent_class':RandomAgent}\nenv = PowerGrid(env_config)\n\n#Make defender\ndefend_config = {\n 'checkpoint_path':'results/DQN_checkpoint_power_grid_v1/checkpoint-251',\n 'agent_config':{\n \"env_config\": {'network':network,'attack_distribution':attack_distribution},\n \"num_workers\": 8,\n \"n_step\": 5,\n \"noisy\": True,\n \"num_atoms\": 2,\n \"v_min\": 0,\n \"v_max\": 1000.0,\n }\n}\ndefender = DQNAgent(PowerGridV1,defend_config)\n\nresults_length= []\nresults_rewards=[]\nnum_episodes = 25\nfor episode in range(num_episodes):\n total_reward = 0\n done = False\n i=0\n obs = env.reset()\n action = defender.compute_action(obs)\n while done == False:\n obs, rewards, done, info = env.step(action)\n action = defender.compute_action(obs)\n i+=1\n total_reward += rewards\n results_length.append(i)\n results_rewards.append(total_reward)\n print(f\"\\n\\n\\n Episode {episode} done. Episode lasted {i} timesteps and had a cumulative reward of {total_reward} \\n\\n\\n\")\n\nprint(f\"\\n\\n\\n All {num_episodes} have completed. \\n\\n\\n\")\nprint(f\"The average episode rewards was {np.mean(results_rewards)} and the mean episode length was {np.mean(results_length)} timesteps\")\n"
] | [
[
"numpy.random.seed",
"numpy.ones",
"numpy.mean"
]
] |
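run_game.py above draws the attacker's action distribution from a Dirichlet over the grid's lines; a small reproduction of that draw for a hypothetical 5-line grid:

import numpy as np

np.random.seed(10)
lines = 5  # hypothetical number of grid lines for illustration
attack_distribution = np.random.dirichlet(np.ones(lines), size=1)[0]
print(attack_distribution, attack_distribution.sum())  # non-negative weights summing to 1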
LolaSegura/quadruped_ctrl | [
"6a2625eb2ebb6a819e2ca925b8021f2f0e8b3afa"
] | [
"scripts/walking_simulation.py"
] | [
"#!/usr/bin/env python\n\nimport concurrent\nimport ctypes\nimport cv2\nimport math\nimport numpy as np\nimport os\nimport pybullet as p\nimport pybullet_data\nimport random\nimport rospkg\nimport rospy\nimport tf2_ros\nimport threading\nimport numpy as np\n\nfrom cv_bridge import CvBridge\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import PoseWithCovarianceStamped, TransformStamped, Twist\nfrom quadruped_ctrl.srv import QuadrupedCmd, QuadrupedCmdResponse\nfrom pybullet_utils import gazebo_world_parser\nfrom sensor_msgs.msg import Image, Imu, JointState, PointCloud2, PointField\nfrom tf_conversions import transformations\nfrom whole_body_state_msgs.msg import WholeBodyState\nfrom whole_body_state_msgs.msg import JointState as WBJointState\nfrom whole_body_state_msgs.msg import ContactState as WBContactState\nfrom grid_map_msgs.msg import GridMap\nfrom std_msgs.msg import Float32MultiArray\nfrom std_msgs.msg import Float32\n\n\nclass StructPointer(ctypes.Structure):\n _fields_ = [(\"eff\", ctypes.c_double * 12)]\n\n\nclass WalkingSimulation(object):\n def __init__(self):\n self.terrain = \"racetrack\"\n self.camera = True\n self.get_last_vel = [0] * 3\n self.robot_height = 0.30\n self.motor_id_list = [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14]\n self.init_new_pos = [0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n\n self.__init_ros()\n self.__load_controller()\n self.__init_simulator()\n\n add_thread = threading.Thread(target=self.__thread_job)\n add_thread.start()\n\n if self.camera:\n add_thread_1 = threading.Thread(target=self.__camera_update)\n add_thread_1.start()\n\n def __init_ros(self):\n self.terrain = rospy.get_param('/simulation/terrain')\n self.camera = rospy.get_param('/simulation/camera')\n self.lateralFriction = rospy.get_param('/simulation/lateralFriction')\n self.spinningFriction = rospy.get_param('/simulation/spinningFriction')\n self.freq = rospy.get_param('/simulation/freq')\n self.stand_kp = rospy.get_param('/simulation/stand_kp')\n self.stand_kd = rospy.get_param('/simulation/stand_kd')\n self.joint_kp = rospy.get_param('/simulation/joint_kp')\n self.joint_kd = rospy.get_param('/simulation/joint_kd')\n self.elevation_map = GridMap()\n rospy.loginfo(\"lateralFriction = \" + str(self.lateralFriction) +\n \" spinningFriction = \" + str(self.spinningFriction))\n rospy.loginfo(\" freq = \" + str(self.freq) + \" PID = \" +\n str([self.stand_kp, self.stand_kd, self.joint_kp, self.joint_kd]))\n\n self.s0 = rospy.Service('gait_type', QuadrupedCmd, self.__callback_gait)\n self.s1 = rospy.Service('robot_mode', QuadrupedCmd, self.__callback_mode)\n self.s2 = rospy.Subscriber(\"cmd_vel\", Twist, self.__callback_body_vel, buff_size=30)\n self.s3 = rospy.Subscriber(\"elevation_mapping/elevation_map\", GridMap, self.__callback_elevation_map)\n\n self.robot_tf = tf2_ros.TransformBroadcaster()\n self.elevation_map = Float32MultiArray()\n self.em_map = []\n # np.asarray()\n\n def __load_controller(self):\n self.path = rospkg.RosPack().get_path('quadruped_ctrl')\n so_file = self.path.replace('src/quadruped_ctrl', 'devel/lib/libquadruped_ctrl.so')\n if(not os.path.exists(so_file)):\n so_file = self.path.replace('src/quadruped_ctrl', 'build/lib/libquadruped_ctrl.so')\n if(not os.path.exists(so_file)):\n rospy.logerr(\"cannot find cpp.so file\")\n self.cpp_gait_ctrller = ctypes.cdll.LoadLibrary(so_file)\n self.cpp_gait_ctrller.torque_calculator.restype = ctypes.POINTER(StructPointer)\n 
rospy.loginfo(\"find so file = \" + so_file)\n\n def __init_simulator(self):\n robot_start_pos = [0, 0, self.robot_height]\n p.connect(p.GUI)\n p.setAdditionalSearchPath(pybullet_data.getDataPath()) # optionally\n p.resetSimulation()\n p.setTimeStep(1.0/self.freq)\n p.setGravity(0, 0, -9.81)\n self.reset = p.addUserDebugParameter(\"reset\", 1, 0, 0)\n self.low_energy_mode = p.addUserDebugParameter(\"low_energy_mode\", 1, 0, 0)\n self.high_performance_mode = p.addUserDebugParameter(\"high_performance_mode\", 1, 0, 0)\n p.resetDebugVisualizerCamera(0.2, 45, -30, [1, -1, 1])\n\n heightPerturbationRange = 0.06\n numHeightfieldRows = 256\n numHeightfieldColumns = 256\n if self.terrain == \"plane\":\n planeShape = p.createCollisionShape(shapeType=p.GEOM_PLANE)\n ground_id = p.createMultiBody(0, planeShape)\n p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])\n p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)\n elif self.terrain == \"random1\":\n heightfieldData = [0]*numHeightfieldRows*numHeightfieldColumns\n for j in range(int(numHeightfieldColumns/2)):\n for i in range(int(numHeightfieldRows/2)):\n height = random.uniform(0, heightPerturbationRange)\n heightfieldData[2*i+2*j*numHeightfieldRows] = height\n heightfieldData[2*i+1+2*j*numHeightfieldRows] = height\n heightfieldData[2*i+(2*j+1)*numHeightfieldRows] = height\n heightfieldData[2*i+1+(2*j+1)*numHeightfieldRows] = height\n terrainShape = p.createCollisionShape(\n shapeType=p.GEOM_HEIGHTFIELD,\n meshScale=[.05, .05, 1],\n heightfieldTextureScaling=(numHeightfieldRows-1)/2,\n heightfieldData=heightfieldData,\n numHeightfieldRows=numHeightfieldRows,\n numHeightfieldColumns=numHeightfieldColumns)\n ground_id = p.createMultiBody(0, terrainShape)\n p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])\n p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)\n elif self.terrain == \"random2\":\n terrain_shape = p.createCollisionShape(\n shapeType=p.GEOM_HEIGHTFIELD,\n meshScale=[.5, .5, .5],\n fileName=\"heightmaps/ground0.txt\",\n heightfieldTextureScaling=128)\n ground_id = p.createMultiBody(0, terrain_shape)\n textureId = p.loadTexture(self.path + \"/models/grass.png\")\n p.changeVisualShape(ground_id, -1, textureUniqueId=textureId)\n p.resetBasePositionAndOrientation(ground_id, [1, 0, 0.2], [0, 0, 0, 1])\n p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)\n elif self.terrain == \"stairs\":\n planeShape = p.createCollisionShape(shapeType=p.GEOM_PLANE)\n ground_id = p.createMultiBody(0, planeShape)\n p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])\n # many boxes\n colSphereId = p.createCollisionShape(\n p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.01])\n colSphereId1 = p.createCollisionShape(\n p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.02])\n colSphereId2 = p.createCollisionShape(\n p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.03])\n colSphereId3 = p.createCollisionShape(\n p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.04])\n p.createMultiBody(100, colSphereId, basePosition=[1.0, 1.0, 0.0])\n p.changeDynamics(colSphereId, -1, lateralFriction=self.lateralFriction)\n p.createMultiBody(100, colSphereId1, basePosition=[1.2, 1.0, 0.0])\n p.changeDynamics(colSphereId1, -1, lateralFriction=self.lateralFriction)\n p.createMultiBody(100, colSphereId2, basePosition=[1.4, 1.0, 0.0])\n p.changeDynamics(colSphereId2, -1, lateralFriction=self.lateralFriction)\n p.createMultiBody(100, colSphereId3, basePosition=[1.6, 1.0, 0.0])\n p.changeDynamics(colSphereId3, -1, 
lateralFriction=self.lateralFriction)\n p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)\n elif self.terrain == \"racetrack\":\n os.chdir(self.path)\n p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)\n gazebo_world_parser.parseWorld(p, filepath=\"worlds/racetrack_day.world\")\n p.configureDebugVisualizer(shadowMapResolution=8192)\n p.configureDebugVisualizer(shadowMapWorldSize=25)\n # Enable rendering after loading the world\n p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)\n\n # Disable visualization of cameras in pybullet GUI\n p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW, 0)\n p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0)\n p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0)\n\n # Enable this if you want better performance\n p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING, 0)\n p.configureDebugVisualizer(p.COV_ENABLE_GUI, 1)\n\n # TODO: Get the URDF from robot_description parameter (or URDF file in the repo)\n self.boxId = p.loadURDF(\"mini_cheetah/mini_cheetah.urdf\", robot_start_pos, useFixedBase=False)\n p.changeDynamics(self.boxId, 3, spinningFriction=self.spinningFriction)\n p.changeDynamics(self.boxId, 7, spinningFriction=self.spinningFriction)\n p.changeDynamics(self.boxId, 11, spinningFriction=self.spinningFriction)\n p.changeDynamics(self.boxId, 15, spinningFriction=self.spinningFriction)\n\n self.__reset_robot()\n\n def __reset_robot(self):\n if self.terrain == \"racetrack\":\n robot_z = 0.4\n else:\n robot_z = self.robot_height\n p.resetBasePositionAndOrientation(\n self.boxId, [0, 0, robot_z], [0, 0, 0, 1])\n p.resetBaseVelocity(self.boxId, [0, 0, 0], [0, 0, 0])\n for j in range(12):\n p.resetJointState(\n self.boxId, self.motor_id_list[j], self.init_new_pos[j], self.init_new_pos[j+12])\n self.cpp_gait_ctrller.init_controller(\n self.__convert_type(self.freq),\n self.__convert_type([self.stand_kp, self.stand_kd, self.joint_kp, self.joint_kd]))\n\n for _ in range(10):\n p.stepSimulation()\n imu_data, leg_data, _, _ = self.__get_data_from_sim()\n self.cpp_gait_ctrller.pre_work(self.__convert_type(\n imu_data), self.__convert_type(leg_data[\"state\"]))\n\n p.setJointMotorControlArray(bodyUniqueId=self.boxId,\n jointIndices=self.motor_id_list,\n controlMode=p.VELOCITY_CONTROL,\n forces=[0]*len(self.motor_id_list))\n\n self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(1))\n\n def run(self):\n rate = rospy.Rate(self.freq) # Hz\n reset_flag = p.readUserDebugParameter(self.reset)\n low_energy_flag = p.readUserDebugParameter(self.low_energy_mode)\n high_performance_flag = p.readUserDebugParameter(self.high_performance_mode)\n while not rospy.is_shutdown():\n # check reset button state\n if(reset_flag < p.readUserDebugParameter(self.reset)):\n reset_flag = p.readUserDebugParameter(self.reset)\n rospy.logwarn(\"reset the robot\")\n self.__reset_robot()\n if(low_energy_flag < p.readUserDebugParameter(self.low_energy_mode)):\n low_energy_flag = p.readUserDebugParameter(self.low_energy_mode)\n rospy.loginfo(\"set robot to low energy mode\")\n self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(1))\n if(high_performance_flag < p.readUserDebugParameter(self.high_performance_mode)):\n high_performance_flag = p.readUserDebugParameter(self.high_performance_mode)\n rospy.loginfo(\"set robot to high performance mode\")\n self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(0))\n\n self.__simulation_step()\n\n rate.sleep()\n\n def __simulation_step(self):\n # get data from simulator\n 
imu_data, leg_data, base_pos, contact_points = self.__get_data_from_sim()\n\n # pub msg\n self.__pub_nav_msg(base_pos, imu_data)\n self.__pub_ground_truth_pose(base_pos, imu_data)\n self.__pub_imu_msg(imu_data)\n self.__pub_joint_states(leg_data)\n self.__pub_whole_body_state(imu_data, leg_data, base_pos, contact_points)\n\n # call cpp function to calculate mpc tau\n tau = self.cpp_gait_ctrller.torque_calculator(self.__convert_type(\n imu_data), self.__convert_type(leg_data[\"state\"]))\n\n # set tau to simulator\n p.setJointMotorControlArray(bodyUniqueId=self.boxId,\n jointIndices=self.motor_id_list,\n controlMode=p.TORQUE_CONTROL,\n forces=tau.contents.eff)\n\n p.stepSimulation()\n\n def __get_ros_depth_image_msg(self, depth):\n depth_raw_image = self.far * self.near / (self.far - (self.far - self.near) * depth)\n depth_raw_image = (depth_raw_image * 1000).astype(np.uint16)\n msg = CvBridge().cv2_to_imgmsg(depth_raw_image)\n msg.header.stamp = rospy.Time.now()\n msg.header.frame_id = \"body\"\n return msg\n\n def __get_ros_rgb_image_msg(self, rgba):\n image = cv2.cvtColor(np.uint8(rgba), code=cv2.COLOR_RGBA2RGB)\n msg = CvBridge().cv2_to_imgmsg(image)\n msg.header.stamp = rospy.Time().now()\n msg.header.frame_id = \"cam\"\n msg.encoding = \"rgb8\"\n return msg\n\n def calIntrinsicMatrix(self):\n f = math.sqrt(self.width * self.width / 4.0 + self.height * self.height / 4.0) / 2.0 / \\\n math.tan(self.fov / 2.0 / 180.0 * math.pi)\n return (f, 0.0, self.width / 2.0 - 0.5, 0.0, f, self.height / 2.0 - 0.5, 0.0, 0.0, 1.0)\n\n def __generate_scene_pointcloud(self, depth, rgba):\n '''Generate point cloud from depth image and color image\n Args:\n depth(str / np.array): Depth image path or depth.\n rgb(str / np.array): RGB image path or RGB values.\n intrinsics(np.array): Camera intrinsics matrix.\n depth_scale(float): The depth factor.\n Returns:\n np.array(float), np.array(int): points and colors\n '''\n intrinsics = np.array(self.calIntrinsicMatrix()).reshape((3, 3))\n depth_scale = 1.0\n depths = self.far * self.near / (self.far - (self.far - self.near) * depth)\n colors = cv2.cvtColor(np.uint8(rgba), code=cv2.COLOR_RGBA2RGB)\n\n fx, fy = intrinsics[0, 0], intrinsics[1, 1]\n cx, cy = intrinsics[0, 2], intrinsics[1, 2]\n\n xmap, ymap = np.arange(colors.shape[1]), np.arange(colors.shape[0])\n xmap, ymap = np.meshgrid(xmap, ymap)\n\n points_z = depths / depth_scale\n points_x = (xmap - cx) / fx * points_z\n points_y = (ymap - cy) / fy * points_z\n\n mask = (points_z > 0)\n points = np.stack([points_x, points_y, points_z], axis=-1)\n points = points[mask]\n colors = colors[mask]\n return points, colors\n\n def __get_ros_pointcloud_msg(self, depth, rgba):\n points, colors = self.__generate_scene_pointcloud(depth, rgba)\n points = points.astype(np.float32)\n msg = PointCloud2()\n msg.header.stamp = rospy.Time().now()\n\n C = np.zeros((colors[:, 0].size, 4), dtype=np.uint8)\n\n C[:, 0] = colors[:, 2].astype(np.uint8)\n C[:, 1] = colors[:, 1].astype(np.uint8)\n C[:, 2] = colors[:, 0].astype(np.uint8)\n\n C = C.view(\"uint32\")\n C = C.view(\"float32\")\n pointsColor = np.zeros((points.shape[0], 1), \\\n dtype={\n \"names\": ( \"x\", \"y\", \"z\", \"rgba\" ),\n \"formats\": ( \"f4\", \"f4\", \"f4\", \"f4\" )} )\n\n points = points.astype(np.float32)\n\n pointsColor[\"x\"] = points[:, 0].reshape((-1, 1))\n pointsColor[\"y\"] = points[:, 1].reshape((-1, 1))\n pointsColor[\"z\"] = points[:, 2].reshape((-1, 1))\n pointsColor[\"rgba\"] = C\n msg.header.frame_id = \"cam\"\n if len(points.shape) == 3:\n 
msg.height = points.shape[1]\n msg.width = points.shape[0]\n else:\n msg.height = 1\n msg.width = len(points)\n\n msg.fields = [\n PointField('x', 0, PointField.FLOAT32, 1),\n PointField('y', 4, PointField.FLOAT32, 1),\n PointField('z', 8, PointField.FLOAT32, 1),\n PointField('rgb', 12, PointField.FLOAT32, 1)]\n msg.is_bigendian = False\n msg.point_step = 16\n msg.row_step = msg.point_step * points.shape[0]\n msg.is_dense = int(np.isfinite(points).all())\n msg.data = pointsColor.tostring()\n return msg\n\n # https://github.com/OCRTOC/OCRTOC_software_package/blob/master/pybullet_simulator/scripts/pybullet_env.py\n def __camera_update(self):\n rate = rospy.Rate(20)\n\n # Projection matrix parameters\n self.near = 0.01\n self.far = 3.0\n self.fov = 60\n step_index = 4\n self.width = int(320 / step_index)\n self.height = int(240 / step_index)\n self.aspect = float(self.width) / float(self.height)\n\n # Init ROS publishers\n self.pointcloud_publisher = rospy.Publisher(\"/cam0/depth/points\", PointCloud2, queue_size=1)\n self.image_publisher = rospy.Publisher(\"/cam0/image_raw\", Image, queue_size=1)\n self.depth_publisher = rospy.Publisher(\"/cam0/image_depth\", Image, queue_size=1)\n\n rospy.loginfo(\"Starting camera thread\")\n\n T1 = np.mat([[0, -1.0/2.0, np.sqrt(3.0)/2.0, 0.25], [-1, 0, 0, 0],\n [0, -np.sqrt(3.0)/2.0, -1.0/2.0, 0], [0, 0, 0, 1]])\n\n cameraEyePosition = [0.3, 0, 0.26436384367425125]\n cameraTargetPosition = [1.0, 0, 0]\n cameraUpVector = [0, 0, 1]\n\n while not rospy.is_shutdown():\n cubePos, cubeOrn = p.getBasePositionAndOrientation(self.boxId)\n get_matrix = p.getMatrixFromQuaternion(cubeOrn)\n\n T2 = np.mat([[get_matrix[0], get_matrix[1], get_matrix[2], cubePos[0]],\n [get_matrix[3], get_matrix[4], get_matrix[5], cubePos[1]],\n [get_matrix[6], get_matrix[7], get_matrix[8], cubePos[2]],\n [0, 0, 0, 1]])\n\n T3 = np.array(T2*T1)\n\n cameraEyePosition = T3[0:3, 3].tolist()\n cameraTargetPosition = (np.mat(T3) * np.array([[0], [0], [1], [1]]))[0:3]\n\n # Get quaternion from numpy homogeneus matrix\n cameraQuat = transformations.quaternion_from_matrix(T3)\n\n self.robot_tf.sendTransform(self.__fill_tf_message(\"world\", \"body\", cubePos, cubeOrn))\n self.robot_tf.sendTransform(\n self.__fill_tf_message(\"world\", \"cam\", cameraEyePosition, cameraQuat))\n self.robot_tf.sendTransform(\n self.__fill_tf_message(\"world\", \"tar\", cameraTargetPosition, cubeOrn))\n\n viewMatrix = p.computeViewMatrix(\n cameraEyePosition, cameraTargetPosition, cameraUpVector)\n projectionMatrix = p.computeProjectionMatrixFOV(\n self.fov, self.aspect, self.near, self.far)\n _, _, rgba, depth, _ = p.getCameraImage(\n self.width,\n self.height,\n viewMatrix=viewMatrix,\n projectionMatrix=projectionMatrix,\n shadow=1,\n lightDirection=[1, 1, 1],\n renderer=p.ER_BULLET_HARDWARE_OPENGL,\n flags=p.ER_NO_SEGMENTATION_MASK)\n\n with concurrent.futures.ThreadPoolExecutor() as executor:\n f1 = executor.submit(self.__get_ros_depth_image_msg, depth)\n f2 = executor.submit(self.__get_ros_rgb_image_msg, rgba)\n f3 = executor.submit(self.__get_ros_pointcloud_msg, depth, rgba)\n\n r1 = f1.result()\n r2 = f2.result()\n r3 = f3.result()\n\n if(self.depth_publisher.get_num_connections() > 0):\n self.depth_publisher.publish(r1)\n if(self.image_publisher.get_num_connections() > 0):\n self.image_publisher.publish(r2)\n if(self.pointcloud_publisher.get_num_connections() > 0):\n self.pointcloud_publisher.publish(r3)\n\n rate.sleep()\n\n def __convert_type(self, input):\n ctypes_map = {\n int: ctypes.c_int,\n float: 
ctypes.c_double,\n str: ctypes.c_char_p,\n }\n input_type = type(input)\n if input_type is list:\n length = len(input)\n if length == 0:\n rospy.logerr(\"convert type failed...input is \" + input)\n return 0\n else:\n arr = (ctypes_map[type(input[0])] * length)()\n for i in range(length):\n arr[i] = bytes(\n input[i], encoding=\"utf-8\") if (type(input[0]) is str) else input[i]\n return arr\n else:\n if input_type in ctypes_map:\n return ctypes_map[input_type](bytes(input, encoding=\"utf-8\") if type(input) is str else input)\n else:\n rospy.logerr(\"convert type failed...input is \"+input)\n return 0\n\n def __thread_job(self):\n rospy.spin()\n\n def __callback_gait(self, req):\n self.cpp_gait_ctrller.set_gait_type(self.__convert_type(req.cmd))\n return QuadrupedCmdResponse(0, \"get the gait\")\n\n def __callback_mode(self, req):\n self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(req.cmd))\n return QuadrupedCmdResponse(0, \"get the mode\")\n\n def __callback_elevation_map(self, msg):\n map = Float32MultiArray()\n map = msg.data\n map_1 = [0]*3600\n arr = np.array(map_1)\n for i in range(len(map)):\n arr = arr + np.array(msg.data[i].data)\n\n #rospy.loginfo(len(arr))\n\n #self.em_map = arr.tolist()\n #rospy.loginfo(len(self.em_map))\n self.cpp_gait_ctrller.store_map(self.__convert_type(arr.tolist()))\n\n def __callback_body_vel(self, msg):\n vel = [msg.linear.x, msg.linear.y, msg.angular.z]\n self.cpp_gait_ctrller.set_robot_vel(self.__convert_type(vel))\n\n def __fill_tf_message(self, parent_frame, child_frame, translation, rotation):\n t = TransformStamped()\n t.header.stamp = rospy.Time.now()\n t.header.frame_id = parent_frame\n t.child_frame_id = child_frame\n t.transform.translation.x = translation[0]\n t.transform.translation.y = translation[1]\n t.transform.translation.z = translation[2]\n t.transform.rotation.x = rotation[0]\n t.transform.rotation.y = rotation[1]\n t.transform.rotation.z = rotation[2]\n t.transform.rotation.w = rotation[3]\n return t\n\n def __pub_nav_msg(self, base_pos, imu_data):\n pub_odom = rospy.Publisher(\"/robot_odom\", Odometry, queue_size=30)\n odom = Odometry()\n odom.header.stamp = rospy.Time.now()\n odom.header.frame_id = \"world\"\n odom.child_frame_id = \"body\"\n odom.pose.pose.position.x = base_pos[0]\n odom.pose.pose.position.y = base_pos[1]\n odom.pose.pose.position.z = base_pos[2]\n odom.pose.pose.orientation.x = imu_data[3]\n odom.pose.pose.orientation.y = imu_data[4]\n odom.pose.pose.orientation.z = imu_data[5]\n odom.pose.pose.orientation.w = imu_data[6]\n\n pub_odom.publish(odom)\n\n # Publish odom Tf\n t = self.__fill_tf_message(\n odom.header.frame_id, odom.child_frame_id, base_pos[0:3], imu_data[3:7])\n self.robot_tf.sendTransform(t)\n\n def __pub_ground_truth_pose(self, base_pos, imu_data):\n pub_gt_pose = rospy.Publisher(\"/gt_pose\", PoseWithCovarianceStamped, queue_size=1)\n gt_pose = PoseWithCovarianceStamped()\n gt_pose.header.stamp = rospy.Time.now()\n gt_pose.header.frame_id = \"body\"\n gt_pose.pose.pose.position.x = base_pos[0]\n gt_pose.pose.pose.position.y = base_pos[1]\n gt_pose.pose.pose.position.z = base_pos[2]\n gt_pose.pose.pose.orientation.x = imu_data[3]\n gt_pose.pose.pose.orientation.y = imu_data[4]\n gt_pose.pose.pose.orientation.z = imu_data[5]\n gt_pose.pose.pose.orientation.w = imu_data[6]\n pub_gt_pose.publish(gt_pose)\n\n def __pub_imu_msg(self, imu_data):\n pub_imu = rospy.Publisher(\"/imu0\", Imu, queue_size=30)\n imu_msg = Imu()\n imu_msg.linear_acceleration.x = imu_data[0]\n 
imu_msg.linear_acceleration.y = imu_data[1]\n imu_msg.linear_acceleration.z = imu_data[2]\n imu_msg.angular_velocity.x = imu_data[7]\n imu_msg.angular_velocity.y = imu_data[8]\n imu_msg.angular_velocity.z = imu_data[9]\n imu_msg.orientation.x = imu_data[3]\n imu_msg.orientation.y = imu_data[4]\n imu_msg.orientation.z = imu_data[5]\n imu_msg.orientation.w = imu_data[6]\n imu_msg.header.stamp = rospy.Time.now()\n imu_msg.header.frame_id = \"body\"\n pub_imu.publish(imu_msg)\n\n def __pub_joint_states(self, joint_states):\n pub_js = rospy.Publisher(\"joint_states\", JointState, queue_size=30)\n js_msg = JointState()\n js_msg.name = []\n js_msg.position = []\n js_msg.velocity = []\n for idx, name in enumerate(joint_states[\"name\"]):\n js_msg.name.append(name.decode('utf-8'))\n js_msg.position.append(joint_states[\"state\"][idx])\n js_msg.velocity.append(joint_states[\"state\"][12+idx])\n js_msg.header.stamp = rospy.Time.now()\n js_msg.header.frame_id = \"body\"\n pub_js.publish(js_msg)\n\n def __pub_whole_body_state(self, imu_data, leg_data, base_pos, contact_points):\n wbs_pub = rospy.Publisher(\"wb_state\", WholeBodyState, queue_size=10)\n wbs = WholeBodyState()\n wbs.header.stamp = rospy.Time.now()\n wbs.header.frame_id = \"world\"\n wbs.time = wbs.header.stamp.secs\n # This represents the base state (CoM motion, angular motion and centroidal momenta)\n wbs.centroidal.com_position.x = base_pos[0]\n wbs.centroidal.com_position.y = base_pos[1]\n wbs.centroidal.com_position.z = base_pos[2]\n wbs.centroidal.base_orientation.x = imu_data[3]\n wbs.centroidal.base_orientation.y = imu_data[4]\n wbs.centroidal.base_orientation.z = imu_data[5]\n wbs.centroidal.base_orientation.w = imu_data[6]\n wbs.centroidal.base_angular_velocity.x = imu_data[7]\n wbs.centroidal.base_angular_velocity.y = imu_data[8]\n wbs.centroidal.base_angular_velocity.z = imu_data[9]\n # This represents the joint state (position, velocity, acceleration and effort)\n wbs.joints = []\n for idx, name in enumerate(leg_data[\"name\"]):\n js_msg = WBJointState()\n js_msg.name = name.decode('utf-8')\n js_msg.position = leg_data[\"state\"][idx]\n js_msg.velocity = leg_data[\"state\"][12+idx]\n wbs.joints.append(js_msg)\n # This represents the end-effector state (cartesian position and contact forces)\n wbs.contacts = []\n for contact_point in contact_points:\n contact_msg = WBContactState()\n contact_msg.name = \"body\"\n contact_msg.type = WBContactState.ACTIVE\n contact_msg.pose.position.x = contact_point[5][0]\n contact_msg.pose.position.y = contact_point[5][1]\n contact_msg.pose.position.z = contact_point[5][2]\n contact_msg.wrench.force.z = contact_point[9]\n contact_msg.surface_normal.x = contact_point[7][0]\n contact_msg.surface_normal.y = contact_point[7][1]\n contact_msg.surface_normal.z = contact_point[7][2]\n contact_msg.friction_coefficient = self.lateralFriction\n wbs.contacts.append(contact_msg)\n wbs_pub.publish(wbs)\n\n def __get_motor_joint_states(self, robot):\n joint_number_range = range(p.getNumJoints(robot))\n joint_states = p.getJointStates(robot, joint_number_range)\n joint_infos = [p.getJointInfo(robot, i) for i in joint_number_range]\n joint_states, joint_name = \\\n zip(*[(j, i[1]) for j, i in zip(joint_states, joint_infos) if i[2] != p.JOINT_FIXED])\n joint_positions = [state[0] for state in joint_states]\n joint_velocities = [state[1] for state in joint_states]\n joint_torques = [state[3] for state in joint_states]\n return joint_positions, joint_velocities, joint_torques, joint_name\n\n def 
__get_data_from_sim(self):\n # print(\"Getting data!!!!\")\n get_matrix = []\n get_velocity = []\n get_invert = []\n imu_data = [0] * 10\n leg_data = {}\n leg_data[\"state\"] = [0] * 24\n leg_data[\"name\"] = [\"\"] * 12\n\n base_pose = p.getBasePositionAndOrientation(self.boxId)\n\n get_velocity = p.getBaseVelocity(self.boxId)\n get_invert = p.invertTransform(base_pose[0], base_pose[1])\n get_matrix = p.getMatrixFromQuaternion(get_invert[1])\n\n # IMU data\n imu_data[3] = base_pose[1][0]\n imu_data[4] = base_pose[1][1]\n imu_data[5] = base_pose[1][2]\n imu_data[6] = base_pose[1][3]\n\n imu_data[7] = get_matrix[0] * get_velocity[1][0] + get_matrix[1] * \\\n get_velocity[1][1] + get_matrix[2] * get_velocity[1][2]\n imu_data[8] = get_matrix[3] * get_velocity[1][0] + get_matrix[4] * \\\n get_velocity[1][1] + get_matrix[5] * get_velocity[1][2]\n imu_data[9] = get_matrix[6] * get_velocity[1][0] + get_matrix[7] * \\\n get_velocity[1][1] + get_matrix[8] * get_velocity[1][2]\n\n # calculate the acceleration of the robot\n linear_X = (get_velocity[0][0] - self.get_last_vel[0]) * self.freq\n linear_Y = (get_velocity[0][1] - self.get_last_vel[1]) * self.freq\n linear_Z = 9.8 + (get_velocity[0][2] - self.get_last_vel[2]) * self.freq\n imu_data[0] = get_matrix[0] * linear_X + \\\n get_matrix[1] * linear_Y + get_matrix[2] * linear_Z\n imu_data[1] = get_matrix[3] * linear_X + \\\n get_matrix[4] * linear_Y + get_matrix[5] * linear_Z\n imu_data[2] = get_matrix[6] * linear_X + \\\n get_matrix[7] * linear_Y + get_matrix[8] * linear_Z\n\n # joint data\n joint_positions, joint_velocities, _, joint_names = \\\n self.__get_motor_joint_states(self.boxId)\n leg_data[\"state\"][0:12] = joint_positions\n leg_data[\"state\"][12:24] = joint_velocities\n leg_data[\"name\"] = joint_names\n\n # CoM velocity\n self.get_last_vel = [get_velocity[0][0], get_velocity[0][1], get_velocity[0][2]]\n\n # Contacts\n contact_points = p.getContactPoints(self.boxId)\n\n return imu_data, leg_data, base_pose[0], contact_points\n\n\nif __name__ == '__main__':\n rospy.init_node('quadruped_simulator', anonymous=True)\n walking_simulation = WalkingSimulation()\n walking_simulation.run()\n"
] | [
[
"numpy.uint8",
"numpy.array",
"numpy.zeros",
"numpy.stack",
"numpy.arange",
"numpy.isfinite",
"numpy.sqrt",
"numpy.meshgrid",
"numpy.mat"
]
] |
EduardoAcacio/DataScienceGraficos | [
"0294ade9c4e507f44a42190bc8a9ddacf500fe42"
] | [
"Graficos/graficoScatterplotLinhaReta.py"
] | [
"import matplotlib.pyplot as pyplot\n\nx1 = [1,100]\ny1 = [1,100]\n\nx2 = [1,2,3,22,5,3,32,52,2,5,3,32,5]\ny2 = [2,3,4,1,16,4,13,23,4,1,16,4,45]\n\ntitulo = \"Grafico de Scatterplot\"\neixox = \"Eixo X\"\neixoy = \"Eixo Y\"\n\npyplot.title(titulo)\npyplot.xlabel(eixox)\npyplot.ylabel(eixoy)\n\npyplot.scatter(x2, y2, label = \"Dispersão\", color = \"r\")\npyplot.plot(x1, y1)\npyplot.legend()\npyplot.show()\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter"
]
] |
hcoura/color-extractor | [
"a69fc4a9a8b7c90d292f954d289c84a38323eda6"
] | [
"color_extractor/image_to_color.py"
] | [
"import numpy as np\n\nfrom .back import Back\nfrom .cluster import Cluster\nfrom .name import Name\nfrom .resize import Resize\nfrom .selector import Selector\nfrom .skin import Skin\nfrom .task import Task\n\n\nclass ImageToColor(Task):\n def __init__(self, samples, labels, settings=None):\n\n if settings is None:\n settings = {}\n\n super(ImageToColor, self).__init__(settings)\n self._resize = Resize(self._settings['resize'])\n self._back = Back(self._settings['back'])\n self._skin = Skin(self._settings['skin'])\n self._cluster = Cluster(self._settings['cluster'])\n self._selector = Selector(self._settings['selector'])\n self._name = Name(samples, labels, self._settings['name'])\n\n def get(self, img):\n resized = self._resize.get(img)\n back_mask = self._back.get(resized)\n skin_mask = self._skin.get(resized)\n mask = back_mask | skin_mask\n k, labels, clusters_centers = self._cluster.get(resized[~mask])\n centers = self._selector.get(k, labels, clusters_centers)\n colors = [self._name.get(c) for c in centers]\n flattened = list({c for l in colors for c in l})\n\n if self._settings['debug'] is None:\n return flattened\n\n colored_labels = np.zeros((labels.shape[0], 3), np.float64)\n for i, c in enumerate(clusters_centers):\n colored_labels[labels == i] = c\n\n clusters = np.zeros(resized.shape, np.float64)\n clusters[~mask] = colored_labels\n\n return flattened, {\n 'resized': resized,\n 'back': back_mask,\n 'skin': skin_mask,\n 'clusters': clusters\n }\n\n @staticmethod\n def _default_settings():\n return {\n 'resize': {},\n 'back': {},\n 'skin': {},\n 'cluster': {},\n 'selector': {},\n 'name': {},\n }\n"
] | [
[
"numpy.zeros"
]
] |
harsul/SiamYolact | [
"62a76f34e279a7673f51b284e679372d273839b6"
] | [
"reid/reid.py"
] | [
"import keras\nfrom keras.models import Sequential, Model\nfrom keras.applications.xception import Xception, preprocess_input\nfrom keras.layers import Dense, Dropout, Input, Flatten, concatenate\nfrom os.path import isfile, isdir, join\nfrom os import makedirs\nfrom pak.util import download as dl\nimport cv2\nimport numpy as np\nimport h5py\nimport warnings\n\n\nclass ReId:\n\n def __init__(self, root='./tmp', url=None, name=None):\n \"\"\"\n create a new instance of the ReId network\n :param root:\n \"\"\"\n # if url is None:\n # url = 'http://188.138.127.15:81/models/reid.h5'\n if name is None:\n name = 'reid.h5'\n if not isdir(root):\n makedirs(root)\n\n # filepath = join(root, name)\n # if not isfile(filepath):\n # print('could not find model.. downloading it')\n # dl.download(url, filepath)\n\n if keras.__version__.startswith('2.2'):\n warnings.warn(\n \"This model only works properly with keras 2.1.3. Weights for other versions might not work properly\")\n\n # ------- build model -------\n seq = Sequential()\n xception = Xception(weights='imagenet', input_shape=(221, 221, 3),\n include_top=False, pooling='avg')\n seq.add(xception)\n\n # freeze first layers in pre-trained model\n for layer in xception.layers[0:-20]:\n layer.trainable = False\n\n input_a = Input(shape=(221, 221, 3))\n input_b = Input(shape=(221, 221, 3))\n\n out_a = seq(input_a)\n out_b = seq(input_b)\n\n concatenated = concatenate([out_a, out_b])\n hidden1 = Dense(128, activation='relu', name='dense_1')(concatenated)\n hidden_drp1 = Dropout(0.7)(hidden1)\n hidden2 = Dense(32, activation='relu', name='dense_2')(hidden_drp1)\n hidden_drp2 = Dropout(0.1)(hidden2)\n out = Dense(1, activation='sigmoid', name='dense_3')(hidden_drp2)\n\n model = Model([input_a, input_b], out)\n print('Siamese model:', filepath)\n model.load_weights(filepath)\n self.model = model\n\n def predict(self, A, B):\n \"\"\"\n compare two images\n :param A: images, range [0 .. 255]\n :param B:\n :return:\n \"\"\"\n s1 = 221\n s2 = 221\n size = (s1, s2)\n if isinstance(A, list) or len(A.shape) == 4:\n assert len(A) == len(B)\n n = len(A)\n assert n > 0\n Xa = np.zeros((n, s1, s2, 3))\n Xb = np.zeros((n, s1, s2, 3))\n for idx, (a, b) in enumerate(zip(A, B)):\n Xa[idx, :, :, :] = cv2.resize(a, size)\n Xb[idx, :, :, :] = cv2.resize(b, size)\n Xa = preprocess_input(Xa)\n Xb = preprocess_input(Xb)\n elif len(A.shape) == 3:\n a = A\n b = B\n assert len(b.shape) == 3\n w1, h1, c1 = a.shape\n w2, h2, c2 = b.shape\n assert c1 == c2 == 3\n \n if w1 != s1 or h1 != s2:\n a = cv2.resize(a, size)\n if w2 != s1 or h2 != s2:\n b = cv2.resize(b, size)\n Xa = preprocess_input(a.astype('float64'))\n Xb = preprocess_input(b.astype('float64'))\n Xa = np.expand_dims(Xa, axis=0)\n Xb = np.expand_dims(Xb, axis=0)\n else:\n raise ValueError('wrong input shape' + str(A.shape))\n \n Y = self.model.predict([Xa, Xb])\n return Y[:, 0]\n"
] | [
[
"numpy.expand_dims",
"numpy.zeros"
]
] |
PGE310-Students/assignment13 | [
"e0b6781c6b8e0bc95cc9f3850623eca04733df11"
] | [
"test.py"
] | [
"#!/usr/bin/env python\n\n# Copyright 2020-2021 John T. Foster\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom unittest.mock import MagicMock\n\nimport nbconvert\n\nimport numpy as np\n\n\n\nwith open(\"assignment13.ipynb\") as f:\n exporter = nbconvert.PythonExporter()\n python_file, _ = exporter.from_file(f)\n\nwith open(\"assignment13.py\", \"w\") as f:\n f.write(python_file)\n\nfrom assignment13 import LinearSystem\n\ntry:\n from assignment12 import Matrix\nexcept:\n pass\n\ntry:\n from assignment13 import Matrix\nexcept:\n pass\n\nclass TestSolution(unittest.TestCase):\n \n def test_is_derived_from_Matrix(self):\n \n self.assertTrue(issubclass(LinearSystem, Matrix))\n \n def test_row_swap_called(self):\n \n A = np.array([[1, 2], [4, 3]])\n ls = LinearSystem(A)\n ls.row_swap = MagicMock()\n ls.row_echelon()\n assert ls.row_swap.called\n \n def test_row_combine_called(self):\n \n A = np.array([[1, 2], [4, 3]])\n ls = LinearSystem(A)\n ls.row_combine = MagicMock()\n ls.row_echelon()\n assert ls.row_combine.called\n \n def test_row_echelon(self):\n \n A = np.array([[1, 3, 4],[5, 4, 2],[1, 7, 9]])\n ls = LinearSystem(A)\n ls.row_echelon() \n np.testing.assert_array_almost_equal(ls.mat, \n np.array([[ 5., 4., 2.],\n [0., 6.2, 8.6],\n [ 0., 0., 0.5483871]]), decimal=6)\n \n def test_back_substitute(self):\n \n A = np.array([[1, 3, 5],[5, 2, 2],[1, 7, 1]])\n b = np.array([1, 3, 4])\n ls = LinearSystem(A, b)\n ls.row_echelon()\n np.testing.assert_array_almost_equal(ls.back_substitute(),\n np.linalg.solve(A, b),\n decimal=6)\n \n def test_gauss_solve(self):\n \n A = np.array([[1, 3, 5],[5, 2, 2],[1, 7, 1]])\n b = np.array([1, 3, 4])\n ls = LinearSystem(A, b)\n np.testing.assert_array_almost_equal(ls.gauss_solve(),\n np.linalg.solve(A, b),\n decimal=6)\n \n def test_reduced_row_echelon(self):\n \n A = np.array([[1, 3, 5],[5, 2, 2],[1, 7, 1]])\n b = np.array([1, 3, 4])\n ls = LinearSystem(A, b)\n ls.reduced_row_echelon()\n np.testing.assert_array_almost_equal(ls.mat[:,-1],\n np.linalg.solve(A, b),\n decimal=6)\n \n def test_inverse(self):\n \n A = np.array([[1, 2, 5],[5, 22, 17],[11, 7, 1]])\n ls = LinearSystem(A)\n np.testing.assert_array_almost_equal(ls.inverse(),\n np.linalg.inv(A),\n decimal=6)\n \n def test_row_echelon_private(self):\n \n A = np.array([[1, 3, 5],[5, 2, 2],[1, 7, 1]])\n ls = LinearSystem(A)\n ls.row_echelon() \n np.testing.assert_array_almost_equal(ls.mat, \n np.array([[5., 2., 2.],\n [0., 6.6, 0.6],\n [0., 0., 4.36363636]]), decimal=6)\n \n def test_back_substitute_private(self):\n \n A = np.array([[1, 2, 5],[5, 2, 17],[1, 7, 1]])\n b = np.array([1, 3, 12])\n ls = LinearSystem(A, b)\n ls.row_echelon()\n np.testing.assert_array_almost_equal(ls.back_substitute(),\n np.linalg.solve(A, b),\n decimal=6)\n \n def test_gauss_solve_private(self):\n \n A = np.array([[1, 2, 5],[5, 2, 17],[1, 7, 1]])\n b = np.array([1, 3, 12])\n ls = LinearSystem(A, b)\n np.testing.assert_array_almost_equal(ls.gauss_solve(),\n np.linalg.solve(A, b),\n decimal=6)\n \n def 
test_reduced_row_echelon_private(self):\n \n A = np.array([[1, 2, 5],[5, 2, 17],[1, 7, 1]])\n b = np.array([1, 3, 12])\n ls = LinearSystem(A, b)\n ls.reduced_row_echelon()\n np.testing.assert_array_almost_equal(ls.mat[:,-1],\n np.linalg.solve(A, b),\n decimal=6)\n \n def test_inverse_private(self):\n \n A = np.array([[11, 2, 5],[51, 22, 17],[11, 7, 1]])\n ls = LinearSystem(A)\n np.testing.assert_array_almost_equal(ls.inverse(),\n np.linalg.inv(A),\n decimal=6)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.linalg.inv",
"numpy.linalg.solve"
]
] |
abaheti95/DC-NeuralConversation | [
"14f3c03adfb7379b48a325c0b3416eee39af7fdb"
] | [
"preprocess.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nimport glob\nimport sys\n\nimport torch\n\nimport onmt.io\nimport opts\n\n\ndef check_existing_pt_files(opt):\n # We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt\n # when training, so check to avoid tampering with existing pt files\n # or mixing them up.\n for t in ['train', 'valid', 'vocab']:\n pattern = opt.save_data + '.' + t + '*.pt'\n if glob.glob(pattern):\n sys.stderr.write(\"Please backup exisiting pt file: %s, \"\n \"to avoid tampering!\\n\" % pattern)\n sys.exit(1)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='preprocess.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.preprocess_opts(parser)\n\n opt = parser.parse_args()\n torch.manual_seed(opt.seed)\n\n check_existing_pt_files(opt)\n\n return opt\n\n\ndef build_save_text_dataset_in_shards(src_corpus, tgt_corpus, fields,\n corpus_type, opt):\n '''\n Divide the big corpus into shards, and build dataset separately.\n This is currently only for data_type=='text'.\n\n The reason we do this is to avoid taking up too much memory due\n to sucking in a huge corpus file.\n\n To tackle this, we only read in part of the corpus file of size\n `max_shard_size`(actually it is multiples of 64 bytes that equals\n or is slightly larger than this size), and process it into dataset,\n then write it to disk along the way. By doing this, we only focus on\n part of the corpus at any moment, thus effectively reducing memory use.\n According to test, this method can reduce memory footprint by ~50%.\n\n Note! As we process along the shards, previous shards might still\n stay in memory, but since we are done with them, and no more\n reference to them, if there is memory tight situation, the OS could\n easily reclaim these memory.\n\n If `max_shard_size` is 0 or is larger than the corpus size, it is\n effectively preprocessed into one dataset, i.e. no sharding.\n '''\n\n corpus_size = os.path.getsize(src_corpus)\n if corpus_size > 10 * (1024**2) and opt.max_shard_size == 0:\n print(\"Warning. The corpus %s is larger than 10M bytes, you can \"\n \"set '-max_shard_size' to process it by small shards \"\n \"to use less memory.\" % src_corpus)\n\n ret_list = []\n src_iter = onmt.io.ShardedTextCorpusIterator(\n src_corpus, opt.src_seq_length_trunc,\n \"src\", opt.max_shard_size)\n tgt_iter = onmt.io.ShardedTextCorpusIterator(\n tgt_corpus, opt.tgt_seq_length_trunc,\n \"tgt\", opt.max_shard_size,\n assoc_iter=src_iter)\n\n print(' * divide corpus into shards and build dataset separately'\n '(shard_size = %d bytes).' 
% opt.max_shard_size)\n\n index = 0\n while not src_iter.hit_end():\n index += 1\n dataset = onmt.io.TextDataset(\n fields, src_iter, tgt_iter,\n src_iter.num_feats, tgt_iter.num_feats,\n src_seq_length=opt.src_seq_length,\n tgt_seq_length=opt.tgt_seq_length,\n dynamic_dict=opt.dynamic_dict)\n\n # We save fields in vocab.pt seperately, so make it empty.\n dataset.fields = []\n\n pt_file = \"{:s}.{:s}.{:d}.pt\".format(\n opt.save_data, corpus_type, index)\n print(\" * saving train data shard to %s.\" % pt_file)\n torch.save(dataset, pt_file)\n\n ret_list.append(pt_file)\n\n return ret_list\n\n\ndef build_save_dataset(corpus_type, fields, opt):\n assert corpus_type in ['train', 'valid']\n\n if corpus_type == 'train':\n src_corpus = opt.train_src\n tgt_corpus = opt.train_tgt\n else:\n src_corpus = opt.valid_src\n tgt_corpus = opt.valid_tgt\n\n # Currently we only do preprocess sharding for corpus: data_type=='text'.\n if opt.data_type == 'text':\n return build_save_text_dataset_in_shards(\n src_corpus, tgt_corpus, fields,\n corpus_type, opt)\n\n # For data_type == 'img' or 'audio', currently we don't do\n # preprocess sharding. We only build a monolithic dataset.\n # But since the interfaces are uniform, it would be not hard\n # to do this should users need this feature.\n dataset = onmt.io.build_dataset(\n fields, opt.data_type, src_corpus, tgt_corpus,\n src_dir=opt.src_dir,\n src_seq_length=opt.src_seq_length,\n tgt_seq_length=opt.tgt_seq_length,\n src_seq_length_trunc=opt.src_seq_length_trunc,\n tgt_seq_length_trunc=opt.tgt_seq_length_trunc,\n dynamic_dict=opt.dynamic_dict,\n sample_rate=opt.sample_rate,\n window_size=opt.window_size,\n window_stride=opt.window_stride,\n window=opt.window)\n\n # We save fields in vocab.pt seperately, so make it empty.\n dataset.fields = []\n\n pt_file = \"{:s}.{:s}.pt\".format(opt.save_data, corpus_type)\n print(\" * saving train dataset to %s.\" % pt_file)\n torch.save(dataset, pt_file)\n\n return [pt_file]\n\n\ndef build_save_vocab(train_dataset, fields, opt):\n fields = onmt.io.build_vocab(train_dataset, fields, opt.data_type,\n opt.share_vocab,\n opt.src_vocab_size,\n opt.src_words_min_frequency,\n opt.tgt_vocab_size,\n opt.tgt_words_min_frequency)\n\n # Can't save fields, so remove/reconstruct at training time.\n vocab_file = opt.save_data + '.vocab.pt'\n torch.save(onmt.io.save_fields_to_vocab(fields), vocab_file)\n\n\ndef main():\n opt = parse_args()\n\n print(\"Extracting features...\")\n src_nfeats = onmt.io.get_num_features(opt.data_type, opt.train_src, 'src')\n tgt_nfeats = onmt.io.get_num_features(opt.data_type, opt.train_tgt, 'tgt')\n print(\" * number of source features: %d.\" % src_nfeats)\n print(\" * number of target features: %d.\" % tgt_nfeats)\n\n print(\"Loading Fields object...\")\n fields = onmt.io.get_fields(opt.data_type, src_nfeats, tgt_nfeats)\n\n print(\"Building & saving training data...\")\n train_dataset_files = build_save_dataset('train', fields, opt)\n\n print(\"Building & saving vocabulary...\")\n build_save_vocab(train_dataset_files, fields, opt)\n\n print(\"Building & saving validation data...\")\n build_save_dataset('valid', fields, opt)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.manual_seed",
"torch.save"
]
] |
ycjungSubhuman/linearRegressorDemo | [
"f32d19e97a4c5555a0a98080a393c3c8ba29c187"
] | [
"tester.py"
] | [
"import itertools\nimport numpy as np\nfrom scipy.stats import f_oneway\n\ndef _getRMSE(targetFunction, validateX, validateT):\n N = validateT.shape[0]\n return np.sqrt(sum([(validateT[i] - targetFunction(validateX[i]))**2 for i in range(N)]) / N)\n\ndef _normalize(X):\n result = X.copy()\n mins = np.amin(X, axis=0)\n maxs = np.amax(X, axis=0)\n for (m, j), value in np.ndenumerate(result):\n mid = (maxs[j] + mins[j]) / 2\n result[m, j] = (value - mid) / (maxs[j] - mid)\n\n return result\n\nclass CCPPTester:\n _FIELD_LABEL = ['T', 'P', 'H', 'V']\n def __init__(self, regressor):\n self.data = []\n print ('Loading CCPP data...')\n for i in range(0, 5):\n self.data.append( _normalize(np.loadtxt('CCPP/data{}.csv'.format(i+1), delimiter=',')) )\n print ('Loading Done')\n\n self.regressor = regressor\n self.msresOfFeatureSelection = []\n\n def _runFeatureSelection(self, featureIndices):\n print ( ' Parameters : {}'.format( '&'.join([self._FIELD_LABEL[i] for i in featureIndices]) ) )\n msres = np.zeros((5, 2))\n for batch in range(0, 5):\n Xs, Ts = np.split(self.data[batch][:, featureIndices], 2), np.split(self.data[batch][:, 4], 2)\n for i in range(0, 2):\n trainX, validateX = Xs[i], Xs[(i+1) % 2]\n trainT, validateT = Ts[i], Ts[(i+1) % 2]\n targetFunction = self.regressor.fitTargetFunction(trainX, trainT)\n msres[batch, i] = _getRMSE(targetFunction, validateX, validateT)\n self.msresOfFeatureSelection.append( msres.flatten() )\n print ( ' MSRE mean : {}, stddev : {}'.format( np.mean(msres.flatten()), np.std(msres.flatten()) ) )\n\n def _runParamDimension(self, paramDimension):\n indices = [0, 1, 2, 3]\n print ( '---Running {}C{} tests with {} parameters---'.format(4, paramDimension, paramDimension) )\n for featureIndices in itertools.combinations(indices, paramDimension):\n self._runFeatureSelection(featureIndices)\n\n def _anova(self):\n (fValue, pValue) = f_oneway(*self.msresOfFeatureSelection)\n print ( '---ANOVA Result---' )\n print ( ' F Value : {}'.format(fValue) )\n print ( ' P Value : {}'.format(pValue) )\n\n def run(self):\n for paramDimension in range(1, 5):\n self._runParamDimension(paramDimension)\n self._anova()\n\n"
] | [
[
"scipy.stats.f_oneway",
"numpy.ndenumerate",
"numpy.zeros",
"numpy.split",
"numpy.amax",
"numpy.amin"
]
] |
ivirshup/scMetaImmune | [
"3eec62cc61e6fb1cfaf1dcc31679900f6ff8b084"
] | [
"scripts/retrieve_jb_bams.py"
] | [
"import requests\nimport json\nfrom hashlib import md5\nimport requests\nimport pandas as pd\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\ndef download_file_from_google_drive(id, destination, checksum=None):\n \"\"\"\n Retrieves a public file from google drive.\n\n If the file is too large for google's virus scanning, this will download it anyways.\n \"\"\"\n def get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\n def save_response_content(response, destination, checksum=None):\n CHUNK_SIZE = 32768\n if checksum:\n digest = md5()\n with open(destination, \"wb\") as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n if checksum:\n digest.update(chunk)\n if checksum:\n assert checksum == digest.hexdigest()\n\n URL = \"https://drive.google.com/uc?export=download\"\n\n session = requests.Session()\n\n response = session.get(URL, params={'id': id}, stream=True)\n token = get_confirm_token(response)\n\n if token:\n params = { 'id' : id, 'confirm' : token }\n response = session.get(URL, params = params, stream = True)\n\n save_response_content(response, destination, checksum) \n\ndef main():\n parser = ArgumentParser(description=\"Download bams from requested JingleBells dataset.\")\n parser.add_argument(\"dataset\", help=\"ID of dataset to retrieve\", type=str)\n parser.add_argument(\"drive_table\", help=\"TSV containing information from google drive\", type=Path)\n parser.add_argument(\"out_dir\", help=\"Location to download files to\", type=Path)\n parser.add_argument(\"--dryrun\", help=\"Should files actually be downloaded?\", action=\"store_true\")\n\n args = parser.parse_args()\n\n # Check args\n assert args.drive_table.is_file()\n table = pd.read_table(args.drive_table)\n\n assert (table[\"dataset\"] == args.dataset).any(), f\"Couldn't find {args.dataset} in provided drive file.\"\n\n if not args.out_dir.is_dir():\n args.out_dir.mkdir()\n\n # Subset table\n dset_records = table[(table[\"dataset\"] == args.dataset) & table[\"name\"].str.endswith(\".bam\")]\n\n for bam in dset_records.itertuples(index=False):\n bampth = args.out_dir.joinpath(bam.name)\n print(f\"Downloading {bam.name} to {bampth}\")\n if not args.dryrun:\n download_file_from_google_drive(bam.id, bampth, bam.md5Checksum)\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"pandas.read_table"
]
] |
Hiwyl/keras_cnn_finetune | [
"f424302a72c8d05056a9af6f9b293003acb8398d"
] | [
"binary_test.py"
] | [
"'''\r\n@Author : lance\r\n@Email : [email protected]\r\n '''\r\n\r\n\r\nfrom keras.models import load_model\r\nfrom keras_preprocessing.image import ImageDataGenerator\r\nfrom sklearn.metrics import confusion_matrix\r\nimport numpy as np\r\n\r\n\r\n\r\ndef mytest(path,steps,input_shape):\r\n #导入数据\r\n test_path = '测试'\r\n test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(test_path,\r\n target_size=input_shape,\r\n classes=[\"C2F\",\"X2F\"],\r\n class_mode=\"binary\",\r\n batch_size=10, shuffle=False)\r\n\r\n model = load_model(path)\r\n # 测试\r\n steps=steps\r\n test_class=np.array([])\r\n\r\n for i in range(steps):\r\n test_imgs, test_lables = next(test_batches)\r\n test_class=np.hstack((test_class,test_lables ))\r\n print(\"真实类别:\",test_class)\r\n\r\n pred = model.predict_generator(test_batches, steps=steps, verbose=1)\r\n pred=pred.ravel()\r\n pred=list(pred)\r\n for i in range(len(pred)):\r\n if pred[i]<0.5:\r\n pred[i]=0\r\n else:\r\n pred[i]=1\r\n print(\"预测结果:\", pred)\r\n\r\n\r\n # 打印混淆矩阵\r\n cm = confusion_matrix(test_class, pred)\r\n\r\n\r\n print(cm)\r\n\r\n tmp = 0\r\n for i in range(len(cm[0, :])):\r\n tmp += cm[i][i]\r\n accuracy = tmp / np.sum(cm)\r\n print(\"acc:\", accuracy)\r\n\r\n return path, accuracy\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n mytest(\"weights/bcnn_0033.h5\",25,(224,224))#0.77\r\n # mytest(\"weights/densenet_0023.h5\",25,(224,224)) #0.87\r\n # mytest(\"weights/ince_res_0021.h5\",25,(299,299)) #0.85\r\n # mytest(\"weights/inceptionv3_0033.h5\",25,(299,299)) #0.80\r\n # mytest(\"weights/merge_0022.h5\",25,(224,224)) #0.81\r\n # mytest(\"weights/mobilenetv2_0032.h5\",25,(224,224)) #0.87\r\n # mytest(\"weights/nasnet_0017.h5\",25,(224,224)) #0.87\r\n # mytest(\"weights/resnet_0018.h5\",25,(224,224)) #0.79\r\n # mytest(\"weights/vgg19two_0022.h5\",25,(224,224)) #0.82\r\n\r\n\r\n\r\n\r\n"
] | [
[
"numpy.hstack",
"numpy.array",
"numpy.sum",
"sklearn.metrics.confusion_matrix"
]
] |
ricardowiest/Data_Science-Machine_Learning | [
"b03687b5e2ea383a1eaac614637e42336b9d4633"
] | [
"Machine Learning e Data Science com Python ATUALIZADO/Secao 10 - Redes neurais artificiais/keras_credit_data.py"
] | [
"import pandas as pd\n\nbase = pd.read_csv('credit_data.csv')\nbase.loc[base.age < 0, 'age'] = 40.92\n \nprevisores = base.iloc[:, 1:4].values\nclasse = base.iloc[:, 4].values\n\nfrom sklearn.preprocessing import Imputer\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\nimputer = imputer.fit(previsores[:, 1:4])\nprevisores[:, 1:4] = imputer.transform(previsores[:, 1:4])\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nprevisores = scaler.fit_transform(previsores)\n\nfrom sklearn.model_selection import train_test_split\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.25, random_state=0)\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nclassificador = Sequential()\nclassificador.add(Dense(units = 2, activation = 'relu', input_dim = 3))\nclassificador.add(Dense(units = 2, activation = 'relu'))\nclassificador.add(Dense(units = 1, activation = 'sigmoid'))\nclassificador.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\nclassificador.fit(previsores_treinamento, classe_treinamento, batch_size = 10, nb_epoch = 100)\nprevisoes = classificador.predict(previsores_teste)\nprevisoes = (previsoes > 0.5)\n\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nprecisao = accuracy_score(classe_teste, previsoes)\nmatriz = confusion_matrix(classe_teste, previsoes)\n"
] | [
[
"sklearn.metrics.confusion_matrix",
"sklearn.preprocessing.StandardScaler",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"sklearn.preprocessing.Imputer"
]
] |
kamikaze0923/SATNET | [
"16dda5846d3d7a0c846a6f48b49f63f896b1b224"
] | [
"exps/sudoku.py"
] | [
"#!/usr/bin/env python3\n#\n# Partly derived from:\n# https://github.com/locuslab/optnet/blob/master/sudoku/train.py \n\nimport argparse\n\nimport os\nimport shutil\nimport csv\n\nimport numpy as np\nimport numpy.random as npr\n#import setproctitle\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom tqdm.auto import tqdm\n\nimport satnet\nimport sys\nfrom logic.logic import get_sudoku_matrix\n\ntorch.set_printoptions(linewidth=sys.maxsize)\n\nclass SudokuSolver(nn.Module):\n def __init__(self, boardSz, aux, m, S=None):\n super(SudokuSolver, self).__init__()\n n = boardSz**6\n self.sat = satnet.SATNet(n, m, aux, max_iter=100, eps=1e-6)\n\n def forward(self, y_in, mask):\n out = self.sat(y_in, mask)\n return out\n\n\nclass CSVLogger(object):\n def __init__(self, fname):\n self.f = open(fname, 'w')\n self.logger = csv.writer(self.f)\n\n def log(self, fields):\n self.logger.writerow(fields)\n self.f.flush()\n\n\ndef print_header(msg):\n print('===>', msg)\n\ndef find_unperm(perm):\n unperm = torch.zeros_like(perm)\n for i in range(perm.size(0)):\n unperm[perm[i]] = i\n return unperm\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, default='sudoku24to30_medium')\n parser.add_argument('--boardSz', type=int, default=3)\n parser.add_argument('--batchSz', type=int, default=200)\n parser.add_argument('--testBatchSz', type=int, default=200)\n parser.add_argument('--aux', type=int, default=100)\n parser.add_argument('--m', type=int, default=200)\n parser.add_argument('--nEpoch', type=int, default=2000)\n parser.add_argument('--testPct', type=float, default=0.1)\n parser.add_argument('--lr', type=float, default=2e-4)\n parser.add_argument('--save', type=str)\n parser.add_argument('--model', type=str)\n parser.add_argument('--no_cuda', action='store_true')\n parser.add_argument('--perm', action='store_true')\n\n args = parser.parse_args()\n\n # For debugging: fix the random seed\n npr.seed(1)\n torch.manual_seed(7)\n\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n if args.cuda:\n print('Using', torch.cuda.get_device_name(0))\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.cuda.init()\n\n save = '{}{}.-aux{}-m{}-lr{}-bsz{}'.format(\n args.data_dir, '.perm' if args.perm else '', args.aux, args.m, args.lr, args.batchSz)\n if args.save:\n save = '{}-{}'.format(args.save, save)\n save = os.path.join('logs', save)\n # if os.path.isdir(save):\n # shutil.rmtree(save)\n os.makedirs(save, exist_ok=True)\n\n print_header('Loading data')\n\n with open(os.path.join(args.data_dir, 'features.pt'), 'rb') as f:\n X_in = torch.load(f)\n with open(os.path.join(args.data_dir, 'labels.pt'), 'rb') as f:\n Y_in = torch.load(f)\n with open(os.path.join(args.data_dir, 'perm.pt'), 'rb') as f:\n perm = torch.load(f)\n\n N = X_in.size(0)\n nTrain = int(N*(1.-args.testPct))\n nTest = N-nTrain\n assert(nTrain % args.batchSz == 0)\n assert(nTest % args.testBatchSz == 0)\n\n print_header('Forming inputs')\n X, Y, is_input = process_inputs(X_in, Y_in)\n data = X\n if args.cuda:\n data, is_input, Y = data.cuda(), is_input.cuda(), Y.cuda()\n\n unperm = None\n if args.perm and not args.mnist:\n print('Applying permutation')\n data[:,:], Y[:,:], is_input[:,:] = data[:,perm], Y[:,perm], is_input[:,perm]\n unperm = find_unperm(perm)\n\n train_set = TensorDataset(data[:nTrain], is_input[:nTrain], Y[:nTrain])\n test_set = 
TensorDataset(data[nTrain:], is_input[nTrain:], Y[nTrain:])\n\n print_header('Building model')\n model = SudokuSolver(args.boardSz, args.aux, args.m)\n\n if args.cuda:\n model = model.cuda()\n\n\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n args.model = f\"logs/{args.data_dir}.-aux100-m200-lr0.0002-bsz200/it507.pth\"\n if args.model:\n print(f\"{args.model} loaded\")\n model.load_state_dict(torch.load(args.model))\n\n train_logger = CSVLogger(os.path.join(save, 'train.csv'))\n test_logger = CSVLogger(os.path.join(save, 'test.csv'))\n fields = ['epoch', 'loss', 'err']\n train_logger.log(fields)\n test_logger.log(fields)\n\n # test(args.boardSz, 0, model, optimizer, test_logger, test_set, args.testBatchSz, unperm)\n # exit(0)\n for epoch in range(1, args.nEpoch+1):\n train(args.boardSz, epoch, model, optimizer, train_logger, train_set, args.batchSz, unperm)\n test(args.boardSz, epoch, model, optimizer, test_logger, test_set, args.testBatchSz, unperm)\n torch.save(model.state_dict(), os.path.join(save, 'it'+str(epoch)+'.pth'))\n\ndef process_inputs(X, Y):\n is_input = X.sum(dim=3, keepdim=True).expand_as(X).int().sign()\n # to_soduku(X[0], Y[0], is_input[0])\n # exit(0)\n\n X = X.view(X.size(0), -1)\n Y = Y.view(Y.size(0), -1)\n is_input = is_input.view(is_input.size(0), -1)\n\n\n return X, Y, is_input\n\ndef to_soduku(X, Y, is_input):\n assert isinstance(X, torch.Tensor)\n assert isinstance(Y, torch.Tensor)\n assert X.size() == (9,9,9)\n assert Y.size() == (9,9,9)\n soduku_X = X.argmax(dim=2) + 1\n soduku_Y = Y.argmax(dim=2) + 1\n is_input = is_input.permute(2,0,1)[0]\n print(soduku_X * is_input)\n print(soduku_Y)\n print(is_input)\n print(torch.sum(is_input))\n return\n\[email protected]_grad()\ndef recursive_inference(preds, mask, model):\n out = preds\n while True:\n confident_out = torch.mul((1 - mask), torch.abs(0.5 - out))\n confident_k = torch.topk(confident_out, k=1, dim=1)\n mask = torch.scatter(mask, dim=1, index=confident_k.indices, value=1)\n # out = torch.scatter(out, dim=1, index=confident_k.indices, src=out[:, confident_k.indices.flatten()].round())\n flag = sum(map(lambda x: torch.is_nonzero(x), (1 - mask).flatten()))\n if flag == 0:\n break\n # print(flag)\n out = model(out, mask)\n return out\n\ndef run(boardSz, epoch, model, optimizer, logger, dataset, batchSz, to_train=False, unperm=None):\n\n loss_final, err_final = 0, 0\n\n loader = DataLoader(dataset, batch_size=batchSz)\n tloader = tqdm(loader, total=len(loader))\n\n for i,(data,is_input,label) in enumerate(tloader):\n if to_train:\n optimizer.zero_grad()\n preds = model(data.contiguous(), is_input.contiguous())\n preds = recursive_inference(preds, is_input, model)\n preds_round = preds.round()\n print(is_input.size())\n print(torch.sum(preds_round == label, dim=1))\n print(torch.sum(is_input, dim=1))\n\n loss = nn.functional.binary_cross_entropy(preds, label)\n\n if to_train:\n loss.backward()\n optimizer.step()\n\n err = computeErr(preds.data, boardSz, unperm)/batchSz\n print(err)\n # exit(0)\n\n tloader.set_description('Epoch {} {} Loss {:.4f} Err: {:.4f}'.format(epoch, ('Train' if to_train else 'Test '), loss.item(), err))\n loss_final += loss.item()\n err_final += err\n\n loss_final, err_final = loss_final/len(loader), err_final/len(loader)\n logger.log((epoch, loss_final, err_final))\n\n if not to_train:\n print('TESTING SET RESULTS: Average loss: {:.4f} Err: {:.4f}'.format(loss_final, err_final))\n\n #print('memory: {:.2f} MB, cached: {:.2f} MB'.format(torch.cuda.memory_allocated()/2.**20, 
torch.cuda.memory_cached()/2.**20))\n torch.cuda.empty_cache()\n\ndef train(args, epoch, model, optimizer, logger, dataset, batchSz, unperm=None):\n run(args, epoch, model, optimizer, logger, dataset, batchSz, True, unperm)\n\[email protected]_grad()\ndef test(args, epoch, model, optimizer, logger, dataset, batchSz, unperm=None):\n run(args, epoch, model, optimizer, logger, dataset, batchSz, False, unperm)\n\[email protected]_grad()\ndef computeErr(pred_flat, n, unperm):\n if unperm is not None: pred_flat[:,:] = pred_flat[:,unperm]\n\n nsq = n ** 2\n pred = pred_flat.view(-1, nsq, nsq, nsq)\n\n batchSz = pred.size(0)\n s = (nsq-1)*nsq//2 # 0 + 1 + ... + n^2-1\n I = torch.max(pred, 3)[1].squeeze().view(batchSz, nsq, nsq)\n\n def invalidGroups(x):\n valid = (x.min(1)[0] == 0)\n valid *= (x.max(1)[0] == nsq-1)\n valid *= (x.sum(1) == s)\n return valid.logical_not()\n\n boardCorrect = torch.ones(batchSz).type_as(pred)\n for j in range(nsq):\n # Check the jth row and column.\n boardCorrect[invalidGroups(I[:,j,:])] = 0\n boardCorrect[invalidGroups(I[:,:,j])] = 0\n\n # Check the jth block.\n row, col = n*(j // n), n*(j % n)\n M = invalidGroups(I[:,row:row+n,col:col+n].contiguous().view(batchSz,-1))\n boardCorrect[M] = 0\n\n if boardCorrect.sum() == 0:\n return batchSz\n\n return float(batchSz-boardCorrect.sum())\n\nif __name__=='__main__':\n main()"
] | [
[
"torch.cuda.init",
"torch.ones",
"torch.set_printoptions",
"torch.cuda.is_available",
"torch.load",
"torch.topk",
"torch.sum",
"torch.manual_seed",
"torch.abs",
"torch.utils.data.DataLoader",
"torch.zeros_like",
"torch.max",
"torch.cuda.get_device_name",
"torch.cuda.empty_cache",
"torch.is_nonzero",
"torch.utils.data.TensorDataset",
"torch.nn.functional.binary_cross_entropy",
"numpy.random.seed",
"torch.no_grad",
"torch.scatter"
]
] |
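The Sudoku-solver training script in the entry above scores predictions with computeErr, which takes the argmax of the one-hot board and checks that every row, column and n-by-n block contains each digit exactly once. Below is a minimal single-board sketch of that validity test; board_valid and the 4x4 example board are illustrative names, not part of the original repository.

import torch

def board_valid(board: torch.Tensor, n: int) -> bool:
    # board holds digit indices 0 .. n*n - 1 on an (n*n, n*n) grid
    nsq = n * n
    target = torch.arange(nsq)
    def group_ok(x):
        # a row, column or block is valid iff its sorted entries are exactly 0 .. nsq-1
        return torch.equal(torch.sort(x.flatten()).values, target)
    for j in range(nsq):
        if not group_ok(board[j, :]) or not group_ok(board[:, j]):
            return False
        r, c = n * (j // n), n * (j % n)
        if not group_ok(board[r:r + n, c:c + n]):
            return False
    return True

# 4x4 board (n = 2) that satisfies all row, column and block constraints
example = torch.tensor([[0, 1, 2, 3],
                        [2, 3, 0, 1],
                        [1, 0, 3, 2],
                        [3, 2, 1, 0]])
print(board_valid(example, n=2))  # True

computeErr applies the same group checks across a whole batch and counts a board as wrong as soon as any single row, column or block fails.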
mackelab/sbibm | [
"b9781c610a1a80d2de014ee46a29cf061fb6074a"
] | [
"sbibm/tasks/gaussian_linear_uniform/task.py"
] | [
"import logging\nfrom pathlib import Path\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport pyro\nimport torch\nfrom pyro import distributions as pdist\n\nfrom sbibm.tasks.simulator import Simulator\nfrom sbibm.tasks.task import Task\n\n\nclass GaussianLinearUniform(Task):\n def __init__(\n self, dim: int = 10, prior_bound: float = 1.0, simulator_scale: float = 0.1\n ):\n \"\"\"Gaussian Linear Uniform\n\n Inference of mean under uniform prior.\n\n Args:\n dim: Dimensionality of parameters and data.\n prior_bound: Prior is uniform in [-prior_bound, +prior_bound].\n simulator_scale: Standard deviation of noise in simulator.\n \"\"\"\n super().__init__(\n dim_parameters=dim,\n dim_data=dim,\n name=Path(__file__).parent.name,\n name_display=\"Gaussian Linear Uniform\",\n num_observations=10,\n num_posterior_samples=10000,\n num_reference_posterior_samples=10000,\n num_simulations=[100, 1000, 10000, 100000, 1000000],\n path=Path(__file__).parent.absolute(),\n )\n\n self.prior_params = {\n \"low\": -prior_bound * torch.ones((self.dim_parameters,)),\n \"high\": +prior_bound * torch.ones((self.dim_parameters,)),\n }\n\n self.prior_dist = pdist.Uniform(**self.prior_params).to_event(1)\n\n self.simulator_params = {\n \"precision_matrix\": torch.inverse(\n simulator_scale * torch.eye(self.dim_parameters),\n )\n }\n\n def get_prior(self) -> Callable:\n def prior(num_samples=1):\n return pyro.sample(\"parameters\", self.prior_dist.expand_by([num_samples]))\n\n return prior\n\n def get_simulator(self, max_calls: Optional[int] = None) -> Simulator:\n \"\"\"Get function returning samples from simulator given parameters\n\n Args:\n max_calls: Maximum number of function calls. Additional calls will\n result in SimulationBudgetExceeded exceptions. 
Defaults to None\n for infinite budget\n\n Return:\n Simulator callable\n \"\"\"\n\n def simulator(parameters):\n return pyro.sample(\n \"data\",\n pdist.MultivariateNormal(\n loc=parameters,\n precision_matrix=self.simulator_params[\"precision_matrix\"],\n ),\n )\n\n return Simulator(task=self, simulator=simulator, max_calls=max_calls)\n\n def _sample_reference_posterior(\n self,\n num_samples: int,\n num_observation: Optional[int] = None,\n observation: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n \"\"\"Sample reference posterior for given observation\n\n Uses closed form solution with rejection sampling\n\n Args:\n num_samples: Number of samples to generate\n num_observation: Observation number\n observation: Instead of passing an observation number, an observation may be\n passed directly\n\n Returns:\n Samples from reference posterior\n \"\"\"\n assert not (num_observation is None and observation is None)\n assert not (num_observation is not None and observation is not None)\n\n if num_observation is not None:\n observation = self.get_observation(num_observation=num_observation)\n\n log = logging.getLogger(__name__)\n\n reference_posterior_samples = []\n\n sampling_dist = pdist.MultivariateNormal(\n loc=observation,\n precision_matrix=self.simulator_params[\"precision_matrix\"],\n )\n\n # Reject samples outside of prior bounds\n counter = 0\n while len(reference_posterior_samples) < num_samples:\n counter += 1\n sample = sampling_dist.sample()\n if not torch.isinf(self.prior_dist.log_prob(sample).sum()):\n reference_posterior_samples.append(sample)\n\n reference_posterior_samples = torch.cat(reference_posterior_samples)\n acceptance_rate = float(num_samples / counter)\n\n log.info(\n f\"Acceptance rate for observation {num_observation}: {acceptance_rate}\"\n )\n\n return reference_posterior_samples\n\n\nif __name__ == \"__main__\":\n task = GaussianLinearUniform()\n task._setup()\n"
] | [
[
"torch.cat",
"torch.eye",
"torch.ones"
]
] |
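In the sbibm task above, _sample_reference_posterior draws from the Gaussian likelihood centred on the observation and keeps only draws that land inside the uniform prior box, which is exact rejection sampling for this model. A stand-alone sketch of that loop follows; the dimension, bound, noise scale and sample count are illustrative stand-ins rather than values read from a task instance.

import torch
from torch.distributions import MultivariateNormal

dim, bound, scale = 3, 1.0, 0.1
observation = torch.zeros(dim)                       # stand-in for observed data
proposal = MultivariateNormal(loc=observation,
                              covariance_matrix=scale * torch.eye(dim))

samples, draws = [], 0
while len(samples) < 1000:
    draws += 1
    s = proposal.sample()
    if torch.all(s.abs() <= bound):                  # inside the uniform prior [-bound, bound]^dim
        samples.append(s)

posterior_samples = torch.stack(samples)
print(posterior_samples.shape, 'acceptance rate:', 1000 / draws)

The acceptance rate printed here plays the same role as the one the task logs after collecting its reference posterior samples.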
sibonyves/amuse | [
"5557bf88d14df1aa02133a199b6d60c0c57dcab7"
] | [
"src/amuse/ext/grid_remappers.py"
] | [
"import numpy\n\nfrom amuse.units import units\n\nfrom amuse.units.quantities import is_quantity, value_in, to_quantity\n\nfrom amuse.datamodel import UnstructuredGrid, StructuredGrid,StructuredBaseGrid\n\ntry:\n import matplotlib\n from matplotlib import tri\n if not hasattr(tri, \"LinearTriInterpolator\"):\n raise Exception(\"LinearTriInterpolator not in matplotlib.tri\")\n matplotlib_available=True\nexcept:\n matplotlib_available=False\n\nclass interpolating_2D_remapper(object):\n def __init__(self, source, target,axes_names=None):\n \"\"\" this class maps a source grid to a target grid using linear \n interpolation on a triangulation generated by adding a \n midpoint to every cell (source should be a structured grid) \n and thus generating 4 triangles for each cell. Values of the \n midpoints are averaged from the corners. \n \"\"\"\n if len(source.shape) !=2:\n raise Exception(\"source grid is not 2D\")\n if not isinstance(source, StructuredBaseGrid):\n raise Exception(\"source grid is not instance of StructuredBaseGrid\")\n\n self.source=source\n self.target=target\n self._axes_names=list(axes_names or source.get_axes_names())\n self.generate_triangulation()\n\n def _generate_nodes(self,grid,attributes):\n\n Nx,Ny=grid.shape\n\n x,y=numpy.mgrid[0:Nx,0:Ny]\n x1,y1=numpy.mgrid[0:Nx-1,0:Ny-1]\n \n x_=x.flatten()\n y_=y.flatten()\n x1_=x1.flatten()\n y1_=y1.flatten()\n\n l1=Nx*Ny\n\n i=numpy.arange(Nx*Ny).reshape((Nx,Ny))\n i1=(numpy.arange((Nx-1)*(Ny-1))+l1).reshape((Nx-1,Ny-1))\n\n \n nodes=UnstructuredGrid(len(x_)+len(x1_))\n for name in attributes:\n values1=getattr(grid,name)[x_,y_]\n values2=getattr(grid,name)[x1_,y1_]+getattr(grid,name)[x1_+1,y1_]+\\\n getattr(grid,name)[x1_,y1_+1]+getattr(grid,name)[x1_+1,y1_+1]\n setattr(nodes[0], name, 0.*values1[0])\n setattr(nodes[:l1], name, 1.*values1)\n setattr(nodes[l1:], name, values2/4)\n return nodes\n\n def _generate_elements_and_boundaries(self,grid):\n Nx,Ny=grid.shape\n\n l1=Nx*Ny\n\n i=numpy.arange(Nx*Ny).reshape((Nx,Ny))\n i1=(numpy.arange((Nx-1)*(Ny-1))+l1).reshape((Nx-1,Ny-1))\n\n e1=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')\n e2=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')\n e3=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')\n e4=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')\n \n e1[:,0]=i[:-1,:-1].flatten()\n e1[:,1]=i[1:,:-1].flatten()\n e1[:,2]=i1[:,:].flatten()\n \n e2[:,0]=i[1:,:-1].flatten()\n e2[:,1]=i[1:,1:].flatten()\n e2[:,2]=i1[:,:].flatten()\n \n e3[:,0]=i[1:,1:].flatten()\n e3[:,1]=i[:-1,1:].flatten()\n e3[:,2]=i1[:,:].flatten()\n \n e4[:,0]=i[:-1,:-1].flatten()\n e4[:,1]=i1[:,:].flatten()\n e4[:,2]=i[:-1,1:].flatten()\n\n elements=numpy.zeros((4*(Nx-1)*(Ny-1),3),dtype='i8')\n elements[0::4,:]=e1\n elements[1::4,:]=e2\n elements[2::4,:]=e3\n elements[3::4,:]=e4\n \n boundaries=[xx.flatten() for xx in [i[:,0],i[-1,:],i[::-1,-1],i[0,::-1]] ]\n \n elem=UnstructuredGrid(len(elements))\n elem.nodes=elements\n\n return elem,boundaries\n\n def convert_grid_to_nodes_and_elements(self, grid, attributes=None):\n \n if attributes is None:\n attributes=grid.get_attribute_names_defined_in_store()\n\n nodes=self._generate_nodes(grid, attributes)\n elements,boundaries=self._generate_elements_and_boundaries(grid)\n \n return nodes,elements,boundaries\n\n def generate_triangulation(self):\n\n nodes,elements,boundaries=self.convert_grid_to_nodes_and_elements(self.source, self._axes_names)\n\n xpos=to_quantity(getattr(nodes,self._axes_names[0]))\n ypos=to_quantity(getattr(nodes,self._axes_names[1]))\n \n self._xpos_unit=xpos.unit\n 
xpos=xpos.number\n self._ypos_unit=ypos.unit\n ypos=ypos.number\n\n n1=elements.nodes[:,0]\n n2=elements.nodes[:,1]\n n3=elements.nodes[:,2]\n elem=numpy.column_stack((n1,n2,n3))\n\n self._triangulation=tri.Triangulation(xpos,ypos,elem)\n \n def sample(self, values, xpos, ypos):\n interpolator=tri.LinearTriInterpolator(self._triangulation,values)\n return interpolator(xpos,ypos)\n\n def forward_mapping(self, attributes, target_names=None):\n if attributes is None:\n attributes=self.source.get_attribute_names_defined_in_store()\n if target_names is None:\n target_names=attributes\n \n source=self.source.empty_copy()\n channel1=self.source.new_channel_to(source)\n target=self.target.empty_copy()\n channel2=self.target.new_channel_to(target)\n channel3=target.new_channel_to(self.target)\n \n channel1.copy_attributes(attributes)\n channel2.copy_attributes(self._axes_names)\n \n nodes=self._generate_nodes(source,attributes)\n \n xpos=value_in( getattr(target,self._axes_names[0]), self._xpos_unit)\n ypos=value_in( getattr(target,self._axes_names[1]), self._ypos_unit)\n \n for attribute, target_name in zip(attributes, target_names):\n values=to_quantity( getattr(nodes,attribute) ) \n unit=values.unit\n values=values.number\n samples=self.sample(values,xpos,ypos)\n setattr(target, target_name, (samples if unit is units.none else (samples | unit)))\n\n channel3.copy_attributes(target_names) \n\nclass bilinear_2D_remapper(object):\n def __init__(self, source, target, check_inside=True):\n \"\"\" this class maps a source grid to a target grid using bilinear \n interpolation. If check_inside=True, raise exception if any \n target point outside source grid. \n \"\"\"\n if len(source.shape) !=2:\n raise Exception(\"source grid is not 2D\")\n if not isinstance(source, StructuredBaseGrid):\n raise Exception(\"source grid is not instance of RegularBaseGrid\")\n\n self.source=source\n self.target=target\n self._axes_names=source.get_axes_names()\n self.check_inside=check_inside\n self._weights=None\n self._indices=None\n\n def _calculate_weights(self):\n x0=getattr(self.source[0,0], self._axes_names[0])\n x1=getattr(self.source[1,1], self._axes_names[0])\n y0=getattr(self.source[0,0], self._axes_names[1])\n y1=getattr(self.source[1,1], self._axes_names[1])\n dx=x1-x0\n dy=y1-y0\n \n x=getattr(self.target, self._axes_names[0])\n y=getattr(self.target, self._axes_names[1])\n\n ix=numpy.floor((x-x0)/dx).astype(int)\n iy=numpy.floor((y-y0)/dy).astype(int)\n if self.check_inside:\n if numpy.any(ix<0) or numpy.any(ix>self.source.shape[0]-2) or \\\n numpy.any(iy<0) or numpy.any(iy>self.source.shape[1]-2):\n raise Exception(\"target not fully inside (restricted) source grid as required\")\n ix=numpy.clip(ix,0, self.source.shape[0]-2)\n iy=numpy.clip(iy,0, self.source.shape[1]-2)\n\n wx=(x0+(ix+1)*dx-x)/dx\n wy=(y0+(iy+1)*dy-y)/dy\n wx=numpy.clip(wx,0.,1.)\n wy=numpy.clip(wy,0.,1.)\n\n self._weights=[wx,wy]\n self._indices=[ix,iy]\n \n def _evaluate(self, values):\n ix,iy=self._indices\n wx,wy=self._weights\n result=wx*wy*values[ix,iy]+(1.-wx)*wy*values[ix+1,iy]+ \\\n wx*(1.-wy)*values[ix,iy+1]+(1.-wx)*(1.-wy)*values[ix+1,iy+1]\n return result\n\n def forward_mapping(self, attributes, target_names=None):\n if attributes is None:\n attributes=self.source.get_attribute_names_defined_in_store()\n if target_names is None:\n target_names=attributes\n if self._weights is None:\n self._calculate_weights()\n \n mapped_values=[]\n for attribute, target_name in zip(attributes, target_names):\n 
values=getattr(self.source,attribute)\n samples=self._evaluate(values)\n mapped_values.append(samples)\n\n self.target.set_values_in_store(None, target_names, mapped_values)\n\n\nclass nearest_2D_remapper(object):\n def __init__(self, source, target, check_inside=True):\n \"\"\" this class maps a source grid to a target grid getting closest\n grid value. If check_inside=True, raise exception if any \n target point outside source grid. \n \"\"\"\n if len(source.shape) !=2:\n raise Exception(\"source grid is not 2D\")\n if not isinstance(source, StructuredBaseGrid):\n raise Exception(\"source grid is not instance of RegularBaseGrid\")\n\n self.source=source\n self.target=target\n self._axes_names=source.get_axes_names()\n self.check_inside=check_inside\n self._indices=None\n\n def _calculate_weights(self): \n x=getattr(self.target, self._axes_names[0])\n y=getattr(self.target, self._axes_names[1])\n\n kwargs={self._axes_names[0]: x, self._axes_names[1]:y}\n indices=self.source.get_index(**kwargs)\n\n ix=indices[...,0]\n iy=indices[...,1]\n if self.check_inside:\n if numpy.any(ix<0) or numpy.any(ix>self.source.shape[0]-1) or \\\n numpy.any(iy<0) or numpy.any(iy>self.source.shape[1]-1):\n raise Exception(\"target not fully inside source grid as required\")\n ix=numpy.clip(ix,0, self.source.shape[0]-1)\n iy=numpy.clip(iy,0, self.source.shape[1]-1)\n \n self._indices=[ix,iy]\n\n def _evaluate(self, values):\n return values[self._indices[0], self._indices[1]]\n\n def forward_mapping(self, attributes, target_names=None):\n if attributes is None:\n attributes=self.source.get_attribute_names_defined_in_store()\n if target_names is None:\n target_names=attributes\n if self._indices is None:\n self._calculate_weights()\n\n mapped_values=[]\n for attribute, target_name in zip(attributes, target_names):\n values=getattr(self.source,attribute)\n samples=self._evaluate(values)\n mapped_values.append(samples)\n\n self.target.set_values_in_store(None, target_names, mapped_values)\n\n\ndef conservative_spherical_remapper(*args,**kwargs):\n raise Exception(\"conservative_spherical_remapper has moved to omuse.ext\")\n"
] | [
[
"numpy.zeros",
"matplotlib.tri.Triangulation",
"numpy.any",
"numpy.arange",
"numpy.clip",
"numpy.column_stack",
"matplotlib.tri.LinearTriInterpolator",
"numpy.floor"
]
] |
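bilinear_2D_remapper in the file above precomputes, for every target point, the indices of the enclosing source cell and the bilinear weights, then blends the four corner values. A compact NumPy sketch of that blend on a regular grid follows; the grid spacing, values and sample points are arbitrary choices for illustration.

import numpy as np

# regular source grid: values[i, j] sits at x = i * dx, y = j * dy
dx = dy = 1.0
values = np.arange(25, dtype=float).reshape(5, 5)

def bilinear(x, y):
    # index of the lower corner of the cell containing each point
    ix = np.clip(np.floor(x / dx).astype(int), 0, values.shape[0] - 2)
    iy = np.clip(np.floor(y / dy).astype(int), 0, values.shape[1] - 2)
    # weight of the lower corner along each axis, clipped to [0, 1]
    wx = np.clip(((ix + 1) * dx - x) / dx, 0.0, 1.0)
    wy = np.clip(((iy + 1) * dy - y) / dy, 0.0, 1.0)
    return (wx * wy * values[ix, iy] + (1 - wx) * wy * values[ix + 1, iy]
            + wx * (1 - wy) * values[ix, iy + 1]
            + (1 - wx) * (1 - wy) * values[ix + 1, iy + 1])

print(bilinear(np.array([0.5, 2.25]), np.array([0.5, 3.75])))  # [ 3. 15.]

The remapper additionally caches the indices and weights, so repeated attributes can be mapped onto the same target grid without recomputing them.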
alisiahkoohi/survae_flows | [
"e1747b05524c7ab540a211ed360ab3e67bc3e96d"
] | [
"survae/transforms/surjections/dequantization_variational.py"
] | [
"import torch\nfrom survae.distributions import ConditionalDistribution\nfrom survae.transforms.surjections import Surjection\n\n\nclass VariationalDequantization(Surjection):\n '''\n A variational dequantization layer.\n This is useful for converting discrete variables to continuous [1, 2].\n\n Forward:\n `z = (x+u)/K, u~encoder(x)`\n where `x` is discrete, `x \\in {0,1,2,...,K-1}^D`\n and `encoder` is a conditional distribution.\n Inverse:\n `x = Quantize(z, K)`\n\n Args:\n encoder: ConditionalDistribution, a conditional distribution/flow which\n outputs samples in `[0,1]^D` conditioned on `x`.\n num_bits: int, number of bits in quantization,\n i.e. 8 for `x \\in {0,1,2,...,255}^D`\n or 5 for `x \\in {0,1,2,...,31}^D`.\n\n References:\n [1] RNADE: The real-valued neural autoregressive density-estimator,\n Uria et al., 2013, https://arxiv.org/abs/1306.0186\n [2] Flow++: Improving Flow-Based Generative Models with Variational Dequantization and Architecture Design,\n Ho et al., 2019, https://arxiv.org/abs/1902.00275\n '''\n\n stochastic_forward = True\n\n def __init__(self, encoder, num_bits=8):\n super(VariationalDequantization, self).__init__()\n assert isinstance(encoder, ConditionalDistribution)\n self.num_bits = num_bits\n self.quantization_bins = 2**num_bits\n self.register_buffer('ldj_per_dim', -torch.log(torch.tensor(self.quantization_bins, dtype=torch.float)))\n self.encoder = encoder\n\n def _ldj(self, shape):\n batch_size = shape[0]\n num_dims = shape[1:].numel()\n ldj = self.ldj_per_dim * num_dims\n return ldj.repeat(batch_size)\n\n def forward(self, x):\n u, qu = self.encoder.sample_with_log_prob(context=x)\n z = (x.type(u.dtype) + u) / self.quantization_bins\n ldj = self._ldj(z.shape) - qu\n return z, ldj\n\n def inverse(self, z):\n z = self.quantization_bins * z\n return z.floor().clamp(min=0, max=self.quantization_bins-1).long()\n"
] | [
[
"torch.tensor"
]
] |
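VariationalDequantization above maps a discrete x to z = (x + u) / K with u drawn from a learned conditional encoder, adds the constant -D*log(K) term to the log-det-Jacobian, and inverts by quantizing. A minimal sketch of the simpler uniform-noise variant, which keeps the same forward and inverse shape but replaces the encoder with u ~ U[0, 1), is shown below; all names here are illustrative.

import torch

num_bits = 8
K = 2 ** num_bits                                    # number of quantization bins
ldj_per_dim = -torch.log(torch.tensor(float(K)))

def dequantize(x):
    # x: integer tensor in {0, ..., K-1}; returns z in [0, 1) and a per-sample ldj
    u = torch.rand_like(x, dtype=torch.float)
    z = (x.float() + u) / K
    ldj = ldj_per_dim * x[0].numel() * torch.ones(x.shape[0])
    return z, ldj

def quantize(z):
    return (K * z).floor().clamp(min=0, max=K - 1).long()

x = torch.randint(0, K, (4, 3, 2, 2))
z, ldj = dequantize(x)
assert torch.equal(quantize(z), x)                   # the inverse recovers the discrete input

The variational version subtracts the encoder's log-probability of u from this constant ldj, which is exactly what the forward method in the entry above does.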
marcelotrevisani/acorns | [
"682749b0963ffc0a3998a7065ef505fc95123f50"
] | [
"tests/updated_hessian_test_suite_random.py"
] | [
"import sys\nimport math\nimport torch\nfrom timeit import default_timer as timer\nimport os\nimport json\nfrom subprocess import PIPE, run\nimport numpy as np\nimport shutil\nfrom datetime import datetime\n\nsys.path.append('tests/python_test_utils')\n\nsys.path.append('src')\n\nimport forward_diff\nimport tapenade_utils\nimport us_utils\nimport wenzel_utils\nimport pytorch_utils\nimport enoki_utils\nimport generate_function\n\n\ndef generate_params(num_params, function_num):\n # , \" which is: \", functions[function_num][0])\n print(\"Generating params for function_num\", function_num)\n num_variables = len(functions[function_num][1])\n function_params = np.zeros(shape=(num_variables, num_params))\n for i, var in enumerate(functions[function_num][1]):\n variable_params = np.random.rand(num_params) * 10\n np.save(\"./tests/utils/numpy_params/function_{}_param_{}.npy\".format(\n function_num, var), variable_params)\n function_params[i] = variable_params\n reshaped = np.reshape(function_params, num_params*num_variables, order='F')\n param_string = \"\\n\".join(str(x) for x in reshaped)\n param_f = open(\"params.txt\", \"w+\")\n param_f.write(param_string)\n param_f.close()\n return reshaped\n\n\ndef print_param_to_file(params):\n param_string = \"\\n\".join(str(x) for x in params)\n param_f = open(PARAMS_FILENAME, \"w+\")\n param_f.write(param_string)\n param_f.close()\n\n\ndef cleanup():\n if os.path.exists(INPUT_FILENAME):\n os.remove(INPUT_FILENAME)\n if os.path.exists(OUTPUT_FILENAME):\n os.remove(OUTPUT_FILENAME)\n if os.path.exists(PARAMS_FILENAME):\n os.remove(PARAMS_FILENAME)\n\n if os.path.exists(\"./tests/hessian_test/utils\"):\n folder = './tests/hessian/utils'\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n if os.path.exists(\"./tests/hessian/results\"):\n folder = './tests/hessian/results'\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\nif __name__ == \"__main__\":\n import random, string\n functions = []\n alphabets = list(string.ascii_lowercase)\n alphabets.remove('i')\n\n for k in range(1, 2):\n function = generate_function.gen_other(k)\n functions.append(function)\n print(function)\n\n print(functions)\n\n INPUT_FILENAME = './tests/utils/hessian/functions.c'\n UTILS_FILENAME = './tests/utils/windows/windows_utils.c'\n OUTPUT_FILENAME = './tests/utils/hessian/us_output.txt'\n PARAMS_FILENAME = './tests/utils/hessian/params.txt'\n MAX_PARAMS = 1000\n INIT_NUM_PARAMS = 10\n WENZEL_COMPILER_VERSION = \"\"\n NUM_ITERATIONS = 10\n RUN_C = True\n\n output = {}\n\n for func_num, func in enumerate(functions):\n\n print(func)\n\n output[func[0]] = {}\n\n denom = []\n num_params = INIT_NUM_PARAMS\n\n# # generate and compile our code\n us_utils.generate_function_c_file(func_num, functions, INPUT_FILENAME)\n us_utils.generate_derivatives_c_file(func_num, functions, INPUT_FILENAME, RUN_C, derivatives_filename=\"./tests/utils/hessian/ders_hessian\", reverse=False, second_der=True)\n us_utils.compile_ours(RUN_C, runnable_filename=\"./tests/utils/static_code/runnable_hessian\", derivatives_filename=\"./tests/utils/hessian/ders_hessian\")\n\n\n while num_params <= MAX_PARAMS:\n\n # generate parameters\n params = generate_params(num_params, func_num)\n print_param_to_file(params)\n\n # generate pytorch file\n 
pytorch_utils.generate_pytorch_hessian_file(\n func_num, num_params, functions)\n\n # generate and compile wenzel code static \n wenzel_utils.generate_wenzel_file(func_num, num_params, functions, PARAMS_FILENAME, \"hessian\", True)\n wenzel_utils.compile_wenzel(\"hessian\", True, compiler_version=WENZEL_COMPILER_VERSION)\n\n # wenzel dynamic\n wenzel_utils.generate_wenzel_file(func_num, num_params, functions, PARAMS_FILENAME, \"hessian\", False)\n wenzel_utils.compile_wenzel(\"hessian\", False, compiler_version=WENZEL_COMPILER_VERSION)\n\n\n # initialize arrays for run\n our_times = []\n py_times = []\n wenzel_times_static = []\n wenzel_times_dynamic = []\n\n for i in range(NUM_ITERATIONS):\n\n pytorch = pytorch_utils.run_pytorch_hessian()\n ours = us_utils.run_ours(\n functions[func_num], num_params, functions, PARAMS_FILENAME, OUTPUT_FILENAME, runnable_filename=\"./tests/utils/static_code/runnable_hessian\")\n wenzel_static = wenzel_utils.run_wenzel(\"hessian\", True)\n wenzel_dynamic = wenzel_utils.run_wenzel(\"hessian\", False)\n\n our_times.append(float(ours[1]))\n py_times.append(float(pytorch[1]))\n wenzel_times_static.append(float(wenzel_static[1]))\n wenzel_times_dynamic.append(float(wenzel_dynamic[1]))\n\n # print for debug purposes\n print(\"Parameters: \", params[:10])\n print(\"ours: \", ours[0][:10])\n print(\"pytorch: \", pytorch[0][:10])\n\n output[func[0]][num_params] = {\n \"us\": sum(our_times) / len(our_times),\n \"pytorch\": sum(py_times) / len(py_times),\n \"wenzel_static\": (sum(wenzel_times_static) / len(wenzel_times_static)),\n \"wenzel_dynamic\": (sum(wenzel_times_dynamic) / len(wenzel_times_dynamic)),\n \"flags\": \"-ffast-math -O3\",\n \"compiler_version\": WENZEL_COMPILER_VERSION\n }\n\n denom.append(num_params)\n if num_params < 10000:\n num_params += 2000\n else:\n num_params = num_params + 10000\n\n\n file_suffix = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')\n output_file = open('./tests/results/hess/full_results_hessian-{}.json'.format(file_suffix), \"w+\")\n output_file.write(json.dumps(output, indent=4, sort_keys=True))\n output_file.close()\n"
] | [
[
"numpy.random.rand",
"numpy.reshape",
"numpy.zeros"
]
] |
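The benchmark script above times each tool NUM_ITERATIONS times, averages the wall-clock results, and writes them to a timestamped JSON file. A stripped-down sketch of that measure, average and dump pattern follows; the workload and output filename are placeholders, not part of the test suite.

import json
from timeit import default_timer as timer
import numpy as np

def work(n=200):
    # placeholder workload standing in for one derivative evaluation
    a = np.random.rand(n, n)
    return a @ a

times = []
for _ in range(10):
    start = timer()
    work()
    times.append(timer() - start)

results = {"work": {"mean_seconds": sum(times) / len(times), "iterations": len(times)}}
with open("timings.json", "w") as fh:
    fh.write(json.dumps(results, indent=4, sort_keys=True))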
jieguangzhou/AIChallenger_SentimentAnalysis | [
"4dcd10c2e12bd266fb19c6b5cf50346766a4a37b"
] | [
"hah_classification/data.py"
] | [
"from collections import Counter\nimport logging\nimport random\nimport numpy as np\nimport jieba\nfrom hah_classification.develop.IO import read_file, write_file\nimport pandas as pd\nimport os\n\nlogger = logging.getLogger(__file__)\nlogger.setLevel(logging.INFO)\n\nPAD_IDX = 0\nUNK_IDX = 1\n\nCOLUMNS = ['location_traffic_convenience', 'location_distance_from_business_district', 'location_easy_to_find',\n 'service_wait_time', 'service_waiters_attitude', 'service_parking_convenience', 'service_serving_speed',\n 'price_level', 'price_cost_effective', 'price_discount',\n 'environment_decoration', 'environment_noise', 'environment_space', 'environment_cleaness',\n 'dish_portion', 'dish_taste', 'dish_look', 'dish_recommendation',\n 'others_overall_experience', 'others_willing_to_consume_again']\n\n\ndef segment(sentence):\n return [i for i in sentence if i.strip()]\n\n\ndef load_vocab(vocab_path):\n \"\"\"\n 读取词典\n \"\"\"\n vocab = {token: index for index, token in\n enumerate(read_file(vocab_path, deal_function=lambda x: x.strip() if x != '\\n' else x))}\n logger.info('load vocab (size:%s) to %s' % (len(vocab), vocab_path))\n return vocab\n\n\ndef save_vocab(vocab, vocab_path):\n \"\"\"\n 保存词典\n \"\"\"\n sorted_vocab = sorted(vocab.items(), key=lambda x: x[1])\n write_file(sorted_vocab, vocab_path, deal_function=lambda x: x[0] + '\\n')\n logger.info('save vocab (size:%s) to %s' % (len(vocab), vocab_path))\n\n\ndef load_data(data_path, vocab_path, label_vocab_path, create_vocab=False, create_label_vocab=False, min_freq=1,\n vocab_size=None, return_label_vocab=False):\n msg = 'load data from %s, ' % data_path\n data_set = pd.read_csv(data_path)\n vocab_ = Counter() if create_vocab else load_vocab(vocab_path)\n label_vocab = {} if create_label_vocab else load_vocab(label_vocab_path)\n\n sequences, lengths = [], []\n for content in data_set.iloc[:, 1]:\n tokens = segment(content)\n if create_vocab:\n vocab_.update(tokens)\n sequences.append(tokens)\n lengths.append(len(tokens))\n\n if create_vocab:\n vocab = {'<PAD>': PAD_IDX, '<UNK>': UNK_IDX}\n # vocab_size 必须大于2\n print('ori vocab size %s' % len(vocab_))\n vocab_size = max(vocab_size or len(vocab_), 2) - 2\n logger.info('create vocab, min freq: %s, vocab_size: %s' % (min_freq, vocab_size))\n for token, count in vocab_.most_common(vocab_size):\n if not token:\n continue\n if count < min_freq:\n break\n else:\n vocab[token] = len(vocab)\n save_vocab(vocab, vocab_path)\n else:\n vocab = vocab_\n\n columns = data_set.columns.values.tolist()[2:]\n dict_labels = {}\n dict_label_vocab = {}\n for col in columns:\n labels = [str(i) for i in data_set[col]]\n col_vocab_path = label_vocab_path + '.' 
+ col\n if create_label_vocab:\n label_vocab = {vocab: index for index, vocab in enumerate(sorted(set(labels)))}\n save_vocab(label_vocab, col_vocab_path)\n else:\n label_vocab = load_vocab(col_vocab_path)\n if not return_label_vocab:\n labels = list(map(lambda x: label_vocab[x], labels))\n dict_labels[col] = np.array(labels)\n dict_label_vocab[col] = label_vocab\n\n if create_label_vocab:\n save_vocab(label_vocab, label_vocab_path)\n sequences = [[vocab.get(token, UNK_IDX) for token in sequence] for sequence in sequences]\n msg += 'total : %s' % len(sequences)\n logger.info(msg)\n if return_label_vocab:\n return np.array(sequences), dict_labels, np.array(lengths), dict_label_vocab\n else:\n return np.array(sequences), dict_labels, np.array(lengths)\n\n\ndef load_muti_label_data(data_path, vocab_path, create_vocab=False,\n min_freq=1,\n vocab_size=None):\n msg = 'load data from %s, ' % data_path\n data_set = pd.read_csv(data_path)\n vocab_ = Counter() if create_vocab else load_vocab(vocab_path)\n\n sequences, lengths = [], []\n for content in data_set.iloc[:, 1]:\n tokens = segment(content)\n if create_vocab:\n vocab_.update(tokens)\n sequences.append(tokens)\n lengths.append(len(tokens))\n\n if create_vocab:\n vocab = {'<PAD>': PAD_IDX, '<UNK>': UNK_IDX}\n # vocab_size 必须大于2\n print('ori vocab size %s' % len(vocab_))\n vocab_size = max(vocab_size or len(vocab_), 2) - 2\n logger.info('create vocab, min freq: %s, vocab_size: %s' % (min_freq, vocab_size))\n for token, count in vocab_.most_common(vocab_size):\n if not token:\n continue\n if count < min_freq:\n break\n else:\n vocab[token] = len(vocab)\n save_vocab(vocab, vocab_path)\n else:\n vocab = vocab_\n\n\n labels = data_set[COLUMNS].values + 2\n sequences = [[vocab.get(token, UNK_IDX) for token in sequence] for sequence in sequences]\n msg += 'total : %s' % len(sequences)\n logger.info(msg)\n return np.array(sequences), labels, np.array(lengths)\n\n\ndef batch_iter(sequences, labels, lengths, batch_size=64, reverse=False, cut_length=None, shuffle=True):\n \"\"\"\n 将数据集分成batch输出\n :param sequences: 文本序列\n :param labels: 类别\n :param lengths: 文本长度\n :param reverse: 是否reverse文本\n :param cut_length: 截断文本\n :return:\n \"\"\"\n\n # 打乱数据\n data_num = len(sequences)\n indexs = list(range(len(sequences)))\n if shuffle:\n random.shuffle(indexs)\n batch_start = 0\n shuffle_sequences = sequences[indexs]\n shuffle_labels = labels[indexs]\n shuffle_lengths = lengths[indexs]\n\n while batch_start < data_num:\n batch_end = batch_start + batch_size\n batch_sequences = shuffle_sequences[batch_start:batch_end]\n batch_labels = shuffle_labels[batch_start:batch_end]\n batch_lengths = shuffle_lengths[batch_start:batch_end]\n\n if isinstance(cut_length, int):\n # 截断数据\n batch_sequences = [sequence[:cut_length] for sequence in batch_sequences]\n batch_lengths = np.where(batch_lengths > cut_length, cut_length, batch_lengths)\n\n # padding长度\n batch_max_length = batch_lengths.max()\n\n batch_padding_sequences = []\n for sequence, length in zip(batch_sequences, batch_lengths):\n sequence += [PAD_IDX] * (batch_max_length - length)\n if reverse:\n sequence.reverse()\n batch_padding_sequences.append(sequence)\n\n batch_padding_sequences = np.array(batch_padding_sequences)\n\n yield batch_padding_sequences, batch_labels, batch_lengths\n batch_start = batch_end\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n vocab_path = '../data/vocab.txt'\n 
label_vocab_path = '../cnews/label.txt'\n data_set = load_data('../data/sentiment_analysis_validationset.csv', vocab_path, label_vocab_path,\n create_vocab=True, create_label_vocab=True, vocab_size=5000)\n # num = 0\n # for sequences, labels, lengths in batch_iter(*data_set, batch_size=64):\n # print(sequences.shape[1], lengths.max(), sequences.shape[1] == lengths.max())\n"
] | [
[
"numpy.array",
"pandas.read_csv",
"numpy.where"
]
] |
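batch_iter in the data module above shuffles the dataset, optionally truncates sequences to cut_length, and pads each batch to the length of its longest member before yielding NumPy arrays. A minimal sketch of that per-batch padding step is given below; PAD_IDX follows the module's convention of 0, while pad_batch and the toy sequences are illustrative.

import numpy as np

PAD_IDX = 0

def pad_batch(sequences, cut_length=None):
    # sequences: list of lists of token ids; returns a padded array plus the true lengths
    if cut_length is not None:
        sequences = [seq[:cut_length] for seq in sequences]
    lengths = np.array([len(seq) for seq in sequences])
    padded = np.full((len(sequences), lengths.max()), PAD_IDX, dtype=np.int64)
    for i, seq in enumerate(sequences):
        padded[i, :len(seq)] = seq
    return padded, lengths

batch, lengths = pad_batch([[5, 3, 9], [7, 2], [4, 4, 4, 4, 1]], cut_length=4)
print(batch)     # rows padded with 0 up to the batch maximum of 4
print(lengths)   # [3 2 4]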
OpenXAIProject/Subsampling_aliasing_artifact_eliminator | [
"6a009ef5620bc3f541441ae688c44eac7670913b"
] | [
"test/generator_test.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.utils.data\nimport h5py\nimport numpy as np\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nfrom skimage.transform import resize\nimport os\nos.environ['CUDA_VISIBLE_DEVICES']='2'\n\n\n# Hyper-parameters\nlatent_size = 5184\nhidden_size = 1728\nimage_size = 216\nnum_epochs = 200\nbatch_size = 128\nsample_dir = '/home/nhjeong/MLPGAN/db' # Directory of database\n\n\n# Generator \nG = nn.Sequential(\n nn.Linear(latent_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, image_size))\n\n\n# Device configuration\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:2\" if use_cuda else \"cpu\")\n\n\nG = torch.load('weight_sample.pkl')\n\n\n\nclass MyDataset(torch.utils.data.Dataset):\n \n def __init__(self, train=True):\n \n self.train = train\n \n if self.train:\n self.train_X_mat = h5py.File(os.path.join(sample_dir, 'db.mat'), 'r')\n self.train_X_input = self.train_X_mat['db'][:]\n\n\n self.train_Y_mat = h5py.File(os.path.join(sample_dir, 'gt.mat'), 'r')\n self.train_Y_input = self.train_Y_mat['gt'][:]\n\n self.train_X_mat.close()\n self.train_Y_mat.close()\n\n else:\n self.test_X_mat = h5py.File(os.path.join(sample_dir, 'test_db.mat'), 'r')\n self.test_X_input = self.test_X_mat['test_db'][:] \n\n self.test_Y_mat = h5py.File(os.path.join(sample_dir, 'test_gt.mat'), 'r')\n self.test_Y_input = self.test_Y_mat['test_gt'][:] \n\n self.test_X_mat.close()\n self.test_Y_mat.close()\n \n \n \n def __len__(self):\n if self.train:\n return self.train_X_input.shape[0]\n else:\n return self.test_X_input.shape[0]\n \n def __getitem__(self, index):\n if self.train:\n raw, target = self.train_X_input[index,], self.train_Y_input[index,]\n else:\n raw, target = self.test_X_input[index,], self.test_Y_input[index,]\n \n return raw, target\n \n \ntestset = MyDataset(train=False)\noutput = G(torch.tensor(testset.test_X_input).to(device))\ntest_result = output.cpu().detach().numpy()\n\n\n\nnrmse = []\nfor i in range(36):\n tmp = testset.test_X_input[384*i:384*(i+1),0:2592] + 1j*testset.test_X_input[384*i:384*(i+1),2592:5184]\n undersampled = np.zeros((384, 216))\n for k in range(12):\n undersampled += np.abs(tmp[:,k*216:(k+1)*216])\n ans = testset.test_Y_input[384*i:384*(i+1),:]\n pred = test_result[384*i:384*(i+1),:]\n error = ans - pred\n rmse = (np.sum(error ** 2) / np.sum(ans ** 2)) ** 0.5\n plt.figure(figsize=[40, 10])\n plt.subplot(1,4,1)\n plt.imshow(resize(undersampled, (216, 216), preserve_range=True))\n plt.title('Aliased image')\n plt.axis('off') \n plt.subplot(1,4,2)\n plt.imshow(resize(pred, (216, 216), preserve_range=True))\n plt.title('Predicted image')\n plt.axis('off') \n plt.subplot(1,4,3)\n plt.imshow(resize(ans, (216, 216), preserve_range=True))\n plt.title('Ground truth') \n plt.axis('off') \n plt.subplot(1,4,4)\n plt.imshow(resize(np.abs(error), (216, 216), preserve_range=True), clim=[0,1])\n plt.title('Difference') \n plt.axis('off')\n plt.savefig('test'+str(i+1)) \n plt.show() \n nrmse.append(rmse)\n print('Saved Fig. %d' % (i+1)) \nprint('nRMSE: %.3lf %%' % (np.mean(nrmse)*100))\n"
] | [
[
"torch.nn.Linear",
"torch.device",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.title",
"numpy.mean",
"matplotlib.pyplot.figure",
"torch.nn.ReLU",
"torch.cuda.is_available",
"torch.tensor",
"torch.load",
"numpy.abs",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot"
]
] |
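The test script above evaluates the trained generator with a normalised RMSE per slice, sqrt(sum((gt - pred)^2) / sum(gt^2)), and reports the mean over all slices as a percentage. A small self-contained sketch of that metric on synthetic arrays follows; the shapes and noise level are illustrative.

import numpy as np

def nrmse(ground_truth, prediction):
    # normalised root-mean-square error, as computed inside the plotting loop
    error = ground_truth - prediction
    return np.sqrt(np.sum(error ** 2) / np.sum(ground_truth ** 2))

rng = np.random.default_rng(0)
gt = rng.random((384, 216))
pred = gt + 0.01 * rng.standard_normal((384, 216))
print('nRMSE: %.3f %%' % (nrmse(gt, pred) * 100))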
sambitmishra0628/PSP-GNM | [
"4c7cdcde05a3c10664bee82b389af1db4c2961b8"
] | [
"scripts/psp_gnm.py"
] | [
"\"\"\"\nProtein stability change upon point mutations using a weighted Gaussian network model\n\nKey steps:\n 1. Coarse-grain a protein structure using only the alpha carbons\n 2. Model the coarse-grained protein structure as a weighted \nGaussian Network Model (GNM)\n 3. The interactions between amino acid residues in such a model will\nbe weighted using statistical potentials\n 4. Simulate unfolding of the wild-type structure by identifying residues\nwith most internal distance changes as the ones to break contact first\n 5. Calculate the difference in energies obtained from the Miyazawa-Jernigan potential\n and entropies (given by the mean-squared fluctuations in residue distance) for the\n mutant and wild-type structures\n\"\"\"\n\nimport numpy as np\nfrom scipy import linalg\nimport os\nimport re\nimport sys\nimport pandas as pd\nimport click\nimport glob\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom joblib import Parallel, delayed\nfrom sklearn.linear_model import LinearRegression\n\nFILE_MAP = {'MJ': 'MIYS960101.txt'}\nPOT_MAT_DIR = 'potential_matrices' # Directory containing the potential matrix\n\n\ndef map_resname_to_id(res_code):\n \"\"\"\n Convert the 3-lettered residue code to single letter\n \n Parameters:\n res_code: The three-lettered amino acid code\n Returns:\n The corresponding single-letter amino acid code\n \"\"\"\n resname_2_id = {'ALA' : 'A', 'ARG' : 'R', 'ASN' : 'N', 'ASP' : 'D',\n 'CYS' : 'C', 'GLY' : 'G', 'GLN' : 'Q', 'GLU' : 'E',\n 'HIS' : 'H', 'ILE' : 'I', 'LEU' : 'L', 'LYS' : 'K',\n 'MET' : 'M', 'PRO' : 'P', 'PHE' : 'F', 'SER' : 'S',\n 'THR' : 'T', 'TRP' : 'W', 'TYR' : 'Y', 'VAL' : 'V',}\n return resname_2_id[res_code]\n\ndef process_wt_pdb (input_dir, output_dir, pdb_chain_list):\n \"\"\"\n Process the atomic PDB structures\n \n Parameters:\n input_dir: Directory containing the raw PDB files\n\n output_dir: Directory to which the processed files will be written into\n \n pdb_chain_list: List of 4 letter PDBID and Chain ID as a single string \n \"\"\"\n\n if not input_dir.endswith('/'):\n input_dir = input_dir + '/'\n if not output_dir.endswith('/'):\n output_dir = output_dir + '/'\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n \n not_found_list = [] # PDB_CHAIN entries that do not have PDB files in the input_dir \n pdbfiles = glob.glob(input_dir + '*.pdb') # List of pdb files in the input directory\n \n if len(pdbfiles) == 0:\n sys.exit(f\"Error! No *.pdb files found in {input_dir}\")\n \n # Check if the names of the PDB files in the input directory include any chain information.\n pdb_id_lens = [len(pdb_i.split('/')[-1].split('.pdb')[0]) for pdb_i in pdbfiles]\n \n # If all pdb files have ids that are of length 4, then no chain\n # ID is included in the file name\n if len(set(pdb_id_lens)) == 1 and list(set(pdb_id_lens))[0] == 4:\n chain_flag = False\n elif len(set(pdb_id_lens)) == 1 and list(set(pdb_id_lens))[0] == 5:\n print (\"PDB file names have length 5! Assuming the 5th alphabet is the chain ID!\")\n chain_flag = True\n else:\n print (f\"Error! Inconsitent PDB filenames! PDB file names (.pdb extension excluded) in {input_dir} \"\n \"should either have only the PDB ID (e.g., 1cei) or PDB ID with chain (e.g., 1ceiA). Acceptable \"\n \"formats include 1cei.pdb, 1CEI.pdb, 1ceiA.pdb or 1CEIA.pdb. 
All files should have the same naming format.\")\n sys.exit()\n \n \n # We can expect more pdb files in the output directory compared \n # to the input directory - separate pdb files for each chain\n if len(glob.glob(output_dir + '*.pdb')) >= len(pdbfiles): \n print (f\"Processed pdb files already present at {output_dir}! Nothing to do!\")\n return\n total_files = len(pdb_chain_list)\n count = 0\n # print (f\"Chain flag = {chain_flag}\")\n # print (f\"{pdbfiles}\")\n for pdb_i in pdb_chain_list:\n # The pdb ids present in the input dir may differ from\n # those in the pdb_chain_list by their case. Consider such differences.\n if input_dir + pdb_i[0:4].upper() + '.pdb' in pdbfiles and chain_flag == False:\n pdbfile = input_dir + pdb_i[0:4].upper() + '.pdb'\n elif input_dir + pdb_i[0:4].lower() + '.pdb' in pdbfiles and chain_flag == False:\n pdbfile = input_dir + pdb_i[0:4].lower() + '.pdb'\n elif input_dir + pdb_i[0:5].upper() + '.pdb' in pdbfiles and chain_flag == True:\n pdbfile = input_dir + pdb_i[0:5].upper() + '.pdb'\n elif input_dir + pdb_i[0:5].lower() + '.pdb' in pdbfiles and chain_flag == True:\n pdbfile = input_dir + pdb_i[0:5].lower() + '.pdb' \n else:\n not_found_list.append(pdb_i[0:4] + '.pdb')\n continue\n\n pdb_name = pdb_i\n chain_id = pdb_i[4]\n outfile = output_dir + pdb_name + '.pdb'\n # print (f\"pdb_name = {pdb_name}, outfile = {outfile}\")\n # return\n fh1 = open(pdbfile, 'r')\n fh2 = open(outfile, 'w')\n # print(\"Processing file \", pdbfile)\n is_nmr = False\n for line in fh1:\n if line.startswith('EXPDTA') and 'NMR' in line:\n is_nmr = True\n continue\n elif line.startswith('MODEL'):\n # By default we will use the first model\n is_nmr = True\n model_num = int(line.split()[1])\n fh2.write(line)\n continue\n elif re.match('^ATOM', line) and line[21] == chain_id: # Only get coordinates for the given chain ID\n alt_loc = line[16] # alternate location if any\n alt_loc = alt_loc.replace(' ','')\n if alt_loc == 'A': # we will consider by default the 'A' location\n fh2.write(line)\n elif alt_loc == '':\n fh2.write(line)\n else:\n continue\t\t\t\t\t\t\n elif re.match('^TER', line) and line[21] == chain_id:\n fh2.write(line)\n elif line.startswith('ENDMDL'):\n fh2.write(line)\n break\n fh1.close()\n fh2.close()\n # print(\"Done!\\n\")\n count += 1\n print (f\"Processed {count}/{total_files} pdb files\", end='\\r', flush=True)\n print (\"\\nDone!\")\n \n if len(not_found_list) > 0:\n not_found_str = ','.join(not_found_list)\n sys.exit(f\"Error! Wildtype PDB files could not be found for {not_found_str} in {input_dir}! 
Cannot continue with prediction!\")\n\ndef parse_calpha_pdb (pdbfile):\n \"\"\"\n Parse the coordinates of the c-alpha atoms from the given pdb file\n \n Parameters:\n pdb_file: The PDB file with path\n Returns:\n PDB_struct: A dictionary containing the parsed information\n \"\"\" \n PDB_struct = {}\n fh = open(pdbfile, 'r')\n for line in fh:\n if re.match('^ATOM', line):\n atomname = line[13:15]\n atomname = atomname.replace(' ', '')\n # Only consider the information for the c-alpha atoms\n if atomname != 'CA':\n continue\n chain = line[21]\n X_coord = line[30:38]\n Y_coord = line[38:46]\n Z_coord = line[46:54]\n \n # Strip white space\n X_coord = re.sub('\\s+','',X_coord)\n Y_coord = re.sub('\\s+','',Y_coord)\n Z_coord = re.sub('\\s+','',Z_coord)\n \n # convert from string to numeric\n X_coord = float(X_coord)\n Y_coord = float(Y_coord) \n Z_coord = float(Z_coord)\t\n \n res_name = line[17:20]\n res_id = line[22:26]\n \n res_name = re.sub('\\s+','',res_name)\n res_id = re.sub('\\s+','',res_id)\n res_id = int(res_id)\n if chain not in PDB_struct.keys():\n PDB_struct[chain] = dict()\n PDB_struct[chain]['COORD'] = []\n PDB_struct[chain]['RESNAME'] = []\n PDB_struct[chain]['RESID'] = []\n PDB_struct[chain]['ATOM_NAME'] = []\n PDB_struct[chain]['COORD'].append([X_coord, Y_coord, Z_coord])\n PDB_struct[chain]['RESNAME'].append(res_name)\n PDB_struct[chain]['RESID'].append(res_id)\n PDB_struct[chain]['ATOM_NAME'].append(atomname)\n fh.close()\n return PDB_struct\n\ndef parse_energy_pot(filename): \n \"\"\"\n Parse the Miyazawa-Jernigan potential matrix file into a dictionary\n \n Parameters:\n filename : The name of the file that includes the MJ potential as given\n in the AA Index database.\n Returns:\n aa_ind_dict: A dictionary containing the parsed MJ potential\n \"\"\" \n with open(POT_MAT_DIR + '/' + filename, 'r') as fh:\n fc = fh.readlines()\n aa_ind_str = ''.join(fc)\n aa_ind_dict = {}\n aa_ind_str = aa_ind_str.strip()\n aa_ind_list = aa_ind_str.split('\\n')\n aa_ind_id = aa_ind_list[0]\n aa_ind_id = str(aa_ind_id[2:])\n\n aa_ind_dict['AA_INDEX_ID'] = aa_ind_id\n aa_order = ''; mat_row_ind = 0;\n for line_i in aa_ind_list:\n # Scan for the line starting with M and capture the order of amino acids used\n # in the contact matrix\n rx = re.compile('^M.*rows = (.*?),')\n if rx.match(line_i):\n aa_order = rx.match(line_i).group(1)\n aa_order = aa_order.replace(' ', '')\n #print (f\" aa_order len = {len(aa_order)}\")\n continue\n elif aa_order: # We have the order of aa. Now parse contact potential values.\n row_elems = line_i.split() # Get the elements in a single row of the matrix\n #if len(row_elems) != len(aa_order):\n # return {}\n if mat_row_ind > len(aa_order)-1:\n #print (\"Condition satisfied\")\n break\n for j in range(0,mat_row_ind+1):\n #print(mat_row_ind, len(aa_order))\n res1 = aa_order[mat_row_ind]\n res2 = aa_order[j]\n val_12 = row_elems[j]\n # check if value can be converted into float or not\n try:\n val_12 = float(val_12)\n except ValueError:\n continue\n aa_ind_dict[res1 + ':' + res2] = val_12\n aa_ind_dict[res2 + ':' + res1] = val_12\n mat_row_ind += 1\n # print (f\"aa ind dict = {aa_ind_dict}\") \n return aa_ind_dict\n\ndef energy_weighted_kirchoff(coord, res_codes, pot_type, cutoff, contact_matrix=None):\n \"\"\" \n Get the Kirchoff's matrix in which the contact springs are assigned based on\n the energetic interaction between residues. 
If contact matrix is provided, then use it.\n Otherwise, calculate the contact matrix based on the residue coordinates.\n Parameters:\n coord : The C-alpha coordinates\n res_codes: The amino acid sequence of the PDB (as obtained from the PDB file)\n pot_type: By default it's the MJ potential\n cutoff: The C-alpha distance cutoff for interacting residues\n contact_matrix: The C-alpha residue-residue contact matrix.\n Returns:\n K: The weighted Kirchhoff matrix\n energy_matrix: The matrix of interaction energies for the interacting residues\n \"\"\"\n pot_file = FILE_MAP[pot_type]\n pot_dict = parse_energy_pot(pot_file)\n del(pot_dict['AA_INDEX_ID'])\n\n # print (pot_dict)\n r,c = np.shape(coord)\n # Convert the potential dictionary into a matrix of size r x r \n # having weights of interactions.\n P = np.zeros((r,r))\n # Have another matrix contain the interaction energies\n E_matrix = np.zeros((r,r))\n for i in range(0,r):\n for j in range(0,r):\n res_i, res_j = res_codes[i], res_codes[j]\n # res_i, res_j = map_resname_to_id(res_names[i]), map_resname_to_id(res_names[j])\n # For neighboring residues, the weight will be more than\n # the maximum weight based on the potential used.\n if abs(i-j) == 1:\n # Make the interaction energies of the adjacent c_alpha atoms stronger\n pot_ij = min(pot_dict.values())-1 \n else: \n pot_ij = pot_dict[res_i + ':' + res_j]\n # The potential value is in energy terms. Convert to weights.\n pot_ij = round(np.exp(-pot_ij), 2)\n P[i,j] = pot_ij\n if contact_matrix.size != 0:\n # Typically useful when we want to provide a customized contact_matrix\n C = contact_matrix\n else:\n C, D = get_ca_contacts(coord, cutoff, r)\n energy_matrix = np.multiply(C,P)\n C = -C\n\n # We will weigh the residue-residue contacts by their energy potentials.\n K = np.multiply(C,P)\n K_cpy = K.copy()\n for i in range(0,r):\n # Diagonals in Kirchoff matrix are sum of the rows/columns\n # except the diagonal. 
\n K[i,i] = -(np.sum(C[i,:] * P[i,:])-K_cpy[i,i])\n return K, energy_matrix \n\ndef get_ca_contacts(coord, cutoff, num_res):\n \"\"\"\n Get the contact matrix for the C-alpha atoms \n \n Parameters:\n coord: The coordinates of the C-alpha atoms as N-by-3 numpy array.\n N is the total number of residues.\n \n cutoff: Distance cutoff of C-alpha atoms that defines an interacting pair\n \n num_res: Total number of residues in the protein\n Returns:\n C: C-alpha contact matrix\n D: C-alpha distance matrix\n \"\"\"\n C = np.zeros((num_res,num_res)) # Contact matrix\n D = np.ones((num_res, num_res)) # Distance matrix\n for rn in range(0,num_res-1):\n #print (rn)\n coord_rn = coord[rn]\n coord_rem = coord[rn+1:]\n r2,c2 = np.shape(coord_rem)\n dist = np.linalg.norm(np.tile(coord_rn,(r2, 1))-coord_rem, axis=1)\n for i,j,d in zip([rn]*r2, list(range(rn+1,num_res)), dist):\n d = round(d,2)\n if i != j:\n D[i,j] = d\n D[j,i] = d\n if d <= cutoff:\n C[i,j] = 1\n C[j,i] = 1\n return C, D\n\ndef calc_residue_cross_corr(V,E,n_modes=20):\n \"\"\"\n Calculate the cross correlations between residues \n \n Parameters:\n V: The matrix including the modes (eigen values)\n E: The eigen vectors associated with each mode\n n_modes: Total number of modes\n Returns:\n C: Cross correlation matrix\n bfact: The theoretical residue mean-squared fluctuations\n \"\"\" \n\n n_res = len(V[:,0])\n nrow,ncol = np.shape(V)\n if n_modes > ncol:\n n_modes = ncol\n Hinv = np.zeros((n_res, n_res))\n for m in list(range(0,n_modes)):\n Hinv += (1/E[m]) *np.outer(V[:,m], V[:,m])\n C = np.zeros((n_res, n_res))\n for i in range(0,n_res):\n for j in range(0,n_res):\n C[i,j] = Hinv[i,j]/(Hinv[i,i]*Hinv[j,j])**0.5\n bfact = np.diagonal(Hinv)\n return C, bfact \n \ndef calc_internal_dist_change(C):\n \"\"\" \n Calculate the internal distance change (aka mean-squared fluctuation in distance \n between a residue pair) given the cross-correlation matrix\n \n Parameters:\n C: The matrix including correlations and cross-correlations between the C-alpha atoms\n Returns:\n I: Internal distance change matrix\n \"\"\"\n r,c = np.shape(C)\n I = np.zeros((r,r))\n for i in range(0,r):\n for j in range(0,r):\n # Round the internal distance change to 3 decimal points\n I[i,j] = round((C[i,i] + C[j,j] - 2*C[i,j]), 3)\n return I\n\ndef calc_gnm(coord, cutoff=9, num_modes=10, spring_type=None, res_codes=None, contact_matrix=None):\n \"\"\" \n Run calculations for GNM in which the interactions are weighted\n between the residues.\n \n Parameters:\n coord: The C-alpha coordinates\n \n cutoff: Cutoff distance between residue C-alpha atoms that defines\n the interactions between residues\n \n num_modes: The total number of low frequency modes with non-zero\n eigen values to be returned\n \n spring_type: Type of weighting for the interactions between residues.\n The interaction strength is obtained from the Miyazawa-Jernigan contact\n potential. Default spring type in this implementation is 'MJ'\n \n res_codes: The single letter amino acid sequence of the protein that\n corresponds to the PDB sequence.\n \n contact_matrix: The C-alpha contact matrix\n Returns:\n V: Matrix of low-frequency modes (eigen vectors). The total number\n of modes is equal to the num_modes\n E: Eigen values for the num_modes low-frequency modes\n e_matrix: The potential matrix defining the interaction strengths of the\n interacting residues \n \"\"\"\n if spring_type == None:\n sys.exit(\"Error! 
spring_type cannot be None for weighted GNM\")\n if type(res_codes) != list:\n sys.exit(\"Error! res_codes must be a list of 3-letter residue codes of all residues\")\n\n stat_pot = ['MJ']\n if spring_type in stat_pot:\n K, e_matrix = energy_weighted_kirchoff(coord, res_codes, spring_type, cutoff, contact_matrix)\n else:\n sys.exit(f\"Invalid spring type {spring_type}\")\n \n E,V = linalg.eigh(K, turbo=True) # Returns eigen values and vectors \n return V[:,1:num_modes+1], E[1:num_modes+1], e_matrix\n\ndef sanity_check(data_file, pdb_dir, rev_mut_pdb):\n \"\"\"\n Check the data_file to see if the residues in the WILD_RES column (forward mutants) or\n MUTANT_RES (reverse_mutants when rev_mut_pdb is not set) column of the data_file are \n indeed present in the PDB file at the positions specified by RES_NUM_PDB.\n\n Parameters:\n data_file : Name of the data file with information on PDB_CHAIN, wt_residue, mut_residue, residue_position\n pdb_dir : The directory including the atomic processed PDB files\n\n Returns: Total number of records that show correct mapping\n \"\"\"\n if not pdb_dir.endswith('/'):\n pdb_dir += '/'\n df = pd.read_csv(data_file, encoding='utf8')\n total_recs = len(df)\n correct_recs = 0\n serial_resnum_list = []\n for pdb_id_i, res_i_wt, res_i_mut, category_i, pos_i in zip(df['PDB_CHAIN'], df['WILD_RES'], df['MUTANT_RES'], df['Category'], df['RES_NUM_PDB']):\n if (not np.isnan(pos_i)):\n # Get the amino acid sequence from the PDB file\n pdbfile_i = pdb_dir + pdb_id_i + '.pdb'\n ca_dict = parse_calpha_pdb(pdbfile_i)\n # Get the target chain from the pdb id\n chain = pdb_id_i[-1]\n # Get the single letter residue names \n seq_i = [map_resname_to_id(res_i) for res_i in ca_dict[chain]['RESNAME']]\n # Get the PDB residue numbers for all residues\n pdb_res_nums = ca_dict[chain]['RESID']\n if pos_i not in pdb_res_nums:\n sys.exit(f\"Error! Residue {res_i_wt} not found at position {pos_i} in the PDB file for {pdb_id_i}\") \n serial_res_num = pdb_res_nums.index(int(pos_i)) # Map the pos_i (the pdb residue number) to serial index\n serial_resnum_list.append(serial_res_num)\n if category_i == 'Forward' and seq_i[int(serial_res_num)] == res_i_wt:\n correct_recs += 1\n elif category_i == 'Reverse' and not rev_mut_pdb and seq_i[int(serial_res_num)] == res_i_mut:\n # If rev_mut_pdb is not set, we expect the wildtype files to be the native state PDB file\n # for the forward mutants\n correct_recs += 1\n elif category_i == 'Reverse' and rev_mut_pdb and seq_i[int(serial_res_num)] == res_i_wt:\n correct_recs += 1 \n else:\n print (f\"Residue mismatch for {category_i} mutant: {pdb_id_i}, {pos_i}, wt_res = {res_i_wt}, mut_res = {res_i_mut}\") \n else:\n sys.exit(f\"RES_NUM_PDB is not defined in the data file for {pdb_id_i} WT:{res_i_wt}, MUT:{res_i_mut}!\") \n print (f\"{correct_recs}/{total_recs} records show correct mutant position and amino acid match\")\n return correct_recs, serial_resnum_list\n\ndef simulate_unfolding(ca_coord, res_codes, pdb_id, dist_cutoff, num_modes, mut_or_wt='wt',serial_res_num=None):\n \"\"\"\n Simulate unfolding based on change in internal distance (mean-squared fluctuation in distance)\n between residues. 
We will simulate the unfolding until 50 percent contacts in the starting structure\n are broken.\n \n Parameters:\n ca_coord: The C-alpha coordinates\n \n res_codes: The single letter amino acid sequence of the protein that\n corresponds to the PDB sequence.\n pdb_id: The four-letterd PDB ID plus the chain ID\n \n dist_cutoff: Cutoff distance between residue C-alpha atoms that defines\n the interactions between residues\n \n num_modes: The total number of low frequency modes with non-zero\n eigen values to be returned\n mut_or_wt: Whether the simulation is being done for the mutant or wildtype protein\n \n serial_res_num: The serial index of the mutant position (not the PDB position).\n Returns:\n df_contact_breaks: A dataframe including information on the contacts broken\n \"\"\"\n df_contact_breaks = pd.DataFrame()\n break_threshold = 0.5\n\n #dist_cutoff = 9 # Cutoff distance for interacting residues\n L_adj = 3 # Strength of interactions between adjacent residues.\n # This value will be reset based on the type of potential used.\n # If this value is less than the maximum value in the calculated\n # contact potential, then this value will be reset in the energy_weighted_kirchoff\n # subroutine.\n int_potential = 'MJ' # Statistical potential for residue-residue interactions\n #num_modes = 10\n num_res = len(res_codes)\n C,D = get_ca_contacts(ca_coord, dist_cutoff, num_res)\n tot_contacts_folded = C.sum()/2\n # print (f\"Total contacts in folded protein = {tot_contacts_folded}\")\n if mut_or_wt != 'wt':\n res_at_mut_pos = res_codes[serial_res_num] # Get the residue at the mutant position\n \n # When simulating for mutant, then get contacts for mutant position\n if mut_or_wt != 'wt':\n mut_pos_contacts = list(np.where(C[serial_res_num,:] == 1)[0])\n mut_pos_tot_contacts = len(mut_pos_contacts) * 2 # Consider both (i,j) and (j,i) pairs\n \n # Parse the potential matrix\n pot_file = FILE_MAP[int_potential]\n pot_dict = parse_energy_pot(pot_file)\n del(pot_dict['AA_INDEX_ID']) # Remove the identifer key \"AA_INDEX_ID\"\n\n mut_pos_num_broken_contacts = 0 # Number of contacts made by the mutant position that are broken\n total_contacts_broken = 0\n contact_matrix = C\n iteration = 1\n # Run simulation until 50% of all the contacts are broken\n while (total_contacts_broken <= round(tot_contacts_folded*break_threshold)):\n # print (f\"Simulating unfolding iteration {iteration}...\", end='\\r', flush=True)\n # Run GNM. 
For the first iteration, we will not calculate the contact_matrix\n # For subsequent iterations, we will calculate the contact_matrix based on\n # the internal distance changes.\n #print (\"Running calc_gnm...\", end='', flush=True)\n V,E, e_matrix = calc_gnm(ca_coord, dist_cutoff, num_modes, int_potential, res_codes, contact_matrix)\n #print (\"Done!\")\n \n # If the second eigen value (calc_gnm excludes the first eigen value and \n # vector) is less than 0.00001 (close to 0) then stop the simulation\n # as the model is no longer stable\n if E[0] < 0.00001:\n break\n # Calculate atomic cross correlations\n #print (\"Running calc_residue_cross_corr...\", end='', flush=True)\n corr_matrix, bfact = calc_residue_cross_corr(V,E,num_modes)\n #print (\"Done!\")\n #print (\"Running calculations for internal distance change...\", end='', flush=True)\n int_dist_matrix = calc_internal_dist_change(corr_matrix)\n #print (\"Done!\")\n \n #print (\"Getting coordinates for true contacts broken...\", end='', flush=True) \n # Get the product of internal dist matrix and the contact matrix\n P_mat = contact_matrix * int_dist_matrix\n \n # Get the coordinates of the maximum value in the P_mat\n max_int_dist_val = np.max(P_mat)\n max_ind_arr = np.where(P_mat == max_int_dist_val) \n num_nonzero_contacts = len(max_ind_arr[0])\n #print (\"Done!\")\n # Break the contacts between the residue pairs\n # having the selected high internal distance change value.\n # The actual number of broken contacts is half since the contact matrix is symmetric\n #print (\"Running break contacts loop...\", flush=True, end='')\n for row_index, col_index in zip(max_ind_arr[0], max_ind_arr[1]):\n if contact_matrix[row_index, col_index] == 1:\n contact_matrix[row_index,col_index] = 0\n # If the simulation is being done for a mutant position\n if mut_or_wt != 'wt':\n # If the broken contact is between the residue at the mutant position\n # and another residue, then note it.\n if (row_index == serial_res_num and col_index in mut_pos_contacts) or (col_index == serial_res_num and row_index in mut_pos_contacts):\n if row_index == serial_res_num:\n pair_res_num = col_index\n else:\n pair_res_num = row_index \n mut_pos_num_broken_contacts += 1\n # contact_break_str = 'Iteration-' + str(iteration) + ',(' + str(row_index) + ',' + str(col_index) + ')'\n res_code_i = res_codes[serial_res_num]\n res_code_j = res_codes[pair_res_num]\n \n column_names = ['PDB_ID', 'WT_or_Mut', 'Mutation_position', 'Contact_Position', 'Res_at_Mut_Position', 'Res_at_Contact_Pos', 'Energy_MJ', 'Int_dist_change', 'Contact_break_rank']\n df_tmp = pd.DataFrame([[pdb_id, mut_or_wt, serial_res_num, pair_res_num, res_code_i,\n res_code_j, pot_dict[res_code_i + ':' + res_code_j], max_int_dist_val, iteration]], columns=column_names)\n df_contact_breaks = df_contact_breaks.append(df_tmp)\n else:\n # If simulation is being done for a wildtype structure\n res_code_i = res_codes[row_index]\n res_code_j = res_codes[col_index]\n column_names = ['PDB_ID', 'WT_or_Mut', 'Mutation_position', 'Contact_Position', 'Res_at_Mut_Position', 'Res_at_Contact_Pos', 'Energy_MJ', 'Int_dist_change', 'Contact_break_rank']\n df_tmp = pd.DataFrame([[pdb_id, mut_or_wt, row_index, col_index, res_code_i,\n res_code_j, pot_dict[res_code_i + ':' + res_code_j], max_int_dist_val, iteration]], columns=column_names)\n df_contact_breaks = df_contact_breaks.append(df_tmp) \n total_contacts_broken += num_nonzero_contacts/2\n #print(\"Done!\")\n # figfile = pdb_id + '_' + mut_or_wt + '_cont_mat_iter_' + 
str(iteration) + '.png' \n # create_contact_map_fig(contact_matrix,'',figfile, res_codes)\n iteration += 1\n #print (f\"Total contacts broken = {total_contacts_broken}/{tot_contacts_folded}\")\n return df_contact_breaks\n\ndef calc_mut_energy_folded_unfolded(processed_pdb_dir, pdb_i, wt_res, mut_res, res_num, dist_cutoff, num_modes):\n \"\"\" \n Calculate the interaction energies in the folded and unfolded mutant structure.\n \n Parameters:\n processed_pdb_dir: The directory having the processed PDB files\n \n pdb_i: The four-letterd PDB ID plus the chain ID\n wt_res: The amino acid (single letter) at the mutation position in the wildtype \n structure\n mut_res: The amino acid (single letter) at the mutation position in the mutant\n structure\n res_num: The mutation postion number as in the PDB file\n dist_cutoff: Cutoff distance between residue C-alpha atoms that defines\n the interactions between residues\n \n num_modes: The total number of low frequency modes with non-zero\n eigen values to be used for calculations\n Returns:\n df_contact_breaks: A dataframe including information on the contacts broken\n \"\"\"\n pdb_id = pdb_i\n pdb_id.replace('.pdb', '') # Just the pdb id for naming purpose\n if not pdb_i.endswith('.pdb'):\n pdb_i += '.pdb'\n pdbfile = processed_pdb_dir + pdb_i\n \n # Parse the c-alpha atom information\n ca_dict = parse_calpha_pdb(pdbfile)\n # print (f\"c-alpha dictionary = {ca_dict}\")\n \n # Get the target chain from the pdb id\n chain = pdb_i.split('.pdb')[0][-1]\n # print (ca_dict)\n # Get the single letter residue names \n res_codes = [map_resname_to_id(res_i) for res_i in ca_dict[chain]['RESNAME']]\n # print (res_codes)\n \n # Get the residue numbers from the pdb file\n pdb_res_nums = ca_dict[chain]['RESID']\n serial_res_num = pdb_res_nums.index(int(res_num)) # Convert the res_num (the pdb residue number) to serial number\n\n # Change the residue code at the serial_res_num to mut_res\n res_codes[serial_res_num] = mut_res\n\n # Simulate unfolding and calculate the energy associated with unfolding\n mut_tag = wt_res + str(res_num) + mut_res\n df_contact_breaks = simulate_unfolding(ca_dict[chain]['COORD'], res_codes, pdb_id, dist_cutoff, num_modes, mut_tag, serial_res_num)\n return df_contact_breaks\n\ndef calc_wt_energy_folded_unfolded(processed_pdb_dir, pdb_i, dist_cutoff, num_modes):\n \"\"\" \n Calculate the interaction energies in the folded and unfolded wildtype structure.\n \n Parameters:\n processed_pdb_dir: The directory having the processed PDB files\n \n pdb_i: The four-letterd PDB ID plus the chain ID\n dist_cutoff: Cutoff distance between residue C-alpha atoms that defines\n the interactions between residues\n \n num_modes: The total number of low frequency modes with non-zero\n eigen values to be used for calculations\n Returns:\n df_contact_breaks: A dataframe including information on the contacts broken\n \n \"\"\"\n pdb_id = pdb_i\n pdb_id = pdb_id.replace('.pdb', '') # Just the pdb id for naming purpose\n if not pdb_i.endswith('.pdb'):\n pdb_i += '.pdb'\n pdbfile = processed_pdb_dir + pdb_i\n \n # Parse the c-alpha atom information\n ca_dict = parse_calpha_pdb(pdbfile)\n # print (f\"c-alpha dictionary = {ca_dict}\")\n \n # Get the target chain from the pdb id\n chain = pdb_i.split('.pdb')[0][-1]\n \n # Get the single letter residue names \n res_codes = [map_resname_to_id(res_i) for res_i in ca_dict[chain]['RESNAME']]\n \n # Simulate unfolding and calculate the energy associated with unfolding\n df_contact_breaks = 
simulate_unfolding(ca_dict[chain]['COORD'], res_codes, pdb_id, dist_cutoff, num_modes, 'wt', '')\n return df_contact_breaks\n\ndef run_ab_initio_stability_prediction_wildtype(pdb_i, outdir, processed_pdb_dir, dist_cutoff, num_modes):\n \"\"\" \n Wrapper function for running calculations on wildtype proteins and write the \n contact break information into a .csv file.\n \n Parameters:\n pdb_i: The four lettered PDB ID plus the chain\n outdir: The directory to which the files will be written to\n processed_pdb_dir: The directory having the processed PDB files\n \n dist_cutoff: Cutoff distance between residue C-alpha atoms that defines\n the interactions between residues\n \n num_modes: The total number of low frequency modes with non-zero\n eigen values to be used for calculations\n \"\"\"\n outfile = pdb_i + '_wt_contact_breaks.csv'\n \n # Do not run calculations if outputfile already exists\n if os.path.isfile(outdir + outfile):\n print (f\"Skipping unfolding simulation for wildtype {pdb_i} as contact-break file is already present!\", flush=True)\n return\n print (f\"Running calculations for {pdb_i} wild type, dist_cutoff = {dist_cutoff}, num_modes = {num_modes}...\", flush=True)\n df_contact_breaks_wt = calc_wt_energy_folded_unfolded(processed_pdb_dir, pdb_i, dist_cutoff, num_modes)\n \n # Write the calculation output to a file\n df_contact_breaks_wt.to_csv(outdir + outfile, index=False)\n\ndef run_ab_initio_stability_prediction_mutant(row_i, outdir, processed_pdb_dir, dist_cutoff, num_modes, rev_mut_pdb):\n \"\"\" \n Wrapper function for running calculations on mutant\n \n Parameters:\n row_i: The ith row as given in the benchmark .csv datafile\n outdir: The directory to which the files will be written to\n processed_pdb_dir: The directory having the processed PDB files\n \n dist_cutoff: Cutoff distance between residue C-alpha atoms that defines\n the interactions between residues\n \n num_modes: The total number of low frequency modes with non-zero\n eigen values to be used for calculations\n \"\"\"\n pdb_i, wt_res, mut_res, res_num_pdb, mut_category = row_i['PDB_CHAIN'], row_i['WILD_RES'], row_i['MUTANT_RES'], row_i['RES_NUM_PDB'], row_i['Category']\n\n # Skip calculations if output file is already present\n outfile = pdb_i + '_' + row_i['WILD_RES'] + str(row_i['RES_NUM_PDB']) + row_i['MUTANT_RES'] + '_contact_breaks.csv'\n if mut_category == 'Forward' and os.path.isfile(outdir + outfile):\n print (f\"Skipping unfolding simulation for mutant {pdb_i}: {row_i['WILD_RES']}{row_i['RES_NUM_PDB']}{row_i['MUTANT_RES']} as contact-break file is already present!\")\n return\n if ((mut_category == 'Forward') or (mut_category == 'Reverse' and rev_mut_pdb)) and (not os.path.isfile(outdir + outfile)):\n # If the rev_mut_pdb is set for reverse mutants, that means the PDB files in the wt_pdb_dir have \n # the forward mutations at the RES_NUM_PDB position. We need to generate the contact break calculations\n # for the reverse mutations.\n #print (f\"Condition satisfied, rev_mut_pdb = {rev_mut_pdb}, {type(rev_mut_pdb)}\")\n print (f\"Running calculations for {pdb_i}, mutation {wt_res}{res_num_pdb}{mut_res} dist_cutoff = {dist_cutoff}, num_modes = {num_modes}...\",flush=True)\n df_contact_breaks_mut = calc_mut_energy_folded_unfolded(processed_pdb_dir, pdb_i, wt_res, mut_res, res_num_pdb, dist_cutoff, num_modes)\n \n # If this mutant is a reverse mutant, we also need to generate the contact breaks file for the forward mutant\n # if that is not already present. 
This is required only if the rev_mut_pdb option is not set.\n outfile2 = pdb_i + '_' + row_i['MUTANT_RES'] + str(row_i['RES_NUM_PDB']) + row_i['WILD_RES'] + '_contact_breaks.csv'\n if mut_category == 'Reverse' and not rev_mut_pdb and not os.path.isfile(outdir + outfile2):\n print (f\"Running calculations for {pdb_i}, mutation {mut_res}{res_num_pdb}{wt_res} dist_cutoff = {dist_cutoff}, num_modes = {num_modes}...\",flush=True)\n df_contact_breaks_mut_2 = calc_mut_energy_folded_unfolded(processed_pdb_dir, pdb_i, mut_res, wt_res, res_num_pdb, dist_cutoff, num_modes) # We switch the mutant and the wildtype residues\n \n # Add info on exp ddG if df_contact_breaks_mut variable exists\n if 'df_contact_breaks_mut' in locals():\n # Write the calculation output to a file\n df_contact_breaks_mut.to_csv(outdir + outfile, index=False)\n \n # If the mutation category is reverse, we will need the contact break info\n # on the forward mutation if not already present.\n if mut_category == 'Reverse' and 'df_contact_breaks_mut_2' in locals():\n df_contact_breaks_mut_2.to_csv(outdir + outfile2, index=False)\n return \n\n\[email protected]()\[email protected]('--data_file', required=True, type=str, help='Name of the \\\n.csv file containing the information on ddG for the mutants')\[email protected]('--outfile', required=True, type=str, help='Name of the file to \\\nwhich the PSP-GNM-calculated energies and experimental energies will be written')\[email protected]('--outdir', required=True, type=str, help='Name of the directory to \\\nwhich the intermittent result files will be written to') \[email protected]('--wt_pdb_dir',required=True, type=str, help='Directory containing \\\nthe wild type atomic pdb files. For a reverse mutant, the wildtype is the forward mutant.')\[email protected]('--num_jobs',required=True, type=str, help='Maximum number \\\nof jobs to be run in parallel')\[email protected]('--dist_cutoff',required=True, default=9, type=str, help='Distance cutoff \\\nfor interactions in GNM', show_default=True)\[email protected]('--num_modes',required=True, default=10, type=str, help='Number \\\nof modes to be used', show_default=True)\[email protected]('--rev_mut_pdb',required=False, is_flag=True, help='Set this option \\\nif data_file includes reverse mutants and all the reverse mutants have the corresponding \\\npdb files of the forward mutant in wt_pdb_dir.', show_default=True)\n\n\ndef run_ab_initio_stability_prediction_wrapper(data_file, outfile, outdir, wt_pdb_dir, num_jobs, dist_cutoff, num_modes, rev_mut_pdb):\n # Wrapper function that parallely performs calculations for each \n # First perform a sanity check on the mutant csv file- check how many records correctly\n # correspond to the residue position and whether the sequence included in the\n # mutant_csv_file has the specified residue at that particular position.\n\n # Input paramter definitions are same as described for the command-line arguments.\n\n # Writes the PSP-GNM-calculated ddG into outfile\n df_data = pd.read_csv(data_file, encoding='utf8')\n \n if not os.path.isdir(wt_pdb_dir):\n sys.exit(f\"Error! 
{wt_pdb_dir} not found!\")\n if not wt_pdb_dir.endswith('/'):\n wt_pdb_dir += '/'\n \n # Process the raw pdb files\n pdb_uniq = df_data['PDB_CHAIN'].unique().tolist()\n processed_pdb_dir = wt_pdb_dir[:-1] + '_processed/'\n process_wt_pdb(wt_pdb_dir, processed_pdb_dir, pdb_uniq)\n\n # First perform a sanity check on the mutant csv file- check how many records correctly\n # correspond to the residue position and whether the sequence included in the\n # mutant_csv_file has the specified residue at that particular position.\n print (\"Running sanity check...\", flush=True) \n num_corr_map, serial_resnum_list = sanity_check(data_file, processed_pdb_dir, rev_mut_pdb)\n if num_corr_map < len(df_data):\n print (f\"Only {num_corr_map}/{len(df_data)} records in {data_file} mapped correctly. Please fix the other records and then re-run.\")\n #sys.exit()\n else:\n print (\"Done!\", flush=True)\n\n # Include the serial residue numbers for mutant positions (as opposed to PDB Residue number)\n # obtained as a column in df_data\n df_data['RES_IND_SEQ'] = serial_resnum_list \n\n if not outdir.endswith('/'):\n outdir += '/'\n # Create output directory if not already present\n if not os.path.isdir(outdir):\n os.mkdir(outdir) \n \n # Convert num_jobs, dist_cutoff and num_modes to int\n num_jobs = int(num_jobs) \n num_modes = int(num_modes)\n dist_cutoff = float(dist_cutoff)\n \n # Store the overall output here\n df_output_all = pd.DataFrame()\n\n # First run calculations for the wild type structures \n Parallel(n_jobs=num_jobs)(delayed(run_ab_initio_stability_prediction_wildtype)(pdb_i, outdir, processed_pdb_dir, dist_cutoff, num_modes) for pdb_i in pdb_uniq)\n \n # Next run calculations for all the mutant rows\n Parallel(n_jobs=num_jobs)(delayed(run_ab_initio_stability_prediction_mutant)(row_i, outdir, processed_pdb_dir, dist_cutoff, num_modes, rev_mut_pdb) for idx, row_i in df_data.iterrows())\n \n # Define the columns in the output file\n col_list = list(df_data.columns) + ['Calc_ddG', 'Calc_ddI', 'Calc_ddG_mean', 'Calc_ddI_mean', 'Num_contacts']\n\n # Go through each row of protherm data and perform calculations using the \n # intermediary contact breaks files.\n for idx_i, row_i in df_data.iterrows():\n # res_num is the residue number in the pdb file\n pdb_i, wt_res, mut_res, res_num_pdb, res_num_serial, mut_category = row_i['PDB_CHAIN'], row_i['WILD_RES'], row_i['MUTANT_RES'], row_i['RES_NUM_PDB'], row_i['RES_IND_SEQ'], row_i['Category']\n if mut_category == 'Reverse' and not rev_mut_pdb:\n # Switch the contact break files (i.e., wildtype contact break is now treated as the mutant\n # contact break file and the mutant contact break file as the wildtype)\n #print (\"rev_mut_pdb is not set\")\n wt_cont_brk_file = pdb_i + '_' + row_i['MUTANT_RES'] + str(row_i['RES_NUM_PDB']) + row_i['WILD_RES'] + '_contact_breaks.csv' \n mut_cont_brk_file = pdb_i + '_wt_contact_breaks.csv' \n elif mut_category == 'Forward' or (mut_category == 'Reverse' and rev_mut_pdb): \n # If the mutation category is Forward or if it is reverse but, includes the\n # the PDB files for the forward mutants, then read the original wild-type and mutant \n # contact break files\n wt_cont_brk_file = pdb_i + '_wt_contact_breaks.csv'\n\n # Read the mutant contact breaks file\n mut_cont_brk_file = pdb_i + '_' + row_i['WILD_RES'] + str(row_i['RES_NUM_PDB']) + row_i['MUTANT_RES'] + '_contact_breaks.csv' \n else:\n sys.exit(f\"Invalid mutation category {mut_category} for {pdb_i} {wt_res}{res_num_pdb}{mut_res}. 
Mutation category must be either Forward or Reverse.\")\n\n try:\n df_cont_brk_wt = pd.read_csv(outdir + wt_cont_brk_file)\n except pd.errors.EmptyDataError:\n print (f\"{outdir + wt_cont_brk_file} is empty! No contacts involving mutation position broken during simulation! Will assign ddG value 0!\")\n df_output_tmp = pd.DataFrame(data=[list(row_i) + [0, 0, 0, 0, 0]],\n columns = col_list)\n df_output_all = df_output_all.append(df_output_tmp)\n continue\n \n try: \n df_cont_brk_mut = pd.read_csv(outdir + mut_cont_brk_file)\n except pd.errors.EmptyDataError:\n print (f\"{outdir + mut_cont_brk_file} is empty! No contacts involving mutation position broken during simulation! Will assign ddG value 0!\")\n df_output_tmp = pd.DataFrame(data=[list(row_i) + [0, 0, 0, 0, 0]],\n columns = col_list)\n df_output_all = df_output_all.append(df_output_tmp)\n continue \n \n # Skip the record if no contacts are broken with the residue at the mutation position\n if len(df_cont_brk_mut) == 0:\n print (f\"No contacts involving mutation position broken for {mut_category} mutant, {pdb_i} : {wt_res}{res_num_pdb}{mut_res}\")\n df_output_tmp = pd.DataFrame(data=[list(row_i) + [0, 0, 0, 0, 0]],\n columns = col_list)\n df_output_all = df_output_all.append(df_output_tmp)\n continue\n elif len(df_cont_brk_wt) == 0:\n print (f\"No contacts involving mutation position broken for wildtype of the {mut_category} mutant, {pdb_i} : {wt_res}{res_num_pdb}{mut_res}\")\n df_output_tmp = pd.DataFrame(data=[list(row_i) + [0, 0, 0, 0, 0]],\n columns = col_list)\n df_output_all = df_output_all.append(df_output_tmp)\n continue\n\n # Calculate the theoretical ddG\n df_wt = df_cont_brk_wt.copy()\n df_mut = df_cont_brk_mut.copy()\n\n if mut_category == 'Forward':\n df_wt = df_wt.loc[(df_wt['PDB_ID'] == pdb_i) & (df_wt['WT_or_Mut'] == 'wt') & (df_wt['Mutation_position'] == res_num_serial)]\n df_mut = df_mut.loc[(df_mut['PDB_ID'] == pdb_i) & (df_mut['WT_or_Mut'] != 'wt') & (df_mut['Mutation_position'] == res_num_serial) & (df_mut['Res_at_Mut_Position'] == mut_res)]\n elif mut_category == 'Reverse':\n df_wt = df_wt.loc[(df_wt['PDB_ID'] == pdb_i) & (df_wt['Mutation_position'] == res_num_serial) ]\n df_mut = df_mut.loc[(df_mut['PDB_ID'] == pdb_i) & (df_mut['Mutation_position'] == res_num_serial) & (df_mut['Res_at_Mut_Position'] == mut_res)]\n\n # Drop duplicate rows\n df_wt.drop_duplicates(inplace=True)\n df_mut.drop_duplicates(inplace=True)\n # print (f\"df_mut = {df_mut}\")\n # print (f\"df_wt = {df_wt}\")\n if len(df_mut) == 0:\n print (f\"No contacts involving mutant position broken while unfolding mutant structure of the {mut_category} mutant: {pdb_i} {wt_res}{res_num_pdb}{mut_res}. Will assign ddG value of 0!\")\n df_output_tmp = pd.DataFrame(data=[list(row_i) + [0, 0, 0, 0, 0]],\n columns = col_list)\n df_output_all = df_output_all.append(df_output_tmp)\n continue\n \n # If no contacts are broken in the wild type for the mutation position, then skip this position\n if len(df_wt) == 0:\n print (f\"No contacts involving mutant position broken while unfolding wildtype structure of the {mut_category} mutant: {pdb_i} {wt_res}{res_num_pdb}{mut_res}. 
Will assign ddG value of 0!\")\n df_output_tmp = pd.DataFrame(data=[list(row_i) + [0, 0, 0, 0, 0]],\n columns = col_list)\n df_output_all = df_output_all.append(df_output_tmp)\n continue\n df_wt.reset_index(drop=True, inplace=True)\n df_mut.reset_index(drop=True, inplace=True)\n \n # Sort by contact break rank\n df_wt = df_wt.sort_values(by=['Contact_break_rank'])\n df_mut = df_mut.sort_values(by=['Contact_break_rank'])\n df_wt.reset_index(drop=True, inplace=True)\n df_mut.reset_index(drop=True, inplace=True)\n \n # Only consider the minimum number of contacts broken either\n # in the wild type or in the mutant\n min_len = len(df_wt)\n\n if len(df_mut) < min_len:\n min_len = len(df_mut)\n del_energy = df_mut['Energy_MJ'][0:min_len] - df_wt['Energy_MJ'][0:min_len]\n del_int_dist = df_mut['Int_dist_change'][0:min_len] - df_wt['Int_dist_change'][0:min_len]\n calc_ddG = sum(del_energy)\n calc_ddI = sum(del_int_dist)\n calc_ddG_mean = calc_ddG/len(del_energy)\n calc_ddI_mean = calc_ddI/len(del_int_dist)\n\n df_output_tmp = pd.DataFrame(data=[list(row_i) + [calc_ddG, calc_ddI, calc_ddG_mean, calc_ddI_mean, min_len]],\n columns = col_list)\n df_output_all = df_output_all.append(df_output_tmp)\n # Scale the calculated energy and ddI using the coefficients obtained by fitting\n # the calculated energy to the experimental energy\n \n # For the forward mutants we will scale using the coefficients obtained from the S350 data\n # Fit linear regression model for forward mutations\n df_output_all_fw = df_output_all.copy()\n df_output_all_fw = df_output_all_fw.loc[df_output_all_fw['Category'] == 'Forward']\n if len(df_output_all_fw) > 0 :\n # Block comment #\n #\n # Uncomment when fitting to S2298 data\n # calc_ddG_unscaled = -(df_output_all_fw['Calc_ddG']-df_output_all_fw['Calc_ddI'])\n # reg_mdl_fw = LinearRegression().fit(np.array(calc_ddG_unscaled).reshape(-1,1),\n # df_output_all_fw['EXP_DDG'])\n # coeff,intercept = reg_mdl_fw.coef_[0], reg_mdl_fw.intercept_\n # coeff = round(coeff,2)\n # intercept = round(intercept,2)\n #\n # End Block comment #\n \n if dist_cutoff == 9 and num_modes == 10:\n coeff = 0.08\n intercept = -1.01\n # Coeff and intercepts for energies\n coeff_calc_ddg = -0.07\n intercept_calc_ddg = -1.0\n # Coeff and intercepts for entropies\n coeff_calc_ddi = -0.2\n intercept_calc_ddi = -1.01\n elif dist_cutoff == 9 and num_modes == 20:\n coeff = 0.08\n intercept = -0.99\n # Coeff and intercepts for energies\n coeff_calc_ddg = -0.07\n intercept_calc_ddg = -0.98\n # Coeff and intercepts for entropies\n coeff_calc_ddi = -0.22\n intercept_calc_ddi = -1.01\n else:\n print (f\"Scaling coefficients and intercepts for num_modes={num_modes} and dist_cutoff={dist_cutoff} unavailable.\\n\"\n \"Using coefficients for num_modes = 10, dist_cutoff = 9 instead!\")\n coeff = 0.08\n intercept = -1.01\n # Coeff and intercepts for energies\n coeff_calc_ddg = -0.07\n intercept_calc_ddg = -1.0\n # Coeff and intercepts for entropies\n coeff_calc_ddi = -0.2\n intercept_calc_ddi = -1.01 \n df_output_all_fw['Calc_Energy_scaled'] = np.array(list(df_output_all_fw['Calc_ddG'])) * coeff_calc_ddg + intercept_calc_ddg\n df_output_all_fw['Calc_Entropy_scaled'] = np.array(list(df_output_all_fw['Calc_ddI'])) * coeff_calc_ddi + intercept_calc_ddi \n calc_ddG_unscaled = -(df_output_all_fw['Calc_ddG']-df_output_all_fw['Calc_ddI'])\n ddG_PSP_GNM_fw = np.array(list(calc_ddG_unscaled))*coeff + intercept\n df_output_all_fw['ddG_PSP_GNM'] = ddG_PSP_GNM_fw\n\n # Scale the calculated ddG for the reverse mutations\n 
df_output_all_rev = df_output_all.copy()\n df_output_all_rev = df_output_all_rev.loc[df_output_all_rev['Category'] == 'Reverse']\n\n if len(df_output_all_rev) > 0:\n #coeff = 0.11\n #intercept = 0.85\n if dist_cutoff == 9 and num_modes == 10:\n coeff = 0.08\n intercept = 1.01\n # Coeff and intercepts for energies\n coeff_calc_ddg = -0.07\n intercept_calc_ddg = 1.0\n # Coeff and intercepts for entropies\n coeff_calc_ddi = -0.2\n intercept_calc_ddi = 1.01\n elif dist_cutoff == 9 and num_modes == 20:\n coeff = 0.08\n intercept = 0.99\n # Coeff and intercepts for energies\n coeff_calc_ddg = -0.07\n intercept_calc_ddg = 0.98\n # Coeff and intercepts for entropies\n coeff_calc_ddi = -0.22\n intercept_calc_ddi = 1.01\n else:\n print (f\"Scaling coefficients and intercepts for num_modes={num_modes} and dist_cutoff={dist_cutoff} unavailable.\\n\"\n \"Using coefficients for num_modes = 10, dist_cutoff = 9 instead!\")\n coeff = 0.08\n intercept = 1.01\n # Coeff and intercepts for energies\n coeff_calc_ddg = -0.07\n intercept_calc_ddg = 1.0\n # Coeff and intercepts for entropies\n coeff_calc_ddi = -0.2\n intercept_calc_ddi = 1.01 \n df_output_all_rev['Calc_Energy_scaled'] = np.array(list(df_output_all_rev['Calc_ddG'])) * coeff_calc_ddg + intercept_calc_ddg\n df_output_all_rev['Calc_Entropy_scaled'] = np.array(list(df_output_all_rev['Calc_ddI'])) * coeff_calc_ddi + intercept_calc_ddi\n calc_ddG_unscaled = -(df_output_all_rev['Calc_ddG']-df_output_all_rev['Calc_ddI'])\n ddG_PSP_GNM_rev = np.array(list(calc_ddG_unscaled))*coeff + intercept\n df_output_all_rev['ddG_PSP_GNM'] = ddG_PSP_GNM_rev\n\n \n if len(df_output_all_fw) > 0 and len(df_output_all_rev) > 0:\n df_output_all_new = pd.concat([df_output_all_fw, df_output_all_rev])\n elif len(df_output_all_fw) > 0:\n df_output_all_new = df_output_all_fw\n else:\n df_output_all_new = df_output_all_rev \n\n df_output_all_new.to_csv(outfile, index=False)\n print (f\"Wrote all calculations to {outfile}\")\n\n\nif __name__ == '__main__':\n run_ab_initio_stability_prediction_wrapper() \n"
] | [
[
"numpy.max",
"numpy.isnan",
"numpy.zeros",
"scipy.linalg.eigh",
"pandas.DataFrame",
"numpy.sum",
"numpy.ones",
"numpy.tile",
"numpy.exp",
"numpy.shape",
"numpy.diagonal",
"numpy.multiply",
"numpy.where",
"pandas.concat",
"numpy.outer",
"pandas.read_csv"
]
] |
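The PSP-GNM record above turns the per-mutation contact-break sums into a predicted ddG by combining the energy term (Calc_ddG) and the internal-distance term (Calc_ddI) and applying a linear fit. A minimal sketch of that scaling step, using the forward-mutant coefficients the script hard-codes for dist_cutoff = 9 and num_modes = 10 (the function name and the example inputs are illustrative, not part of the original code):

```python
def scale_forward_ddg(calc_ddG, calc_ddI, coeff=0.08, intercept=-1.01):
    """Map the raw PSP-GNM sums onto the experimental ddG scale:
    ddG_PSP_GNM = coeff * (-(Calc_ddG - Calc_ddI)) + intercept."""
    unscaled = -(calc_ddG - calc_ddI)
    return coeff * unscaled + intercept

# Purely illustrative inputs:
print(scale_forward_ddg(3.2, 1.1))  # 0.08 * (-(3.2 - 1.1)) - 1.01 = -1.178
```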
Felihong/ml-practices | [
"5193d14e01884ea2d808f621e758c2f645d1980c"
] | [
"ada-boost/AdaBoost.py"
] | [
"import numpy as np\n''' wir packen 3 Klassifizieren Algorithmen in unsere Klassenpool '''\nimport ClassiferPool.FischerDiskriminante as fischer\nimport ClassiferPool.Perzeptron as perzeptron\nimport ClassiferPool.KNN as knn\n\n\ndef initial_weight_data(data):\n return np.ones((len(data), 1))\n\n\ndef initial_weight_class(MaxInter):\n return np.zeros((MaxInter, 1))\n\n\ndef sigmoid(x):\n tmp = 1.0 + np.exp(-x)\n result = 1.0 / tmp\n return result\n\n''' hier berechnen wir das exponential Error von den Gewicht '''\ndef cal_error_weight(data, dataWeightVector, resultVector):\n sum = 0\n errorIndex = []\n for i in range(0, len(data)):\n if (data[i][-1] != resultVector[i]):\n sum += dataWeightVector[i]\n errorIndex.append(i)\n return sum, errorIndex\n\n''' hier wählen wir die Klasse aus dem Klassenpool, die das exponentiale Error minimiert,\n gleichzeitig merken wir die Hit Dateien und Miss Dateien, damit wir den Datensatz später passend \n aktualisieren '''\ndef get_next_class(data, dataWeight):\n arrayClass = [fischer, knn, perzeptron]\n arrayError = []\n errorIndex = []\n #print('ja')\n result1 = fischer.prediction(data)\n #print('hello')\n arrayError.append(cal_error_weight(data, dataWeight, result1)[0])\n errorIndex.append(cal_error_weight(data, dataWeight, result1)[1])\n\n result2 = knn.prediction(data)\n #print('hi')\n arrayError.append(cal_error_weight(data, dataWeight, result2)[0])\n errorIndex.append(cal_error_weight(data, dataWeight, result2)[1])\n\n result3 = perzeptron.prediction(data)\n #print('hey')\n arrayError.append(cal_error_weight(data, dataWeight, result3)[0])\n errorIndex.append(cal_error_weight(data, dataWeight, result3)[1])\n\n index = np.argmin(arrayError)\n error = np.amin(arrayError)\n\n return arrayClass[index], error, errorIndex\n\n\ndef adaBoosting(data, MaxInter):\n classWeight = initial_weight_class(MaxInter)\n dataWeight = initial_weight_data(data)\n classPool = []\n\n for i in range(0, MaxInter):\n result = get_next_class(data, dataWeight)\n #print(result[1])\n classPool.append(result[0]) # füge neue Klasse hinzu\n\n e = result[1] / np.sum(dataWeight)\n right = 0.5 * (np.log((1-e)/e))\n classWeight[i] = right # aktualisiere das Gewicht von der neuen Klasse\n\n errorIndex = result[2]\n update_data_weight(dataWeight, errorIndex, right) # aktualisiere das Gewicht des Datensätzes\n\n return classPool, classWeight # wir bekommen unsere Committee und deren \"speak right\"\n\n''' aktualisieren den Datensatz, je nach ob es richtig geschätzt wird'''\ndef update_data_weight(dataWeight, errorIndex, right):\n j = 0\n for i in range(0, len(dataWeight)):\n if (i != errorIndex[j]):\n dataWeight[i] = dataWeight[i] * np.exp(right)\n else:\n dataWeight[i] = dataWeight[i] * np.exp(0-right)\n j += 1\n return dataWeight\n\n\ndef prediction(data, testsample, MaxInter):\n adaBoost = adaBoosting(data, MaxInter)\n classes = adaBoost[0]\n weights = adaBoost[1]\n resultArray = []\n\n for i in range(0, len(classes)):\n result = classes[i].prediction_all(data, testsample)\n resultArray.append(result)\n\n C = np.dot(np.transpose(weights), np.transpose(np.matrix(resultArray)))\n\n if (sigmoid(C) >= 0.5):\n return 1\n else: return 0\n\n\ndef error_rate(data, test, MaxInter):\n error = 0\n\n for i in range(0, len(test)):\n result = prediction(data, test[i], MaxInter)\n print(result)\n if (result != test[i][-1]):\n error += 1\n print('error rate mit', MaxInter, 'Iterationen ist', error / len(test))\n\n\n\n\n"
] | [
[
"numpy.matrix",
"numpy.zeros",
"numpy.argmin",
"numpy.log",
"numpy.sum",
"numpy.exp",
"numpy.transpose",
"numpy.amin"
]
] |
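The AdaBoost.py record above computes each weak learner's vote weight as 0.5 * ln((1 - e) / e) and then rescales the sample weights with exp(+/- weight). For reference, a minimal sketch of the canonical AdaBoost.M1 re-weighting step (the textbook formulation with renormalisation, shown on its own rather than as a drop-in replacement for the record's update_data_weight loop):

```python
import numpy as np

def adaboost_reweight(weights, y_true, y_pred, alpha):
    """Canonical AdaBoost.M1 update: boost misclassified samples by exp(alpha),
    shrink correctly classified ones by exp(-alpha), then renormalise."""
    miss = np.asarray(y_true) != np.asarray(y_pred)
    weights = np.asarray(weights, dtype=float) * np.exp(np.where(miss, alpha, -alpha))
    return weights / weights.sum()

# Purely illustrative usage:
w = adaboost_reweight(np.ones(4) / 4, [1, 0, 1, 1], [1, 1, 1, 0], alpha=0.5)
```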
SHK2018/Gasyori100knock | [
"3fab0a2906ac99a37281269e1618e8ac74629dfa"
] | [
"Question_91_100/answers/answer_96.py"
] | [
"import cv2\nimport numpy as np\n\nnp.random.seed(0)\n\n\n# get HOG\ndef HOG(img):\n # Grayscale\n def BGR2GRAY(img):\n gray = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]\n return gray\n\n # Magnitude and gradient\n def get_gradXY(gray):\n H, W = gray.shape\n\n # padding before grad\n gray = np.pad(gray, (1, 1), 'edge')\n\n # get grad x\n gx = gray[1:H+1, 2:] - gray[1:H+1, :W]\n # get grad y\n gy = gray[2:, 1:W+1] - gray[:H, 1:W+1]\n # replace 0 with \n gx[gx == 0] = 1e-6\n\n return gx, gy\n\n # get magnitude and gradient\n def get_MagGrad(gx, gy):\n # get gradient maginitude\n magnitude = np.sqrt(gx ** 2 + gy ** 2)\n\n # get gradient angle\n gradient = np.arctan(gy / gx)\n\n gradient[gradient < 0] = np.pi / 2 + gradient[gradient < 0] + np.pi / 2\n\n return magnitude, gradient\n\n # Gradient histogram\n def quantization(gradient):\n # prepare quantization table\n gradient_quantized = np.zeros_like(gradient, dtype=np.int)\n\n # quantization base\n d = np.pi / 9\n\n # quantization\n for i in range(9):\n gradient_quantized[np.where((gradient >= d * i) & (gradient <= d * (i + 1)))] = i\n\n return gradient_quantized\n\n\n # get gradient histogram\n def gradient_histogram(gradient_quantized, magnitude, N=8):\n # get shape\n H, W = magnitude.shape\n\n # get cell num\n cell_N_H = H // N\n cell_N_W = W // N\n histogram = np.zeros((cell_N_H, cell_N_W, 9), dtype=np.float32)\n\n # each pixel\n for y in range(cell_N_H):\n for x in range(cell_N_W):\n for j in range(N):\n for i in range(N):\n histogram[y, x, gradient_quantized[y * 4 + j, x * 4 + i]] += magnitude[y * 4 + j, x * 4 + i]\n\n return histogram\n\n\t\t# histogram normalization\n def normalization(histogram, C=3, epsilon=1):\n cell_N_H, cell_N_W, _ = histogram.shape\n ## each histogram\n for y in range(cell_N_H):\n \t for x in range(cell_N_W):\n \t #for i in range(9):\n histogram[y, x] /= np.sqrt(np.sum(histogram[max(y - 1, 0) : min(y + 2, cell_N_H),\n max(x - 1, 0) : min(x + 2, cell_N_W)] ** 2) + epsilon)\n\n return histogram\n\n # 1. BGR -> Gray\n gray = BGR2GRAY(img)\n\n # 1. Gray -> Gradient x and y\n gx, gy = get_gradXY(gray)\n\n # 2. get gradient magnitude and angle\n magnitude, gradient = get_MagGrad(gx, gy)\n\n # 3. Quantization\n gradient_quantized = quantization(gradient)\n\n # 4. Gradient histogram\n histogram = gradient_histogram(gradient_quantized, magnitude)\n \n # 5. Histogram normalization\n histogram = normalization(histogram)\n\n return histogram\n\n\n# get IoU overlap ratio\ndef iou(a, b):\n\t# get area of a\n area_a = (a[2] - a[0]) * (a[3] - a[1])\n\t# get area of b\n area_b = (b[2] - b[0]) * (b[3] - b[1])\n\n\t# get left top x of IoU\n iou_x1 = np.maximum(a[0], b[0])\n\t# get left top y of IoU\n iou_y1 = np.maximum(a[1], b[1])\n\t# get right bottom of IoU\n iou_x2 = np.minimum(a[2], b[2])\n\t# get right bottom of IoU\n iou_y2 = np.minimum(a[3], b[3])\n\n\t# get width of IoU\n iou_w = iou_x2 - iou_x1\n\t# get height of IoU\n iou_h = iou_y2 - iou_y1\n\n\t# get area of IoU\n area_iou = iou_w * iou_h\n\t# get overlap ratio between IoU and all area\n iou = area_iou / (area_a + area_b - area_iou)\n\n return iou\n\n# resize using bi-linear\ndef resize(img, h, w):\n # get shape\n _h, _w, _c = img.shape\n\n # get resize ratio\n ah = 1. * h / _h\n aw = 1. 
* w / _w\n\n # get index of each y\n y = np.arange(h).repeat(w).reshape(w, -1)\n # get index of each x\n x = np.tile(np.arange(w), (h, 1))\n\n # get coordinate toward x and y of resized image\n y = (y / ah)\n x = (x / aw)\n\n # transfer to int\n ix = np.floor(x).astype(np.int32)\n iy = np.floor(y).astype(np.int32)\n\n # clip index\n ix = np.minimum(ix, _w-2)\n iy = np.minimum(iy, _h-2)\n\n # get distance between original image index and resized image index\n dx = x - ix\n dy = y - iy\n\n dx = np.tile(dx, [_c, 1, 1]).transpose(1, 2, 0)\n dy = np.tile(dy, [_c, 1, 1]).transpose(1, 2, 0)\n \n # resize\n out = (1 - dx) * (1 - dy) * img[iy, ix] + dx * (1 - dy) * img[iy, ix + 1] + (1 - dx) * dy * img[iy + 1, ix] + dx * dy * img[iy + 1, ix + 1]\n out[out > 255] = 255\n\n return out\n\n\n# neural network\nclass NN:\n def __init__(self, ind=2, w=64, w2=64, outd=1, lr=0.1):\n # layer 1 weight\n self.w1 = np.random.normal(0, 1, [ind, w])\n # layer 1 bias\n self.b1 = np.random.normal(0, 1, [w])\n # layer 2 weight\n self.w2 = np.random.normal(0, 1, [w, w2])\n # layer 2 bias\n self.b2 = np.random.normal(0, 1, [w2])\n # output layer weight\n self.wout = np.random.normal(0, 1, [w2, outd])\n # output layer bias\n self.bout = np.random.normal(0, 1, [outd])\n # learning rate\n self.lr = lr\n\n def forward(self, x):\n # input tensor\n self.z1 = x\n # layer 1 output tensor\n self.z2 = sigmoid(np.dot(self.z1, self.w1) + self.b1)\n # layer 2 output tensor\n self.z3 = sigmoid(np.dot(self.z2, self.w2) + self.b2)\n # output layer tensor\n self.out = sigmoid(np.dot(self.z3, self.wout) + self.bout)\n return self.out\n\n def train(self, x, t):\n # backpropagation output layer\n #En = t * np.log(self.out) + (1-t) * np.log(1-self.out)\n En = (self.out - t) * self.out * (1 - self.out)\n # get gradients for weight and bias\n grad_wout = np.dot(self.z3.T, En)\n grad_bout = np.dot(np.ones([En.shape[0]]), En)\n # update weight and bias\n self.wout -= self.lr * grad_wout\n self.bout -= self.lr * grad_bout\n\n # backpropagation inter layer\n # get gradients for weight and bias\n grad_u2 = np.dot(En, self.wout.T) * self.z3 * (1 - self.z3)\n grad_w2 = np.dot(self.z2.T, grad_u2)\n grad_b2 = np.dot(np.ones([grad_u2.shape[0]]), grad_u2)\n # update weight and bias\n self.w2 -= self.lr * grad_w2\n self.b2 -= self.lr * grad_b2\n \n # get gradients for weight and bias\n grad_u1 = np.dot(grad_u2, self.w2.T) * self.z2 * (1 - self.z2)\n grad_w1 = np.dot(self.z1.T, grad_u1)\n grad_b1 = np.dot(np.ones([grad_u1.shape[0]]), grad_u1)\n # update weight and bias\n self.w1 -= self.lr * grad_w1\n self.b1 -= self.lr * grad_b1\n\n# sigmoid\ndef sigmoid(x):\n return 1. / (1. 
+ np.exp(-x))\n\n# train\ndef train_nn(nn, train_x, train_t, iteration_N=10000):\n # each iteration\n for i in range(iteration_N):\n # feed-forward data\n nn.forward(train_x)\n # update parameter\n nn.train(train_x, train_t)\n\n return nn\n\n# test\ndef test_nn(nn, test_x, test_t, pred_th=0.5):\n accuracy_N = 0.\n\n # each data\n for data, t in zip(test_x, test_t):\n # get prediction\n prob = nn.forward(data)\n\n # count accuracy\n pred = 1 if prob >= pred_th else 0\n if t == pred:\n accuracy_N += 1\n\n # get accuracy \n accuracy = accuracy_N / len(db)\n\n print(\"Accuracy >> {} ({} / {})\".format(accuracy, accuracy_N, len(db)))\n\n\n# crop bounding box and make dataset\ndef make_dataset(img, gt, Crop_N=200, L=60, th=0.5, H_size=32):\n # get shape\n H, W, _ = img.shape\n\n # get HOG feature dimension\n HOG_feature_N = ((H_size // 8) ** 2) * 9\n\n # prepare database\n db = np.zeros([Crop_N, HOG_feature_N + 1])\n\n # each crop\n for i in range(Crop_N):\n # get left top x of crop bounding box\n x1 = np.random.randint(W - L)\n # get left top y of crop bounding box\n y1 = np.random.randint(H - L)\n # get right bottom x of crop bounding box\n x2 = x1 + L\n # get right bottom y of crop bounding box\n y2 = y1 + L\n\n # get bounding box\n crop = np.array((x1, y1, x2, y2))\n\n _iou = np.zeros((3,))\n _iou[0] = iou(gt, crop)\n #_iou[1] = iou(gt2, crop)\n #_iou[2] = iou(gt3, crop)\n\n # get label\n if _iou.max() >= th:\n cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 1)\n label = 1\n else:\n cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 1)\n label = 0\n\n # crop area\n crop_area = img[y1:y2, x1:x2]\n\n # resize crop area\n crop_area = resize(crop_area, H_size, H_size)\n\n # get HOG feature\n hog = HOG(crop_area)\n \n # store HOG feature and label\n db[i, :HOG_feature_N] = hog.ravel()\n db[i, -1] = label\n\n return db\n\n# Read image\nimg = cv2.imread(\"../imori.jpg\").astype(np.float32)\n\n# get HOG\nhistogram = HOG(img)\n\n# prepare gt bounding box\ngt = np.array((47, 41, 129, 103), dtype=np.float32)\n\n# get database\ndb = make_dataset(img, gt)\n\n\n# train neural network\n# get input feature dimension\ninput_dim = db.shape[1] - 1\n# prepare train data X\ntrain_x = db[:, :input_dim]\n# prepare train data t\ntrain_t = db[:, -1][..., None]\n\n# prepare neural network\nnn = NN(ind=input_dim, lr=0.01)\n# training\nnn = train_nn(nn, train_x, train_t, iteration_N=10000)\n\n# test\ntest_nn(nn, train_x, train_t)\n\n"
] | [
[
"numpy.random.normal",
"numpy.array",
"numpy.pad",
"numpy.zeros_like",
"numpy.dot",
"numpy.zeros",
"numpy.minimum",
"numpy.random.seed",
"numpy.ones",
"numpy.tile",
"numpy.exp",
"numpy.where",
"numpy.arctan",
"numpy.arange",
"numpy.random.randint",
"numpy.sqrt",
"numpy.floor",
"numpy.maximum"
]
] |
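The answer_96.py record above quantizes gradient orientations into 9 bins of width pi/9 with an explicit loop before accumulating the per-cell HOG histograms. A vectorised sketch of the same binning, assuming the angles have already been folded into [0, pi) as the record does after its arctan correction (the helper name is hypothetical):

```python
import numpy as np

def quantize_orientations(gradient, n_bins=9):
    """Bin gradient angles in [0, pi) into integer indices 0..n_bins-1."""
    d = np.pi / n_bins                       # bin width, pi/9 for 9 bins
    bins = np.floor(gradient / d).astype(int)
    return np.clip(bins, 0, n_bins - 1)      # guard the pi boundary

# Purely illustrative usage:
angles = np.array([0.0, np.pi / 4, np.pi / 2, 3 * np.pi / 4])
print(quantize_orientations(angles))         # -> [0 2 4 6]
```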
romanramirezmena/keras-retinanet | [
"0879dcd2573f04a9ef7f8aa472d8223d58fbd272"
] | [
"keras_retinanet/bin/train.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport warnings\n\nfrom tensorflow import keras\nimport tensorflow as tf\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\n import keras_retinanet.bin # noqa: F401\n __package__ = \"keras_retinanet.bin\"\n\n# Change these to absolute imports if you copy this script outside the keras_retinanet package.\nfrom .. import layers # noqa: F401\nfrom .. import losses\nfrom .. import models\nfrom ..callbacks import RedirectModel\nfrom ..callbacks.eval import Evaluate\nfrom ..models.retinanet import retinanet_bbox\nfrom ..preprocessing.csv_generator import CSVGenerator\nfrom ..preprocessing.kitti import KittiGenerator\nfrom ..preprocessing.open_images import OpenImagesGenerator\nfrom ..preprocessing.pascal_voc import PascalVocGenerator\nfrom ..utils.anchors import make_shapes_callback\nfrom ..utils.config import read_config_file, parse_anchor_parameters, parse_pyramid_levels\nfrom ..utils.gpu import setup_gpu\nfrom ..utils.image import random_visual_effect_generator\nfrom ..utils.model import freeze as freeze_model\nfrom ..utils.tf_version import check_tf_version\nfrom ..utils.transform import random_transform_generator\n\n\ndef makedirs(path):\n # Intended behavior: try to create the directory,\n # pass if the directory exists already, fails otherwise.\n # Meant for Python 2.7/3.n compatibility.\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef model_with_weights(model, weights, skip_mismatch):\n \"\"\" Load weights for model.\n\n Args\n model : The model to load weights for.\n weights : The weights to load.\n skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.\n \"\"\"\n if weights is not None:\n model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)\n return model\n\n\ndef create_models(backbone_retinanet, num_classes, weights, multi_gpu=0,\n freeze_backbone=False, lr=1e-5, optimizer_clipnorm=0.001, config=None):\n \"\"\" Creates three models (model, training_model, prediction_model).\n\n Args\n backbone_retinanet : A function to call to create a retinanet model with a given backbone.\n num_classes : The number of classes to train.\n weights : The weights to load into the model.\n multi_gpu : The number of GPUs to use for training.\n freeze_backbone : If True, disables learning for the backbone.\n config : Config parameters, None indicates the default configuration.\n\n Returns\n model : The base model. This is also the model that is saved in snapshots.\n training_model : The training model. 
If multi_gpu=0, this is identical to model.\n prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).\n \"\"\"\n\n modifier = freeze_model if freeze_backbone else None\n\n # load anchor parameters, or pass None (so that defaults will be used)\n anchor_params = None\n num_anchors = None\n pyramid_levels = None\n if config and 'anchor_parameters' in config:\n anchor_params = parse_anchor_parameters(config)\n num_anchors = anchor_params.num_anchors()\n if config and 'pyramid_levels' in config:\n pyramid_levels = parse_pyramid_levels(config)\n\n # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.\n # optionally wrap in a parallel model\n if multi_gpu > 1:\n from keras.utils import multi_gpu_model\n with tf.device('/cpu:0'):\n model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier, pyramid_levels=pyramid_levels), weights=weights, skip_mismatch=True)\n training_model = multi_gpu_model(model, gpus=multi_gpu)\n else:\n model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier, pyramid_levels=pyramid_levels), weights=weights, skip_mismatch=True)\n training_model = model\n\n # make prediction model\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params, pyramid_levels=pyramid_levels)\n\n # compile model\n training_model.compile(\n loss={\n 'regression' : losses.smooth_l1(),\n 'classification': losses.focal()\n },\n optimizer=keras.optimizers.Adam(lr=lr, clipnorm=optimizer_clipnorm)\n )\n\n return model, training_model, prediction_model\n\n\ndef create_callbacks(model, training_model, prediction_model, validation_generator, args):\n \"\"\" Creates the callbacks to use during training.\n\n Args\n model: The base model.\n training_model: The model that is used for training.\n prediction_model: The model that should be used for validation.\n validation_generator: The generator for creating validation data.\n args: parseargs args object.\n\n Returns:\n A list of callbacks used for training.\n \"\"\"\n callbacks = []\n\n tensorboard_callback = None\n\n if args.tensorboard_dir:\n makedirs(args.tensorboard_dir)\n update_freq = args.tensorboard_freq\n if update_freq not in ['epoch', 'batch']:\n update_freq = int(update_freq)\n tensorboard_callback = keras.callbacks.TensorBoard(\n log_dir = args.tensorboard_dir,\n histogram_freq = 0,\n batch_size = args.batch_size,\n write_graph = True,\n write_grads = False,\n write_images = False,\n update_freq = update_freq,\n embeddings_freq = 0,\n embeddings_layer_names = None,\n embeddings_metadata = None\n )\n\n if args.evaluation and validation_generator:\n if args.dataset_type == 'coco':\n from ..callbacks.coco import CocoEval\n\n # use prediction model for evaluation\n evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)\n else:\n evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)\n evaluation = RedirectModel(evaluation, prediction_model)\n callbacks.append(evaluation)\n\n # save the model\n if args.snapshots:\n # ensure directory created first; otherwise h5py will error after epoch.\n makedirs(args.snapshot_path)\n checkpoint = keras.callbacks.ModelCheckpoint(\n os.path.join(\n args.snapshot_path,\n '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)\n ),\n verbose=1,\n # 
save_best_only=True,\n # monitor=\"mAP\",\n # mode='max'\n )\n checkpoint = RedirectModel(checkpoint, model)\n callbacks.append(checkpoint)\n\n callbacks.append(keras.callbacks.ReduceLROnPlateau(\n monitor = 'loss',\n factor = args.reduce_lr_factor,\n patience = args.reduce_lr_patience,\n verbose = 1,\n mode = 'auto',\n min_delta = 0.0001,\n cooldown = 0,\n min_lr = 0\n ))\n\n if args.evaluation and validation_generator:\n callbacks.append(keras.callbacks.EarlyStopping(\n monitor = 'mAP',\n patience = 5,\n mode = 'max',\n min_delta = 0.01\n ))\n\n if args.tensorboard_dir:\n callbacks.append(tensorboard_callback)\n\n return callbacks\n\n\ndef create_generators(args, preprocess_image):\n \"\"\" Create generators for training and validation.\n\n Args\n args : parseargs object containing configuration for generators.\n preprocess_image : Function that preprocesses an image for the network.\n \"\"\"\n common_args = {\n 'batch_size' : args.batch_size,\n 'config' : args.config,\n 'image_min_side' : args.image_min_side,\n 'image_max_side' : args.image_max_side,\n 'no_resize' : args.no_resize,\n 'preprocess_image' : preprocess_image,\n 'group_method' : args.group_method\n }\n\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n visual_effect_generator = random_visual_effect_generator(\n contrast_range=(0.9, 1.1),\n brightness_range=(-.1, .1),\n hue_range=(-0.05, 0.05),\n saturation_range=(0.95, 1.05)\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n visual_effect_generator = None\n\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n train_generator = CocoGenerator(\n args.coco_path,\n args.train_set,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = CocoGenerator(\n args.coco_path,\n args.validation_set,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'pascal':\n train_generator = PascalVocGenerator(\n args.pascal_path,\n 'train',\n image_extension=args.image_extension,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = PascalVocGenerator(\n args.pascal_path,\n 'val',\n image_extension=args.image_extension,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'csv':\n train_generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n if args.val_annotations:\n validation_generator = CSVGenerator(\n args.val_annotations,\n args.classes,\n shuffle_groups=False,\n **common_args\n )\n else:\n validation_generator = None\n elif args.dataset_type == 'oid':\n train_generator = OpenImagesGenerator(\n args.main_dir,\n subset='train',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = 
OpenImagesGenerator(\n args.main_dir,\n subset='validation',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'kitti':\n train_generator = KittiGenerator(\n args.kitti_path,\n subset='train',\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = KittiGenerator(\n args.kitti_path,\n subset='val',\n shuffle_groups=False,\n **common_args\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return train_generator, validation_generator\n\n\ndef check_args(parsed_args):\n \"\"\" Function to check for inherent contradictions within parsed arguments.\n For example, batch_size < num_gpus\n Intended to raise errors prior to backend initialisation.\n\n Args\n parsed_args: parser.parse_args()\n\n Returns\n parsed_args\n \"\"\"\n\n if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:\n raise ValueError(\n \"Batch size ({}) must be equal to or higher than the number of GPUs ({})\".format(parsed_args.batch_size,\n parsed_args.multi_gpu))\n\n if parsed_args.multi_gpu > 1 and parsed_args.snapshot:\n raise ValueError(\n \"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.\".format(parsed_args.multi_gpu,\n parsed_args.snapshot))\n\n if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:\n raise ValueError(\"Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.\")\n\n if 'resnet' not in parsed_args.backbone:\n warnings.warn('Using experimental backbone {}. Only resnet50 has been properly tested.'.format(parsed_args.backbone))\n\n return parsed_args\n\n\ndef parse_args(args):\n \"\"\" Parse the arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')\n subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')\n subparsers.required = True\n\n coco_parser = subparsers.add_parser('coco')\n coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')\n coco_parser.add_argument('--val-set', help='Name dataset json to train (ie. if default.json write --set_name default).', default='val2017')\n coco_parser.add_argument('--train-set', help='Name dataset json to validate (ie. if default.json write --set_name default).', default='train2017')\n\n pascal_parser = subparsers.add_parser('pascal')\n pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')\n pascal_parser.add_argument('--image-extension', help='Declares the dataset images\\' extension.', default='.jpg')\n\n kitti_parser = subparsers.add_parser('kitti')\n kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. 
/tmp/kitti).')\n\n def csv_list(string):\n return string.split(',')\n\n oid_parser = subparsers.add_parser('oid')\n oid_parser.add_argument('main_dir', help='Path to dataset directory.')\n oid_parser.add_argument('--version', help='The current dataset version is v4.', default='v4')\n oid_parser.add_argument('--labels-filter', help='A list of labels to filter.', type=csv_list, default=None)\n oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')\n oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)\n\n csv_parser = subparsers.add_parser('csv')\n csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')\n csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')\n csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--snapshot', help='Resume training from a snapshot.')\n group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)\n group.add_argument('--weights', help='Initialize the model with weights from a file.')\n group.add_argument('--no-weights', help='Don\\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)\n parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)\n parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)\n parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')\n parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)\n parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')\n parser.add_argument('--initial-epoch', help='Epoch from which to begin the train, useful if resuming from snapshot.', type=int, default=0)\n parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)\n parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)\n parser.add_argument('--lr', help='Learning rate.', type=float, default=1e-5)\n parser.add_argument('--optimizer-clipnorm', help='Clipnorm parameter for optimizer.', type=float, default=0.001)\n parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \\'./snapshots\\')', default='./snapshots')\n parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='') # default='./logs') => https://github.com/tensorflow/tensorflow/pull/34870\n parser.add_argument('--tensorboard-freq', help='Update frequency for Tensorboard output. 
Values \\'epoch\\', \\'batch\\' or int', default='epoch')\n parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')\n parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')\n parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')\n parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')\n parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)\n parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)\n parser.add_argument('--no-resize', help='Don''t rescale the image.', action='store_true')\n parser.add_argument('--config', help='Path to a configuration parameters .ini file.')\n parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')\n parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss', action='store_true')\n parser.add_argument('--reduce-lr-patience', help='Reduce learning rate after validation loss decreases over reduce_lr_patience epochs', type=int, default=2)\n parser.add_argument('--reduce-lr-factor', help='When learning rate is reduced due to reduce_lr_patience, multiply by reduce_lr_factor', type=float, default=0.1)\n parser.add_argument('--group-method', help='Determines how images are grouped together', type=str, default='ratio', choices=['none', 'random', 'ratio'])\n\n # Fit generator arguments\n parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')\n parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)\n parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int, default=10)\n\n return check_args(parser.parse_args(args))\n\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n # create object that stores backbone information\n backbone = models.backbone(args.backbone)\n\n # make sure tensorflow is the minimum required version\n check_tf_version()\n\n # optionally choose specific GPU\n if args.gpu is not None:\n setup_gpu(args.gpu)\n\n # optionally load config parameters\n if args.config:\n args.config = read_config_file(args.config)\n\n # create the generators\n train_generator, validation_generator = create_generators(args, backbone.preprocess_image)\n\n # create the model\n if args.snapshot is not None:\n print('Loading model, this may take a second...')\n model = models.load_model(args.snapshot, backbone_name=args.backbone)\n training_model = model\n anchor_params = None\n pyramid_levels = None\n if args.config and 'anchor_parameters' in args.config:\n anchor_params = parse_anchor_parameters(args.config)\n if args.config and 'pyramid_levels' in args.config:\n pyramid_levels = parse_pyramid_levels(args.config)\n\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params, pyramid_levels=pyramid_levels)\n else:\n weights = args.weights\n # default to imagenet if nothing else is specified\n if weights is None and args.imagenet_weights:\n weights = backbone.download_imagenet()\n\n print('Creating model, this may take a second...')\n model, 
training_model, prediction_model = create_models(\n backbone_retinanet=backbone.retinanet,\n num_classes=train_generator.num_classes(),\n weights=weights,\n multi_gpu=args.multi_gpu,\n freeze_backbone=args.freeze_backbone,\n lr=args.lr,\n optimizer_clipnorm=args.optimizer_clipnorm,\n config=args.config\n )\n\n # print model summary\n print(model.summary())\n\n # this lets the generator compute backbone layer shapes using the actual backbone model\n if 'vgg' in args.backbone or 'densenet' in args.backbone:\n train_generator.compute_shapes = make_shapes_callback(model)\n if validation_generator:\n validation_generator.compute_shapes = train_generator.compute_shapes\n\n # create the callbacks\n callbacks = create_callbacks(\n model,\n training_model,\n prediction_model,\n validation_generator,\n args,\n )\n\n if not args.compute_val_loss:\n validation_generator = None\n\n # start training\n return training_model.fit_generator(\n generator=train_generator,\n steps_per_epoch=args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks,\n workers=args.workers,\n use_multiprocessing=args.multiprocessing,\n max_queue_size=args.max_queue_size,\n validation_data=validation_generator,\n initial_epoch=args.initial_epoch\n )\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.device",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.callbacks.EarlyStopping"
]
] |
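The train.py record above schedules learning-rate decay on the training loss and early stopping on the mAP reported by its evaluation callback. A minimal sketch of just those two tf.keras callbacks with the script's default values (standalone, not the project's create_callbacks; 'mAP' is only available when the evaluation callback is enabled):

```python
from tensorflow import keras

callbacks = [
    # Script defaults: --reduce-lr-factor 0.1, --reduce-lr-patience 2
    keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2,
                                      verbose=1, mode='auto', min_delta=1e-4),
    # Added only when per-epoch evaluation provides an 'mAP' metric
    keras.callbacks.EarlyStopping(monitor='mAP', mode='max',
                                  patience=5, min_delta=0.01),
]
```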
yelojakit/qiskit-terra | [
"05355b103817dcd7aae0839a485b7ce28fb354a5"
] | [
"qiskit/visualization/matplotlib.py"
] | [
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name,missing-docstring,inconsistent-return-statements\n\n\"\"\"mpl circuit visualization backend.\"\"\"\n\nimport collections\nimport fractions\nimport itertools\nimport json\nimport logging\nimport math\n\nimport numpy as np\n\ntry:\n from matplotlib import get_backend\n from matplotlib import patches\n from matplotlib import pyplot as plt\n\n HAS_MATPLOTLIB = True\nexcept ImportError:\n HAS_MATPLOTLIB = False\n\nfrom qiskit.circuit import ControlledGate\nfrom qiskit.visualization import exceptions\nfrom qiskit.visualization.qcstyle import DefaultStyle, BWStyle\nfrom qiskit import user_config\nfrom .tools.pi_check import pi_check\n\nlogger = logging.getLogger(__name__)\n\nWID = 0.65\nHIG = 0.65\nDEFAULT_SCALE = 4.3\nPORDER_GATE = 5\nPORDER_LINE = 3\nPORDER_REGLINE = 2\nPORDER_GRAY = 3\nPORDER_TEXT = 6\nPORDER_SUBP = 4\n\n\nclass Anchor:\n def __init__(self, reg_num, yind, fold):\n self.__yind = yind\n self.__fold = fold\n self.__reg_num = reg_num\n self.__gate_placed = []\n self.gate_anchor = 0\n\n def plot_coord(self, index, gate_width, x_offset):\n h_pos = index % self.__fold + 1\n # check folding\n if self.__fold > 0:\n if h_pos + (gate_width - 1) > self.__fold:\n index += self.__fold - (h_pos - 1)\n x_pos = index % self.__fold + 1 + 0.5 * (gate_width - 1)\n y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)\n else:\n x_pos = index + 1 + 0.5 * (gate_width - 1)\n y_pos = self.__yind\n\n # could have been updated, so need to store\n self.gate_anchor = index\n return x_pos + x_offset, y_pos\n\n def is_locatable(self, index, gate_width):\n hold = [index + i for i in range(gate_width)]\n for p in hold:\n if p in self.__gate_placed:\n return False\n return True\n\n def set_index(self, index, gate_width):\n h_pos = index % self.__fold + 1\n if h_pos + (gate_width - 1) > self.__fold:\n _index = index + self.__fold - (h_pos - 1)\n else:\n _index = index\n for ii in range(gate_width):\n if _index + ii not in self.__gate_placed:\n self.__gate_placed.append(_index + ii)\n self.__gate_placed.sort()\n\n def get_index(self):\n if self.__gate_placed:\n return self.__gate_placed[-1] + 1\n return 0\n\n\nclass MatplotlibDrawer:\n def __init__(self, qregs, cregs, ops,\n scale=1.0, style=None, plot_barriers=True,\n reverse_bits=False, layout=None, fold=25, ax=None):\n\n if not HAS_MATPLOTLIB:\n raise ImportError('The class MatplotlibDrawer needs matplotlib. 
'\n 'Run \"pip install matplotlib\" before.')\n\n self._ast = None\n self._scale = DEFAULT_SCALE * scale\n self._creg = []\n self._qreg = []\n self._registers(cregs, qregs)\n self._ops = ops\n\n self._qreg_dict = collections.OrderedDict()\n self._creg_dict = collections.OrderedDict()\n self._cond = {\n 'n_lines': 0,\n 'xmax': 0,\n 'ymax': 0,\n }\n config = user_config.get_config()\n if config and (style is None):\n config_style = config.get('circuit_mpl_style', 'default')\n if config_style == 'default':\n self._style = DefaultStyle()\n elif config_style == 'bw':\n self._style = BWStyle()\n elif style is False:\n self._style = BWStyle()\n else:\n self._style = DefaultStyle()\n\n self.plot_barriers = plot_barriers\n self.reverse_bits = reverse_bits\n self.layout = layout\n if style:\n if isinstance(style, dict):\n self._style.set_style(style)\n elif isinstance(style, str):\n with open(style, 'r') as infile:\n dic = json.load(infile)\n self._style.set_style(dic)\n if ax is None:\n self.return_fig = True\n self.figure = plt.figure()\n self.figure.patch.set_facecolor(color=self._style.bg)\n self.ax = self.figure.add_subplot(111)\n else:\n self.return_fig = False\n self.ax = ax\n self.figure = ax.get_figure()\n\n # TODO: self._style.fold should be removed after deprecation\n self.fold = self._style.fold or fold\n if self.fold < 2:\n self.fold = -1\n\n self.ax.axis('off')\n self.ax.set_aspect('equal')\n self.ax.tick_params(labelbottom=False, labeltop=False,\n labelleft=False, labelright=False)\n\n self.x_offset = 0\n\n def _registers(self, creg, qreg):\n self._creg = []\n for r in creg:\n self._creg.append(r)\n self._qreg = []\n for r in qreg:\n self._qreg.append(r)\n\n @property\n def ast(self):\n return self._ast\n\n def _custom_multiqubit_gate(self, xy, cxy=None, fc=None, wide=True, text=None,\n subtext=None):\n xpos = min([x[0] for x in xy])\n ypos = min([y[1] for y in xy])\n ypos_max = max([y[1] for y in xy])\n\n if cxy:\n ypos = min([y[1] for y in cxy])\n if wide:\n if subtext:\n boxes_length = round(max([len(text), len(subtext)]) / 7) or 1\n else:\n boxes_length = math.ceil(len(text) / 7) or 1\n wid = WID * 2.5 * boxes_length\n else:\n wid = WID\n\n if fc:\n _fc = fc\n else:\n if self._style.name != 'bw':\n if self._style.gc != DefaultStyle().gc:\n _fc = self._style.gc\n else:\n _fc = self._style.dispcol['multi']\n _ec = self._style.dispcol['multi']\n else:\n _fc = self._style.gc\n\n qubit_span = abs(ypos) - abs(ypos_max) + 1\n height = HIG + (qubit_span - 1)\n box = patches.Rectangle(\n xy=(xpos - 0.5 * wid, ypos - .5 * HIG),\n width=wid, height=height,\n fc=_fc,\n ec=self._style.dispcol['multi'],\n linewidth=1.5, zorder=PORDER_GATE)\n self.ax.add_patch(box)\n # Annotate inputs\n for bit, y in enumerate([x[1] for x in xy]):\n self.ax.text(xpos - 0.45 * wid, y, str(bit), ha='left', va='center',\n fontsize=self._style.fs, color=self._style.gt,\n clip_on=True, zorder=PORDER_TEXT)\n\n if text:\n\n disp_text = text\n if subtext:\n self.ax.text(xpos, ypos + 0.5 * height, disp_text, ha='center',\n va='center', fontsize=self._style.fs,\n color=self._style.gt, clip_on=True,\n zorder=PORDER_TEXT)\n self.ax.text(xpos, ypos + 0.3 * height, subtext, ha='center',\n va='center', fontsize=self._style.sfs,\n color=self._style.sc, clip_on=True,\n zorder=PORDER_TEXT)\n else:\n self.ax.text(xpos, ypos + .5 * (qubit_span - 1), disp_text,\n ha='center',\n va='center',\n fontsize=self._style.fs,\n color=self._style.gt,\n clip_on=True,\n zorder=PORDER_TEXT,\n wrap=True)\n\n def _gate(self, xy, fc=None, 
wide=False, text=None, subtext=None):\n xpos, ypos = xy\n\n if wide:\n if subtext:\n subtext_len = len(subtext)\n if '$\\\\pi$' in subtext:\n pi_count = subtext.count('pi')\n subtext_len = subtext_len - (4 * pi_count)\n\n boxes_wide = round(max(subtext_len, len(text)) / 10, 1) or 1\n wid = WID * 1.5 * boxes_wide\n else:\n boxes_wide = round(len(text) / 10) or 1\n wid = WID * 2.2 * boxes_wide\n if wid < WID:\n wid = WID\n else:\n wid = WID\n if fc:\n _fc = fc\n elif self._style.gc != DefaultStyle().gc:\n _fc = self._style.gc\n elif text and text in self._style.dispcol:\n _fc = self._style.dispcol[text]\n else:\n _fc = self._style.gc\n\n box = patches.Rectangle(\n xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid, height=HIG,\n fc=_fc, ec=self._style.edge_color, linewidth=1.5, zorder=PORDER_GATE)\n self.ax.add_patch(box)\n\n if text:\n font_size = self._style.fs\n sub_font_size = self._style.sfs\n # check if gate is not unitary\n if text in ['reset']:\n disp_color = self._style.not_gate_lc\n sub_color = self._style.not_gate_lc\n font_size = self._style.math_fs\n\n else:\n disp_color = self._style.gt\n sub_color = self._style.sc\n\n if text in self._style.dispcol:\n disp_text = \"${}$\".format(self._style.disptex[text])\n else:\n disp_text = text\n\n if subtext:\n self.ax.text(xpos, ypos + 0.15 * HIG, disp_text, ha='center',\n va='center', fontsize=font_size,\n color=disp_color, clip_on=True,\n zorder=PORDER_TEXT)\n self.ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center',\n va='center', fontsize=sub_font_size,\n color=sub_color, clip_on=True,\n zorder=PORDER_TEXT)\n else:\n self.ax.text(xpos, ypos, disp_text, ha='center', va='center',\n fontsize=font_size,\n color=disp_color,\n clip_on=True,\n zorder=PORDER_TEXT)\n\n def _subtext(self, xy, text):\n xpos, ypos = xy\n\n self.ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top',\n fontsize=self._style.sfs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n\n def _sidetext(self, xy, text):\n xpos, ypos = xy\n\n # 0.15 = the initial gap, each char means it needs to move\n # another 0.0375 over\n xp = xpos + 0.15 + (0.0375 * len(text))\n self.ax.text(xp, ypos + HIG, text, ha='center', va='top',\n fontsize=self._style.sfs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n\n def _line(self, xy0, xy1, lc=None, ls=None, zorder=PORDER_LINE):\n x0, y0 = xy0\n x1, y1 = xy1\n if lc is None:\n linecolor = self._style.lc\n else:\n linecolor = lc\n if ls is None:\n linestyle = 'solid'\n else:\n linestyle = ls\n\n if linestyle == 'doublet':\n theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0))\n dx = 0.05 * WID * np.cos(theta)\n dy = 0.05 * WID * np.sin(theta)\n self.ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy],\n color=linecolor,\n linewidth=2,\n linestyle='solid',\n zorder=zorder)\n self.ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy],\n color=linecolor,\n linewidth=2,\n linestyle='solid',\n zorder=zorder)\n else:\n self.ax.plot([x0, x1], [y0, y1],\n color=linecolor,\n linewidth=2,\n linestyle=linestyle,\n zorder=zorder)\n\n def _measure(self, qxy, cxy, cid):\n qx, qy = qxy\n cx, cy = cxy\n\n self._gate(qxy, fc=self._style.dispcol['meas'])\n\n # add measure symbol\n arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7,\n height=HIG * 0.7, theta1=0, theta2=180, fill=False,\n ec=self._style.not_gate_lc, linewidth=2,\n zorder=PORDER_GATE)\n self.ax.add_patch(arc)\n self.ax.plot([qx, qx + 0.35 * WID],\n [qy - 0.15 * HIG, qy + 0.20 * HIG],\n color=self._style.not_gate_lc, linewidth=2, zorder=PORDER_GATE)\n # arrow\n 
self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style.cc,\n ls=self._style.cline)\n arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID),\n (cx + 0.20 * WID, cy + 0.35 * WID),\n (cx, cy)),\n fc=self._style.cc,\n ec=None)\n self.ax.add_artist(arrowhead)\n # target\n if self._style.bundle:\n self.ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom',\n fontsize=0.8 * self._style.fs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n\n def _conds(self, xy, istrue=False):\n xpos, ypos = xy\n\n if istrue:\n _fc = self._style.lc\n else:\n _fc = self._style.gc\n\n box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,\n fc=_fc, ec=self._style.lc,\n linewidth=1.5, zorder=PORDER_GATE)\n self.ax.add_patch(box)\n\n def _ctrl_qubit(self, xy, fc=None, ec=None):\n if self._style.gc != DefaultStyle().gc:\n fc = self._style.gc\n ec = self._style.gc\n if fc is None:\n fc = self._style.lc\n if ec is None:\n ec = self._style.lc\n xpos, ypos = xy\n box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,\n fc=fc, ec=ec,\n linewidth=1.5, zorder=PORDER_GATE)\n self.ax.add_patch(box)\n\n def _tgt_qubit(self, xy, fc=None, ec=None, ac=None,\n add_width=None):\n if self._style.gc != DefaultStyle().gc:\n fc = self._style.gc\n ec = self._style.gc\n if fc is None:\n fc = self._style.dispcol['target']\n if ec is None:\n ec = self._style.lc\n if ac is None:\n ac = self._style.lc\n if add_width is None:\n add_width = 0.35\n\n linewidth = 2\n\n if self._style.dispcol['target'] == '#ffffff':\n add_width = self._style.colored_add_width\n\n xpos, ypos = xy\n\n box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35,\n fc=fc, ec=ec, linewidth=linewidth,\n zorder=PORDER_GATE)\n self.ax.add_patch(box)\n # add '+' symbol\n self.ax.plot([xpos, xpos], [ypos - add_width * HIG,\n ypos + add_width * HIG],\n color=ac, linewidth=linewidth, zorder=PORDER_GATE + 1)\n\n self.ax.plot([xpos - add_width * HIG, xpos + add_width * HIG],\n [ypos, ypos], color=ac, linewidth=linewidth,\n zorder=PORDER_GATE + 1)\n\n def _swap(self, xy):\n xpos, ypos = xy\n color = self._style.dispcol['swap']\n self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],\n [ypos - 0.20 * WID, ypos + 0.20 * WID],\n color=color, linewidth=2, zorder=PORDER_LINE + 1)\n self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],\n [ypos + 0.20 * WID, ypos - 0.20 * WID],\n color=color, linewidth=2, zorder=PORDER_LINE + 1)\n\n def _barrier(self, config, anc):\n xys = config['coord']\n group = config['group']\n y_reg = []\n for qreg in self._qreg_dict.values():\n if qreg['group'] in group:\n y_reg.append(qreg['y'])\n x0 = xys[0][0]\n\n box_y0 = min(y_reg) - int(anc / self.fold) * (self._cond['n_lines'] + 1) - 0.5\n box_y1 = max(y_reg) - int(anc / self.fold) * (self._cond['n_lines'] + 1) + 0.5\n box = patches.Rectangle(xy=(x0 - 0.3 * WID, box_y0),\n width=0.6 * WID, height=box_y1 - box_y0,\n fc=self._style.bc, ec=None, alpha=0.6,\n linewidth=1.5, zorder=PORDER_GRAY)\n self.ax.add_patch(box)\n for xy in xys:\n xpos, ypos = xy\n self.ax.plot([xpos, xpos], [ypos + 0.5, ypos - 0.5],\n linewidth=1, linestyle=\"dashed\",\n color=self._style.lc,\n zorder=PORDER_TEXT)\n\n def _linefeed_mark(self, xy):\n xpos, ypos = xy\n\n self.ax.plot([xpos - .1, xpos - .1],\n [ypos, ypos - self._cond['n_lines'] + 1],\n color=self._style.lc, zorder=PORDER_LINE)\n self.ax.plot([xpos + .1, xpos + .1],\n [ypos, ypos - self._cond['n_lines'] + 1],\n color=self._style.lc, zorder=PORDER_LINE)\n\n def draw(self, filename=None, verbose=False):\n self._draw_regs()\n 
self._draw_ops(verbose)\n _xl = - self._style.margin[0]\n _xr = self._cond['xmax'] + self._style.margin[1]\n _yb = - self._cond['ymax'] - self._style.margin[2] + 1 - 0.5\n _yt = self._style.margin[3] + 0.5\n self.ax.set_xlim(_xl, _xr)\n self.ax.set_ylim(_yb, _yt)\n # update figure size\n fig_w = _xr - _xl\n fig_h = _yt - _yb\n if self._style.figwidth < 0.0:\n self._style.figwidth = fig_w * self._scale * self._style.fs / 72 / WID\n self.figure.set_size_inches(self._style.figwidth, self._style.figwidth * fig_h / fig_w)\n if filename:\n self.figure.savefig(filename, dpi=self._style.dpi,\n bbox_inches='tight')\n if self.return_fig:\n if get_backend() in ['module://ipykernel.pylab.backend_inline',\n 'nbAgg']:\n plt.close(self.figure)\n return self.figure\n\n def _draw_regs(self):\n\n len_longest_label = 0\n # quantum register\n for ii, reg in enumerate(self._qreg):\n if len(self._qreg) > 1:\n if self.layout is None:\n label = '${{{name}}}_{{{index}}}$'.format(name=reg.register.name,\n index=reg.index)\n else:\n label = '${{{name}}}_{{{index}}} \\\\mapsto {{{physical}}}$'.format(\n name=self.layout[reg.index].register.name,\n index=self.layout[reg.index].index,\n physical=reg.index)\n else:\n label = '${name}$'.format(name=reg.register.name)\n\n if len(label) > len_longest_label:\n len_longest_label = len(label)\n\n pos = -ii\n self._qreg_dict[ii] = {\n 'y': pos,\n 'label': label,\n 'index': reg.index,\n 'group': reg.register\n }\n self._cond['n_lines'] += 1\n # classical register\n if self._creg:\n n_creg = self._creg.copy()\n n_creg.pop(0)\n idx = 0\n y_off = -len(self._qreg)\n for ii, (reg, nreg) in enumerate(itertools.zip_longest(\n self._creg, n_creg)):\n pos = y_off - idx\n if self._style.bundle:\n label = '${}$'.format(reg.register.name)\n self._creg_dict[ii] = {\n 'y': pos,\n 'label': label,\n 'index': reg.index,\n 'group': reg.register\n }\n if not (not nreg or reg.register != nreg.register):\n continue\n else:\n label = '${}_{{{}}}$'.format(reg.register.name, reg.index)\n self._creg_dict[ii] = {\n 'y': pos,\n 'label': label,\n 'index': reg.index,\n 'group': reg.register\n }\n if len(label) > len_longest_label:\n len_longest_label = len(label)\n\n self._cond['n_lines'] += 1\n idx += 1\n\n # 7 is the length of the smallest possible label\n self.x_offset = -.5 + 0.18 * (len_longest_label - 7)\n\n def _draw_regs_sub(self, n_fold, feedline_l=False, feedline_r=False):\n # quantum register\n for qreg in self._qreg_dict.values():\n if n_fold == 0:\n label = qreg['label']\n else:\n label = qreg['label']\n y = qreg['y'] - n_fold * (self._cond['n_lines'] + 1)\n self.ax.text(self.x_offset - 0.2, y, label, ha='right', va='center',\n fontsize=1.25 * self._style.fs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n self._line([self.x_offset + 0.2, y], [self._cond['xmax'], y],\n zorder=PORDER_REGLINE)\n # classical register\n this_creg_dict = {}\n for creg in self._creg_dict.values():\n if n_fold == 0:\n label = creg['label']\n else:\n label = creg['label']\n y = creg['y'] - n_fold * (self._cond['n_lines'] + 1)\n if y not in this_creg_dict.keys():\n this_creg_dict[y] = {'val': 1, 'label': label}\n else:\n this_creg_dict[y]['val'] += 1\n for y, this_creg in this_creg_dict.items():\n # bundle\n if this_creg['val'] > 1:\n self.ax.plot([self.x_offset + 1.1, self.x_offset + 1.2], [y - .1, y + .1],\n color=self._style.cc,\n zorder=PORDER_LINE)\n self.ax.text(self.x_offset + 1.0, y + .1, str(this_creg['val']), ha='left',\n va='bottom',\n fontsize=0.8 * self._style.fs,\n color=self._style.tc,\n 
clip_on=True,\n zorder=PORDER_TEXT)\n self.ax.text(self.x_offset - 0.2, y, this_creg['label'], ha='right', va='center',\n fontsize=1.5 * self._style.fs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n self._line([self.x_offset + 0.2, y], [self._cond['xmax'], y], lc=self._style.cc,\n ls=self._style.cline, zorder=PORDER_REGLINE)\n\n # lf line\n if feedline_r:\n self._linefeed_mark((self.fold + self.x_offset + 1 - 0.1,\n - n_fold * (self._cond['n_lines'] + 1)))\n if feedline_l:\n self._linefeed_mark((self.x_offset + 0.3,\n - n_fold * (self._cond['n_lines'] + 1)))\n\n def _draw_ops(self, verbose=False):\n _wide_gate = ['u2', 'u3', 'cu2', 'cu3', 'unitary', 'r']\n _barriers = {'coord': [], 'group': []}\n\n #\n # generate coordinate manager\n #\n q_anchors = {}\n for key, qreg in self._qreg_dict.items():\n q_anchors[key] = Anchor(reg_num=self._cond['n_lines'],\n yind=qreg['y'],\n fold=self.fold)\n c_anchors = {}\n for key, creg in self._creg_dict.items():\n c_anchors[key] = Anchor(reg_num=self._cond['n_lines'],\n yind=creg['y'],\n fold=self.fold)\n #\n # draw gates\n #\n prev_anc = -1\n for layer in self._ops:\n layer_width = 1\n\n for op in layer:\n\n if op.name in _wide_gate:\n if layer_width < 2:\n layer_width = 2\n if op.type == 'op' and hasattr(op.op, 'params'):\n param = self.param_parse(op.op.params)\n if '$\\\\pi$' in param:\n pi_count = param.count('pi')\n len_param = len(param) - (4 * pi_count)\n else:\n len_param = len(param)\n if len_param > len(op.name):\n box_width = math.floor(len(param) / 10)\n if op.name == 'unitary':\n box_width = 2\n # If more than 4 characters min width is 2\n if box_width <= 1:\n box_width = 2\n if layer_width < box_width:\n if box_width > 2:\n layer_width = box_width\n else:\n layer_width = 2\n continue\n\n # if custom gate with a longer than standard name determine\n # width\n elif op.name not in ['barrier', 'snapshot', 'load', 'save',\n 'noise', 'cswap', 'swap', 'measure'] and len(op.name) >= 4:\n box_width = math.ceil(len(op.name) / 6)\n\n # handle params/subtext longer than op names\n if op.type == 'op' and hasattr(op.op, 'params'):\n param = self.param_parse(op.op.params)\n if '$\\\\pi$' in param:\n pi_count = param.count('pi')\n len_param = len(param) - (4 * pi_count)\n else:\n len_param = len(param)\n if len_param > len(op.name):\n box_width = math.floor(len(param) / 8)\n # If more than 4 characters min width is 2\n if box_width <= 1:\n box_width = 2\n if layer_width < box_width:\n if box_width > 2:\n layer_width = box_width * 2\n else:\n layer_width = 2\n continue\n # If more than 4 characters min width is 2\n layer_width = math.ceil(box_width * WID * 2.5)\n\n this_anc = prev_anc + 1\n\n for op in layer:\n\n _iswide = op.name in _wide_gate\n if op.name not in ['barrier', 'snapshot', 'load', 'save',\n 'noise', 'cswap', 'swap', 'measure',\n 'reset'] and len(op.name) >= 4:\n _iswide = True\n\n # get qreg index\n q_idxs = []\n for qarg in op.qargs:\n for index, reg in self._qreg_dict.items():\n if (reg['group'] == qarg.register and\n reg['index'] == qarg.index):\n q_idxs.append(index)\n break\n\n # get creg index\n c_idxs = []\n for carg in op.cargs:\n for index, reg in self._creg_dict.items():\n if (reg['group'] == carg.register and\n reg['index'] == carg.index):\n c_idxs.append(index)\n break\n\n # Only add the gate to the anchors if it is going to be plotted.\n # This prevents additional blank wires at the end of the line if\n # the last instruction is a barrier type\n if self.plot_barriers or \\\n op.name not in ['barrier', 
'snapshot', 'load', 'save',\n 'noise']:\n\n for ii in q_idxs:\n q_anchors[ii].set_index(this_anc, layer_width)\n\n # qreg coordinate\n q_xy = [q_anchors[ii].plot_coord(this_anc, layer_width, self.x_offset)\n for ii in q_idxs]\n # creg coordinate\n c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width, self.x_offset)\n for ii in c_idxs]\n # bottom and top point of qreg\n qreg_b = min(q_xy, key=lambda xy: xy[1])\n qreg_t = max(q_xy, key=lambda xy: xy[1])\n\n # update index based on the value from plotting\n this_anc = q_anchors[q_idxs[0]].gate_anchor\n\n if verbose:\n print(op)\n\n if op.type == 'op' and hasattr(op.op, 'params'):\n param = self.param_parse(op.op.params)\n else:\n param = None\n # conditional gate\n if op.condition:\n c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width, self.x_offset) for\n ii in self._creg_dict]\n mask = 0\n for index, cbit in enumerate(self._creg):\n if cbit.register == op.condition[0]:\n mask |= (1 << index)\n val = op.condition[1]\n # cbit list to consider\n fmt_c = '{{:0{}b}}'.format(len(c_xy))\n cmask = list(fmt_c.format(mask))[::-1]\n # value\n fmt_v = '{{:0{}b}}'.format(cmask.count('1'))\n vlist = list(fmt_v.format(val))[::-1]\n # plot conditionals\n v_ind = 0\n xy_plot = []\n for xy, m in zip(c_xy, cmask):\n if m == '1':\n if xy not in xy_plot:\n if vlist[v_ind] == '1' or self._style.bundle:\n self._conds(xy, istrue=True)\n else:\n self._conds(xy, istrue=False)\n xy_plot.append(xy)\n v_ind += 1\n creg_b = sorted(xy_plot, key=lambda xy: xy[1])[0]\n self._subtext(creg_b, hex(val))\n self._line(qreg_t, creg_b, lc=self._style.cc,\n ls=self._style.cline)\n #\n # draw special gates\n #\n if op.name == 'measure':\n vv = self._creg_dict[c_idxs[0]]['index']\n self._measure(q_xy[0], c_xy[0], vv)\n elif op.name in ['barrier', 'snapshot', 'load', 'save',\n 'noise']:\n _barriers = {'coord': [], 'group': []}\n for index, qbit in enumerate(q_idxs):\n q_group = self._qreg_dict[qbit]['group']\n\n if q_group not in _barriers['group']:\n _barriers['group'].append(q_group)\n _barriers['coord'].append(q_xy[index])\n if self.plot_barriers:\n self._barrier(_barriers, this_anc)\n elif op.name == 'initialize':\n vec = '[%s]' % param\n self._custom_multiqubit_gate(q_xy, wide=_iswide,\n text=\"|psi>\",\n subtext=vec)\n elif op.name == 'unitary':\n # TODO(mtreinish): Look into adding the unitary to the\n # subtext\n self._custom_multiqubit_gate(q_xy, wide=_iswide,\n text=\"Unitary\")\n elif isinstance(op.op, ControlledGate) and op.name not in [\n 'ccx', 'cx', 'cz', 'cu1', 'ccz', 'cu3', 'crz',\n 'cswap']:\n disp = op.op.base_gate.name\n num_ctrl_qubits = op.op.num_ctrl_qubits\n num_qargs = len(q_xy) - num_ctrl_qubits\n\n for i in range(num_ctrl_qubits):\n self._ctrl_qubit(q_xy[i], fc=self._style.dispcol['multi'],\n ec=self._style.dispcol['multi'])\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t, lc=self._style.dispcol['multi'])\n if num_qargs == 1:\n self._gate(q_xy[-1], wide=_iswide, text=disp)\n else:\n self._custom_multiqubit_gate(\n q_xy[num_ctrl_qubits:], wide=_iswide, text=disp)\n\n #\n # draw single qubit gates\n #\n elif len(q_xy) == 1:\n disp = op.name\n if param:\n self._gate(q_xy[0], wide=_iswide, text=disp,\n subtext=str(param))\n else:\n self._gate(q_xy[0], wide=_iswide, text=disp)\n #\n # draw multi-qubit gates (n=2)\n #\n elif len(q_xy) == 2:\n # cx\n if op.name == 'cx':\n if self._style.dispcol['cx'] != '#ffffff':\n add_width = self._style.colored_add_width\n else:\n add_width = None\n self._ctrl_qubit(q_xy[0], fc=self._style.dispcol['cx'],\n 
ec=self._style.dispcol['cx'])\n if self._style.name != 'bw':\n self._tgt_qubit(q_xy[1], fc=self._style.dispcol['cx'],\n ec=self._style.dispcol['cx'],\n ac=self._style.dispcol['target'],\n add_width=add_width)\n else:\n self._tgt_qubit(q_xy[1], fc=self._style.dispcol['target'],\n ec=self._style.dispcol['cx'],\n ac=self._style.dispcol['cx'],\n add_width=add_width)\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t, lc=self._style.dispcol['cx'])\n # cz for latexmode\n elif op.name == 'cz':\n disp = op.name.replace('c', '')\n if self._style.name != 'bw':\n color = self._style.dispcol['multi']\n self._ctrl_qubit(q_xy[0],\n fc=color,\n ec=color)\n else:\n self._ctrl_qubit(q_xy[0])\n self._gate(q_xy[1], wide=_iswide, text=disp, fc=color)\n # add qubit-qubit wiring\n if self._style.name != 'bw':\n self._line(qreg_b, qreg_t,\n lc=self._style.dispcol['multi'])\n else:\n self._line(qreg_b, qreg_t, zorder=PORDER_LINE + 1)\n # control gate\n elif op.name in ['cy', 'ch', 'cu3', 'crz']:\n disp = op.name.replace('c', '')\n\n color = None\n if self._style.name != 'bw':\n color = self._style.dispcol['multi']\n\n self._ctrl_qubit(q_xy[0], fc=color, ec=color)\n if param:\n self._gate(q_xy[1], wide=_iswide,\n text=disp,\n fc=color,\n subtext='{}'.format(param))\n else:\n self._gate(q_xy[1], wide=_iswide, text=disp,\n fc=color)\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t, lc=color)\n\n # rzz gate\n elif op.name == 'rzz':\n self._ctrl_qubit(q_xy[0])\n self._ctrl_qubit(q_xy[1])\n self._sidetext(qreg_b, text='zz({})'.format(param))\n\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t)\n\n # cu1 gate\n elif op.name == 'cu1':\n self._ctrl_qubit(q_xy[0])\n self._ctrl_qubit(q_xy[1])\n self._sidetext(qreg_b, text='U1 ({})'.format(param))\n\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t)\n\n # swap gate\n elif op.name == 'swap':\n self._swap(q_xy[0])\n self._swap(q_xy[1])\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t, lc=self._style.dispcol['swap'])\n # Custom gate\n else:\n self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,\n text=op.name)\n #\n # draw multi-qubit gates (n=3)\n #\n elif len(q_xy) == 3:\n # cswap gate\n if op.name == 'cswap':\n self._ctrl_qubit(q_xy[0],\n fc=self._style.dispcol['multi'],\n ec=self._style.dispcol['multi'])\n self._swap(q_xy[1])\n self._swap(q_xy[2])\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t, lc=self._style.dispcol['multi'])\n # ccx gate\n elif op.name == 'ccx':\n self._ctrl_qubit(q_xy[0], fc=self._style.dispcol['multi'],\n ec=self._style.dispcol['multi'])\n self._ctrl_qubit(q_xy[1], fc=self._style.dispcol['multi'],\n ec=self._style.dispcol['multi'])\n if self._style.name != 'bw':\n self._tgt_qubit(q_xy[2], fc=self._style.dispcol['multi'],\n ec=self._style.dispcol['multi'],\n ac=self._style.dispcol['target'])\n else:\n self._tgt_qubit(q_xy[2], fc=self._style.dispcol['target'],\n ec=self._style.dispcol['multi'],\n ac=self._style.dispcol['multi'])\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t, lc=self._style.dispcol['multi'])\n # custom gate\n else:\n self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,\n text=op.name)\n\n # draw custom multi-qubit gate\n elif len(q_xy) > 3:\n self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,\n text=op.name)\n else:\n logger.critical('Invalid gate %s', op)\n raise exceptions.VisualizationError('invalid gate {}'.format(op))\n\n # adjust the column if there have been barriers encountered, but not plotted\n barrier_offset = 0\n if not self.plot_barriers:\n # only adjust if everything in the 
layer wasn't plotted\n barrier_offset = -1 if all([op.name in\n ['barrier', 'snapshot', 'load', 'save', 'noise']\n for op in layer]) else 0\n prev_anc = this_anc + layer_width + barrier_offset - 1\n #\n # adjust window size and draw horizontal lines\n #\n anchors = [q_anchors[ii].get_index() for ii in self._qreg_dict]\n if anchors:\n max_anc = max(anchors)\n else:\n max_anc = 0\n n_fold = max(0, max_anc - 1) // self.fold\n # window size\n if max_anc > self.fold > 0:\n self._cond['xmax'] = self.fold + 1 + self.x_offset\n self._cond['ymax'] = (n_fold + 1) * (self._cond['n_lines'] + 1) - 1\n else:\n self._cond['xmax'] = max_anc + 1 + self.x_offset\n self._cond['ymax'] = self._cond['n_lines']\n # add horizontal lines\n for ii in range(n_fold + 1):\n feedline_r = (n_fold > 0 and n_fold > ii)\n feedline_l = (ii > 0)\n self._draw_regs_sub(ii, feedline_l, feedline_r)\n # draw gate number\n if self._style.index:\n for ii in range(max_anc):\n if self.fold > 0:\n x_coord = ii % self.fold + 1\n y_coord = - (ii // self.fold) * (self._cond['n_lines'] + 1) + 0.7\n else:\n x_coord = ii + 1\n y_coord = 0.7\n self.ax.text(x_coord, y_coord, str(ii + 1), ha='center',\n va='center', fontsize=self._style.sfs,\n color=self._style.tc, clip_on=True,\n zorder=PORDER_TEXT)\n\n @staticmethod\n def param_parse(v):\n # create an empty list to store the parameters in\n param_parts = [None] * len(v)\n for i, e in enumerate(v):\n try:\n param_parts[i] = pi_check(e, output='mpl', ndigits=3)\n except TypeError:\n param_parts[i] = str(e)\n\n if param_parts[i].startswith('-'):\n param_parts[i] = '$-$' + param_parts[i][1:]\n\n param_parts = ', '.join(param_parts)\n return param_parts\n\n @staticmethod\n def format_numeric(val, tol=1e-5):\n if isinstance(val, complex):\n return str(val)\n elif complex(val).imag != 0:\n val = complex(val)\n abs_val = abs(val)\n if math.isclose(abs_val, 0.0, abs_tol=1e-100):\n return '0'\n if math.isclose(math.fmod(abs_val, 1.0),\n 0.0, abs_tol=tol) and 0.5 < abs_val < 9999.5:\n return str(int(val))\n if 0.1 <= abs_val < 100.0:\n return '{:.2f}'.format(val)\n return '{:.1e}'.format(val)\n\n @staticmethod\n def fraction(val, base=np.pi, n=100, tol=1e-5):\n abs_val = abs(val)\n for i in range(1, n):\n for j in range(1, n):\n if math.isclose(abs_val, i / j * base, rel_tol=tol):\n if val < 0:\n i *= -1\n return fractions.Fraction(i, j)\n return None\n"
] | [
[
"numpy.sin",
"matplotlib.get_backend",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.patches.Circle",
"numpy.abs",
"numpy.cos",
"matplotlib.patches.Rectangle",
"matplotlib.patches.Arc"
]
] |
kipoi/kipoi | [
"56825e141daa2cb7d94ac33809603b75704d12a7"
] | [
"example/models/tal1_model/dataloader.py"
] | [
"\"\"\"DeepSEA dataloader\n\"\"\"\n# python2, 3 compatibility\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport pandas as pd\nimport pybedtools\nfrom pybedtools import BedTool\nfrom kipoi.data import Dataset\nfrom kipoi.metadata import GenomicRanges\nimport linecache\nimport pyfaidx\n# --------------------------------------------\n\n\nclass BedToolLinecache(BedTool):\n \"\"\"Fast BedTool accessor by Ziga Avsec\n\n Normal BedTools loops through the whole file to get the\n line of interest. Hence the access it o(n)\n \"\"\"\n\n def __getitem__(self, idx):\n line = linecache.getline(self.fn, idx + 1)\n return pybedtools.create_interval_from_list(line.strip().split(\"\\t\"))\n\ndef to_onehot(seq):\n x = np.zeros((seq.shape[0], 4), dtype=np.float32)\n alphabet = [\"A\", \"C\", \"G\", \"T\"]\n for i in range(len(alphabet)):\n sel = np.where(seq == alphabet[i])\n x[sel[0], i] = 1\n return x\n\nclass FastaExtractor(object):\n \"\"\"\n Class by Roman Kreuzhuber\n Fasta extractor using pyfaidx. Complies with genomelake.extractors.FastaExtractor I/O as used here.\n \"\"\"\n\n def __init__(self, fasta_file_path):\n self.faidx_obj = pyfaidx.Fasta(fasta_file_path)\n\n def __call__(self, intervals):\n assert isinstance(intervals, list)\n one_hots = []\n for interval in intervals:\n # pyfaidx uses 1-based cooridnates!\n seq = np.array(list(self.faidx_obj.get_seq(interval.chrom ,interval.start+1,interval.end).seq.upper()))\n one_hots.append(to_onehot(seq))\n return np.array(one_hots)\n\n\nclass SeqDataset(Dataset):\n \"\"\"\n Args:\n intervals_file: bed3 file containing intervals\n fasta_file: file path; Genome sequence\n target_file: file path; path to the targets in the csv format\n \"\"\"\n\n SEQ_WIDTH = 500\n\n def __init__(self, intervals_file, fasta_file,\n target_file=None, use_linecache=False):\n\n # intervals\n if use_linecache:\n self.bt = BedToolLinecache(intervals_file)\n else:\n self.bt = BedTool(intervals_file)\n self.fasta_file = fasta_file\n self.fasta_extractor = None\n\n # Targets\n if target_file is not None:\n self.targets = pd.read_csv(target_file)\n else:\n self.targets = None\n\n def __len__(self):\n return len(self.bt)\n\n def __getitem__(self, idx):\n if self.fasta_extractor is None:\n self.fasta_extractor = FastaExtractor(self.fasta_file)\n interval = self.bt[idx]\n\n if interval.stop - interval.start != self.SEQ_WIDTH:\n center = (interval.start + interval.stop) // 2\n interval.start = center - self.SEQ_WIDTH // 2\n interval.end = center + self.SEQ_WIDTH // 2 + self.SEQ_WIDTH % 2\n\n if self.targets is not None:\n y = self.targets.iloc[idx].values\n else:\n y = {}\n\n # Run the fasta extractor\n seq = np.squeeze(self.fasta_extractor([interval]), axis=0)\n return {\n \"inputs\": seq,\n \"targets\": y,\n \"metadata\": {\n \"ranges\": GenomicRanges.from_interval(interval)\n }\n }\n"
] | [
[
"numpy.where",
"numpy.array",
"numpy.zeros",
"pandas.read_csv"
]
] |
brs1977/page_dewarp | [
"b642fe921ec561570611a24e942e365258cf448a"
] | [
"page_dewarp.py"
] | [
"#!/usr/bin/env python\n######################################################################\n# page_dewarp.py - Proof-of-concept of page-dewarping based on a\n# \"cubic sheet\" model. Requires OpenCV (version 3 or greater),\n# PIL/Pillow, and scipy.optimize.\n######################################################################\n# Author: Matt Zucker\n# Date: July 2016\n# License: MIT License (see LICENSE.txt)\n######################################################################\n\nimport os\nimport sys\nimport datetime\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport scipy.optimize\n\n# for some reason pylint complains about cv2 members being undefined :(\n# pylint: disable=E1101\n\nPAGE_MARGIN_X = 50 # reduced px to ignore near L/R edge\nPAGE_MARGIN_Y = 20 # reduced px to ignore near T/B edge\n\nOUTPUT_ZOOM = 1.0 # how much to zoom output relative to *original* image\nOUTPUT_DPI = 300 # just affects stated DPI of PNG, not appearance\nREMAP_DECIMATE = 16 # downscaling factor for remapping image\n\nADAPTIVE_WINSZ = 55 # window size for adaptive threshold in reduced px\n\nTEXT_MIN_WIDTH = 15 # min reduced px width of detected text contour\nTEXT_MIN_HEIGHT = 2 # min reduced px height of detected text contour\nTEXT_MIN_ASPECT = 1.5 # filter out text contours below this w/h ratio\nTEXT_MAX_THICKNESS = 10 # max reduced px thickness of detected text contour\n\nEDGE_MAX_OVERLAP = 1.0 # max reduced px horiz. overlap of contours in span\nEDGE_MAX_LENGTH = 100.0 # max reduced px length of edge connecting contours\nEDGE_ANGLE_COST = 10.0 # cost of angles in edges (tradeoff vs. length)\nEDGE_MAX_ANGLE = 7.5 # maximum change in angle allowed between contours\n\nRVEC_IDX = slice(0, 3) # index of rvec in params vector\nTVEC_IDX = slice(3, 6) # index of tvec in params vector\nCUBIC_IDX = slice(6, 8) # index of cubic slopes in params vector\n\nSPAN_MIN_WIDTH = 30 # minimum reduced px width for span\nSPAN_PX_PER_STEP = 20 # reduced px spacing for sampling along spans\nFOCAL_LENGTH = 1.2 # normalized focal length of camera\n\nDEBUG_LEVEL = 0 # 0=none, 1=some, 2=lots, 3=all\nDEBUG_OUTPUT = 'file' # file, screen, both\n\nWINDOW_NAME = 'Dewarp' # Window name for visualization\n\n# nice color palette for visualizing contours, etc.\nCCOLORS = [\n (255, 0, 0),\n (255, 63, 0),\n (255, 127, 0),\n (255, 191, 0),\n (255, 255, 0),\n (191, 255, 0),\n (127, 255, 0),\n (63, 255, 0),\n (0, 255, 0),\n (0, 255, 63),\n (0, 255, 127),\n (0, 255, 191),\n (0, 255, 255),\n (0, 191, 255),\n (0, 127, 255),\n (0, 63, 255),\n (0, 0, 255),\n (63, 0, 255),\n (127, 0, 255),\n (191, 0, 255),\n (255, 0, 255),\n (255, 0, 191),\n (255, 0, 127),\n (255, 0, 63),\n]\n\n# default intrinsic parameter matrix\nK = np.array([\n [FOCAL_LENGTH, 0, 0],\n [0, FOCAL_LENGTH, 0],\n [0, 0, 1]], dtype=np.float32)\n\n\ndef debug_show(name, step, text, display):\n\n if DEBUG_OUTPUT != 'screen':\n filetext = text.replace(' ', '_')\n outfile = name + '_debug_' + str(step) + '_' + filetext + '.png'\n cv2.imwrite(outfile, display)\n\n if DEBUG_OUTPUT != 'file':\n\n image = display.copy()\n height = image.shape[0]\n\n cv2.putText(image, text, (16, height-16),\n cv2.FONT_HERSHEY_SIMPLEX, 1.0,\n (0, 0, 0), 3, cv2.LINE_AA)\n\n cv2.putText(image, text, (16, height-16),\n cv2.FONT_HERSHEY_SIMPLEX, 1.0,\n (255, 255, 255), 1, cv2.LINE_AA)\n\n cv2.imshow(WINDOW_NAME, image)\n\n while cv2.waitKey(5) < 0:\n pass\n\n\ndef round_nearest_multiple(i, factor):\n i = int(i)\n rem = i % factor\n if not rem:\n return i\n else:\n return i + factor - rem\n\n\ndef 
pix2norm(shape, pts):\n height, width = shape[:2]\n scl = 2.0/(max(height, width))\n offset = np.array([width, height], dtype=pts.dtype).reshape((-1, 1, 2))*0.5\n return (pts - offset) * scl\n\n\ndef norm2pix(shape, pts, as_integer):\n height, width = shape[:2]\n scl = max(height, width)*0.5\n offset = np.array([0.5*width, 0.5*height],\n dtype=pts.dtype).reshape((-1, 1, 2))\n rval = pts * scl + offset\n if as_integer:\n return (rval + 0.5).astype(int)\n else:\n return rval\n\n\ndef fltp(point):\n return tuple(point.astype(int).flatten())\n\n\ndef draw_correspondences(img, dstpoints, projpts):\n\n display = img.copy()\n dstpoints = norm2pix(img.shape, dstpoints, True)\n projpts = norm2pix(img.shape, projpts, True)\n\n for pts, color in [(projpts, (255, 0, 0)),\n (dstpoints, (0, 0, 255))]:\n\n for point in pts:\n cv2.circle(display, fltp(point), 3, color, -1, cv2.LINE_AA)\n\n for point_a, point_b in zip(projpts, dstpoints):\n cv2.line(display, fltp(point_a), fltp(point_b),\n (255, 255, 255), 1, cv2.LINE_AA)\n\n return display\n\n\ndef get_default_params(corners, ycoords, xcoords):\n\n # page width and height\n page_width = np.linalg.norm(corners[1] - corners[0])\n page_height = np.linalg.norm(corners[-1] - corners[0])\n rough_dims = (page_width, page_height)\n\n # our initial guess for the cubic has no slope\n cubic_slopes = [0.0, 0.0]\n\n # object points of flat page in 3D coordinates\n corners_object3d = np.array([\n [0, 0, 0],\n [page_width, 0, 0],\n [page_width, page_height, 0],\n [0, page_height, 0]])\n\n # estimate rotation and translation from four 2D-to-3D point\n # correspondences\n _, rvec, tvec = cv2.solvePnP(corners_object3d,\n corners, K, np.zeros(5))\n\n span_counts = [len(xc) for xc in xcoords]\n\n params = np.hstack((np.array(rvec).flatten(),\n np.array(tvec).flatten(),\n np.array(cubic_slopes).flatten(),\n ycoords.flatten()) +\n tuple(xcoords))\n\n return rough_dims, span_counts, params\n\n\ndef project_xy(xy_coords, pvec):\n\n # get cubic polynomial coefficients given\n #\n # f(0) = 0, f'(0) = alpha\n # f(1) = 0, f'(1) = beta\n\n alpha, beta = tuple(pvec[CUBIC_IDX])\n\n poly = np.array([\n alpha + beta,\n -2*alpha - beta,\n alpha,\n 0])\n\n xy_coords = xy_coords.reshape((-1, 2))\n z_coords = np.polyval(poly, xy_coords[:, 0])\n\n objpoints = np.hstack((xy_coords, z_coords.reshape((-1, 1))))\n\n image_points, _ = cv2.projectPoints(objpoints,\n pvec[RVEC_IDX],\n pvec[TVEC_IDX],\n K, np.zeros(5))\n\n return image_points\n\n\ndef project_keypoints(pvec, keypoint_index):\n\n xy_coords = pvec[keypoint_index]\n xy_coords[0, :] = 0\n\n return project_xy(xy_coords, pvec)\n\n\ndef resize_to_screen(src, maxw=1280, maxh=700, copy=False):\n\n height, width = src.shape[:2]\n\n scl_x = float(width)/maxw\n scl_y = float(height)/maxh\n\n scl = int(np.ceil(max(scl_x, scl_y)))\n\n if scl > 1.0:\n inv_scl = 1.0/scl\n img = cv2.resize(src, (0, 0), None, inv_scl, inv_scl, cv2.INTER_AREA)\n elif copy:\n img = src.copy()\n else:\n img = src\n\n return img\n\n\ndef box(width, height):\n return np.ones((height, width), dtype=np.uint8)\n\n\ndef get_page_extents(small):\n\n height, width = small.shape[:2]\n\n xmin = PAGE_MARGIN_X\n ymin = PAGE_MARGIN_Y\n xmax = width-PAGE_MARGIN_X\n ymax = height-PAGE_MARGIN_Y\n\n page = np.zeros((height, width), dtype=np.uint8)\n cv2.rectangle(page, (xmin, ymin), (xmax, ymax), (255, 255, 255), -1)\n\n outline = np.array([\n [xmin, ymin],\n [xmin, ymax],\n [xmax, ymax],\n [xmax, ymin]])\n\n return page, outline\n\n\ndef get_mask(name, small, pagemask, masktype):\n\n 
sgray = cv2.cvtColor(small, cv2.COLOR_RGB2GRAY)\n\n if masktype == 'text':\n\n mask = cv2.adaptiveThreshold(sgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY_INV,\n ADAPTIVE_WINSZ,\n 25)\n\n if DEBUG_LEVEL >= 3:\n debug_show(name, 0.1, 'thresholded', mask)\n\n mask = cv2.dilate(mask, box(9, 1))\n\n if DEBUG_LEVEL >= 3:\n debug_show(name, 0.2, 'dilated', mask)\n\n mask = cv2.erode(mask, box(1, 3))\n\n if DEBUG_LEVEL >= 3:\n debug_show(name, 0.3, 'eroded', mask)\n\n else:\n\n mask = cv2.adaptiveThreshold(sgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY_INV,\n ADAPTIVE_WINSZ,\n 7)\n\n if DEBUG_LEVEL >= 3:\n debug_show(name, 0.4, 'thresholded', mask)\n\n mask = cv2.erode(mask, box(3, 1), iterations=3)\n\n if DEBUG_LEVEL >= 3:\n debug_show(name, 0.5, 'eroded', mask)\n\n mask = cv2.dilate(mask, box(8, 2))\n\n if DEBUG_LEVEL >= 3:\n debug_show(name, 0.6, 'dilated', mask)\n\n return np.minimum(mask, pagemask)\n\n\ndef interval_measure_overlap(int_a, int_b):\n return min(int_a[1], int_b[1]) - max(int_a[0], int_b[0])\n\n\ndef angle_dist(angle_b, angle_a):\n\n diff = angle_b - angle_a\n\n while diff > np.pi:\n diff -= 2*np.pi\n\n while diff < -np.pi:\n diff += 2*np.pi\n\n return np.abs(diff)\n\n\ndef blob_mean_and_tangent(contour):\n\n moments = cv2.moments(contour)\n\n area = moments['m00']\n\n mean_x = moments['m10'] / area\n mean_y = moments['m01'] / area\n\n moments_matrix = np.array([\n [moments['mu20'], moments['mu11']],\n [moments['mu11'], moments['mu02']]\n ]) / area\n\n _, svd_u, _ = cv2.SVDecomp(moments_matrix)\n\n center = np.array([mean_x, mean_y])\n tangent = svd_u[:, 0].flatten().copy()\n\n return center, tangent\n\n\nclass ContourInfo(object):\n\n def __init__(self, contour, rect, mask):\n\n self.contour = contour\n self.rect = rect\n self.mask = mask\n\n self.center, self.tangent = blob_mean_and_tangent(contour)\n\n self.angle = np.arctan2(self.tangent[1], self.tangent[0])\n\n clx = [self.proj_x(point) for point in contour]\n\n lxmin = min(clx)\n lxmax = max(clx)\n\n self.local_xrng = (lxmin, lxmax)\n\n self.point0 = self.center + self.tangent * lxmin\n self.point1 = self.center + self.tangent * lxmax\n\n self.pred = None\n self.succ = None\n\n def proj_x(self, point):\n return np.dot(self.tangent, point.flatten()-self.center)\n\n def local_overlap(self, other):\n xmin = self.proj_x(other.point0)\n xmax = self.proj_x(other.point1)\n return interval_measure_overlap(self.local_xrng, (xmin, xmax))\n\n\ndef generate_candidate_edge(cinfo_a, cinfo_b):\n\n # we want a left of b (so a's successor will be b and b's\n # predecessor will be a) make sure right endpoint of b is to the\n # right of left endpoint of a.\n if cinfo_a.point0[0] > cinfo_b.point1[0]:\n tmp = cinfo_a\n cinfo_a = cinfo_b\n cinfo_b = tmp\n\n x_overlap_a = cinfo_a.local_overlap(cinfo_b)\n x_overlap_b = cinfo_b.local_overlap(cinfo_a)\n\n overall_tangent = cinfo_b.center - cinfo_a.center\n overall_angle = np.arctan2(overall_tangent[1], overall_tangent[0])\n\n delta_angle = max(angle_dist(cinfo_a.angle, overall_angle),\n angle_dist(cinfo_b.angle, overall_angle)) * 180/np.pi\n\n # we want the largest overlap in x to be small\n x_overlap = max(x_overlap_a, x_overlap_b)\n\n dist = np.linalg.norm(cinfo_b.point0 - cinfo_a.point1)\n\n if (dist > EDGE_MAX_LENGTH or\n x_overlap > EDGE_MAX_OVERLAP or\n delta_angle > EDGE_MAX_ANGLE):\n return None\n else:\n score = dist + delta_angle*EDGE_ANGLE_COST\n return (score, cinfo_a, cinfo_b)\n\n\ndef make_tight_mask(contour, xmin, ymin, width, height):\n\n tight_mask = 
np.zeros((height, width), dtype=np.uint8)\n tight_contour = contour - np.array((xmin, ymin)).reshape((-1, 1, 2))\n\n cv2.drawContours(tight_mask, [tight_contour], 0,\n (1, 1, 1), -1)\n\n return tight_mask\n\n\ndef get_contours(name, small, pagemask, masktype):\n\n mask = get_mask(name, small, pagemask, masktype)\n\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n\n contours_out = []\n\n for contour in contours:\n\n rect = cv2.boundingRect(contour)\n xmin, ymin, width, height = rect\n\n if (width < TEXT_MIN_WIDTH or\n height < TEXT_MIN_HEIGHT or\n width < TEXT_MIN_ASPECT*height):\n continue\n\n tight_mask = make_tight_mask(contour, xmin, ymin, width, height)\n\n if tight_mask.sum(axis=0).max() > TEXT_MAX_THICKNESS:\n continue\n\n contours_out.append(ContourInfo(contour, rect, tight_mask))\n\n if DEBUG_LEVEL >= 2:\n visualize_contours(name, small, contours_out)\n\n return contours_out\n\n\ndef assemble_spans(name, small, pagemask, cinfo_list):\n\n # sort list\n cinfo_list = sorted(cinfo_list, key=lambda cinfo: cinfo.rect[1])\n\n # generate all candidate edges\n candidate_edges = []\n\n for i, cinfo_i in enumerate(cinfo_list):\n for j in range(i):\n # note e is of the form (score, left_cinfo, right_cinfo)\n edge = generate_candidate_edge(cinfo_i, cinfo_list[j])\n if edge is not None:\n candidate_edges.append(edge)\n\n # sort candidate edges by score (lower is better)\n candidate_edges.sort()\n\n # for each candidate edge\n for _, cinfo_a, cinfo_b in candidate_edges:\n # if left and right are unassigned, join them\n if cinfo_a.succ is None and cinfo_b.pred is None:\n cinfo_a.succ = cinfo_b\n cinfo_b.pred = cinfo_a\n\n # generate list of spans as output\n spans = []\n\n # until we have removed everything from the list\n while cinfo_list:\n\n # get the first on the list\n cinfo = cinfo_list[0]\n\n # keep following predecessors until none exists\n while cinfo.pred:\n cinfo = cinfo.pred\n\n # start a new span\n cur_span = []\n\n width = 0.0\n\n # follow successors til end of span\n while cinfo:\n # remove from list (sadly making this loop *also* O(n^2)\n cinfo_list.remove(cinfo)\n # add to span\n cur_span.append(cinfo)\n width += cinfo.local_xrng[1] - cinfo.local_xrng[0]\n # set successor\n cinfo = cinfo.succ\n\n # add if long enough\n if width > SPAN_MIN_WIDTH:\n spans.append(cur_span)\n\n if DEBUG_LEVEL >= 2:\n visualize_spans(name, small, pagemask, spans)\n\n return spans\n\n\ndef sample_spans(shape, spans):\n\n span_points = []\n\n for span in spans:\n\n contour_points = []\n\n for cinfo in span:\n\n yvals = np.arange(cinfo.mask.shape[0]).reshape((-1, 1))\n totals = (yvals * cinfo.mask).sum(axis=0)\n means = totals / cinfo.mask.sum(axis=0)\n\n xmin, ymin = cinfo.rect[:2]\n\n step = SPAN_PX_PER_STEP\n start = ((len(means)-1) % step) / 2\n\n contour_points += [(x+xmin, means[x]+ymin)\n for x in range(start, len(means), step)]\n\n contour_points = np.array(contour_points,\n dtype=np.float32).reshape((-1, 1, 2))\n\n contour_points = pix2norm(shape, contour_points)\n\n span_points.append(contour_points)\n\n return span_points\n\n\ndef keypoints_from_samples(name, small, pagemask, page_outline,\n span_points):\n\n all_evecs = np.array([[0.0, 0.0]])\n all_weights = 0\n\n for points in span_points:\n\n _, evec = cv2.PCACompute(points.reshape((-1, 2)),\n None, maxComponents=1)\n\n weight = np.linalg.norm(points[-1] - points[0])\n\n all_evecs += evec * weight\n all_weights += weight\n\n evec = all_evecs / all_weights\n\n x_dir = evec.flatten()\n\n if x_dir[0] < 0:\n 
x_dir = -x_dir\n\n y_dir = np.array([-x_dir[1], x_dir[0]])\n\n pagecoords = cv2.convexHull(page_outline)\n pagecoords = pix2norm(pagemask.shape, pagecoords.reshape((-1, 1, 2)))\n pagecoords = pagecoords.reshape((-1, 2))\n\n px_coords = np.dot(pagecoords, x_dir)\n py_coords = np.dot(pagecoords, y_dir)\n\n px0 = px_coords.min()\n px1 = px_coords.max()\n\n py0 = py_coords.min()\n py1 = py_coords.max()\n\n p00 = px0 * x_dir + py0 * y_dir\n p10 = px1 * x_dir + py0 * y_dir\n p11 = px1 * x_dir + py1 * y_dir\n p01 = px0 * x_dir + py1 * y_dir\n\n corners = np.vstack((p00, p10, p11, p01)).reshape((-1, 1, 2))\n\n ycoords = []\n xcoords = []\n\n for points in span_points:\n pts = points.reshape((-1, 2))\n px_coords = np.dot(pts, x_dir)\n py_coords = np.dot(pts, y_dir)\n ycoords.append(py_coords.mean() - py0)\n xcoords.append(px_coords - px0)\n\n if DEBUG_LEVEL >= 2:\n visualize_span_points(name, small, span_points, corners)\n\n return corners, np.array(ycoords), xcoords\n\n\ndef visualize_contours(name, small, cinfo_list):\n\n regions = np.zeros_like(small)\n\n for j, cinfo in enumerate(cinfo_list):\n\n cv2.drawContours(regions, [cinfo.contour], 0,\n CCOLORS[j % len(CCOLORS)], -1)\n\n mask = (regions.max(axis=2) != 0)\n\n display = small.copy()\n display[mask] = (display[mask]/2) + (regions[mask]/2)\n\n for j, cinfo in enumerate(cinfo_list):\n color = CCOLORS[j % len(CCOLORS)]\n color = tuple([c/4 for c in color])\n\n cv2.circle(display, fltp(cinfo.center), 3,\n (255, 255, 255), 1, cv2.LINE_AA)\n\n cv2.line(display, fltp(cinfo.point0), fltp(cinfo.point1),\n (255, 255, 255), 1, cv2.LINE_AA)\n\n debug_show(name, 1, 'contours', display)\n\n\ndef visualize_spans(name, small, pagemask, spans):\n\n regions = np.zeros_like(small)\n\n for i, span in enumerate(spans):\n contours = [cinfo.contour for cinfo in span]\n cv2.drawContours(regions, contours, -1,\n CCOLORS[i*3 % len(CCOLORS)], -1)\n\n mask = (regions.max(axis=2) != 0)\n\n display = small.copy()\n display[mask] = (display[mask]/2) + (regions[mask]/2)\n display[pagemask == 0] /= 4\n\n debug_show(name, 2, 'spans', display)\n\n\ndef visualize_span_points(name, small, span_points, corners):\n\n display = small.copy()\n\n for i, points in enumerate(span_points):\n\n points = norm2pix(small.shape, points, False)\n\n mean, small_evec = cv2.PCACompute(points.reshape((-1, 2)),\n None,\n maxComponents=1)\n\n dps = np.dot(points.reshape((-1, 2)), small_evec.reshape((2, 1)))\n dpm = np.dot(mean.flatten(), small_evec.flatten())\n\n point0 = mean + small_evec * (dps.min()-dpm)\n point1 = mean + small_evec * (dps.max()-dpm)\n\n for point in points:\n cv2.circle(display, fltp(point), 3,\n CCOLORS[i % len(CCOLORS)], -1, cv2.LINE_AA)\n\n cv2.line(display, fltp(point0), fltp(point1),\n (255, 255, 255), 1, cv2.LINE_AA)\n\n cv2.polylines(display, [norm2pix(small.shape, corners, True)],\n True, (255, 255, 255))\n\n debug_show(name, 3, 'span points', display)\n\n\ndef imgsize(img):\n height, width = img.shape[:2]\n return '{}x{}'.format(width, height)\n\n\ndef make_keypoint_index(span_counts):\n\n nspans = len(span_counts)\n npts = sum(span_counts)\n keypoint_index = np.zeros((npts+1, 2), dtype=int)\n start = 1\n\n for i, count in enumerate(span_counts):\n end = start + count\n keypoint_index[start:start+end, 1] = 8+i\n start = end\n\n keypoint_index[1:, 0] = np.arange(npts) + 8 + nspans\n\n return keypoint_index\n\n\ndef optimize_params(name, small, dstpoints, span_counts, params):\n\n keypoint_index = make_keypoint_index(span_counts)\n\n def objective(pvec):\n ppts = 
project_keypoints(pvec, keypoint_index)\n return np.sum((dstpoints - ppts)**2)\n\n print (' initial objective is', objective(params))\n\n if DEBUG_LEVEL >= 1:\n projpts = project_keypoints(params, keypoint_index)\n display = draw_correspondences(small, dstpoints, projpts)\n debug_show(name, 4, 'keypoints before', display)\n\n print (' optimizing', len(params), 'parameters...')\n start = datetime.datetime.now()\n res = scipy.optimize.minimize(objective, params,\n method='Powell')\n end = datetime.datetime.now()\n print (' optimization took', round((end-start).total_seconds(), 2), 'sec.')\n print (' final objective is', res.fun)\n params = res.x\n\n if DEBUG_LEVEL >= 1:\n projpts = project_keypoints(params, keypoint_index)\n display = draw_correspondences(small, dstpoints, projpts)\n debug_show(name, 5, 'keypoints after', display)\n\n return params\n\n\ndef get_page_dims(corners, rough_dims, params):\n\n dst_br = corners[2].flatten()\n\n dims = np.array(rough_dims)\n\n def objective(dims):\n proj_br = project_xy(dims, params)\n return np.sum((dst_br - proj_br.flatten())**2)\n\n res = scipy.optimize.minimize(objective, dims, method='Powell')\n dims = res.x\n\n print (' got page dims', dims[0], 'x', dims[1])\n\n return dims\n\n\ndef remap_image(name, img, small, page_dims, params):\n\n height = 0.5 * page_dims[1] * OUTPUT_ZOOM * img.shape[0]\n height = round_nearest_multiple(height, REMAP_DECIMATE)\n\n width = round_nearest_multiple(height * page_dims[0] / page_dims[1],\n REMAP_DECIMATE)\n\n print (' output will be {}x{}'.format(width, height))\n\n height_small = height / REMAP_DECIMATE\n width_small = width / REMAP_DECIMATE\n\n page_x_range = np.linspace(0, page_dims[0], width_small)\n page_y_range = np.linspace(0, page_dims[1], height_small)\n\n page_x_coords, page_y_coords = np.meshgrid(page_x_range, page_y_range)\n\n page_xy_coords = np.hstack((page_x_coords.flatten().reshape((-1, 1)),\n page_y_coords.flatten().reshape((-1, 1))))\n\n page_xy_coords = page_xy_coords.astype(np.float32)\n\n image_points = project_xy(page_xy_coords, params)\n image_points = norm2pix(img.shape, image_points, False)\n\n image_x_coords = image_points[:, 0, 0].reshape(page_x_coords.shape)\n image_y_coords = image_points[:, 0, 1].reshape(page_y_coords.shape)\n\n image_x_coords = cv2.resize(image_x_coords, (width, height),\n interpolation=cv2.INTER_CUBIC)\n\n image_y_coords = cv2.resize(image_y_coords, (width, height),\n interpolation=cv2.INTER_CUBIC)\n\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n remapped = cv2.remap(img_gray, image_x_coords, image_y_coords,\n cv2.INTER_CUBIC,\n None, cv2.BORDER_REPLICATE)\n\n thresh = cv2.adaptiveThreshold(remapped, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY, ADAPTIVE_WINSZ, 25)\n\n pil_image = Image.fromarray(thresh)\n pil_image = pil_image.convert('1')\n\n threshfile = name + '_thresh.png'\n pil_image.save(threshfile, dpi=(OUTPUT_DPI, OUTPUT_DPI))\n\n if DEBUG_LEVEL >= 1:\n height = small.shape[0]\n width = int(round(height * float(thresh.shape[1])/thresh.shape[0]))\n display = cv2.resize(thresh, (width, height),\n interpolation=cv2.INTER_AREA)\n debug_show(name, 6, 'output', display)\n\n return threshfile\n\ndef dewarp(img):\n small = resize_to_screen(img)\n basename = os.path.basename(imgfile)\n name, _ = os.path.splitext(basename)\n\n print ('loaded', basename, 'with size', imgsize(img))\n print ('and resized to', imgsize(small))\n\n if DEBUG_LEVEL >= 3:\n debug_show(name, 0.0, 'original', small)\n\n pagemask, page_outline = get_page_extents(small)\n\n 
cinfo_list = get_contours(name, small, pagemask, 'text')\n spans = assemble_spans(name, small, pagemask, cinfo_list)\n\n if len(spans) < 3:\n print (' detecting lines because only', len(spans), 'text spans')\n cinfo_list = get_contours(name, small, pagemask, 'line')\n spans2 = assemble_spans(name, small, pagemask, cinfo_list)\n if len(spans2) > len(spans):\n spans = spans2\n\n if len(spans) < 1:\n print ('skipping', name, 'because only', len(spans), 'spans')\n return None\n\n span_points = sample_spans(small.shape, spans)\n\n print (' got', len(spans), 'spans')\n print ('with', sum([len(pts) for pts in span_points]), 'points.')\n\n corners, ycoords, xcoords = keypoints_from_samples(name, small,\n pagemask,\n page_outline,\n span_points)\n\n rough_dims, span_counts, params = get_default_params(corners,\n ycoords, xcoords)\n\n dstpoints = np.vstack((corners[0].reshape((1, 1, 2)),) +\n tuple(span_points))\n\n params = optimize_params(name, small,\n dstpoints,\n span_counts, params)\n\n page_dims = get_page_dims(corners, rough_dims, params)\n\n return remap_image(name, img, small, page_dims, params)\n\n\n\ndef main():\n\n if len(sys.argv) < 2:\n print ('usage:', sys.argv[0], 'IMAGE1 [IMAGE2 ...]')\n sys.exit(0)\n\n if DEBUG_LEVEL > 0 and DEBUG_OUTPUT != 'file':\n cv2.namedWindow(WINDOW_NAME)\n\n outfiles = []\n\n for imgfile in sys.argv[1:]:\n\n img = cv2.imread(imgfile)\n\n outfile = dewarp(img)\n\n outfiles.append(outfile)\n\n print (') wrote', outfile) \n\n print ('to convert to PDF (requires ImageMagick):')\n print (' convert -compress Group4 ' + ' '.join(outfiles) + ' output.pdf')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.zeros_like",
"numpy.zeros",
"numpy.minimum",
"numpy.sum",
"numpy.ones",
"numpy.polyval",
"numpy.arange",
"numpy.arctan2",
"numpy.abs",
"numpy.linspace",
"numpy.meshgrid",
"numpy.vstack"
]
] |
AllSafeCyberSecur1ty/Nuclear-Engineering | [
"302d6dcc7c0a85a9191098366b076cf9cb5a9f6e"
] | [
"pyne/tests/test_activation_responses.py"
] | [
"import os\nimport warnings\nfrom nose.tools import assert_equal, assert_almost_equal, assert_true\nfrom nose.plugins.skip import SkipTest\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport multiprocessing\nimport filecmp\nimport sys\nfrom shutil import copyfile\nimport tables as tb\n\nfrom pyne.mcnp import Meshtal\nfrom pyne.material import Material\nfrom pyne.utils import QAWarning, file_block_almost_same\nfrom pyne.alara import response_to_hdf5, response_hdf5_to_mesh, _make_response_dtype\nfrom pyne.mesh import Mesh, NativeMeshTag, HAVE_PYMOAB\n\nif not HAVE_PYMOAB:\n raise SkipTest\n\nif sys.version_info[0] > 2:\n izip = zip\nelse:\n from itertools import izip\n\nwarnings.simplefilter(\"ignore\", QAWarning)\n\nthisdir = os.path.dirname(__file__)\n\nresponses = [\n \"decay_heat\",\n \"specific_activity\",\n \"alpha_heat\",\n \"beta_heat\",\n \"gamma_heat\",\n \"wdr\",\n \"photon_source\",\n]\n\n\ndef _generate_exp_h5(filename, response, exp_h5_filename):\n \"\"\"\n This function is used to generate expected h5 file for different responses.\n Supported responses is defined at the begining of this file.\n Filename could can be an output.txt contains specific response for a file\n contains multiple responses.\n \"\"\"\n # generate expected h5 file\n f = open(filename, \"r\")\n f.seek(0)\n dt = _make_response_dtype(response)\n filters = tb.Filters(complevel=1, complib=\"zlib\")\n h5f = tb.open_file(exp_h5_filename, \"w\", filters=filters)\n tab = h5f.create_table(\"/\", \"data\", dt)\n rows = np.empty(12, dtype=dt)\n rows[0] = (0, \"h-3\", \"shutdown\", 9.5258e-18)\n rows[1] = (0, \"h-3\", \"1000 s\", 9.5258e-18)\n rows[2] = (0, \"h-3\", \"12 h\", 9.5251e-18)\n rows[3] = (0, \"h-3\", \"3 d\", 9.5214e-18)\n rows[4] = (0, \"n-16\", \"shutdown\", 3.1588e-9)\n rows[5] = (0, \"n-16\", \"1000 s\", 0.0000e0)\n rows[6] = (0, \"n-16\", \"12 h\", 0.0000e0)\n rows[7] = (0, \"n-16\", \"3 d\", 0.0000e0)\n rows[8] = (0, \"TOTAL\", \"shutdown\", 3.1588e-9)\n rows[9] = (0, \"TOTAL\", \"1000 s\", 9.5258e-18)\n rows[10] = (0, \"TOTAL\", \"12 h\", 9.5251e-18)\n rows[11] = (0, \"TOTAL\", \"3 d\", 9.5214e-18)\n tab.append(rows[:])\n # close the file\n h5f.close()\n f.close()\n\n\ndef test_response_to_hdf5():\n \"\"\"\n This function test alara.response_to_hdf5, with response of:\n - decay_heat\n - specific_activity\n - alpha_heat\n - beta_heat\n - gamma_heat\n - wdr\n - photon_source\n \"\"\"\n # skip test if h5diff not exist\n is_h5diff = os.system(\"which h5diff\")\n if is_h5diff != 0:\n raise SkipTest\n\n for response in responses:\n # read output.txt and write h5 file\n filename = os.path.join(\n thisdir,\n \"files_test_activation_responses\",\n \"\".join([response, \"_output.txt\"]),\n )\n h5_filename = os.path.join(\n thisdir, \"files_test_activation_responses\", \"\".join([response, \".h5\"])\n )\n response_to_hdf5(filename, response)\n\n # generate expected h5 file\n exp_h5_filename = os.path.join(\n thisdir,\n \"files_test_activation_responses\",\n \"\".join([\"exp_\", response, \".h5\"]),\n )\n _generate_exp_h5(filename, response, exp_h5_filename)\n\n # compare two h5 files\n command = \"\".join([\"h5diff \", h5_filename, \" \", exp_h5_filename])\n diff_flag = os.system(command)\n # return value 0 if no difference, 1 if differences found, 2 if error\n assert_equal(diff_flag, 0)\n\n # remove generated files\n os.remove(h5_filename)\n os.remove(exp_h5_filename)\n\n\ndef test_responses_to_hdf5_multiple():\n \"\"\"\n This function test alara.response_to_hdf5, read output.txt of 
multiple responses:\n - decay_heat\n - specific_activity\n - alpha_heat\n - beta_heat\n - gamma_heat\n - wdr\n - photon_source\n \"\"\"\n # skip test if h5diff not exist\n is_h5diff = os.system(\"which h5diff\")\n if is_h5diff != 0:\n raise SkipTest\n\n for response in responses:\n # read output.txt and write h5 file\n filename = os.path.join(\n thisdir, \"files_test_activation_responses\", \"multiple_output.txt\"\n )\n h5_filename = os.path.join(\n thisdir, \"files_test_activation_responses\", \"\".join([response, \".h5\"])\n )\n response_to_hdf5(filename, response)\n\n # generate expected h5 file\n exp_h5_filename = os.path.join(\n thisdir,\n \"files_test_activation_responses\",\n \"\".join([\"exp_\", response, \".h5\"]),\n )\n _generate_exp_h5(filename, response, exp_h5_filename)\n\n # compare two h5 files\n command = \"\".join([\"h5diff \", h5_filename, \" \", exp_h5_filename])\n diff_flag = os.system(command)\n # return value 0 if no difference, 1 if differences found, 2 if error\n assert_equal(diff_flag, 0)\n\n # remove generated files\n os.remove(h5_filename)\n os.remove(exp_h5_filename)\n\n\ndef test_response_hdf5_to_mesh():\n \"\"\"Tests the function photon source_h5_to_mesh.\"\"\"\n\n for response in responses:\n # read output.txt and write h5 file\n filename = os.path.join(\n thisdir,\n \"files_test_activation_responses\",\n \"\".join([response, \"_output.txt\"]),\n )\n h5_filename = os.path.join(\n thisdir, \"files_test_activation_responses\", \"\".join([response, \".h5\"])\n )\n response_to_hdf5(filename, response)\n assert_true(os.path.exists(h5_filename))\n\n mesh = Mesh(structured=True, structured_coords=[[0, 1], [0, 1], [0, 1]])\n\n tags = {(\"h-3\", \"shutdown\"): \"tag1\", (\"TOTAL\", \"12 h\"): \"tag2\"}\n response_hdf5_to_mesh(mesh, h5_filename, tags, response)\n\n # create lists of lists of expected results\n tag1_answers = [9.5258e-18]\n tag2_answers = [9.5251e-18]\n\n ves = list(mesh.structured_iterate_hex(\"xyz\"))\n for i, ve in enumerate(ves):\n assert_equal(mesh.tag1[ve], tag1_answers[i])\n assert_equal(mesh.tag2[ve], tag2_answers[i])\n\n if os.path.isfile(h5_filename):\n os.remove(h5_filename)\n\n\ndef _activation_responses_test_step1(activation_responses_run_dir):\n os.chdir(thisdir)\n # copy ../scripts/activation_responses.py to activation_responses_run_dir/activation_responses.py\n os.chdir(\"..\")\n folderpath = os.getcwd()\n dst = os.path.join(activation_responses_run_dir, \"activation_responses.py\")\n copyfile(os.path.join(folderpath, \"scripts\", \"activation_responses.py\"), dst)\n\n # run activation_responses step1\n os.chdir(activation_responses_run_dir)\n os.system(\"python activation_responses.py step1\")\n\n # output files of activation_responses step1\n alara_inp = os.path.join(activation_responses_run_dir, \"alara_inp\")\n alara_matlib = os.path.join(activation_responses_run_dir, \"alara_matlib\")\n alara_fluxin = os.path.join(activation_responses_run_dir, \"alara_fluxin\")\n blank_mesh = os.path.join(activation_responses_run_dir, \"blank_mesh.h5m\")\n step1_file = os.path.join(\n activation_responses_run_dir, \"activation_responses_step1.h5m\"\n )\n\n exp_alara_inp = os.path.join(activation_responses_run_dir, \"exp_alara_inp\")\n exp_alara_matlib = os.path.join(activation_responses_run_dir, \"exp_alara_matlib\")\n exp_alara_fluxin = os.path.join(activation_responses_run_dir, \"exp_alara_fluxin\")\n\n # compare the output file of step1\n f1 = filecmp.cmp(alara_inp, exp_alara_inp)\n f2 = file_block_almost_same(alara_matlib, exp_alara_matlib)\n f3 
= filecmp.cmp(alara_fluxin, exp_alara_fluxin)\n\n # remove test output files\n os.remove(alara_inp)\n os.remove(alara_fluxin)\n os.remove(alara_matlib)\n os.remove(blank_mesh)\n os.remove(step1_file)\n os.remove(dst)\n\n assert_equal(f1, True)\n assert_equal(f2, True)\n assert_equal(f3, True)\n\n\ndef _activation_responses_test_step2(activation_responses_run_dir):\n # skip test if h5diff not exist\n is_h5diff = os.system(\"which h5diff\")\n if is_h5diff != 0:\n raise SkipTest\n\n os.chdir(thisdir)\n # copy ../scripts/activation_responses.py to activation_responses_run_dir/activation_responses.py\n os.chdir(\"..\")\n folderpath = os.getcwd()\n dst = os.path.join(activation_responses_run_dir, \"activation_responses.py\")\n copyfile(os.path.join(folderpath, \"scripts\", \"activation_responses.py\"), dst)\n\n # output files of activation_responses step1\n alara_inp = os.path.join(activation_responses_run_dir, \"alara_inp\")\n copyfile(os.path.join(activation_responses_run_dir, \"exp_alara_inp\"), alara_inp)\n blank_mesh = os.path.join(activation_responses_run_dir, \"blank_mesh.h5m\")\n copyfile(\n os.path.join(activation_responses_run_dir, \"exp_blank_mesh.h5m\"), blank_mesh\n )\n\n # run activation_responses step2\n os.chdir(activation_responses_run_dir)\n os.system(\"python activation_responses.py step2\")\n\n response = \"decay_heat\"\n # output files of activation_responses step2\n h5_filename = os.path.join(activation_responses_run_dir, \"\".join([response, \".h5\"]))\n exp_h5_filename = os.path.join(\n activation_responses_run_dir, \"\".join([\"exp_\", response, \".h5\"])\n )\n h5m_filename = os.path.join(\n activation_responses_run_dir, \"\".join([response, \"_1.h5m\"])\n )\n exp_h5m_filename = os.path.join(\n activation_responses_run_dir, \"\".join([\"exp_\", response, \"_1.h5m\"])\n )\n\n # compare the results\n # compare two h5 files\n command = \"\".join([\"h5diff \", h5_filename, \" \", exp_h5_filename])\n diff_flag4 = os.system(command)\n # compare two h5m files\n command = \"\".join(\n [\n \"h5diff \",\n h5m_filename,\n \" \",\n exp_h5m_filename,\n \"\".join([\" tstt/tags/\", response, \" /tstt/tags/\", response]),\n ]\n )\n\n diff_flag5 = os.system(command)\n\n # remove test generated files\n os.remove(blank_mesh)\n os.remove(alara_inp)\n os.remove(h5_filename)\n os.remove(h5m_filename)\n os.remove(dst)\n\n # return value 0 if no difference, 1 if differences found, 2 if error\n assert_equal(diff_flag4, 0)\n assert_equal(diff_flag5, 0)\n\n\ndef test_activation_responses_script():\n # skip test without dagmc\n try:\n from pyne import dagmc\n except ImportError:\n raise SkipTest\n\n activation_responses_run_dir = os.path.join(\n thisdir, \"files_test_activation_responses\", \"activation_responses_examples\"\n )\n _activation_responses_test_step1(activation_responses_run_dir)\n _activation_responses_test_step2(activation_responses_run_dir)\n"
] | [
[
"numpy.empty"
]
] |
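The `apis` column for the record above lists `numpy.empty`. The call site is not visible in this excerpt, so the following is only a minimal, self-contained sketch of the general pattern; the array shape and values are illustrative, not taken from the repository.

```python
import numpy as np

# np.empty allocates an uninitialized buffer; unlike np.zeros it does not
# clear memory, so every element must be assigned before it is read.
flux = np.empty((3, 4), dtype=float)
flux[:] = 0.0
print(flux.shape)  # (3, 4)
```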
harveenchadha/bol | [
"0f720813107ab2f41e895917cd0359e8c0738dd1"
] | [
"bol/data/wav2vec2/_wav2vec2_ts_dataloader.py"
] | [
"import torch\nfrom torch.utils.data import Dataset\nfrom bol.utils.helper_functions import read_wav_file, convert_to_tensor\nfrom bol.utils.resampler import resample_using_sox\n\n\ndef get_batch_encoder_input(batch_samples):\n # features = [get_feature(batch_sample[0]) for batch_sample in batch_samples]\n features = [batch_sample[0].squeeze(dim=0) for batch_sample in batch_samples]\n\n filenames = [filename[1] for filename in batch_samples]\n # features = batch_samples[0][0]\n # filenames = batch_samples[1][0]\n # print(features)\n # print(features[0])\n # print(\"Zer: \", features[0].size())\n\n # print(\"Max size is :\", max(sizes))\n\n # for feature in features:\n\n # features = torch.nn.utils.rnn.pad_sequence(features, batch_first=True, padding_value=0)\n return features, filenames\n\n\nclass Wav2Vec2TsDataSet(Dataset):\n def __init__(self, audio_path, convert):\n self.audio_paths = audio_path\n self.convert = convert\n\n def __len__(self):\n return len(self.audio_paths)\n\n def __getitem__(self, index):\n features = self._get_feature(self.audio_paths[index])\n return features, self.audio_paths[index]\n\n def _get_feature(self, filepath):\n wav, sample_rate = read_wav_file(filepath, 'sf')\n if sample_rate != 16000 and self.convert:\n wav = resample_using_sox(wav, input_type='array', output_type='array', sample_rate_in=sample_rate)\n wav = convert_to_tensor(wav)\n return wav\n\n\nclass Wav2Vec2TsDataLoader:\n def __init__(self, batch_size, num_workers, file_data_path, convert):\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.convert = convert\n\n file_data_loader = self.create_data_loaders_from_dataset(\n file_data_path, batch_size, num_workers\n )\n self.file_data_loader = file_data_loader\n\n def create_data_loaders_from_dataset(self, file_data_path, batch_size, num_workers):\n train_dataset = Wav2Vec2TsDataSet(file_data_path, self.convert)\n file_data_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n pin_memory=True,\n )\n # collate_fn=get_batch_encoder_input)\n\n return file_data_loader\n\n def get_file_data_loader(self):\n return self.file_data_loader\n"
] | [
[
"torch.utils.data.DataLoader"
]
] |
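The `apis` column for the record above lists `torch.utils.data.DataLoader`, which the file wraps around its `Wav2Vec2TsDataSet`. A minimal sketch of that pattern follows; the toy dataset, tensor sizes, and file names are hypothetical stand-ins.

```python
import torch
from torch.utils.data import Dataset, DataLoader

class ToyAudioDataset(Dataset):
    """Stand-in for a (waveform, filename) dataset such as Wav2Vec2TsDataSet."""
    def __init__(self, n_items=8, n_samples=16000):
        self.items = [(torch.randn(n_samples), f"file_{i}.wav") for i in range(n_items)]

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        return self.items[index]

# Mirrors the loader settings in the row above: no shuffling, pinned memory.
loader = DataLoader(ToyAudioDataset(), batch_size=4, shuffle=False,
                    num_workers=0, pin_memory=True)
for waveforms, names in loader:
    print(waveforms.shape, names)  # torch.Size([4, 16000]) plus 4 file names
```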
ErikHumphrey/sustain-seq2seq | [
"c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4"
] | [
"models/components/encoders/LSTMEncoder.py"
] | [
"import os, sys\nsys.path.insert(0, '../../..')\n\nimport torch\nimport torch.nn as nn\n\nclass Encoder(nn.Module):\n def __init__(self, vocab_size, emb_dim, hidden_dim, num_layers, lstm_dropout, dropout, device):\n \"\"\"\n Creates an Encoder model.\n\n Args:\n vocab_size (int): Number of classes/ Vocabulary size.\n emb_dim (int): Embeddings dimension.\n hidden_dim (int): LSTM hidden layer dimension.\n num_layers (int): Number of LSTM layers.\n lstm_dropout (float): LSTM dropout.\n dropout (float): Embeddings dropout.\n device : The device to run the model on.\n \"\"\"\n assert hidden_dim % 2 == 0, \"Encoder hidden_dim should be even as the LSTM is bidirectional.\"\n super().__init__()\n \n self.vocab_size = vocab_size\n self.emb_dim = emb_dim\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers \n\n self.embedding = nn.Embedding(vocab_size, emb_dim) \n self.dropout = nn.Dropout(dropout)\n self.lstm = nn.LSTM(emb_dim, int(hidden_dim/2), num_layers, dropout=lstm_dropout, bidirectional=True, batch_first=True)\n\n self.to(device)\n\n def forward(self, input_tuple):\n \"\"\"\n Args:\n input (tensor): The input of the encoder. It must be a 2-D tensor of integers. \n Shape: [batch_size, seq_len_enc].\n\n Returns:\n A tuple containing the output and the states of the last LSTM layer. The states of the LSTM layer is also a\n tuple that contains the hidden and the cell state, respectively . \n Output shape: [batch_size, seq_len_enc, hidden_dim * 2]\n Hidden/cell state shape: [num_layers*2, batch_size, hidden_dim]\n \"\"\"\n \n X, X_lengths = input_tuple[0], input_tuple[1]\n \n # Creates the embeddings and adds dropout. [batch_size, seq_len] -> [batch_size, seq_len, emb_dim].\n embeddings = self.dropout(self.embedding(X))\n \n # pack padded sequences\n pack_padded_lstm_input = torch.nn.utils.rnn.pack_padded_sequence(embeddings, X_lengths, batch_first=True)\n\n # now run through LSTM\n pack_padded_lstm_output, states = self.lstm(pack_padded_lstm_input)\n \n # undo the packing operation\n output, _ = torch.nn.utils.rnn.pad_packed_sequence(pack_padded_lstm_output, batch_first=True) \n \n return {'output':output, 'states':states}"
] | [
[
"torch.nn.Dropout",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.Embedding",
"torch.nn.utils.rnn.pack_padded_sequence"
]
] |
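The `apis` column for the record above lists `torch.nn.Embedding`, `torch.nn.Dropout`, and the `pack_padded_sequence` / `pad_packed_sequence` pair used around the bidirectional LSTM. A minimal end-to-end sketch of that sequence is below; vocabulary size, dimensions, and lengths are illustrative.

```python
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

embedding = nn.Embedding(num_embeddings=100, embedding_dim=16)
dropout = nn.Dropout(0.1)
lstm = nn.LSTM(16, 8, batch_first=True, bidirectional=True)

X = torch.randint(0, 100, (3, 5))      # [batch, seq_len] of token ids
lengths = torch.tensor([5, 4, 2])      # true lengths, sorted descending

emb = dropout(embedding(X))                                   # [3, 5, 16]
packed = pack_padded_sequence(emb, lengths, batch_first=True)  # skip pad steps
packed_out, states = lstm(packed)
out, _ = pad_packed_sequence(packed_out, batch_first=True)     # back to [3, 5, 16]
print(out.shape)
```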
dpgrote/WarpX | [
"038af1cc1ebf250d0633f21d4a2579c1bad03f34"
] | [
"Examples/Tests/photon_pusher/analysis_photon_pusher.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright 2019 Luca Fedeli, Maxence Thevenet, Weiqun Zhang\n#\n#\n# This file is part of WarpX.\n#\n# License: BSD-3-Clause-LBNL\n\nimport yt\nimport numpy as np\nimport os\nimport sys\nsys.path.insert(1, '../../../../warpx/Regression/Checksum/')\nimport checksumAPI\n\n#This script checks if photons initialized with different momenta and\n#different initial directions propagate along straight lines at the speed of\n#light. The plotfile to be analyzed is passed as a command line argument.\n\n#If the script is run without a command line argument, it regenerates a new\n#inputfile according to the initial conditions listed below.\n\n\n#Physical constants\nc = 299792458.\nm_e = 9.1093837015e-31\n#________________________________________\n\n#Test cases\nspec_names = [\"p_xp_1\", \"p_xn_1\", \"p_yp_1\", \"p_yn_1\",\n \"p_zp_1\", \"p_zn_1\",\"p_dp_1\", \"p_dn_1\",\n \"p_xp_10\", \"p_xn_10\", \"p_yp_10\", \"p_yn_10\",\n \"p_zp_10\", \"p_zn_10\", \"p_dp_10\", \"p_dn_10\"]\n#photon momenta are in units of m_e c\nmxp1 = np.array([1, 0.0, 0.0])\nmxn1 = np.array([-1, 0.0, 0.0])\nmyp1 = np.array([0.0, 1, 0.0])\nmyn1 = np.array([0.0, -1, 0.0])\nmzp1 = np.array([0.0, 0.0, 1])\nmzn1 = np.array([0.0, 0.0, -1])\nmdp1 = np.array([1, 1, 1])\nmdn1 = np.array([-1, -1, -1])\nmxp10 = np.array([10, 0.0, 0.0])\nmxn10 = np.array([-10, 0.0, 0.0])\nmyp10 = np.array([0.0, 10, 0.0])\nmyn10 = np.array([0.0, -10, 0.0])\nmzp10 = np.array([0.0, 0.0, 10])\nmzn10 = np.array([0.0, 0.0, -10])\nmdp10 = np.array([10, 10, 10])\nmdn10 = np.array([-10,-10, -10])\ngamma_beta_list = np.array([mxp1, mxn1, myp1, myn1, mzp1, mzn1, mdp1, mdn1,\n mxp10, mxn10, myp10, myn10, mzp10, mzn10, mdp10, mdn10])\ninit_pos = np.array([0.0, 0.0, 0.0])\n#________________________________________\n\n#Tolerance\ntol_pos = 1.0e-14;\ntol_mom = 0.0; #momentum should be conserved exactly\n#________________________________________\n\n#Input filename\ninputname = \"inputs\"\n#________________________________________\n\n# This function reads the WarpX plotfile given as the first command-line\n# argument, and check if the position of each photon agrees with theory.\ndef check():\n filename = sys.argv[1]\n data_set_end = yt.load(filename)\n\n sim_time = data_set_end.current_time.to_value()\n\n #expected positions list\n ll = sim_time*c\n answ_pos = init_pos + \\\n ll*gamma_beta_list/np.linalg.norm(gamma_beta_list,axis=1, keepdims=True)\n\n #expected momenta list\n answ_mom = m_e * c *gamma_beta_list #momenta don't change\n\n #simulation results\n all_data = data_set_end.all_data()\n res_pos = [np.array([\n all_data[sp, 'particle_position_x'].v[0],\n all_data[sp, 'particle_position_y'].v[0],\n all_data[sp, 'particle_position_z'].v[0]])\n for sp in spec_names]\n res_mom = [np.array([\n all_data[sp, 'particle_momentum_x'].v[0],\n all_data[sp, 'particle_momentum_y'].v[0],\n all_data[sp, 'particle_momentum_z'].v[0]])\n for sp in spec_names]\n\n #check discrepancies\n disc_pos = [np.linalg.norm(a-b)/np.linalg.norm(b)\n for a,b in zip(res_pos, answ_pos)]\n disc_mom = [np.linalg.norm(a-b)/np.linalg.norm(b)\n for a,b in zip(res_mom, answ_mom)]\n\n print(\"max(disc_pos) = %s\" %max(disc_pos))\n print(\"tol_pos = %s\" %tol_pos)\n print(\"max(disc_mom) = %s\" %max(disc_mom))\n print(\"tol_mom = %s\" %tol_mom)\n\n assert ((max(disc_pos) <= tol_pos) and (max(disc_mom) <= tol_mom))\n\n test_name = os.path.split(os.getcwd())[1]\n checksumAPI.evaluate_checksum(test_name, filename)\n\n# This function generates the input file to test the photon 
pusher.\ndef generate():\n with open(inputname,'w') as f:\n f.write(\"#Automatically generated inputfile\\n\")\n f.write(\"#Run check.py without arguments to regenerate\\n\")\n f.write(\"#\\n\\n\")\n f.write(\"max_step = 50\\n\")\n f.write(\"amr.n_cell = 64 64 64\\n\")\n f.write(\"amr.max_level = 0\\n\")\n f.write(\"amr.blocking_factor = 8\\n\")\n f.write(\"amr.max_grid_size = 8\\n\")\n f.write(\"amr.plot_int = 1\\n\")\n f.write(\"geometry.dims = 3\\n\")\n f.write(\"boundary.field_lo = periodic periodic periodic\\n\")\n f.write(\"boundary.field_hi = periodic periodic periodic\\n\")\n f.write(\"geometry.prob_lo = -0.5e-6 -0.5e-6 -0.5e-6\\n\")\n f.write(\"geometry.prob_hi = 0.5e-6 0.5e-6 0.5e-6\\n\")\n f.write(\"algo.charge_deposition = standard\\n\")\n f.write(\"algo.field_gathering = energy-conserving\\n\")\n f.write(\"warpx.cfl = 1.0\\n\")\n\n f.write(\"particles.species_names = {}\\n\".format(' '.join(spec_names)))\n f.write(\"particles.photon_species = {}\\n\".format(' '.join(spec_names)))\n\n f.write(\"\\namr.plot_int = 50\\n\\n\")\n\n for name in spec_names:\n f.write(\"diag1.{}.variables = ux uy uz\\n\".format(name))\n\n f.write(\"\\n\")\n\n data = zip(spec_names, gamma_beta_list)\n for case in data:\n name = case[0]\n velx, vely ,velz = case[1]\n f.write(\"{}.species_type = photon\\n\".format(name))\n f.write('{}.injection_style = \"SingleParticle\"\\n'.format(name))\n f.write(\"{}.single_particle_pos = {} {} {}\\n\".\n format(name, init_pos[0], init_pos[1], init_pos[2]))\n f.write(\"{}.single_particle_vel = {} {} {}\\n\".\n format(name, velx, vely, velz))\n f.write(\"{}.single_particle_weight = 1.0\\n\".format(name))\n f.write(\"\\n\".format(name))\n\ndef main():\n if (len(sys.argv) < 2):\n generate()\n else:\n check()\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array",
"numpy.linalg.norm"
]
] |
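The `apis` column for the record above lists `numpy.array` and `numpy.linalg.norm`, which the analysis script uses to normalize each momentum vector row-wise. A minimal sketch of that pattern, with made-up values:

```python
import numpy as np

# Direction vectors, one per row; divide each row by its norm,
# using keepdims so the division broadcasts over the row.
gamma_beta = np.array([[1.0, 0.0, 0.0],
                       [10.0, 10.0, 10.0]])
unit = gamma_beta / np.linalg.norm(gamma_beta, axis=1, keepdims=True)
print(np.linalg.norm(unit, axis=1))  # [1. 1.]
```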
yasarc4/Auto_time_series | [
"5a9aa5c535fbe09a4cc59e44124a5de435ac5059"
] | [
"utils/dataloader.py"
] | [
"import pandas as pd\n\ndef load_csv(path, max_rows = None, date_col='txn_dttm', ts_col='amt_atmcam'):\n df = pd.read_csv(path)\n df = df[[date_col, ts_col]]\n df[date_col] = pd.to_datetime(df[date_col])\n df = df.sort_values(date_col).reset_index(drop=True)\n if max_rows==None:\n return df\n else:\n return df.iloc[-max_rows-df[date_col].iloc[-max_rows].hour:]\n"
] | [
[
"pandas.to_datetime",
"pandas.read_csv"
]
] |
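The `apis` column for the record above lists `pandas.read_csv` and `pandas.to_datetime`. A minimal sketch of the load-parse-sort pattern in that file, using an in-memory CSV instead of a path (the column names come from the row above; the values are made up):

```python
import io
import pandas as pd

csv_text = "txn_dttm,amt_atmcam\n2021-01-01 01:00,12\n2021-01-01 00:00,10\n"
df = pd.read_csv(io.StringIO(csv_text))
df["txn_dttm"] = pd.to_datetime(df["txn_dttm"])          # string -> datetime64
df = df.sort_values("txn_dttm").reset_index(drop=True)   # chronological order
print(df.dtypes)
```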
anntzer/h5py | [
"7a413101ccaf2a8ac853c36aa887ada4e8955d98"
] | [
"setup_build.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\n Implements a custom Distutils build_ext replacement, which handles the\n full extension module build process, from api_gen to C compilation and\n linking.\n\"\"\"\n\ntry:\n from setuptools import Extension\nexcept ImportError:\n from distutils.extension import Extension\nfrom distutils.command.build_ext import build_ext\nimport sys\nimport os\nimport os.path as op\nfrom pathlib import Path\nimport subprocess\n\nimport api_gen\nfrom setup_configure import BuildConfig\n\n\ndef localpath(*args):\n return op.abspath(op.join(op.dirname(__file__), *args))\n\n\nMODULES = ['defs', '_errors', '_objects', '_proxy', 'h5fd', 'h5z',\n 'h5', 'h5i', 'h5r', 'utils', '_selector',\n '_conv', 'h5t', 'h5s',\n 'h5p',\n 'h5d', 'h5a', 'h5f', 'h5g',\n 'h5l', 'h5o',\n 'h5ds', 'h5ac',\n 'h5pl']\n\nEXTRA_SRC = {'h5z': [ localpath(\"lzf/lzf_filter.c\"),\n localpath(\"lzf/lzf/lzf_c.c\"),\n localpath(\"lzf/lzf/lzf_d.c\")]}\n\nCOMPILER_SETTINGS = {\n 'libraries' : ['hdf5', 'hdf5_hl'],\n 'include_dirs' : [localpath('lzf')],\n 'library_dirs' : [],\n 'define_macros' : [('H5_USE_18_API', None),\n ('NPY_NO_DEPRECATED_API', 0),\n ]\n}\n\nif sys.platform.startswith('win'):\n COMPILER_SETTINGS['include_dirs'].append(localpath('windows'))\n COMPILER_SETTINGS['define_macros'].extend([\n ('_HDF5USEDLL_', None),\n ('H5_BUILT_AS_DYNAMIC_LIB', None)\n ])\n\n\nclass h5py_build_ext(build_ext):\n\n \"\"\"\n Custom distutils command which encapsulates api_gen pre-building,\n Cython building, and C compilation.\n\n Also handles making the Extension modules, since we can't rely on\n NumPy being present in the main body of the setup script.\n \"\"\"\n\n @staticmethod\n def _make_extensions(config):\n \"\"\" Produce a list of Extension instances which can be passed to\n cythonize().\n\n This is the point at which custom directories, MPI options, etc.\n enter the build process.\n \"\"\"\n import numpy\n\n settings = COMPILER_SETTINGS.copy()\n\n settings['include_dirs'][:0] = config.hdf5_includedirs\n settings['library_dirs'][:0] = config.hdf5_libdirs\n settings['define_macros'].extend(config.hdf5_define_macros)\n\n try:\n numpy_includes = numpy.get_include()\n except AttributeError:\n # if numpy is not installed get the headers from the .egg directory\n import numpy.core\n numpy_includes = os.path.join(os.path.dirname(numpy.core.__file__), 'include')\n\n settings['include_dirs'] += [numpy_includes]\n if config.mpi:\n import mpi4py\n settings['include_dirs'] += [mpi4py.get_include()]\n\n # TODO: should this only be done on UNIX?\n if os.name != 'nt':\n settings['runtime_library_dirs'] = settings['library_dirs']\n\n def make_extension(module):\n sources = [localpath('h5py', module + '.pyx')] + EXTRA_SRC.get(module, [])\n return Extension('h5py.' + module, sources, **settings)\n\n return [make_extension(m) for m in MODULES]\n\n def run(self):\n \"\"\" Distutils calls this method to run the command \"\"\"\n\n from Cython import __version__ as cython_version\n from Cython.Build import cythonize\n import numpy\n\n # This allows ccache to recognise the files when pip builds in a temp\n # directory. It speeds up repeatedly running tests through tox with\n # ccache configured (CC=\"ccache gcc\"). 
It should have no effect if\n # ccache is not in use.\n os.environ['CCACHE_BASEDIR'] = op.dirname(op.abspath(__file__))\n os.environ['CCACHE_NOHASHDIR'] = '1'\n\n # Get configuration from environment variables\n config = BuildConfig.from_env()\n config.summarise()\n\n defs_file = localpath('h5py', 'defs.pyx')\n func_file = localpath('h5py', 'api_functions.txt')\n config_file = localpath('h5py', 'config.pxi')\n\n # Rebuild low-level defs if missing or stale\n if not op.isfile(defs_file) or os.stat(func_file).st_mtime > os.stat(defs_file).st_mtime:\n print(\"Executing api_gen rebuild of defs\")\n api_gen.run()\n\n # Rewrite config.pxi file if needed\n s = \"\"\"\\\n# This file is automatically generated by the h5py setup script. Don't modify.\n\nDEF MPI = %(mpi)s\nDEF HDF5_VERSION = %(version)s\nDEF SWMR_MIN_HDF5_VERSION = (1,9,178)\nDEF VDS_MIN_HDF5_VERSION = (1,9,233)\nDEF VOL_MIN_HDF5_VERSION = (1,11,5)\nDEF COMPLEX256_SUPPORT = %(complex256_support)s\nDEF NUMPY_BUILD_VERSION = '%(numpy_version)s'\nDEF CYTHON_BUILD_VERSION = '%(cython_version)s'\n\"\"\"\n s %= {\n 'mpi': bool(config.mpi),\n 'version': config.hdf5_version,\n 'complex256_support': hasattr(numpy, 'complex256'),\n 'numpy_version': numpy.__version__,\n 'cython_version': cython_version,\n }\n write_if_changed(config_file, s)\n\n # Run Cython\n print(\"Executing cythonize()\")\n self.extensions = cythonize(self._make_extensions(config),\n force=config.changed() or self.force,\n language_level=3)\n\n # Perform the build\n build_ext.run(self)\n\n # Record the configuration we built\n config.record_built()\n\n\ndef write_if_changed(target_path, s: str):\n \"\"\"Overwrite target_path unless the contents already match s\n\n Avoids changing the mtime when we're just writing the same data.\n \"\"\"\n p = Path(target_path)\n b = s.encode('utf-8')\n try:\n if p.read_bytes() == b:\n return\n except FileNotFoundError:\n pass\n\n p.write_bytes(b)\n"
] | [
[
"numpy.get_include"
]
] |
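The `apis` column for the record above lists `numpy.get_include`, which the build script uses to point the C compiler at NumPy's headers. A minimal sketch of that usage; the module name and source path are hypothetical.

```python
import numpy
from setuptools import Extension

# NumPy's C headers live outside the default include path;
# numpy.get_include() returns the directory to add when compiling against them.
ext = Extension(
    "example._fastmod",                # hypothetical extension module
    sources=["example/_fastmod.c"],    # hypothetical C source
    include_dirs=[numpy.get_include()],
)
print(numpy.get_include())
```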
ENSYSTRA/geokit | [
"510ec5c3fe3c034f1dff776c813eb28c6cd07c40"
] | [
"geokit/core/location.py"
] | [
"import re\nimport numpy as np\nfrom osgeo import ogr\nimport pandas as pd\n\nfrom . import util as UTIL\nfrom . import srs as SRS\nfrom . import geom as GEOM\n\n\nclass GeoKitLocationError(UTIL.GeoKitError):\n pass\n\n\nLocationMatcher = re.compile(\n r\"\\((?P<lon> *[0-9.-]+ *),(?P<lat> *[0-9.-]+ *)\\)\")\n\n\nclass Location(object):\n \"\"\"Represents a single location using lat/lon as a base coordinate system\n\n Initializations:\n ----------------\n\n # If you trust my programming skills and have any of the argument types listed\n below:\n >>> Location.load( args, srs=SRS )\n\n # If you have a latitude and longitude value\n >>> Location( latitude, longitude )\n\n # If you have an X and a Y coordinate in any arbitrary SRS\n >>> Location.fromXY( X, Y, srs=SRS)\n\n # If you have a string structured like such: \"(5.12243,52,11342)\"\n >>> Location.fromString( string, srs=SRS )\n\n # If you have a point geometry\n >>> Location.fromPointGeom( pointGeometryObject )\n\n \"\"\"\n _TYPE_KEY_ = \"Location\"\n _e = 1e-5\n\n def __init__(self, lon, lat):\n \"\"\"Initialize a Location Object by explicitly providing lat/lon coordinates\n\n Parameters\n ----------\n lon : numeric\n The location's longitude value\n\n lat : numeric\n The location's latitude value\n \"\"\"\n\n if not (isinstance(lat, float) or isinstance(lat, int)):\n raise GeoKitLocationError(\"lat input is not a float\")\n if not (isinstance(lon, float) or isinstance(lon, int)):\n raise GeoKitLocationError(\"lon input is not a float\")\n self.lat = lat\n self.lon = lon\n self._geom = None\n\n def __hash__(self): # I need this to make pandas indexing work when location objects are used as columns and indexes\n return hash((int(self.lon/self._e), int(self.lat/self._e)))\n\n def __eq__(self, o):\n if isinstance(o, Location):\n return abs(self.lon-o.lon) < self._e and abs(self.lat-o.lat) < self._e\n elif isinstance(o, ogr.Geometry):\n return self == Location.fromPointGeom(o)\n elif isinstance(o, tuple) and len(o) == 2:\n return abs(self.lon-o[0]) < self._e and abs(self.lat-o[1]) < self._e\n else:\n return False\n\n def __ne__(self, o):\n return not(self == o)\n\n def __str__(self):\n return \"(%.5f,%.5f)\" % (self.lon, self.lat)\n\n def __repr__(self):\n return self.__str__()\n\n @staticmethod\n def fromString(self, srs=None):\n \"\"\"Initialize a Location Object by providing a string\n\n * Must be formated like such: \"(5.12243,52,11342)\"\n * Whitespace is okay\n * Will only take the FIRST match it finds\n\n Parameters\n ----------\n s : string\n The string to parse\n\n srs : Anything acceptable to gk.srs.loadSRS; optional\n The srs for input coordinates\n\n Returns:\n --------\n Locations\n \"\"\"\n m = LocationMatcher.search(self)\n if m is None:\n raise GeoKitLocationError(\n \"string does not match Location specification\")\n lon, lat = m.groups()\n if srs is None:\n return Location(lon=float(lon), lat=float(lat))\n else:\n return Location.fromXY(x=float(lon), y=float(lat), srs=srs)\n\n @staticmethod\n def fromPointGeom(g):\n \"\"\"Initialize a Location Object by providing an OGR Point Object\n\n * Must have an SRS within the object\n\n Parameters\n ----------\n g : ogr.Geometry\n The string to parse\n\n Returns:\n --------\n Locations\n \"\"\"\n if g.GetGeometryName() != \"POINT\":\n raise GeoKitLocationError(\"Invalid geometry given\")\n if not g.GetSpatialReference().IsSame(SRS.EPSG4326):\n g = g.Clone()\n g.TransformTo(SRS.EPSG4326)\n\n return Location(lon=g.GetX(), lat=g.GetY())\n\n @staticmethod\n def fromXY(x, y, srs=3035):\n 
\"\"\"Initialize a Location Object by providing a n X and Y coordinate\n\n Parameters\n ----------\n x : numeric\n The location's x value\n\n y : numeric\n The location's y value\n\n srs : Anything acceptable to gk.srs.loadSRS\n The srs for input coordinates\n\n Returns:\n --------\n Locations\n \"\"\"\n g = GEOM.point(x, y, srs=srs)\n return Location.fromPointGeom(g)\n\n @property\n def latlon(self): return self.lat, self.lon\n\n def asGeom(self, srs='latlon'):\n \"\"\"Extract the Location as an ogr.Geometry object in an arbitrary SRS\n\n Parameters\n ----------\n srs : Anything acceptable to gk.srs.loadSRS\n The srs for the created object\n\n Returns:\n --------\n ogr.Geometry\n \"\"\"\n g = self.geom\n return GEOM.transform(g, toSRS=srs)\n\n def asXY(self, srs=3035):\n \"\"\"Extract the Location as an (X,Y) tuple in an arbitrary SRS\n\n Parameters\n ----------\n srs : Anything acceptable to gk.srs.loadSRS\n The srs for the created tuple\n\n Returns:\n --------\n tuple -> (X, Y)\n \"\"\"\n g = self.asGeom(srs=srs)\n return g.GetX(), g.GetY()\n\n @property\n def geom(self):\n if self._geom is None:\n self._geom = GEOM.point(self.lon, self.lat, srs=SRS.EPSG4326)\n return self._geom\n\n def makePickleable(self):\n \"\"\"Clears OGR objects from the Location's internals so that it becomes\n \"pickleable\"\n \"\"\"\n self._geom = None\n\n @staticmethod\n def load(loc, srs=4326):\n \"\"\"Tries to load a Location object in the correct manner by inferring \n from the input type\n\n * Ends up calling one of the Location.from??? initializers\n\n Parameters\n ----------\n loc : Location or ogr.Geometry or str or tuple\n The location data to interpret\n\n srs : Anything acceptable to gk.srs.loadSRS\n The srs for input coordinates\n * If not given, latitude and longitude coordinates are expected\n\n Returns:\n --------\n Locations\n \"\"\"\n if isinstance(loc, Location):\n output = loc\n elif isinstance(loc, ogr.Geometry):\n output = Location.fromPointGeom(loc)\n elif isinstance(loc, str):\n output = Location.fromString(loc)\n elif isinstance(loc, UTIL.Feature):\n output = Location.fromPointGeom(loc.geom)\n\n elif ((isinstance(loc, tuple) or isinstance(loc, list) or isinstance(loc, np.ndarray))) and len(loc) == 2:\n if srs is None or srs == 4326 or srs == 'latlon':\n output = Location(lon=loc[0], lat=loc[1])\n else:\n output = Location.fromXY(x=loc[0], y=loc[1], srs=srs)\n else: # Assume iteratable\n raise GeoKitLocationError(\n \"Could not understand location input:\", loc)\n\n return output\n\n\nclass LocationSet(object):\n \"\"\"Represents a collection of location using lat/lon as a base coordinate \n system\n\n Note:\n -----\n When initializing, an iterable of anything acceptable by Location.load is\n expected\n\n Initializations:\n ----------------\n >>> LocationSet( iterable )\n \"\"\"\n _TYPE_KEY_ = \"LocationSet\"\n\n def __init__(self, locations, srs=4326, _skip_check=False):\n \"\"\"Initialize a LocationSet Object\n\n * If only a single location is given, a set is still created\n\n Parameters\n ----------\n locations : iterable\n The locations to collect\n * Can be anything acceptable by Location.load()\n\n srs : Anything acceptable to gk.srs.loadSRS; optional\n The srs for input coordinates\n * if not given, lat/lon coordinates are expected\n\n \"\"\"\n if not _skip_check:\n if isinstance(locations, ogr.Geometry) or isinstance(locations, Location):\n self._locations = np.array(\n [Location.load(locations, srs=srs), ])\n elif isinstance(locations, LocationSet):\n self._locations = 
locations[:]\n elif isinstance(locations, pd.DataFrame):\n self._locations = LocationSet(locations[\"geom\"])[:]\n else:\n try: # Try loading all locations one at a time\n self._locations = np.array(\n [Location.load(l, srs=srs) for l in locations])\n except GeoKitLocationError as err:\n try:\n # Try loading the input as as single Location\n self._locations = np.array(\n [Location.load(locations, srs=srs), ])\n except GeoKitLocationError:\n raise err\n else:\n self._locations = locations\n\n self._lons = None\n self._lats = None\n self._bounds4326 = None\n self.count = len(self._locations)\n self.shape = (self.count,)\n\n def __len__(self): return self.count\n\n def __getitem__(self, i): return self._locations[i]\n\n def __repr__(self):\n out = \" , Lon , Lat\\n\"\n if self.count > 10:\n for i in range(5):\n out += \"%d, %-9.5f, %-9.5f\\n\" % (i, self[i].lon, self[i].lat)\n out += \"...\\n\"\n for i in range(5):\n out += \"%d, %-9.5f, %-9.5f\\n\" % (self.count -\n 6+i, self[-6+i].lon, self[-6+i].lat)\n else:\n for i in range(self.count):\n out += \"%d, %-9.5f, %-9.5f\\n\" % (i, self[i].lon, self[i].lat)\n\n return out\n\n def getBounds(self, srs=4326):\n \"\"\"Returns the bounding box of all locations in the set in an arbitrary\n SRS\n\n Parameters\n ----------\n srs : Anything acceptable to gk.srs.loadSRS; optional\n The srs for output coordinates\n * if not given, lat/lon coordinates are expected\n\n Returns:\n --------\n tuple -> (xMin, yMin, xMax, yMax) \n\n \"\"\"\n if srs == 4326 and not self._bounds4326 is None:\n return self._bounds4326\n elif srs == 4326:\n self._bounds4326 = (self.lons.min(), self.lats.min(),\n self.lons.max(), self.lats.max())\n return self._bounds4326\n else:\n geoms = GEOM.transform([l.geom for l in self._locations],\n fromSRS=SRS.EPSG4326, toSRS=srs)\n\n yVals = np.array([g.GetY() for g in geoms])\n xVals = np.array([g.GetX() for g in geoms])\n\n return (xVals.min(), yVals.min(), xVals.max(), yVals.max())\n\n @property\n def lats(self):\n if self._lats is None:\n self._lats = np.array([l.lat for l in self._locations])\n return self._lats\n\n @property\n def lons(self):\n if self._lons is None:\n self._lons = np.array([l.lon for l in self._locations])\n return self._lons\n\n def asString(self):\n \"\"\"Create a list of string representations of all locations in the set\n\n Returns:\n --------\n list -> [ '(lon1,lat1)', (lon2,lat2)', ... ]\n\n \"\"\"\n return [str(l) for l in self._locations]\n\n def makePickleable(self):\n \"\"\"Clears OGR objects from all individual Location's internals so that \n they become \"pickleable\"\n \"\"\"\n for l in self._locations:\n l.makePickleable()\n\n def asGeom(self, srs=4326):\n \"\"\"Create a list of ogr.Geometry representations of all locations in the \n set\n\n Parameters\n ----------\n srs : Anything acceptable to gk.srs.loadSRS; optional\n The srs for output coordinates\n * if not given, lat/lon coordinates are expected\n\n Returns:\n --------\n list -> [ Geometry1, Geometry1, ... 
]\n\n \"\"\"\n srs = SRS.loadSRS(srs)\n geoms4326 = [l.geom for l in self._locations]\n if SRS.EPSG4326.IsSame(srs):\n return geoms4326\n else:\n return GEOM.transform(geoms4326, fromSRS=SRS.EPSG4326, toSRS=srs)\n\n def asXY(self, srs=3035):\n \"\"\"Create an Nx2 array of x and y coordinates for all locations in the set\n\n Parameters\n ----------\n srs : Anything acceptable to gk.srs.loadSRS; optional\n The srs for output coordinates\n * if not given, EPSG3035 coordinates are assumed\n\n Returns:\n --------\n numpy.ndarray -> Nx2\n\n \"\"\"\n srs = SRS.loadSRS(srs)\n if SRS.EPSG4326.IsSame(srs):\n return np.column_stack([self.lons, self.lats])\n else:\n geoms4326 = [l.geom for l in self._locations]\n geomsSRS = GEOM.transform(\n geoms4326, fromSRS=SRS.EPSG4326, toSRS=srs)\n return np.array([(g.GetX(), g.GetY()) for g in geomsSRS])\n\n def asHash(self): return [hash(l) for l in self._locations]\n\n def splitKMeans(self, groups=2, **kwargs):\n \"\"\"Split the locations into groups according to KMEans clustering\n\n * An equal count of locations in each group is not guaranteed\n\n Parameters\n ----------\n groups : int\n The number of groups to split the locations into\n\n kwargs : \n All other keyword arguments are passed on to sklearn.cluster.KMeans\n\n Yields:\n --------\n LocationSet -> A location set of each clustered group\n\n \"\"\"\n from sklearn.cluster import KMeans\n\n obs = np.column_stack([self.lons, self.lats])\n\n km = KMeans(n_clusters=groups, **kwargs).fit(obs)\n for i in range(groups):\n sel = km.labels_ == i\n yield LocationSet(self[sel], _skip_check=True)\n\n def bisect(self, lon=True, lat=True, delta=0.005):\n \"\"\"Cluster the locations by finding a bisecting line in lat/lon \n coordinates in either (or both) directions\n\n * An equal count of locations in each group is not guaranteed\n * Will always either return 2 or 4 cluster groups\n\n Parameters\n ----------\n lon : bool\n Split locations in the longitude direction\n\n lat : bool\n Split locations in the latitude direction\n\n delta : float \n The search speed\n * Smaller values will take longer to converge on the true bisector\n\n Yields:\n --------\n LocationSet -> A location set of each clustered group\n \"\"\"\n\n # MAX_ATTEMPTS = 100\n\n lonDiv = np.median(self.lons)\n latDiv = np.median(self.lats)\n\n if lon and lat:\n yield LocationSet(self[(self.lons < lonDiv) & (self.lats < latDiv)], _skip_check=True)\n yield LocationSet(self[(self.lons >= lonDiv) & (self.lats < latDiv)], _skip_check=True)\n yield LocationSet(self[(self.lons < lonDiv) & (self.lats >= latDiv)], _skip_check=True)\n yield LocationSet(self[(self.lons >= lonDiv) & (self.lats >= latDiv)], _skip_check=True)\n\n elif lon and not lat:\n yield LocationSet(self[(self.lons < lonDiv)], _skip_check=True)\n yield LocationSet(self[(self.lons >= lonDiv)], _skip_check=True)\n\n elif lat and not lon:\n yield LocationSet(self[(self.lats < latDiv)], _skip_check=True)\n yield LocationSet(self[(self.lats >= latDiv)], _skip_check=True)\n"
] | [
[
"numpy.median",
"numpy.array",
"numpy.column_stack",
"sklearn.cluster.KMeans"
]
] |
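The `apis` column for the record above lists `numpy.column_stack`, `numpy.median`, `numpy.array`, and `sklearn.cluster.KMeans`, used by `splitKMeans` and `bisect` to group locations by their lon/lat coordinates. A minimal sketch of that clustering step, with made-up coordinates:

```python
import numpy as np
from sklearn.cluster import KMeans

lons = np.array([5.1, 5.2, 9.8, 9.9])
lats = np.array([52.0, 52.1, 48.0, 48.1])

obs = np.column_stack([lons, lats])               # shape (4, 2), one row per location
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(obs)
print(km.labels_)                                 # cluster index per location

# The bisect() variant instead splits on the median of each coordinate.
print(np.median(lons), np.median(lats))
```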
Leo-xxx/model-optimization | [
"0e0e618f72c510b4a45fd7ada8eae2c060ac58b3"
] | [
"tensorflow_model_optimization/python/core/quantization/keras/quant_ops.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python support for quantization operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.training import moving_averages\n\n\ndef FixedQuantize(inputs, init_min=-6.0, init_max=6.0, scope=None):\n \"\"\"Adds a fake quantize layer with fixed quantization interval.\n\n Args:\n inputs: a tensor containing values to be quantized.\n init_min: the lower end of quantization interval.\n init_max: the upper end of quantization interval.\n scope: Optional scope for name_scope.\n Returns:\n a tensor containing quantized values.\n \"\"\"\n with ops.name_scope(scope, 'FixedQuantize', values=[inputs]):\n return array_ops.fake_quant_with_min_max_args(\n inputs, min=init_min, max=init_max)\n\n\ndef _ModelVariable(name,\n shape=None,\n initializer=None,\n collections=None,\n trainable=None):\n collections = list(collections or [])\n collections += [ops.GraphKeys.GLOBAL_VARIABLES]\n return variable_scope.get_variable(\n name,\n shape=shape,\n initializer=initializer,\n collections=collections,\n trainable=trainable)\n\n\ndef LastValueQuantize(inputs,\n per_channel=False,\n init_min=-6.0,\n init_max=6.0,\n vars_collection=None,\n name_prefix='LastValueQuant',\n reuse=None,\n is_training=True,\n num_bits=8,\n narrow_range=False,\n symmetric=False):\n \"\"\"Adds a layer that collects quantization ranges as last input ranges.\n\n LastValueQuantize creates variables called 'min' and 'max', representing the\n interval used for quantization and clamping.\n\n Args:\n inputs: a tensor containing values to be quantized.\n per_channel: (Optional) a boolean specifying whether to use different\n quantization ranges per output channel.\n init_min: a float scalar, the initial value for variable min.\n init_max: a float scalar, the initial value for variable max.\n vars_collection: (Optional) collection where to store variables for\n quantization interval ends.\n name_prefix: name_prefix for created nodes.\n reuse: whether or not the layer and its variables should be reused. 
To be\n able to reuse the layer scope must be given.\n is_training: Whether the op is applied to a training or eval graph.\n num_bits: Number of bits to use for quantization, must be between 2 and 8.\n narrow_range: Whether to use the narrow quantization range\n [1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].\n symmetric: If true, use symmetric quantization limits instead of training\n the minimum and maximum of each quantization range separately.\n Returns:\n a tensor containing quantized values.\n \"\"\"\n with variable_scope.variable_scope(\n None, default_name=name_prefix, values=[inputs], reuse=reuse) as scope:\n scope.set_partitioner(None)\n input_shape = inputs.get_shape()\n input_dim = len(input_shape)\n if per_channel:\n # Only support quantizing 1-, 2- and 4-dimensional tensors.\n assert input_dim in [1, 2, 4], ('Expected 1D, 2D or 4D input, was: %s in '\n ' scope: %s' % (input_shape, name_prefix))\n min_max_shape = [input_shape[-1]]\n else:\n min_max_shape = []\n\n vars_collections = [vars_collection] if vars_collection else []\n min_var = _ModelVariable(\n 'min',\n shape=min_max_shape,\n initializer=init_ops.constant_initializer(init_min),\n collections=vars_collections,\n trainable=False)\n max_var = _ModelVariable(\n 'max',\n shape=min_max_shape,\n initializer=init_ops.constant_initializer(init_max),\n collections=vars_collections,\n trainable=False)\n if not is_training:\n return _FakeQuantWithMinMaxVars(\n inputs,\n min_var,\n max_var,\n per_channel=per_channel,\n num_bits=num_bits,\n narrow_range=narrow_range)\n\n if per_channel:\n if input_dim == 2:\n reduce_dims = [0]\n elif input_dim == 4:\n reduce_dims = [0, 1, 2]\n\n if per_channel:\n if input_dim >= 2:\n batch_min = math_ops.reduce_min(\n inputs, reduction_indices=reduce_dims, name='BatchMin')\n else:\n batch_min = inputs\n else:\n batch_min = math_ops.reduce_min(inputs, name='BatchMin')\n\n if per_channel:\n if input_dim >= 2:\n batch_max = math_ops.reduce_max(\n inputs, reduction_indices=reduce_dims, name='BatchMax')\n else:\n batch_max = inputs\n else:\n batch_max = math_ops.reduce_max(inputs, name='BatchMax')\n\n if symmetric:\n if narrow_range:\n min_max_ratio = -1\n else:\n # In two's complement notation, the negative range is slightly larger\n # than the positive range.\n min_max_ratio = -((1 << num_bits) - 2) / (1 << num_bits)\n\n # TFLite requires that 0.0 if always in the [min; max] range. 
Because\n # batch_min <= batch_max, it follows that range_min <= 0 <= range_max.\n range_min = math_ops.minimum(batch_min, batch_max / min_max_ratio)\n range_max = math_ops.maximum(batch_max, batch_min * min_max_ratio)\n else:\n # TFLite requires that 0.0 if always in the [min; max] range.\n range_min = math_ops.minimum(batch_min, 0.0)\n range_max = math_ops.maximum(batch_max, 0.0)\n\n assign_min = state_ops.assign(min_var, range_min, name='AssignMinLast')\n assign_max = state_ops.assign(max_var, range_max, name='AssignMaxLast')\n\n return _FakeQuantWithMinMaxVars(\n inputs,\n assign_min,\n assign_max,\n per_channel=per_channel,\n num_bits=num_bits,\n narrow_range=narrow_range)\n\n\ndef MovingAvgQuantize(inputs,\n per_channel=False,\n init_min=-6.0,\n init_max=6.0,\n ema_decay=0.999,\n vars_collection=ops.GraphKeys.MOVING_AVERAGE_VARIABLES,\n name_prefix='MovingAvgQuantize',\n reuse=None,\n is_training=True,\n num_bits=8,\n narrow_range=False,\n symmetric=False):\n \"\"\"Adds a layer that collects quantization ranges as EMAs of input ranges.\n\n MovingAvgQuantize creates variables called 'min' and 'max', representing the\n interval used for quantization and clamping.\n\n Args:\n inputs: a tensor containing values to be quantized.\n per_channel: (default False) a boolean specifying whether to use different\n quantization ranges per output channel.\n init_min: a float scalar, the initial value for variable min.\n init_max: a float scalar, the initial value for variable max.\n ema_decay: EMA decay parameter.\n vars_collection: (Optional) collection where to store variables for\n quantization interval ends.\n name_prefix: name_prefix for created nodes.\n reuse: whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n is_training: Whether the op is applied to a training or eval graph.\n num_bits: Number of bits to use for quantization, must be between 2 and 8.\n narrow_range: Whether to use the narrow quantization range\n [1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].\n symmetric: If true, use symmetric quantization limits instead of training\n the minimum and maximum of each quantization range separately.\n Returns:\n a tensor containing quantized values.\n \"\"\"\n with variable_scope.variable_scope(\n None, default_name=name_prefix, values=[inputs], reuse=reuse) as scope:\n scope.set_partitioner(None)\n input_shape = inputs.get_shape()\n input_dim = len(input_shape)\n if per_channel:\n # Only support quantizing 1-, 2- and 4-dimensional tensors.\n assert input_dim in [1, 2, 4], ('Expected 1D, 2D or 4D input, was: %s in '\n ' scope: %s' % (input_shape, name_prefix))\n min_max_shape = [input_shape[-1]]\n else:\n min_max_shape = []\n\n vars_collections = [vars_collection] if vars_collection else []\n min_var = _ModelVariable(\n 'min',\n shape=min_max_shape,\n initializer=init_ops.constant_initializer(init_min),\n collections=vars_collections,\n trainable=False)\n max_var = _ModelVariable(\n 'max',\n shape=min_max_shape,\n initializer=init_ops.constant_initializer(init_max),\n collections=vars_collections,\n trainable=False)\n if not is_training:\n return _FakeQuantWithMinMaxVars(\n inputs,\n min_var,\n max_var,\n per_channel=per_channel,\n num_bits=num_bits,\n narrow_range=narrow_range)\n if per_channel:\n if input_dim == 2:\n reduce_dims = [0]\n elif input_dim == 4:\n reduce_dims = [0, 1, 2]\n\n if per_channel:\n if input_dim >= 2:\n batch_min = math_ops.reduce_min(\n inputs, reduction_indices=reduce_dims, name='BatchMin')\n 
else:\n batch_min = inputs\n else:\n batch_min = math_ops.reduce_min(inputs, name='BatchMin')\n\n if per_channel:\n if input_dim >= 2:\n batch_max = math_ops.reduce_max(\n inputs, reduction_indices=reduce_dims, name='BatchMax')\n else:\n batch_max = inputs\n else:\n batch_max = math_ops.reduce_max(inputs, name='BatchMax')\n\n if symmetric:\n if narrow_range:\n min_max_ratio = -1\n else:\n # In two's complement notation, the negative range is slightly larger\n # than the positive range.\n min_max_ratio = -((1 << num_bits) - 2) / (1 << num_bits)\n\n # TFLite requires that 0.0 if always in the [min; max] range. Because\n # batch_min <= batch_max, it follows that range_min <= 0 <= range_max.\n range_min = math_ops.minimum(batch_min, batch_max / min_max_ratio)\n range_max = math_ops.maximum(batch_max, batch_min * min_max_ratio)\n else:\n # TFLite requires that 0.0 if always in the [min; max] range.\n range_min = math_ops.minimum(batch_min, 0.0)\n range_max = math_ops.maximum(batch_max, 0.0)\n\n assign_min = moving_averages.assign_moving_average(\n min_var, range_min, ema_decay, name='AssignMinEma')\n assign_max = moving_averages.assign_moving_average(\n max_var, range_max, ema_decay, name='AssignMaxEma')\n\n return _FakeQuantWithMinMaxVars(\n inputs,\n assign_min,\n assign_max,\n per_channel=per_channel,\n num_bits=num_bits,\n narrow_range=narrow_range)\n\n\ndef _FakeQuantWithMinMaxVars(inputs, min_var, max_var, per_channel, num_bits,\n narrow_range):\n \"\"\"Adds a fake quantization operation.\n\n Depending on value of per_channel, this operation may do global quantization\n or per channel quantization. min_var and max_var should have corresponding\n shapes: [1] when per_channel == False and [d] when per_channel == True.\n\n Args:\n inputs: a tensor containing values to be quantized.\n min_var: a variable containing quantization range lower end(s).\n max_var: a variable containing quantization range upper end(s).\n per_channel: a boolean specifying whether to use per-channel quantization.\n num_bits: Number of bits to use for quantization, must be between 2 and 8.\n narrow_range: Whether to use the narrow quantization range\n [1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].\n Returns:\n a tensor containing quantized values.\n \"\"\"\n\n if per_channel:\n assert len(min_var.get_shape()) == 1\n assert len(max_var.get_shape()) == 1\n return array_ops.fake_quant_with_min_max_vars_per_channel(\n inputs, min_var, max_var, num_bits=num_bits, narrow_range=narrow_range)\n else:\n assert min_var.get_shape() == [] # pylint: disable=g-explicit-bool-comparison\n assert max_var.get_shape() == [] # pylint: disable=g-explicit-bool-comparison\n return array_ops.fake_quant_with_min_max_vars(\n inputs, min_var, max_var, num_bits=num_bits, narrow_range=narrow_range)\n"
] | [
[
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.array_ops.fake_quant_with_min_max_vars",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.ops.array_ops.fake_quant_with_min_max_args",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.reduce_min",
"tensorflow.python.ops.array_ops.fake_quant_with_min_max_vars_per_channel",
"tensorflow.python.training.moving_averages.assign_moving_average"
]
] |
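The `apis` column for the record above lists TensorFlow-internal ops (`math_ops.minimum`/`maximum`, fake-quant variables, etc.). Rather than call those private APIs, the sketch below re-states, in plain NumPy, the symmetric min/max range logic that appears in the file itself; it is an illustration of the formula, not the library's API.

```python
import numpy as np

def symmetric_range(batch_min, batch_max, num_bits=8, narrow_range=False):
    """NumPy re-statement of the symmetric quantization-range logic above."""
    if narrow_range:
        min_max_ratio = -1.0
    else:
        # Two's complement: the negative side is slightly larger than the positive.
        min_max_ratio = -((1 << num_bits) - 2) / (1 << num_bits)
    range_min = np.minimum(batch_min, batch_max / min_max_ratio)
    range_max = np.maximum(batch_max, batch_min * min_max_ratio)
    return range_min, range_max

# The resulting interval always brackets 0.0, as the comments in the file require.
print(symmetric_range(-3.0, 6.0))
```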
gzli929/ContactTracing | [
"7be34c9e2ebbd305bef9d4c2ab91beb7b23bed91"
] | [
"scripts/SegDegree/combined_seg_degree.py"
] | [
"# %%\nimport numpy as np\nfrom ctrace import PROJECT_ROOT\nfrom ctrace.simulation import InfectionState\nfrom ctrace.exec.param import GraphParam, SIRParam, FileParam, ParamBase, LambdaParam\nfrom ctrace.exec.parallel import CsvWorker, MultiExecutor, CsvSchemaWorker\nfrom ctrace.recommender import SegDegree\nimport json\nimport shutil\nimport random\nimport copy\nimport pickle\nfrom pathlib import PurePath\n\n# Example Usage\nin_schema = [\n ('graph', ParamBase), # nx.Graph\n ('agent', ParamBase), # lambda\n ('agent_params', dict),\n ('from_cache', str), # PartitionSEIR\n ('budget', int),\n ('policy', str),\n ('transmission_rate', float),\n ('transmission_known', bool),\n ('compliance_rate', float),\n ('compliance_known', bool),\n ('discovery_rate', float),\n ('snitch_rate', float),\n ('trial_id', int),\n]\n# Must include \"id\"\nmain_out_schema = [\"id\", \"peak\", \"total\"]\naux_out_schema = [\"id\", \"sir_history\"]\n\nmain_handler = CsvSchemaWorker(\n name=\"csv_main\", schema=main_out_schema, relpath=PurePath('main.csv'))\naux_handler = CsvWorker(\n name=\"csv_inf_history\", relpath=PurePath('inf_history.csv'))\n\n\ndef runner(\n queues,\n id,\n path,\n # User Specific attributes\n graph,\n agent,\n agent_params,\n from_cache,\n budget,\n policy,\n transmission_rate,\n transmission_known,\n compliance_rate,\n compliance_known,\n discovery_rate,\n snitch_rate,\n trial_id,\n # Allow additonal args to be passed (to be ignored)\n **args,\n):\n # Execute logic here ...\n\n with open(PROJECT_ROOT / \"data\" / \"SIR_Cache\" / from_cache, 'r') as infile:\n j = json.load(infile)\n\n (S, I1, I2, R) = (j[\"S\"], j[\"I1\"], j[\"I2\"], j[\"R\"])\n # An array of previous infection accounts\n infections = j[\"infections\"]\n\n raw_history = []\n state = InfectionState(graph, (S, I1, I2, R), budget, policy, transmission_rate,\n transmission_known, compliance_rate, compliance_known, snitch_rate)\n\n while len(state.SIR.I1) + len(state.SIR.I2) != 0:\n to_quarantine = agent(state, **agent_params)\n # Record full history\n if trial_id == 0:\n raw_history.append({\n 'state': {\n 'S': list(state.SIR.S),\n 'I1': list(state.SIR.I1),\n 'I2': list(state.SIR.I2),\n 'R': list(state.SIR.R),\n },\n 'action': list(to_quarantine),\n })\n state.step(to_quarantine)\n infections.append(len(state.SIR.I2))\n\n # Output data to workers and folders\n\n main_out = {\n \"id\": id,\n \"peak\": max(infections),\n # Total infections (number of people recovered)\n \"total\": len(state.SIR.R),\n }\n aux_out = [id, *infections]\n\n queues[\"csv_main\"].put(main_out)\n queues[\"csv_inf_history\"].put(aux_out)\n\n # if trial_id == 0: # Only record 1 entry\n # path = path / \"data\" / str(id)\n # path.mkdir(parents=True, exist_ok=True)\n # with open(path / \"sir_history.json\", \"w\") as f:\n # json.dump(raw_history, f)\n\n\ndef runner_star(x):\n return runner(**x)\n\n\ndef post_execution(self):\n compress = False\n delete = False\n if (self.output_directory / \"data\").exists() and compress:\n print(\"Compressing files ...\")\n shutil.make_archive(\n str(self.output_directory / \"data\"), 'zip', base_dir=\"data\")\n if delete:\n shutil.rmtree(self.output_directory / \"data\")\n\n\nrun = MultiExecutor(\n runner_star, \n in_schema,\n post_execution=post_execution, \n seed=True, \n num_process=80,\n name_prefix='seg_extra'\n)\n\n# Add compact tasks (expand using cartesian)\nmontgomery = GraphParam('montgomery_extra')\ncville = GraphParam('cville_extra')\n\n# Schema\nrun.add_cartesian({\n \"graph\": [montgomery],\n \"budget\": 
[750],\n \"agent\": [LambdaParam(SegDegree)],\n \"agent_params\": [{'k1': round(p, 3)} for p in np.arange(0, 1.01, 0.01)],\n # \"budget\": [i for i in range(400, 1260, 50)],\n \"policy\": [\"A\"],\n \"transmission_rate\": [0.05],\n \"transmission_known\": [False],\n \"compliance_rate\": [-1.0],\n \"compliance_known\": [False],\n \"discovery_rate\": [1.0],\n \"snitch_rate\": [1.0],\n \"from_cache\": [\"mont.json\"],\n \"trial_id\": [i for i in range(5)]\n})\nrun.add_cartesian({\n \"graph\": [cville],\n \"budget\": [1350],\n # \"budget\": [i for i in range(720, 2270, 20)],\n \"agent\": [LambdaParam(SegDegree)],\n \"agent_params\": [{'k1': round(p, 3)} for p in np.arange(0, 1.01, 0.01)],\n \"policy\": [\"A\"],\n \"transmission_rate\": [0.05],\n \"transmission_known\": [False],\n \"compliance_rate\": [-1.0],\n \"compliance_known\": [False],\n \"discovery_rate\": [1.0],\n \"snitch_rate\": [1.0],\n \"from_cache\": [\"albe.json\"],\n \"trial_id\": [i for i in range(5)],\n})\n\nrun.add_cartesian({\n \"graph\": [montgomery],\n \"budget\": [750],\n \"agent\": [LambdaParam(SegDegree)],\n \"agent_params\": [{'k2': round(p, 3)} for p in np.arange(0, 1.01, 0.01)],\n # \"budget\": [i for i in range(400, 1260, 50)],\n \"policy\": [\"A\"],\n \"transmission_rate\": [0.05],\n \"transmission_known\": [False],\n \"compliance_rate\": [-1.0],\n \"compliance_known\": [False],\n \"discovery_rate\": [1.0],\n \"snitch_rate\": [1.0],\n \"from_cache\": [\"mont_star.json\"],\n \"trial_id\": [i for i in range(5)]\n})\nrun.add_cartesian({\n \"graph\": [cville],\n \"budget\": [1350],\n # \"budget\": [i for i in range(720, 2270, 20)],\n \"agent\": [LambdaParam(SegDegree)],\n \"agent_params\": [{'k2': round(p, 3)} for p in np.arange(0, 1.01, 0.01)],\n \"policy\": [\"A\"],\n \"transmission_rate\": [0.05],\n \"transmission_known\": [False],\n \"compliance_rate\": [-1.0],\n \"compliance_known\": [False],\n \"discovery_rate\": [1.0],\n \"snitch_rate\": [1.0],\n \"from_cache\": [\"albe_star.json\"],\n \"trial_id\": [i for i in range(5)],\n})\n\n# main_out_schema = [\"mean_objective_value\", \"max_objective_value\", \"std_objective_value\"]\n\nrun.attach(main_handler)\nrun.attach(aux_handler)\n\n# %%\nrun.exec()\n"
] | [
[
"numpy.arange"
]
] |
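The `apis` column for the record above lists `numpy.arange`, used to sweep the `k1`/`k2` agent parameters over a 0-to-1 grid. A minimal sketch of that grid construction:

```python
import numpy as np

# 101 values from 0.0 to 1.0 inclusive; stop=1.01 so that 1.0 itself is kept,
# and round() trims floating-point noise, as in the parameter sweep above.
grid = [round(p, 3) for p in np.arange(0, 1.01, 0.01)]
print(grid[0], grid[1], grid[-1])  # 0.0 0.01 1.0
```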
sudipg4112001/BenchmarkTransferLearning | [
"4e4ed3db9fc0554bc995af7cc04aeae89e90c381"
] | [
"models.py"
] | [
"import os\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics.ranking import roc_auc_score\n\nimport torch\nimport torch.nn as nn\nimport torchvision.models as models\n\nimport resnet_wider\nimport densenet\n\n\n\ndef ClassificationNet(arch_name, num_class, conv=None, weight=None, activation=None):\n if weight is None:\n weight = \"none\"\n\n if conv is None:\n try:\n model = resnet_wider.__dict__[arch_name](sobel=False)\n except:\n model = models.__dict__[arch_name](pretrained=False)\n else:\n if arch_name.lower().startswith(\"resnet\"):\n model = resnet_wider.__dict__[arch_name + \"_layerwise\"](conv, sobel=False)\n elif arch_name.lower().startswith(\"densenet\"):\n model = densenet.__dict__[arch_name + \"_layerwise\"](conv)\n\n if arch_name.lower().startswith(\"resnet\"):\n kernelCount = model.fc.in_features\n if activation is None:\n model.fc = nn.Linear(kernelCount, num_class)\n elif activation == \"Sigmoid\":\n model.fc = nn.Sequential(nn.Linear(kernelCount, num_class), nn.Sigmoid())\n\n\n # init the fc layer\n if activation is None:\n model.fc.weight.data.normal_(mean=0.0, std=0.01)\n model.fc.bias.data.zero_()\n else:\n model.fc[0].weight.data.normal_(mean=0.0, std=0.01)\n model.fc[0].bias.data.zero_()\n elif arch_name.lower().startswith(\"densenet\"):\n kernelCount = model.classifier.in_features\n if activation is None:\n model.classifier = nn.Linear(kernelCount, num_class)\n elif activation == \"Sigmoid\":\n model.classifier = nn.Sequential(nn.Linear(kernelCount, num_class), nn.Sigmoid())\n\n # init the classifier layer\n if activation is None:\n model.classifier.weight.data.normal_(mean=0.0, std=0.01)\n model.classifier.bias.data.zero_()\n else:\n model.classifier[0].weight.data.normal_(mean=0.0, std=0.01)\n model.classifier[0].bias.data.zero_()\n\n def _weight_loading_check(_arch_name, _activation, _msg):\n if len(_msg.missing_keys) != 0:\n if _arch_name.lower().startswith(\"resnet\"):\n if _activation is None:\n assert set(_msg.missing_keys) == {\"fc.weight\", \"fc.bias\"}\n else:\n assert set(_msg.missing_keys) == {\"fc.0.weight\", \"fc.0.bias\"}\n elif _arch_name.lower().startswith(\"densenet\"):\n if _activation is None:\n assert set(_msg.missing_keys) == {\"classifier.weight\", \"classifier.bias\"}\n else:\n assert set(_msg.missing_keys) == {\"classifier.0.weight\", \"classifier.0.bias\"}\n\n if weight.lower() == \"imagenet\":\n pretrained_model = models.__dict__[arch_name](pretrained=True)\n state_dict = pretrained_model.state_dict()\n\n # delete fc layer\n for k in list(state_dict.keys()):\n if k.startswith('fc') or k.startswith('classifier'):\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=False)\n _weight_loading_check(arch_name, activation, msg)\n print(\"=> loaded supervised ImageNet pre-trained model\")\n elif os.path.isfile(weight):\n checkpoint = torch.load(weight, map_location=\"cpu\")\n if \"state_dict\" in checkpoint:\n state_dict = checkpoint[\"state_dict\"]\n else:\n state_dict = checkpoint\n\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict.items()}\n state_dict = {k.replace(\"module.encoder_q.\", \"\"): v for k, v in state_dict.items()}\n\n for k in list(state_dict.keys()):\n if k.startswith('fc') or k.startswith('classifier') or k.startswith('projection_head') or k.startswith('prototypes'):\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=False)\n _weight_loading_check(arch_name, activation, msg)\n print(\"=> loaded pre-trained model 
'{}'\".format(weight))\n print(\"missing keys:\", msg.missing_keys)\n\n\n # reinitialize fc layer again\n if arch_name.lower().startswith(\"resnet\"):\n if activation is None:\n model.fc.weight.data.normal_(mean=0.0, std=0.01)\n model.fc.bias.data.zero_()\n else:\n model.fc[0].weight.data.normal_(mean=0.0, std=0.01)\n model.fc[0].bias.data.zero_()\n elif arch_name.lower().startswith(\"densenet\"):\n if activation is None:\n model.classifier.weight.data.normal_(mean=0.0, std=0.01)\n model.classifier.bias.data.zero_()\n else:\n model.classifier[0].weight.data.normal_(mean=0.0, std=0.01)\n\n return model\n\n\ndef build_classification_model(args):\n if args.init.lower() ==\"random\" or args.init.lower() ==\"imagenet\":\n model = ClassificationNet(args.model_name.lower(), args.num_class, weight=args.init,\n activation=args.activate)\n\n else:\n model = ClassificationNet(args.model_name.lower(), args.num_class, weight=args.proxy_dir,\n activation=args.activate)\n\n\n return model\n\ndef save_checkpoint(state,filename='model'):\n\n torch.save( state,filename + '.pth.tar')\n\n\n\n\n"
] | [
[
"torch.nn.Linear",
"torch.save",
"torch.load",
"torch.nn.Sigmoid"
]
] |
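The `apis` column for the record above lists `torch.nn.Linear`, `torch.nn.Sigmoid`, `torch.save`, and `torch.load`, the pieces used to swap in a classifier head and checkpoint it. A minimal sketch of that pattern; the class count, feature width, and file name are hypothetical.

```python
import torch
import torch.nn as nn

# Build and re-initialize a num_class-way sigmoid head, as ClassificationNet does.
num_class, in_features = 14, 512
head = nn.Sequential(nn.Linear(in_features, num_class), nn.Sigmoid())
head[0].weight.data.normal_(mean=0.0, std=0.01)
head[0].bias.data.zero_()

# Checkpoint round-trip.
torch.save({"state_dict": head.state_dict()}, "head.pth.tar")
checkpoint = torch.load("head.pth.tar", map_location="cpu")
head.load_state_dict(checkpoint["state_dict"])
```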
harsh2912/attendance-system | [
"89f3eff43a109861a12fb8e58a39495a628fef52"
] | [
"deploy/detector.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom scipy import misc\nimport sys\nimport os\nimport argparse\n#import tensorflow as tf\nimport numpy as np\nimport mxnet as mx\nimport random\nimport cv2\nimport sklearn\nfrom sklearn.decomposition import PCA\nfrom time import sleep\nfrom easydict import EasyDict as edict\n# from mtcnn_detector import MtcnnDetector\nsys.path.append(os.path.join(os.path.dirname('__file__'), '..', 'src', 'common'))\nimport face_image\nimport face_preprocess\nsys.path.append('..')\nsys.path.append(os.path.join(os.path.dirname('__file__'), '..', 'RetinaFace'))\nfrom RetinaFace.retinaface import RetinaFace\n\n\nclass Detector:\n def __init__(self,model_path):\n self.model = RetinaFace(model_path,0, ctx_id=0)\n \n def get_face_patch(self,img):\n bboxes,points = self.model.detect(img, 0.7,scales=[1.0],do_flip=False)\n if isinstance(img,str):\n img=cv2.imread(img)\n faces_=[]\n key_points_=[]\n bboxes_=[]\n for face,point in zip(bboxes,points):\n #import pdb; pdb.set_trace()\n bbox = face[0:4].astype(np.int)\n to_add_face=img[bbox[1]:bbox[3],bbox[0]:bbox[2]]\n to_add_face=cv2.cvtColor(to_add_face, cv2.COLOR_BGR2RGB)\n faces_.append(to_add_face)\n key_points_.append((points.astype(np.int),face[4]))\n bboxes_.append(bbox)\n #print(to_add_face.shape)\n\n return faces_,np.array(key_points_),np.array(bboxes_)\n "
] | [
[
"numpy.array"
]
] |
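Once RetinaFace has produced boxes, the detector row above reduces to plain NumPy/OpenCV slicing, which is why `numpy.array` is the only API recorded for it. Below is a stand-alone sketch of that cropping step with a synthetic frame and hand-written boxes in place of real detections; note that `np.int`, used in the row, is deprecated in recent NumPy releases, and plain `int` is the drop-in replacement.

```python
# Sketch only: the frame and boxes are synthetic stand-ins for RetinaFace output.
import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)             # placeholder BGR frame
bboxes = np.array([[100.0, 120.0, 180.0, 220.0, 0.99],
                   [300.0,  50.0, 380.0, 160.0, 0.95]])   # x1, y1, x2, y2, score

faces, kept = [], []
for det in bboxes:
    x1, y1, x2, y2 = det[:4].astype(int)                  # int rather than deprecated np.int
    patch = img[y1:y2, x1:x2]                             # rows index y, columns index x
    faces.append(cv2.cvtColor(patch, cv2.COLOR_BGR2RGB))  # BGR -> RGB, as in the row
    kept.append([x1, y1, x2, y2])

kept = np.array(kept)
print(len(faces), kept.shape)                             # 2 (2, 4)
```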
aasensio/DeepLearning | [
"71838115ce93e0ca96c8314cff3f07de1d64c235"
] | [
"multiframe_star/training/trainResnet.py"
] | [
"import numpy as np\nimport h5py\nimport numpy as np\nimport platform\nimport os\nimport json\nimport sys\nimport argparse\nimport scipy.ndimage as nd\nimport pickle\nfrom contextlib import redirect_stdout\nfrom ipdb import set_trace as stop\n\nos.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\n\nif (platform.node() != 'viga'):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nfrom keras.layers import Input, Convolution2D, add, Activation, BatchNormalization\nfrom keras.callbacks import ModelCheckpoint, Callback\nfrom keras.models import Model, model_from_json\nfrom keras.optimizers import Adam\nfrom keras.utils import plot_model\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as ktf\n\nclass LossHistory(Callback):\n def __init__(self, root, losses):\n self.root = root \n self.losses = losses\n\n def on_epoch_end(self, batch, logs={}):\n self.losses.append(logs)\n with open(\"{0}_loss.json\".format(self.root), 'w') as f:\n json.dump(self.losses, f)\n\n def finalize(self):\n pass\n\nclass resnet(object):\n\n def __init__(self, root, noise, option):\n\n# Only allocate needed memory\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n session = tf.Session(config=config)\n ktf.set_session(session)\n\n\n self.root = root\n self.option = option\n\n self.n_filters = 64\n self.kernel_size = 3 \n self.batch_size = 32\n self.n_conv_layers = 10 \n\n self.input_file_images_training = \"/net/duna/scratch1/aasensio/deepLearning/stars/database/database.h5\"\n self.input_file_images_validation = \"/net/duna/scratch1/aasensio/deepLearning/stars/database/database_validation.h5\"\n \n f = h5py.File(self.input_file_images_training, 'r')\n self.n_training_orig, self.nx, self.ny, self.n_images = f.get(\"image\").shape \n f.close()\n\n f = h5py.File(self.input_file_images_validation, 'r')\n self.n_validation_orig, self.nx, self.ny, self.n_images = f.get(\"image\").shape \n f.close()\n\n self.batchs_per_epoch_training = int(self.n_training_orig / self.batch_size)\n self.batchs_per_epoch_validation = int(self.n_validation_orig / self.batch_size)\n\n self.n_training = self.batchs_per_epoch_training * self.batch_size\n self.n_validation = self.batchs_per_epoch_validation * self.batch_size \n\n print(\"Original training set size: {0}\".format(self.n_training_orig))\n print(\" - Final training set size: {0}\".format(self.n_training))\n print(\" - Batch size: {0}\".format(self.batch_size))\n print(\" - Batches per epoch: {0}\".format(self.batchs_per_epoch_training))\n\n print(\"Original validation set size: {0}\".format(self.n_validation_orig))\n print(\" - Final validation set size: {0}\".format(self.n_validation))\n print(\" - Batch size: {0}\".format(self.batch_size))\n print(\" - Batches per epoch: {0}\".format(self.batchs_per_epoch_validation))\n\n def training_generator(self):\n f_images = h5py.File(self.input_file_images_training, 'r')\n images = f_images.get(\"image\")\n\n while 1: \n for i in range(self.batchs_per_epoch_training):\n\n input_train = images[i*self.batch_size:(i+1)*self.batch_size,:,:,0:1].astype('float32')\n output_train = images[i*self.batch_size:(i+1)*self.batch_size,:,:,1:2].astype('float32')\n\n yield input_train, output_train\n\n f_images.close()\n\n def validation_generator(self):\n f_images = h5py.File(self.input_file_images_validation, 'r')\n images = f_images.get(\"image\")\n \n while 1: \n for i in range(self.batchs_per_epoch_validation):\n\n input_validation = images[i*self.batch_size:(i+1)*self.batch_size,:,:,0:1].astype('float32')\n output_validation = 
images[i*self.batch_size:(i+1)*self.batch_size,:,:,1:2].astype('float32')\n\n yield input_validation, output_validation\n\n f_images.close()\n\n def residual(self, inputs):\n x = Convolution2D(self.n_filters, (3, 3), padding='same', kernel_initializer='he_normal')(inputs)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Convolution2D(self.n_filters, (3, 3), padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = add([x, inputs])\n\n return x\n \n def defineNetwork(self):\n print(\"Setting up network...\")\n\n inputs = Input(shape=(self.nx, self.ny, 1))\n conv = Convolution2D(self.n_filters, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(inputs)\n\n x = self.residual(conv)\n for i in range(self.n_conv_layers):\n x = self.residual(x)\n\n x = Convolution2D(self.n_filters, (3, 3), padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = add([x, conv])\n\n final = Convolution2D(1, (1, 1), activation='linear', padding='same', kernel_initializer='he_normal')(x)\n\n self.model = Model(inputs=inputs, outputs=final)\n \n json_string = self.model.to_json()\n f = open('{0}_model.json'.format(self.root), 'w')\n f.write(json_string)\n f.close()\n\n with open('{0}_summary.txt'.format(self.root), 'w') as f:\n with redirect_stdout(f):\n self.model.summary()\n\n plot_model(self.model, to_file='{0}_model.png'.format(self.root), show_shapes=True)\n\n def compileNetwork(self): \n self.model.compile(loss='mse', optimizer=Adam(lr=1e-4))\n \n def readNetwork(self):\n print(\"Reading previous network...\")\n \n f = open('{0}_model.json'.format(self.root), 'r')\n json_string = f.read()\n f.close()\n\n self.model = model_from_json(json_string)\n self.model.load_weights(\"{0}_weights.hdf5\".format(self.root))\n\n def trainCNN(self, n_iterations):\n print(\"Training network...\") \n \n # Recover losses from previous run\n if (self.option == 'continue'):\n with open(\"{0}_loss.json\".format(self.root), 'r') as f:\n losses = json.load(f)\n else:\n losses = []\n\n self.checkpointer = ModelCheckpoint(filepath=\"{0}_weights.hdf5\".format(self.root), verbose=1, save_best_only=True)\n self.history = LossHistory(self.root, losses)\n \n self.metrics = self.model.fit_generator(self.training_generator(), self.batchs_per_epoch_training, epochs=n_iterations, \n callbacks=[self.checkpointer, self.history], validation_data=self.validation_generator(), validation_steps=self.batchs_per_epoch_validation)\n \n self.history.finalize()\n\n\nif (__name__ == '__main__'):\n\n parser = argparse.ArgumentParser(description='Train/predict for spectra')\n parser.add_argument('-o','--out', help='Output files')\n parser.add_argument('-e','--epochs', help='Number of epochs', default=10)\n parser.add_argument('-n','--noise', help='Noise to add during training/prediction', default=0.0)\n parser.add_argument('-a','--action', help='Action', choices=['start', 'continue', 'predict'], required=True)\n parsed = vars(parser.parse_args())\n\n root = parsed['out']\n nEpochs = int(parsed['epochs'])\n option = parsed['action']\n noise = parsed['noise']\n\n out = resnet(root, noise, option)\n\n if (option == 'start'): \n out.defineNetwork() \n \n if (option == 'continue' or option == 'predict'):\n out.readNetwork()\n\n if (option == 'start' or option == 'continue'):\n out.compileNetwork()\n out.trainCNN(nEpochs)\n \n if (option == 'predict'):\n out.predictCNN()"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.Session"
]
] |
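The training-script row above is the one that exercises the listed `tensorflow.ConfigProto` and `tensorflow.Session` calls, and it does so only to make the GPU allocate memory on demand. A short sketch of that setup is below; it assumes the TF 1.x-style API surface (on TF 2.x the same symbols live under `tf.compat.v1`, and `tf.config.experimental.set_memory_growth` is the preferred replacement) and hands the session to the bundled `tf.keras` backend rather than the standalone `keras.backend.tensorflow_backend` used in the row.

```python
# Sketch only: grow GPU memory on demand instead of reserving the whole card up front.
import tensorflow as tf

tf1 = tf.compat.v1                         # on plain TF 1.x, `tf` itself exposes these symbols
config = tf1.ConfigProto()
config.gpu_options.allow_growth = True     # allocate only what the model actually needs
session = tf1.Session(config=config)
tf1.keras.backend.set_session(session)     # make Keras use the configured session
```

On TF 2.x the eager-mode equivalent is to call `tf.config.experimental.set_memory_growth(gpu, True)` for each device returned by `tf.config.list_physical_devices('GPU')`.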
ciguaran/pymc | [
"7cdc070571d91c8919e39bbb9eb39ea3cd227386"
] | [
"pymc/distributions/continuous.py"
] | [
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\"\"\"\nA collection of common probability distributions for stochastic\nnodes in PyMC.\n\"\"\"\n\nfrom typing import List, Optional, Tuple, Union\n\nimport aesara\nimport aesara.tensor as at\nimport numpy as np\n\nfrom aeppl.logprob import _logprob, logcdf\nfrom aesara.graph.basic import Apply, Variable\nfrom aesara.graph.op import Op\nfrom aesara.raise_op import Assert\nfrom aesara.tensor import gammaln\nfrom aesara.tensor.extra_ops import broadcast_shape\nfrom aesara.tensor.math import tanh\nfrom aesara.tensor.random.basic import (\n BetaRV,\n WeibullRV,\n cauchy,\n chisquare,\n exponential,\n gamma,\n gumbel,\n halfcauchy,\n halfnormal,\n invgamma,\n laplace,\n logistic,\n lognormal,\n normal,\n pareto,\n triangular,\n uniform,\n vonmises,\n)\nfrom aesara.tensor.random.op import RandomVariable\nfrom aesara.tensor.var import TensorConstant, TensorVariable\n\ntry:\n from polyagamma import polyagamma_cdf, polyagamma_pdf, random_polyagamma\nexcept ImportError: # pragma: no cover\n\n def random_polyagamma(*args, **kwargs):\n raise RuntimeError(\"polyagamma package is not installed!\")\n\n def polyagamma_pdf(*args, **kwargs):\n raise RuntimeError(\"polyagamma package is not installed!\")\n\n def polyagamma_cdf(*args, **kwargs):\n raise RuntimeError(\"polyagamma package is not installed!\")\n\n\nfrom scipy import stats\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom scipy.special import expit\n\nfrom pymc.aesaraf import floatX\nfrom pymc.distributions import logp_transform, transforms\nfrom pymc.distributions.dist_math import (\n SplineWrapper,\n check_parameters,\n clipped_beta_rvs,\n i0e,\n log_normal,\n logpow,\n normal_lccdf,\n normal_lcdf,\n zvalue,\n)\nfrom pymc.distributions.distribution import DIST_PARAMETER_TYPES, Continuous\nfrom pymc.distributions.shape_utils import rv_size_is_none\nfrom pymc.math import invlogit, logdiffexp, logit\nfrom pymc.util import UNSET\n\n__all__ = [\n \"Uniform\",\n \"Flat\",\n \"HalfFlat\",\n \"Normal\",\n \"TruncatedNormal\",\n \"Beta\",\n \"Kumaraswamy\",\n \"Exponential\",\n \"Laplace\",\n \"StudentT\",\n \"Cauchy\",\n \"HalfCauchy\",\n \"Gamma\",\n \"Weibull\",\n \"HalfStudentT\",\n \"LogNormal\",\n \"ChiSquared\",\n \"HalfNormal\",\n \"Wald\",\n \"Pareto\",\n \"InverseGamma\",\n \"ExGaussian\",\n \"VonMises\",\n \"SkewNormal\",\n \"Triangular\",\n \"Gumbel\",\n \"Logistic\",\n \"LogitNormal\",\n \"Interpolated\",\n \"Rice\",\n \"Moyal\",\n \"AsymmetricLaplace\",\n \"PolyaGamma\",\n]\n\n\nclass PositiveContinuous(Continuous):\n \"\"\"Base class for positive continuous distributions\"\"\"\n\n\nclass UnitContinuous(Continuous):\n \"\"\"Base class for continuous distributions on [0,1]\"\"\"\n\n\nclass CircularContinuous(Continuous):\n \"\"\"Base class for circular continuous distributions\"\"\"\n\n\n@logp_transform.register(PositiveContinuous)\ndef pos_cont_transform(op):\n return 
transforms.log\n\n\n@logp_transform.register(UnitContinuous)\ndef unit_cont_transform(op):\n return transforms.logodds\n\n\n@logp_transform.register(CircularContinuous)\ndef circ_cont_transform(op):\n return transforms.circular\n\n\nclass BoundedContinuous(Continuous):\n \"\"\"Base class for bounded continuous distributions\"\"\"\n\n # Indices of the arguments that define the lower and upper bounds of the distribution\n bound_args_indices: Optional[List[int]] = None\n\n def __new__(cls, *args, **kwargs):\n transform = kwargs.get(\"transform\", UNSET)\n if transform is UNSET:\n kwargs[\"transform\"] = cls.default_transform()\n return super().__new__(cls, *args, **kwargs)\n\n @classmethod\n def default_transform(cls):\n if cls.bound_args_indices is None:\n raise ValueError(\n f\"Must specify bound_args_indices for {cls.__name__} bounded distribution\"\n )\n\n def transform_params(*args):\n\n lower, upper = None, None\n if cls.bound_args_indices[0] is not None:\n lower = args[cls.bound_args_indices[0]]\n if cls.bound_args_indices[1] is not None:\n upper = args[cls.bound_args_indices[1]]\n\n if lower is not None:\n if isinstance(lower, TensorConstant) and np.all(lower.value == -np.inf):\n lower = None\n else:\n lower = at.as_tensor_variable(lower)\n\n if upper is not None:\n if isinstance(upper, TensorConstant) and np.all(upper.value == np.inf):\n upper = None\n else:\n upper = at.as_tensor_variable(upper)\n\n return lower, upper\n\n return transforms.interval(transform_params)\n\n\ndef assert_negative_support(var, label, distname, value=-1e-6):\n msg = f\"The variable specified for {label} has negative support for {distname}, \"\n msg += \"likely making it unsuitable for this parameter.\"\n return Assert(msg)(var, at.all(at.ge(var, 0.0)))\n\n\ndef get_tau_sigma(tau=None, sigma=None):\n r\"\"\"\n Find precision and standard deviation. The link between the two\n parameterizations is given by the inverse relationship:\n\n .. math::\n \\tau = \\frac{1}{\\sigma^2}\n\n Parameters\n ----------\n tau: array-like, optional\n sigma: array-like, optional\n\n Results\n -------\n Returns tuple (tau, sigma)\n\n Notes\n -----\n If neither tau nor sigma is provided, returns (1., 1.)\n \"\"\"\n if tau is None:\n if sigma is None:\n sigma = 1.0\n tau = 1.0\n else:\n if isinstance(sigma, Variable):\n sigma_ = check_parameters(sigma, sigma > 0, msg=\"sigma > 0\")\n else:\n assert np.all(np.asarray(sigma) > 0)\n sigma_ = sigma\n tau = sigma_**-2.0\n\n else:\n if sigma is not None:\n raise ValueError(\"Can't pass both tau and sigma\")\n else:\n if isinstance(tau, Variable):\n tau_ = check_parameters(tau, tau > 0, msg=\"tau > 0\")\n else:\n assert np.all(np.asarray(tau) > 0)\n tau_ = tau\n\n sigma = tau_**-0.5\n\n return floatX(tau), floatX(sigma)\n\n\nclass Uniform(BoundedContinuous):\n r\"\"\"\n Continuous uniform log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid lower, upper) = \\frac{1}{upper-lower}\n\n .. 
plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-3, 3, 500)\n ls = [0., -2]\n us = [2., 1]\n for l, u in zip(ls, us):\n y = np.zeros(500)\n y[(x<u) & (x>l)] = 1.0/(u-l)\n plt.plot(x, y, label='lower = {}, upper = {}'.format(l, u))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.ylim(0, 1)\n plt.legend(loc=1)\n plt.show()\n\n ======== =====================================\n Support :math:`x \\in [lower, upper]`\n Mean :math:`\\dfrac{lower + upper}{2}`\n Variance :math:`\\dfrac{(upper - lower)^2}{12}`\n ======== =====================================\n\n Parameters\n ----------\n lower : float, optional\n Lower limit. Defaults to 0.\n upper : float, optional\n Upper limit. Defaults to 1.\n \"\"\"\n rv_op = uniform\n bound_args_indices = (3, 4) # Lower, Upper\n\n @classmethod\n def dist(cls, lower=0, upper=1, **kwargs):\n lower = at.as_tensor_variable(floatX(lower))\n upper = at.as_tensor_variable(floatX(upper))\n return super().dist([lower, upper], **kwargs)\n\n def get_moment(rv, size, lower, upper):\n lower, upper = at.broadcast_arrays(lower, upper)\n moment = (lower + upper) / 2\n if not rv_size_is_none(size):\n moment = at.full(size, moment)\n return moment\n\n def logcdf(value, lower, upper):\n \"\"\"\n Compute the log of the cumulative distribution function for Uniform distribution\n at the specified value.\n\n Parameters\n ----------\n value : numeric or ndarray or TensorVariable\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or `TensorVariable`.\n lower : float, optional\n Lower limit. Defaults to 0.\n upper : float, optional\n Upper limit. Defaults to 1.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n return at.switch(\n at.lt(value, lower) | at.lt(upper, lower),\n -np.inf,\n at.switch(\n at.lt(value, upper),\n at.log(value - lower) - at.log(upper - lower),\n 0,\n ),\n )\n\n\nclass FlatRV(RandomVariable):\n name = \"flat\"\n ndim_supp = 0\n ndims_params = []\n dtype = \"floatX\"\n _print_name = (\"Flat\", \"\\\\operatorname{Flat}\")\n\n @classmethod\n def rng_fn(cls, rng, size):\n raise NotImplementedError(\"Cannot sample from flat variable\")\n\n\nflat = FlatRV()\n\n\nclass Flat(Continuous):\n \"\"\"\n Uninformative log-likelihood that returns 0 regardless of\n the passed value.\n \"\"\"\n\n rv_op = flat\n\n def __new__(cls, *args, **kwargs):\n kwargs.setdefault(\"initval\", \"moment\")\n return super().__new__(cls, *args, **kwargs)\n\n @classmethod\n def dist(cls, *, size=None, **kwargs):\n res = super().dist([], size=size, **kwargs)\n return res\n\n def get_moment(rv, size):\n return at.zeros(size)\n\n def logp(value):\n \"\"\"\n Calculate log-probability of Flat distribution at specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n\n Returns\n -------\n TensorVariable\n \"\"\"\n return at.zeros_like(value)\n\n def logcdf(value):\n \"\"\"\n Compute the log of the cumulative distribution function for Flat distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. 
If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n return at.switch(\n at.eq(value, -np.inf), -np.inf, at.switch(at.eq(value, np.inf), 0, at.log(0.5))\n )\n\n\nclass HalfFlatRV(RandomVariable):\n name = \"half_flat\"\n ndim_supp = 0\n ndims_params = []\n dtype = \"floatX\"\n _print_name = (\"HalfFlat\", \"\\\\operatorname{HalfFlat}\")\n\n @classmethod\n def rng_fn(cls, rng, size):\n raise NotImplementedError(\"Cannot sample from half_flat variable\")\n\n\nhalfflat = HalfFlatRV()\n\n\nclass HalfFlat(PositiveContinuous):\n \"\"\"Improper flat prior over the positive reals.\"\"\"\n\n rv_op = halfflat\n\n def __new__(cls, *args, **kwargs):\n kwargs.setdefault(\"initval\", \"moment\")\n return super().__new__(cls, *args, **kwargs)\n\n @classmethod\n def dist(cls, *, size=None, **kwargs):\n res = super().dist([], size=size, **kwargs)\n return res\n\n def get_moment(rv, size):\n return at.ones(size)\n\n def logp(value):\n \"\"\"\n Calculate log-probability of HalfFlat distribution at specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n\n Returns\n -------\n TensorVariable\n \"\"\"\n return at.switch(at.lt(value, 0), -np.inf, at.zeros_like(value))\n\n def logcdf(value):\n \"\"\"\n Compute the log of the cumulative distribution function for HalfFlat distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n return at.switch(at.lt(value, np.inf), -np.inf, at.switch(at.eq(value, np.inf), 0, -np.inf))\n\n\nclass Normal(Continuous):\n r\"\"\"\n Univariate normal log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\mu, \\tau) =\n \\sqrt{\\frac{\\tau}{2\\pi}}\n \\exp\\left\\{ -\\frac{\\tau}{2} (x-\\mu)^2 \\right\\}\n\n Normal distribution can be parameterized either in terms of precision\n or standard deviation. The link between the two parametrizations is\n given by\n\n .. math::\n\n \\tau = \\dfrac{1}{\\sigma^2}\n\n .. plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-5, 5, 1000)\n mus = [0., 0., 0., -2.]\n sigmas = [0.4, 1., 2., 0.4]\n for mu, sigma in zip(mus, sigmas):\n pdf = st.norm.pdf(x, mu, sigma)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\sigma$ = {}'.format(mu, sigma))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ==========================================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu`\n Variance :math:`\\dfrac{1}{\\tau}` or :math:`\\sigma^2`\n ======== ==========================================\n\n Parameters\n ----------\n mu : tensor_like of float, default 0\n Mean.\n sigma : tensor_like of float, optional\n Standard deviation (sigma > 0) (only required if tau is not specified).\n Defaults to 1 if neither sigma nor tau is specified.\n tau : tensor_like of float, optional\n Precision (tau > 0) (only required if sigma is not specified).\n\n Examples\n --------\n .. 
code-block:: python\n\n with pm.Model():\n x = pm.Normal('x', mu=0, sigma=10)\n\n with pm.Model():\n x = pm.Normal('x', mu=0, tau=1/23)\n \"\"\"\n rv_op = normal\n\n @classmethod\n def dist(cls, mu=0, sigma=None, tau=None, sd=None, no_assert=False, **kwargs):\n if sd is not None:\n sigma = sd\n tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)\n sigma = at.as_tensor_variable(sigma)\n\n # sd = sigma\n # tau = at.as_tensor_variable(tau)\n # mean = median = mode = mu = at.as_tensor_variable(floatX(mu))\n # variance = 1.0 / self.tau\n\n if not no_assert:\n assert_negative_support(sigma, \"sigma\", \"Normal\")\n\n return super().dist([mu, sigma], **kwargs)\n\n def get_moment(rv, size, mu, sigma):\n mu, _ = at.broadcast_arrays(mu, sigma)\n if not rv_size_is_none(size):\n mu = at.full(size, mu)\n return mu\n\n def logcdf(value, mu, sigma):\n \"\"\"\n Compute the log of the cumulative distribution function for Normal distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or `TensorVariable`.\n mu : tensor_like of float\n Mean.\n sigma : tensor_like of float\n Standard deviation (sigma > 0).\n\n Returns\n -------\n TensorVariable\n \"\"\"\n return check_parameters(\n normal_lcdf(mu, sigma, value),\n 0 < sigma,\n msg=\"sigma > 0\",\n )\n\n\nclass TruncatedNormalRV(RandomVariable):\n name = \"truncated_normal\"\n ndim_supp = 0\n ndims_params = [0, 0, 0, 0]\n dtype = \"floatX\"\n _print_name = (\"TruncatedNormal\", \"\\\\operatorname{TruncatedNormal}\")\n\n @classmethod\n def rng_fn(\n cls,\n rng: np.random.RandomState,\n mu: Union[np.ndarray, float],\n sigma: Union[np.ndarray, float],\n lower: Union[np.ndarray, float],\n upper: Union[np.ndarray, float],\n size: Optional[Union[List[int], int]],\n ) -> np.ndarray:\n return stats.truncnorm.rvs(\n a=(lower - mu) / sigma,\n b=(upper - mu) / sigma,\n loc=mu,\n scale=sigma,\n size=size,\n random_state=rng,\n )\n\n\ntruncated_normal = TruncatedNormalRV()\n\n\nclass TruncatedNormal(BoundedContinuous):\n r\"\"\"\n Univariate truncated normal log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x;\\mu ,\\sigma ,a,b)={\\frac {\\phi ({\\frac {x-\\mu }{\\sigma }})}{\n \\sigma \\left(\\Phi ({\\frac {b-\\mu }{\\sigma }})-\\Phi ({\\frac {a-\\mu }{\\sigma }})\\right)}}\n\n Truncated normal distribution can be parameterized either in terms of precision\n or standard deviation. The link between the two parametrizations is\n given by\n\n .. math::\n\n \\tau = \\dfrac{1}{\\sigma^2}\n\n\n .. 
plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-10, 10, 1000)\n mus = [0., 0., 0.]\n sigmas = [3.,5.,7.]\n a1 = [-3, -5, -5]\n b1 = [7, 5, 4]\n for mu, sigma, a, b in zip(mus, sigmas,a1,b1):\n an, bn = (a - mu) / sigma, (b - mu) / sigma\n pdf = st.truncnorm.pdf(x, an,bn, loc=mu, scale=sigma)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\sigma$ = {}, a={}, b={}'.format(mu, sigma, a, b))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ==========================================\n Support :math:`x \\in [a, b]`\n Mean :math:`\\mu +{\\frac {\\phi (\\alpha )-\\phi (\\beta )}{Z}}\\sigma`\n Variance :math:`\\sigma ^{2}\\left[1+{\\frac {\\alpha \\phi (\\alpha )-\\beta \\phi (\\beta )}{Z}}-\\left({\\frac {\\phi (\\alpha )-\\phi (\\beta )}{Z}}\\right)^{2}\\right]`\n ======== ==========================================\n\n Parameters\n ----------\n mu : tensor_like of float, default 0\n Mean.\n sigma : tensor_like of float, optional\n Standard deviation (sigma > 0) (only required if tau is not specified).\n Defaults to 1 if neither sigma nor tau is specified.\n tau : tensor_like of float, optional\n Precision (tau > 0) (only required if sigma is not specified).\n lower : tensor_like of float, default - numpy.inf\n Left bound.\n upper : tensor_like of float, default numpy.inf\n Right bound.\n\n Examples\n --------\n .. code-block:: python\n\n with pm.Model():\n x = pm.TruncatedNormal('x', mu=0, sigma=10, lower=0)\n\n with pm.Model():\n x = pm.TruncatedNormal('x', mu=0, sigma=10, upper=1)\n\n with pm.Model():\n x = pm.TruncatedNormal('x', mu=0, sigma=10, lower=0, upper=1)\n\n \"\"\"\n\n rv_op = truncated_normal\n bound_args_indices = (5, 6) # indexes for lower and upper args\n\n @classmethod\n def dist(\n cls,\n mu: Optional[DIST_PARAMETER_TYPES] = None,\n sigma: Optional[DIST_PARAMETER_TYPES] = None,\n tau: Optional[DIST_PARAMETER_TYPES] = None,\n sd: Optional[DIST_PARAMETER_TYPES] = None,\n lower: Optional[DIST_PARAMETER_TYPES] = None,\n upper: Optional[DIST_PARAMETER_TYPES] = None,\n *args,\n **kwargs,\n ) -> RandomVariable:\n sigma = sd if sd is not None else sigma\n tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)\n sigma = at.as_tensor_variable(sigma)\n tau = at.as_tensor_variable(tau)\n mu = at.as_tensor_variable(floatX(mu))\n assert_negative_support(sigma, \"sigma\", \"TruncatedNormal\")\n assert_negative_support(tau, \"tau\", \"TruncatedNormal\")\n\n lower = at.as_tensor_variable(floatX(lower)) if lower is not None else at.constant(-np.inf)\n upper = at.as_tensor_variable(floatX(upper)) if upper is not None else at.constant(np.inf)\n return super().dist([mu, sigma, lower, upper], **kwargs)\n\n def get_moment(rv, size, mu, sigma, lower, upper):\n mu, _, lower, upper = at.broadcast_arrays(mu, sigma, lower, upper)\n moment = at.switch(\n at.eq(lower, -np.inf),\n at.switch(\n at.eq(upper, np.inf),\n # lower = -inf, upper = inf\n mu,\n # lower = -inf, upper = x\n upper - 1,\n ),\n at.switch(\n at.eq(upper, np.inf),\n # lower = x, upper = inf\n lower + 1,\n # lower = x, upper = x\n (lower + upper) / 2,\n ),\n )\n\n if not rv_size_is_none(size):\n moment = at.full(size, moment)\n\n return moment\n\n def logp(\n value,\n mu: Union[float, np.ndarray, TensorVariable],\n sigma: Union[float, np.ndarray, TensorVariable],\n lower: Union[float, np.ndarray, TensorVariable],\n upper: Union[float, np.ndarray, 
TensorVariable],\n ) -> RandomVariable:\n \"\"\"\n Calculate log-probability of TruncatedNormal distribution at specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n unbounded_lower = isinstance(lower, TensorConstant) and np.all(lower.value == -np.inf)\n unbounded_upper = isinstance(upper, TensorConstant) and np.all(upper.value == np.inf)\n\n if not unbounded_lower and not unbounded_upper:\n lcdf_a = normal_lcdf(mu, sigma, lower)\n lcdf_b = normal_lcdf(mu, sigma, upper)\n lsf_a = normal_lccdf(mu, sigma, lower)\n lsf_b = normal_lccdf(mu, sigma, upper)\n norm = at.switch(lower > 0, logdiffexp(lsf_a, lsf_b), logdiffexp(lcdf_b, lcdf_a))\n elif not unbounded_lower:\n norm = normal_lccdf(mu, sigma, lower)\n elif not unbounded_upper:\n norm = normal_lcdf(mu, sigma, upper)\n else:\n norm = 0.0\n\n logp = _logprob(normal, (value,), None, None, None, mu, sigma) - norm\n bounds = []\n if not unbounded_lower:\n bounds.append(value >= lower)\n if not unbounded_upper:\n bounds.append(value <= upper)\n if not unbounded_lower and not unbounded_upper:\n bounds.append(lower <= upper)\n return check_parameters(logp, *bounds)\n\n\nclass HalfNormal(PositiveContinuous):\n r\"\"\"\n Half-normal log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\tau) =\n \\sqrt{\\frac{2\\tau}{\\pi}}\n \\exp\\left(\\frac{-x^2 \\tau}{2}\\right)\n\n f(x \\mid \\sigma) =\n \\sqrt{\\frac{2}{\\pi\\sigma^2}}\n \\exp\\left(\\frac{-x^2}{2\\sigma^2}\\right)\n\n .. note::\n\n The parameters ``sigma``/``tau`` (:math:`\\sigma`/:math:`\\tau`) refer to\n the standard deviation/precision of the unfolded normal distribution, for\n the standard deviation of the half-normal distribution, see below. For\n the half-normal, they are just two parameterisation :math:`\\sigma^2\n \\equiv \\frac{1}{\\tau}` of a scale parameter\n\n .. plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 5, 200)\n for sigma in [0.4, 1., 2.]:\n pdf = st.halfnorm.pdf(x, scale=sigma)\n plt.plot(x, pdf, label=r'$\\sigma$ = {}'.format(sigma))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ==========================================\n Support :math:`x \\in [0, \\infty)`\n Mean :math:`\\sqrt{\\dfrac{2}{\\tau \\pi}}` or :math:`\\dfrac{\\sigma \\sqrt{2}}{\\sqrt{\\pi}}`\n Variance :math:`\\dfrac{1}{\\tau}\\left(1 - \\dfrac{2}{\\pi}\\right)` or :math:`\\sigma^2\\left(1 - \\dfrac{2}{\\pi}\\right)`\n ======== ==========================================\n\n Parameters\n ----------\n sigma: float\n Scale parameter :math:`sigma` (``sigma`` > 0) (only required if ``tau`` is not specified).\n tau: float\n Precision :math:`tau` (tau > 0) (only required if sigma is not specified).\n\n Examples\n --------\n .. 
code-block:: python\n\n with pm.Model():\n x = pm.HalfNormal('x', sigma=10)\n\n with pm.Model():\n x = pm.HalfNormal('x', tau=1/15)\n \"\"\"\n rv_op = halfnormal\n\n @classmethod\n def dist(cls, sigma=None, tau=None, sd=None, *args, **kwargs):\n if sd is not None:\n sigma = sd\n\n tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)\n\n assert_negative_support(tau, \"tau\", \"HalfNormal\")\n assert_negative_support(sigma, \"sigma\", \"HalfNormal\")\n\n return super().dist([0.0, sigma], **kwargs)\n\n def get_moment(rv, size, loc, sigma):\n moment = loc + sigma\n if not rv_size_is_none(size):\n moment = at.full(size, moment)\n return moment\n\n def logcdf(value, loc, sigma):\n \"\"\"\n Compute the log of the cumulative distribution function for HalfNormal distribution\n at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or aesara.tensor\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n z = zvalue(value, mu=loc, sigma=sigma)\n logcdf = at.switch(\n at.lt(value, loc),\n -np.inf,\n at.log1p(-at.erfc(z / at.sqrt(2.0))),\n )\n\n return check_parameters(\n logcdf,\n 0 < sigma,\n msg=\"sigma > 0\",\n )\n\n\nclass WaldRV(RandomVariable):\n name = \"wald\"\n ndim_supp = 0\n ndims_params = [0, 0, 0]\n dtype = \"floatX\"\n _print_name = (\"Wald\", \"\\\\operatorname{Wald}\")\n\n @classmethod\n def rng_fn(cls, rng, mu, lam, alpha, size) -> np.ndarray:\n return np.asarray(rng.wald(mu, lam, size=size) + alpha)\n\n\nwald = WaldRV()\n\n\nclass Wald(PositiveContinuous):\n r\"\"\"\n Wald log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\mu, \\lambda) =\n \\left(\\frac{\\lambda}{2\\pi}\\right)^{1/2} x^{-3/2}\n \\exp\\left\\{\n -\\frac{\\lambda}{2x}\\left(\\frac{x-\\mu}{\\mu}\\right)^2\n \\right\\}\n\n .. plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 3, 500)\n mus = [1., 1., 1., 3.]\n lams = [1., .2, 3., 1.]\n for mu, lam in zip(mus, lams):\n pdf = st.invgauss.pdf(x, mu/lam, scale=lam)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\lambda$ = {}'.format(mu, lam))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== =============================\n Support :math:`x \\in (0, \\infty)`\n Mean :math:`\\mu`\n Variance :math:`\\dfrac{\\mu^3}{\\lambda}`\n ======== =============================\n\n Wald distribution can be parameterized either in terms of lam or phi.\n The link between the two parametrizations is given by\n\n .. math::\n\n \\phi = \\dfrac{\\lambda}{\\mu}\n\n Parameters\n ----------\n mu: float, optional\n Mean of the distribution (mu > 0).\n lam: float, optional\n Relative precision (lam > 0).\n phi: float, optional\n Alternative shape parameter (phi > 0).\n alpha: float, optional\n Shift/location parameter (alpha >= 0).\n\n Notes\n -----\n To instantiate the distribution specify any of the following\n\n - only mu (in this case lam will be 1)\n - mu and lam\n - mu and phi\n - lam and phi\n\n References\n ----------\n .. [Tweedie1957] Tweedie, M. C. K. (1957).\n Statistical Properties of Inverse Gaussian Distributions I.\n The Annals of Mathematical Statistics, Vol. 28, No. 2, pp. 362-377\n\n .. [Michael1976] Michael, J. R., Schucany, W. R. and Hass, R. W. 
(1976).\n Generating Random Variates Using Transformations with Multiple Roots.\n The American Statistician, Vol. 30, No. 2, pp. 88-90\n\n .. [Giner2016] Göknur Giner, Gordon K. Smyth (2016)\n statmod: Probability Calculations for the Inverse Gaussian Distribution\n \"\"\"\n rv_op = wald\n\n @classmethod\n def dist(\n cls,\n mu: Optional[Union[float, np.ndarray]] = None,\n lam: Optional[Union[float, np.ndarray]] = None,\n phi: Optional[Union[float, np.ndarray]] = None,\n alpha: Union[float, np.ndarray] = 0.0,\n *args,\n **kwargs,\n ) -> RandomVariable:\n mu, lam, phi = cls.get_mu_lam_phi(mu, lam, phi)\n alpha = at.as_tensor_variable(floatX(alpha))\n mu = at.as_tensor_variable(floatX(mu))\n lam = at.as_tensor_variable(floatX(lam))\n\n assert_negative_support(phi, \"phi\", \"Wald\")\n assert_negative_support(mu, \"mu\", \"Wald\")\n assert_negative_support(lam, \"lam\", \"Wald\")\n\n return super().dist([mu, lam, alpha], **kwargs)\n\n def get_moment(rv, size, mu, lam, alpha):\n mu, _, _ = at.broadcast_arrays(mu, lam, alpha)\n if not rv_size_is_none(size):\n mu = at.full(size, mu)\n return mu\n\n @staticmethod\n def get_mu_lam_phi(\n mu: Optional[float], lam: Optional[float], phi: Optional[float]\n ) -> Tuple[Union[float, np.ndarray], Union[float, np.ndarray], Union[float, np.ndarray]]:\n if mu is None:\n if lam is not None and phi is not None:\n return lam / phi, lam, phi\n else:\n if lam is None:\n if phi is None:\n return mu, 1.0, 1.0 / mu\n else:\n return mu, mu * phi, phi\n else:\n if phi is None:\n return mu, lam, lam / mu\n\n raise ValueError(\n \"Wald distribution must specify either mu only, \"\n \"mu and lam, mu and phi, or lam and phi.\"\n )\n\n def logp(\n value,\n mu: Union[float, np.ndarray, TensorVariable],\n lam: Union[float, np.ndarray, TensorVariable],\n alpha: Union[float, np.ndarray, TensorVariable],\n ) -> RandomVariable:\n \"\"\"\n Calculate log-probability of Wald distribution at specified value.\n\n Parameters\n ----------\n value: numeric\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n mu: float or TensorVariable\n Mean of the distribution (mu > 0).\n lam: float or TensorVariable\n Relative precision (lam > 0).\n alpha: float or TensorVariable\n Shift/location parameter (alpha >= 0).\n\n Returns\n -------\n TensorVariable\n \"\"\"\n centered_value = value - alpha\n logp = at.switch(\n at.le(centered_value, 0),\n -np.inf,\n (\n logpow(lam / (2.0 * np.pi), 0.5)\n - logpow(centered_value, 1.5)\n - (0.5 * lam / centered_value * ((centered_value - mu) / mu) ** 2)\n ),\n )\n\n return check_parameters(\n logp,\n mu > 0,\n lam > 0,\n alpha >= 0,\n msg=\"mu > 0, lam > 0, alpha >= 0\",\n )\n\n def logcdf(\n value,\n mu: Union[float, np.ndarray, TensorVariable],\n lam: Union[float, np.ndarray, TensorVariable],\n alpha: Union[float, np.ndarray, TensorVariable],\n ) -> RandomVariable:\n \"\"\"\n Compute the log of the cumulative distribution function for Wald distribution\n at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or aesara.tensor\n Value(s) for which log CDF is calculated. 
If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n mu: float or TensorVariable\n Mean of the distribution (mu > 0).\n lam: float or TensorVariable\n Relative precision (lam > 0).\n alpha: float or TensorVariable\n Shift/location parameter (alpha >= 0).\n\n Returns\n -------\n TensorVariable\n \"\"\"\n value -= alpha\n q = value / mu\n l = lam * mu\n r = at.sqrt(value * lam)\n\n a = normal_lcdf(0, 1, (q - 1.0) / r)\n b = 2.0 / l + normal_lcdf(0, 1, -(q + 1.0) / r)\n\n logcdf = at.switch(\n at.le(value, 0),\n -np.inf,\n at.switch(\n at.lt(value, np.inf),\n a + at.log1pexp(b - a),\n 0,\n ),\n )\n\n return check_parameters(\n logcdf, 0 < mu, 0 < lam, 0 <= alpha, msg=\"mu > 0, lam > 0, alpha >= 0\"\n )\n\n\nclass BetaClippedRV(BetaRV):\n @classmethod\n def rng_fn(cls, rng, alpha, beta, size) -> np.ndarray:\n return np.asarray(clipped_beta_rvs(alpha, beta, size=size, random_state=rng))\n\n\nbeta = BetaClippedRV()\n\n\nclass Beta(UnitContinuous):\n r\"\"\"\n Beta log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{x^{\\alpha - 1} (1 - x)^{\\beta - 1}}{B(\\alpha, \\beta)}\n\n .. plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 1, 200)\n alphas = [.5, 5., 1., 2., 2.]\n betas = [.5, 1., 3., 2., 5.]\n for a, b in zip(alphas, betas):\n pdf = st.beta.pdf(x, a, b)\n plt.plot(x, pdf, label=r'$\\alpha$ = {}, $\\beta$ = {}'.format(a, b))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.ylim(0, 4.5)\n plt.legend(loc=9)\n plt.show()\n\n ======== ==============================================================\n Support :math:`x \\in (0, 1)`\n Mean :math:`\\dfrac{\\alpha}{\\alpha + \\beta}`\n Variance :math:`\\dfrac{\\alpha \\beta}{(\\alpha+\\beta)^2(\\alpha+\\beta+1)}`\n ======== ==============================================================\n\n Beta distribution can be parameterized either in terms of alpha and\n beta or mean and standard deviation. The link between the two\n parametrizations is given by\n\n .. math::\n\n \\alpha &= \\mu \\kappa \\\\\n \\beta &= (1 - \\mu) \\kappa\n\n \\text{where } \\kappa = \\frac{\\mu(1-\\mu)}{\\sigma^2} - 1\n\n Parameters\n ----------\n alpha : tensor_like of float, optional\n ``alpha`` > 0. If not specified, then calculated using ``mu`` and ``sigma``.\n beta : tensor_like of float, optional\n ``beta`` > 0. 
If not specified, then calculated using ``mu`` and ``sigma``.\n mu : tensor_like of float, optional\n Alternative mean (0 < ``mu`` < 1).\n sigma : tensor_like of float, optional\n Alternative standard deviation (1 < ``sigma`` < sqrt(``mu`` * (1 - ``mu``))).\n\n Notes\n -----\n Beta distribution is a conjugate prior for the parameter :math:`p` of\n the binomial distribution.\n \"\"\"\n\n rv_op = aesara.tensor.random.beta\n\n @classmethod\n def dist(cls, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs):\n if sd is not None:\n sigma = sd\n\n alpha, beta = cls.get_alpha_beta(alpha, beta, mu, sigma)\n alpha = at.as_tensor_variable(floatX(alpha))\n beta = at.as_tensor_variable(floatX(beta))\n\n assert_negative_support(alpha, \"alpha\", \"Beta\")\n assert_negative_support(beta, \"beta\", \"Beta\")\n\n return super().dist([alpha, beta], **kwargs)\n\n def get_moment(rv, size, alpha, beta):\n mean = alpha / (alpha + beta)\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n @classmethod\n def get_alpha_beta(self, alpha=None, beta=None, mu=None, sigma=None):\n if (alpha is not None) and (beta is not None):\n pass\n elif (mu is not None) and (sigma is not None):\n kappa = mu * (1 - mu) / sigma**2 - 1\n alpha = mu * kappa\n beta = (1 - mu) * kappa\n else:\n raise ValueError(\n \"Incompatible parameterization. Either use alpha \"\n \"and beta, or mu and sigma to specify distribution.\"\n )\n\n return alpha, beta\n\n def logcdf(value, alpha, beta):\n \"\"\"\n Compute the log of the cumulative distribution function for Beta distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n alpha : tensor_like of float\n ``alpha`` > 0.\n beta : tensor_like of float\n ``beta`` > 0.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n\n logcdf = at.switch(\n at.lt(value, 0),\n -np.inf,\n at.switch(\n at.lt(value, 1),\n at.log(at.betainc(alpha, beta, value)),\n 0,\n ),\n )\n\n return check_parameters(\n logcdf,\n 0 < alpha,\n 0 < beta,\n msg=\"alpha > 0, beta > 0\",\n )\n\n\nclass KumaraswamyRV(RandomVariable):\n name = \"kumaraswamy\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"Kumaraswamy\", \"\\\\operatorname{Kumaraswamy}\")\n\n @classmethod\n def rng_fn(cls, rng, a, b, size) -> np.ndarray:\n u = rng.uniform(size=size)\n return np.asarray((1 - (1 - u) ** (1 / b)) ** (1 / a))\n\n\nkumaraswamy = KumaraswamyRV()\n\n\nclass Kumaraswamy(UnitContinuous):\n r\"\"\"\n Kumaraswamy log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid a, b) =\n abx^{a-1}(1-x^a)^{b-1}\n\n .. 
plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 1, 200)\n a_s = [.5, 5., 1., 2., 2.]\n b_s = [.5, 1., 3., 2., 5.]\n for a, b in zip(a_s, b_s):\n pdf = a * b * x ** (a - 1) * (1 - x ** a) ** (b - 1)\n plt.plot(x, pdf, label=r'$a$ = {}, $b$ = {}'.format(a, b))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.ylim(0, 3.)\n plt.legend(loc=9)\n plt.show()\n\n ======== ==============================================================\n Support :math:`x \\in (0, 1)`\n Mean :math:`b B(1 + \\tfrac{1}{a}, b)`\n Variance :math:`b B(1 + \\tfrac{2}{a}, b) - (b B(1 + \\tfrac{1}{a}, b))^2`\n ======== ==============================================================\n\n Parameters\n ----------\n a : tensor_like of float\n a > 0.\n b : tensor_like of float\n b > 0.\n \"\"\"\n rv_op = kumaraswamy\n\n @classmethod\n def dist(cls, a, b, *args, **kwargs):\n a = at.as_tensor_variable(floatX(a))\n b = at.as_tensor_variable(floatX(b))\n\n assert_negative_support(a, \"a\", \"Kumaraswamy\")\n assert_negative_support(b, \"b\", \"Kumaraswamy\")\n\n return super().dist([a, b], *args, **kwargs)\n\n def get_moment(rv, size, a, b):\n mean = at.exp(at.log(b) + at.gammaln(1 + 1 / a) + at.gammaln(b) - at.gammaln(1 + 1 / a + b))\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logp(value, a, b):\n \"\"\"\n Calculate log-probability of Kumaraswamy distribution at specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = at.log(a) + at.log(b) + (a - 1) * at.log(value) + (b - 1) * at.log(1 - value**a)\n res = at.switch(\n at.or_(at.lt(value, 0), at.gt(value, 1)),\n -np.inf,\n res,\n )\n return check_parameters(\n res,\n a > 0,\n b > 0,\n msg=\"a > 0, b > 0\",\n )\n\n def logcdf(value, a, b):\n r\"\"\"\n Compute the log of cumulative distribution function for the Kumaraswamy distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. If the log CDF for\n multiple values are desired the values must be provided in a numpy\n array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = at.switch(\n at.lt(value, 0),\n -np.inf,\n at.switch(\n at.lt(value, 1),\n at.log1mexp(b * at.log1p(-(value**a))),\n 0,\n ),\n )\n\n return check_parameters(\n res,\n a > 0,\n b > 0,\n msg=\"a > 0, b > 0\",\n )\n\n\nclass Exponential(PositiveContinuous):\n r\"\"\"\n Exponential log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\lambda) = \\lambda \\exp\\left\\{ -\\lambda x \\right\\}\n\n .. 
plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 3, 100)\n for lam in [0.5, 1., 2.]:\n pdf = st.expon.pdf(x, scale=1.0/lam)\n plt.plot(x, pdf, label=r'$\\lambda$ = {}'.format(lam))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ============================\n Support :math:`x \\in [0, \\infty)`\n Mean :math:`\\dfrac{1}{\\lambda}`\n Variance :math:`\\dfrac{1}{\\lambda^2}`\n ======== ============================\n\n Notes\n -----\n Logp calculation is defined in `aeppl.logprob <https://github.com/aesara-devs/aeppl/blob/main/aeppl/logprob.py/>`_.\n\n Parameters\n ----------\n lam : tensor_like of float\n Rate or inverse scale (``lam`` > 0)\n \"\"\"\n rv_op = exponential\n\n @classmethod\n def dist(cls, lam, *args, **kwargs):\n lam = at.as_tensor_variable(floatX(lam))\n\n assert_negative_support(lam, \"lam\", \"Exponential\")\n\n # Aesara exponential op is parametrized in terms of mu (1/lam)\n return super().dist([at.inv(lam)], **kwargs)\n\n def get_moment(rv, size, mu):\n if not rv_size_is_none(size):\n mu = at.full(size, mu)\n return mu\n\n def logcdf(value, mu):\n r\"\"\"\n Compute the log of cumulative distribution function for the Exponential distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like\n Value(s) for which log CDF is calculated. If the log CDF for\n multiple values are desired the values must be provided in a numpy\n array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n lam = at.inv(mu)\n res = at.switch(\n at.lt(value, 0),\n -np.inf,\n at.log1mexp(-lam * value),\n )\n\n return check_parameters(res, 0 <= lam, msg=\"lam >= 0\")\n\n\nclass Laplace(Continuous):\n r\"\"\"\n Laplace log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\mu, b) =\n \\frac{1}{2b} \\exp \\left\\{ - \\frac{|x - \\mu|}{b} \\right\\}\n\n .. plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-10, 10, 1000)\n mus = [0., 0., 0., -5.]\n bs = [1., 2., 4., 4.]\n for mu, b in zip(mus, bs):\n pdf = st.laplace.pdf(x, loc=mu, scale=b)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $b$ = {}'.format(mu, b))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu`\n Variance :math:`2 b^2`\n ======== ========================\n\n Parameters\n ----------\n mu : tensor_like of float\n Location parameter.\n b : tensor_like of float\n Scale parameter (b > 0).\n \"\"\"\n rv_op = laplace\n\n @classmethod\n def dist(cls, mu, b, *args, **kwargs):\n b = at.as_tensor_variable(floatX(b))\n mu = at.as_tensor_variable(floatX(mu))\n\n assert_negative_support(b, \"b\", \"Laplace\")\n return super().dist([mu, b], *args, **kwargs)\n\n def get_moment(rv, size, mu, b):\n mu, _ = at.broadcast_arrays(mu, b)\n if not rv_size_is_none(size):\n mu = at.full(size, mu)\n return mu\n\n def logcdf(value, mu, b):\n \"\"\"\n Compute the log of the cumulative distribution function for Laplace distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. 
If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n y = (value - mu) / b\n\n res = at.switch(\n at.le(value, mu),\n at.log(0.5) + y,\n at.switch(\n at.gt(y, 1),\n at.log1p(-0.5 * at.exp(-y)),\n at.log(1 - 0.5 * at.exp(-y)),\n ),\n )\n\n return check_parameters(\n res,\n 0 < b,\n msg=\"b > 0\",\n )\n\n\nclass AsymmetricLaplaceRV(RandomVariable):\n name = \"asymmetriclaplace\"\n ndim_supp = 0\n ndims_params = [0, 0, 0]\n dtype = \"floatX\"\n _print_name = (\"AsymmetricLaplace\", \"\\\\operatorname{AsymmetricLaplace}\")\n\n @classmethod\n def rng_fn(cls, rng, b, kappa, mu, size=None) -> np.ndarray:\n u = rng.uniform(size=size)\n switch = kappa**2 / (1 + kappa**2)\n non_positive_x = mu + kappa * np.log(u * (1 / switch)) / b\n positive_x = mu - np.log((1 - u) * (1 + kappa**2)) / (kappa * b)\n draws = non_positive_x * (u <= switch) + positive_x * (u > switch)\n return np.asarray(draws)\n\n\nasymmetriclaplace = AsymmetricLaplaceRV()\n\n\nclass AsymmetricLaplace(Continuous):\n r\"\"\"\n Asymmetric-Laplace log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n {f(x|\\\\b,\\kappa,\\mu) =\n \\left({\\frac{\\\\b}{\\kappa + 1/\\kappa}}\\right)\\,e^{-(x-\\mu)\\\\b\\,s\\kappa ^{s}}}\n\n where\n\n .. math::\n\n s = sgn(x-\\mu)\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu-\\frac{\\\\\\kappa-1/\\kappa}b`\n Variance :math:`\\frac{1+\\kappa^{4}}{b^2\\kappa^2 }`\n ======== ========================\n\n Parameters\n ----------\n b: float\n Scale parameter (b > 0)\n kappa: float\n Symmetry parameter (kappa > 0)\n mu: float\n Location parameter\n\n See Also:\n --------\n `Reference <https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution>`_\n \"\"\"\n rv_op = asymmetriclaplace\n\n @classmethod\n def dist(cls, b, kappa, mu=0, *args, **kwargs):\n b = at.as_tensor_variable(floatX(b))\n kappa = at.as_tensor_variable(floatX(kappa))\n mu = mu = at.as_tensor_variable(floatX(mu))\n\n # mean = mu - (kappa - 1 / kappa) / b\n # variance = (1 + kappa ** 4) / (kappa ** 2 * b ** 2)\n\n assert_negative_support(kappa, \"kappa\", \"AsymmetricLaplace\")\n assert_negative_support(b, \"b\", \"AsymmetricLaplace\")\n\n return super().dist([b, kappa, mu], *args, **kwargs)\n\n def get_moment(rv, size, b, kappa, mu):\n mean = mu - (kappa - 1 / kappa) / b\n\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logp(value, b, kappa, mu):\n \"\"\"\n Calculate log-probability of Asymmetric-Laplace distribution at specified value.\n\n Parameters\n ----------\n value: numeric\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n\n Returns\n -------\n TensorVariable\n \"\"\"\n value = value - mu\n res = at.log(b / (kappa + (kappa**-1))) + (\n -value * b * at.sgn(value) * (kappa ** at.sgn(value))\n )\n\n return check_parameters(res, 0 < b, 0 < kappa, msg=\"b > 0, kappa > 0\")\n\n\nclass LogNormal(PositiveContinuous):\n r\"\"\"\n Log-normal log-likelihood.\n\n Distribution of any random variable whose logarithm is normally\n distributed. A variable might be modeled as log-normal if it can\n be thought of as the multiplicative product of many small\n independent factors.\n\n Note: Class name Lognormal is deprecated, use LogNormal now!\n\n The pdf of this distribution is\n\n .. 
math::\n\n f(x \\mid \\mu, \\tau) =\n \\frac{1}{x} \\sqrt{\\frac{\\tau}{2\\pi}}\n \\exp\\left\\{ -\\frac{\\tau}{2} (\\ln(x)-\\mu)^2 \\right\\}\n\n .. plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 3, 100)\n mus = [0., 0., 0.]\n sigmas = [.25, .5, 1.]\n for mu, sigma in zip(mus, sigmas):\n pdf = st.lognorm.pdf(x, sigma, scale=np.exp(mu))\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\sigma$ = {}'.format(mu, sigma))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== =========================================================================\n Support :math:`x \\in [0, \\infty)`\n Mean :math:`\\exp\\{\\mu + \\frac{1}{2\\tau}\\}`\n Variance :math:`(\\exp\\{\\frac{1}{\\tau}\\} - 1) \\times \\exp\\{2\\mu + \\frac{1}{\\tau}\\}`\n ======== =========================================================================\n\n Parameters\n ----------\n mu : tensor_like of float, default 0\n Location parameter.\n sigma : tensor_like of float, optional\n Standard deviation. (sigma > 0). (only required if tau is not specified).\n Defaults to 1.\n tau : tensor_like of float, optional\n Scale parameter (tau > 0). (only required if sigma is not specified).\n Defaults to 1.\n\n Examples\n --------\n\n .. code-block:: python\n\n # Example to show that we pass in only ``sigma`` or ``tau`` but not both.\n with pm.Model():\n x = pm.LogNormal('x', mu=2, sigma=30)\n\n with pm.Model():\n x = pm.LogNormal('x', mu=2, tau=1/100)\n \"\"\"\n\n rv_op = lognormal\n\n @classmethod\n def dist(cls, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs):\n if sd is not None:\n sigma = sd\n\n tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)\n\n mu = at.as_tensor_variable(floatX(mu))\n sigma = at.as_tensor_variable(floatX(sigma))\n\n assert_negative_support(tau, \"tau\", \"LogNormal\")\n assert_negative_support(sigma, \"sigma\", \"LogNormal\")\n\n return super().dist([mu, sigma], *args, **kwargs)\n\n def get_moment(rv, size, mu, sigma):\n mean = at.exp(mu + 0.5 * sigma**2)\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logcdf(value, mu, sigma):\n \"\"\"\n Compute the log of the cumulative distribution function for LogNormal distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = at.switch(\n at.le(value, 0),\n -np.inf,\n normal_lcdf(mu, sigma, at.log(value)),\n )\n\n return check_parameters(res, 0 < sigma, msg=\"sigma > 0\")\n\n\nLognormal = LogNormal\n\n\nclass StudentTRV(RandomVariable):\n name = \"studentt\"\n ndim_supp = 0\n ndims_params = [0, 0, 0]\n dtype = \"floatX\"\n _print_name = (\"StudentT\", \"\\\\operatorname{StudentT}\")\n\n @classmethod\n def rng_fn(cls, rng, nu, mu, sigma, size=None) -> np.ndarray:\n return np.asarray(stats.t.rvs(nu, mu, sigma, size=size, random_state=rng))\n\n\nstudentt = StudentTRV()\n\n\nclass StudentT(Continuous):\n r\"\"\"\n Student's T log-likelihood.\n\n Describes a normal variable whose precision is gamma distributed.\n If only nu parameter is passed, this specifies a standard (central)\n Student's T.\n\n The pdf of this distribution is\n\n .. 
math::\n\n f(x|\\mu,\\lambda,\\nu) =\n \\frac{\\Gamma(\\frac{\\nu + 1}{2})}{\\Gamma(\\frac{\\nu}{2})}\n \\left(\\frac{\\lambda}{\\pi\\nu}\\right)^{\\frac{1}{2}}\n \\left[1+\\frac{\\lambda(x-\\mu)^2}{\\nu}\\right]^{-\\frac{\\nu+1}{2}}\n\n .. plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-8, 8, 200)\n mus = [0., 0., -2., -2.]\n sigmas = [1., 1., 1., 2.]\n dfs = [1., 5., 5., 5.]\n for mu, sigma, df in zip(mus, sigmas, dfs):\n pdf = st.t.pdf(x, df, loc=mu, scale=sigma)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\sigma$ = {}, $\\nu$ = {}'.format(mu, sigma, df))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n ======== ========================\n\n Parameters\n ----------\n nu : tensor_like of float\n Degrees of freedom, also known as normality parameter (nu > 0).\n mu : tensor_like of float, default 0\n Location parameter.\n sigma : tensor_like of float, optional\n Scale parameter (sigma > 0). Converges to the standard deviation as nu\n increases (only required if lam is not specified). Defaults to 1.\n lam : tensor_like of float, optional\n Scale parameter (lam > 0). Converges to the precision as nu\n increases (only required if sigma is not specified). Defaults to 1.\n\n Examples\n --------\n .. code-block:: python\n\n with pm.Model():\n x = pm.StudentT('x', nu=15, mu=0, sigma=10)\n\n with pm.Model():\n x = pm.StudentT('x', nu=15, mu=0, lam=1/23)\n \"\"\"\n rv_op = studentt\n\n @classmethod\n def dist(cls, nu, mu=0, lam=None, sigma=None, sd=None, *args, **kwargs):\n if sd is not None:\n sigma = sd\n nu = at.as_tensor_variable(floatX(nu))\n lam, sigma = get_tau_sigma(tau=lam, sigma=sigma)\n sigma = at.as_tensor_variable(sigma)\n\n assert_negative_support(sigma, \"sigma (lam)\", \"StudentT\")\n assert_negative_support(nu, \"nu\", \"StudentT\")\n\n return super().dist([nu, mu, sigma], **kwargs)\n\n def get_moment(rv, size, nu, mu, sigma):\n mu, _, _ = at.broadcast_arrays(mu, nu, sigma)\n if not rv_size_is_none(size):\n mu = at.full(size, mu)\n return mu\n\n def logp(value, nu, mu, sigma):\n \"\"\"\n Calculate log-probability of StudentT distribution at specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n lam, _ = get_tau_sigma(sigma=sigma)\n\n res = (\n gammaln((nu + 1.0) / 2.0)\n + 0.5 * at.log(lam / (nu * np.pi))\n - gammaln(nu / 2.0)\n - (nu + 1.0) / 2.0 * at.log1p(lam * (value - mu) ** 2 / nu)\n )\n\n return check_parameters(res, lam > 0, nu > 0, msg=\"lam > 0, nu > 0\")\n\n def logcdf(value, nu, mu, sigma):\n \"\"\"\n Compute the log of the cumulative distribution function for Student's T distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. 
If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n _, sigma = get_tau_sigma(sigma=sigma)\n\n t = (value - mu) / sigma\n sqrt_t2_nu = at.sqrt(t**2 + nu)\n x = (t + sqrt_t2_nu) / (2.0 * sqrt_t2_nu)\n\n res = at.log(at.betainc(nu / 2.0, nu / 2.0, x))\n\n return check_parameters(res, 0 < nu, 0 < sigma, msg=\"nu > 0, sigma > 0\")\n\n\nclass Pareto(BoundedContinuous):\n r\"\"\"\n Pareto log-likelihood.\n\n Often used to characterize wealth distribution, or other examples of the\n 80/20 rule.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\alpha, m) = \\frac{\\alpha m^{\\alpha}}{x^{\\alpha+1}}\n\n .. plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 4, 1000)\n alphas = [1., 2., 5., 5.]\n ms = [1., 1., 1., 2.]\n for alpha, m in zip(alphas, ms):\n pdf = st.pareto.pdf(x, alpha, scale=m)\n plt.plot(x, pdf, label=r'$\\alpha$ = {}, m = {}'.format(alpha, m))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== =============================================================\n Support :math:`x \\in [m, \\infty)`\n Mean :math:`\\dfrac{\\alpha m}{\\alpha - 1}` for :math:`\\alpha \\ge 1`\n Variance :math:`\\dfrac{m \\alpha}{(\\alpha - 1)^2 (\\alpha - 2)}`\n for :math:`\\alpha > 2`\n ======== =============================================================\n\n Parameters\n ----------\n alpha: float\n Shape parameter (alpha > 0).\n m: float\n Scale parameter (m > 0).\n \"\"\"\n rv_op = pareto\n bound_args_indices = (4, None) # lower-bounded by `m`\n\n @classmethod\n def dist(\n cls, alpha: float = None, m: float = None, no_assert: bool = False, **kwargs\n ) -> RandomVariable:\n alpha = at.as_tensor_variable(floatX(alpha))\n m = at.as_tensor_variable(floatX(m))\n\n assert_negative_support(alpha, \"alpha\", \"Pareto\")\n assert_negative_support(m, \"m\", \"Pareto\")\n\n return super().dist([alpha, m], **kwargs)\n\n def get_moment(rv, size, alpha, m):\n median = m * 2 ** (1 / alpha)\n if not rv_size_is_none(size):\n median = at.full(size, median)\n return median\n\n def logcdf(\n value: Union[float, np.ndarray, TensorVariable],\n alpha: Union[float, np.ndarray, TensorVariable],\n m: Union[float, np.ndarray, TensorVariable],\n ):\n \"\"\"\n Compute the log of the cumulative distribution function for Pareto distribution\n at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or aesara.tensor\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n arg = (m / value) ** alpha\n\n res = at.switch(\n at.lt(value, m),\n -np.inf,\n at.switch(\n at.le(arg, 1e-5),\n at.log1p(-arg),\n at.log(1 - arg),\n ),\n )\n\n return check_parameters(res, 0 < alpha, 0 < m, msg=\"alpha > 0, m > 0\")\n\n\nclass Cauchy(Continuous):\n r\"\"\"\n Cauchy log-likelihood.\n\n Also known as the Lorentz or the Breit-Wigner distribution.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{1}{\\pi \\beta [1 + (\\frac{x-\\alpha}{\\beta})^2]}\n\n .. 
plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-5, 5, 500)\n alphas = [0., 0., 0., -2.]\n betas = [.5, 1., 2., 1.]\n for a, b in zip(alphas, betas):\n pdf = st.cauchy.pdf(x, loc=a, scale=b)\n plt.plot(x, pdf, label=r'$\\alpha$ = {}, $\\beta$ = {}'.format(a, b))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n Mode :math:`\\alpha`\n Mean undefined\n Variance undefined\n ======== ========================\n\n Parameters\n ----------\n alpha : tensor_like of float\n Location parameter.\n beta : tensor_like of float\n Scale parameter > 0.\n \"\"\"\n rv_op = cauchy\n\n @classmethod\n def dist(cls, alpha, beta, *args, **kwargs):\n alpha = at.as_tensor_variable(floatX(alpha))\n beta = at.as_tensor_variable(floatX(beta))\n\n assert_negative_support(beta, \"beta\", \"Cauchy\")\n return super().dist([alpha, beta], **kwargs)\n\n def get_moment(rv, size, alpha, beta):\n alpha, _ = at.broadcast_arrays(alpha, beta)\n if not rv_size_is_none(size):\n alpha = at.full(size, alpha)\n return alpha\n\n def logcdf(value, alpha, beta):\n \"\"\"\n Compute the log of the cumulative distribution function for Cauchy distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = at.log(0.5 + at.arctan((value - alpha) / beta) / np.pi)\n return check_parameters(\n res,\n 0 < beta,\n msg=\"beta > 0\",\n )\n\n\nclass HalfCauchy(PositiveContinuous):\n r\"\"\"\n Half-Cauchy log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\beta) = \\frac{2}{\\pi \\beta [1 + (\\frac{x}{\\beta})^2]}\n\n .. plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 5, 200)\n for b in [0.5, 1.0, 2.0]:\n pdf = st.cauchy.pdf(x, scale=b)\n plt.plot(x, pdf, label=r'$\\beta$ = {}'.format(b))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ========================\n Support :math:`x \\in [0, \\infty)`\n Mode 0\n Mean undefined\n Variance undefined\n ======== ========================\n\n Parameters\n ----------\n beta : tensor_like of float\n Scale parameter (beta > 0).\n \"\"\"\n rv_op = halfcauchy\n\n @classmethod\n def dist(cls, beta, *args, **kwargs):\n beta = at.as_tensor_variable(floatX(beta))\n assert_negative_support(beta, \"beta\", \"HalfCauchy\")\n return super().dist([0.0, beta], **kwargs)\n\n def get_moment(rv, size, loc, beta):\n if not rv_size_is_none(size):\n beta = at.full(size, beta)\n return beta\n\n def logcdf(value, loc, beta):\n \"\"\"\n Compute the log of the cumulative distribution function for HalfCauchy distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. 
If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = at.switch(\n at.lt(value, loc),\n -np.inf,\n at.log(2 * at.arctan((value - loc) / beta) / np.pi),\n )\n\n return check_parameters(res, 0 < beta, msg=\"beta > 0\")\n\n\nclass Gamma(PositiveContinuous):\n r\"\"\"\n Gamma log-likelihood.\n\n Represents the sum of alpha exponentially distributed random variables,\n each of which has rate beta.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{\\beta^{\\alpha}x^{\\alpha-1}e^{-\\beta x}}{\\Gamma(\\alpha)}\n\n .. plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 20, 200)\n alphas = [1., 2., 3., 7.5]\n betas = [.5, .5, 1., 1.]\n for a, b in zip(alphas, betas):\n pdf = st.gamma.pdf(x, a, scale=1.0/b)\n plt.plot(x, pdf, label=r'$\\alpha$ = {}, $\\beta$ = {}'.format(a, b))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ===============================\n Support :math:`x \\in (0, \\infty)`\n Mean :math:`\\dfrac{\\alpha}{\\beta}`\n Variance :math:`\\dfrac{\\alpha}{\\beta^2}`\n ======== ===============================\n\n Gamma distribution can be parameterized either in terms of alpha and\n beta or mean and standard deviation. The link between the two\n parametrizations is given by\n\n .. math::\n\n \\alpha &= \\frac{\\mu^2}{\\sigma^2} \\\\\n \\beta &= \\frac{\\mu}{\\sigma^2}\n\n Parameters\n ----------\n alpha : tensor_like of float, optional\n Shape parameter (alpha > 0).\n beta : tensor_like of float, optional\n Rate parameter (beta > 0).\n mu : tensor_like of float, optional\n Alternative shape parameter (mu > 0).\n sigma : tensor_like of float, optional\n Alternative scale parameter (sigma > 0).\n \"\"\"\n rv_op = gamma\n\n @classmethod\n def dist(cls, alpha=None, beta=None, mu=None, sigma=None, sd=None, no_assert=False, **kwargs):\n if sd is not None:\n sigma = sd\n\n alpha, beta = cls.get_alpha_beta(alpha, beta, mu, sigma)\n alpha = at.as_tensor_variable(floatX(alpha))\n beta = at.as_tensor_variable(floatX(beta))\n\n if not no_assert:\n assert_negative_support(alpha, \"alpha\", \"Gamma\")\n assert_negative_support(beta, \"beta\", \"Gamma\")\n\n # The Aesara `GammaRV` `Op` will invert the `beta` parameter itself\n return super().dist([alpha, beta], **kwargs)\n\n @classmethod\n def get_alpha_beta(cls, alpha=None, beta=None, mu=None, sigma=None):\n if (alpha is not None) and (beta is not None):\n pass\n elif (mu is not None) and (sigma is not None):\n if isinstance(sigma, Variable):\n sigma = check_parameters(sigma, sigma > 0, msg=\"sigma > 0\")\n else:\n assert np.all(np.asarray(sigma) > 0)\n alpha = mu**2 / sigma**2\n beta = mu / sigma**2\n else:\n raise ValueError(\n \"Incompatible parameterization. 
Either use \"\n \"alpha and beta, or mu and sigma to specify \"\n \"distribution.\"\n )\n\n return alpha, beta\n\n def get_moment(rv, size, alpha, inv_beta):\n # The Aesara `GammaRV` `Op` inverts the `beta` parameter itself\n mean = alpha * inv_beta\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logcdf(value, alpha, inv_beta):\n \"\"\"\n Compute the log of the cumulative distribution function for Gamma distribution\n at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. If the log CDF for\n multiple values are desired the values must be provided in a numpy\n array or `TensorVariable`.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n beta = at.inv(inv_beta)\n res = at.switch(\n at.lt(value, 0),\n -np.inf,\n at.log(at.gammainc(alpha, beta * value)),\n )\n\n return check_parameters(res, 0 < alpha, 0 < beta, msg=\"alpha > 0, beta > 0\")\n\n\nclass InverseGamma(PositiveContinuous):\n r\"\"\"\n Inverse gamma log-likelihood, the reciprocal of the gamma distribution.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{\\beta^{\\alpha}}{\\Gamma(\\alpha)} x^{-\\alpha - 1}\n \\exp\\left(\\frac{-\\beta}{x}\\right)\n\n .. plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 3, 500)\n alphas = [1., 2., 3., 3.]\n betas = [1., 1., 1., .5]\n for a, b in zip(alphas, betas):\n pdf = st.invgamma.pdf(x, a, scale=b)\n plt.plot(x, pdf, label=r'$\\alpha$ = {}, $\\beta$ = {}'.format(a, b))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ======================================================\n Support :math:`x \\in (0, \\infty)`\n Mean :math:`\\dfrac{\\beta}{\\alpha-1}` for :math:`\\alpha > 1`\n Variance :math:`\\dfrac{\\beta^2}{(\\alpha-1)^2(\\alpha - 2)}`\n for :math:`\\alpha > 2`\n ======== ======================================================\n\n Parameters\n ----------\n alpha : tensor_like of float, optional\n Shape parameter (alpha > 0).\n beta : tensor_like of float, optional\n Scale parameter (beta > 0).\n mu : tensor_like of float, optional\n Alternative shape parameter (mu > 0).\n sigma : tensor_like of float, optional\n Alternative scale parameter (sigma > 0).\n \"\"\"\n rv_op = invgamma\n\n @classmethod\n def dist(cls, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs):\n if sd is not None:\n sigma = sd\n\n alpha, beta = cls._get_alpha_beta(alpha, beta, mu, sigma)\n alpha = at.as_tensor_variable(floatX(alpha))\n beta = at.as_tensor_variable(floatX(beta))\n\n assert_negative_support(alpha, \"alpha\", \"InverseGamma\")\n assert_negative_support(beta, \"beta\", \"InverseGamma\")\n\n return super().dist([alpha, beta], **kwargs)\n\n def get_moment(rv, size, alpha, beta):\n mean = beta / (alpha - 1.0)\n mode = beta / (alpha + 1.0)\n moment = at.switch(alpha > 1, mean, mode)\n if not rv_size_is_none(size):\n moment = at.full(size, moment)\n return moment\n\n @classmethod\n def _get_alpha_beta(cls, alpha, beta, mu, sigma):\n if alpha is not None:\n if beta is not None:\n pass\n else:\n beta = 1\n elif (mu is not None) and (sigma is not None):\n if isinstance(sigma, Variable):\n sigma = check_parameters(sigma, sigma > 0, msg=\"sigma > 0\")\n else:\n assert np.all(np.asarray(sigma) > 0)\n alpha = (2 * sigma**2 + mu**2) / sigma**2\n beta = mu * 
(mu**2 + sigma**2) / sigma**2\n else:\n raise ValueError(\n \"Incompatible parameterization. Either use \"\n \"alpha and (optionally) beta, or mu and sigma to specify \"\n \"distribution.\"\n )\n\n return alpha, beta\n\n @classmethod\n def _distr_parameters_for_repr(self):\n return [\"alpha\", \"beta\"]\n\n def logcdf(value, alpha, beta):\n \"\"\"\n Compute the log of the cumulative distribution function for Inverse Gamma\n distribution at the specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara\n tensor.\n alpha : tensor_like of float\n Shape parameter (alpha > 0).\n beta : tensor_like of float\n Scale parameter (beta > 0).\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = at.switch(\n at.lt(value, 0),\n -np.inf,\n at.log(at.gammaincc(alpha, beta / value)),\n )\n\n return check_parameters(res, 0 < alpha, 0 < beta, msg=\"alpha > 0, beta > 0\")\n\n\nclass ChiSquared(PositiveContinuous):\n r\"\"\"\n :math:`\\chi^2` log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\nu) = \\frac{x^{(\\nu-2)/2}e^{-x/2}}{2^{\\nu/2}\\Gamma(\\nu/2)}\n\n .. plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 15, 200)\n for df in [1, 2, 3, 6, 9]:\n pdf = st.chi2.pdf(x, df)\n plt.plot(x, pdf, label=r'$\\nu$ = {}'.format(df))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.ylim(0, 0.6)\n plt.legend(loc=1)\n plt.show()\n\n ======== ===============================\n Support :math:`x \\in [0, \\infty)`\n Mean :math:`\\nu`\n Variance :math:`2 \\nu`\n ======== ===============================\n\n Parameters\n ----------\n nu: float\n Degrees of freedom (nu > 0).\n \"\"\"\n rv_op = chisquare\n\n @classmethod\n def dist(cls, nu, *args, **kwargs):\n nu = at.as_tensor_variable(floatX(nu))\n return super().dist([nu], *args, **kwargs)\n\n def get_moment(rv, size, nu):\n moment = nu\n if not rv_size_is_none(size):\n moment = at.full(size, moment)\n return moment\n\n def logcdf(value, nu):\n \"\"\"\n Compute the log of the cumulative distribution function for ChiSquared distribution\n at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or `TensorVariable`\n Value(s) for which log CDF is calculated. If the log CDF for\n multiple values are desired the values must be provided in a numpy\n array or `TensorVariable`.\n Returns\n -------\n TensorVariable\n \"\"\"\n return logcdf(Gamma.dist(alpha=nu / 2, beta=0.5), value)\n\n\n# TODO: Remove this once logpt for multiplication is working!\nclass WeibullBetaRV(WeibullRV):\n ndims_params = [0, 0]\n\n @classmethod\n def rng_fn(cls, rng, alpha, beta, size) -> np.ndarray:\n return np.asarray(beta * rng.weibull(alpha, size=size))\n\n\nweibull_beta = WeibullBetaRV()\n\n\nclass Weibull(PositiveContinuous):\n r\"\"\"\n Weibull log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{\\alpha x^{\\alpha - 1}\n \\exp(-(\\frac{x}{\\beta})^{\\alpha})}{\\beta^\\alpha}\n\n .. 
plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 3, 200)\n alphas = [.5, 1., 1.5, 5., 5.]\n betas = [1., 1., 1., 1., 2]\n for a, b in zip(alphas, betas):\n pdf = st.weibull_min.pdf(x, a, scale=b)\n plt.plot(x, pdf, label=r'$\\alpha$ = {}, $\\beta$ = {}'.format(a, b))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.ylim(0, 2.5)\n plt.legend(loc=1)\n plt.show()\n\n ======== ====================================================\n Support :math:`x \\in [0, \\infty)`\n Mean :math:`\\beta \\Gamma(1 + \\frac{1}{\\alpha})`\n Variance :math:`\\beta^2 \\Gamma(1 + \\frac{2}{\\alpha} - \\mu^2/\\beta^2)`\n ======== ====================================================\n\n Parameters\n ----------\n alpha : float\n Shape parameter (alpha > 0).\n beta : float\n Scale parameter (beta > 0).\n \"\"\"\n\n rv_op = weibull_beta\n\n @classmethod\n def dist(cls, alpha, beta, *args, **kwargs):\n alpha = at.as_tensor_variable(floatX(alpha))\n beta = at.as_tensor_variable(floatX(beta))\n\n assert_negative_support(alpha, \"alpha\", \"Weibull\")\n assert_negative_support(beta, \"beta\", \"Weibull\")\n\n return super().dist([alpha, beta], *args, **kwargs)\n\n def get_moment(rv, size, alpha, beta):\n mean = beta * at.gamma(1 + 1 / alpha)\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logcdf(value, alpha, beta):\n r\"\"\"\n Compute the log of the cumulative distribution function for Weibull distribution\n at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or aesara.tensor\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n a = (value / beta) ** alpha\n\n res = at.switch(\n at.lt(value, 0),\n -np.inf,\n at.log1mexp(-a),\n )\n\n return check_parameters(res, 0 < alpha, 0 < beta, msg=\"alpha > 0, beta > 0\")\n\n\nclass HalfStudentTRV(RandomVariable):\n name = \"halfstudentt\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"HalfStudentT\", \"\\\\operatorname{HalfStudentT}\")\n\n @classmethod\n def rng_fn(cls, rng, nu, sigma, size=None) -> np.ndarray:\n return np.asarray(np.abs(stats.t.rvs(nu, sigma, size=size, random_state=rng)))\n\n\nhalfstudentt = HalfStudentTRV()\n\n\nclass HalfStudentT(PositiveContinuous):\n r\"\"\"\n Half Student's T log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\sigma,\\nu) =\n \\frac{2\\;\\Gamma\\left(\\frac{\\nu+1}{2}\\right)}\n {\\Gamma\\left(\\frac{\\nu}{2}\\right)\\sqrt{\\nu\\pi\\sigma^2}}\n \\left(1+\\frac{1}{\\nu}\\frac{x^2}{\\sigma^2}\\right)^{-\\frac{\\nu+1}{2}}\n\n .. 
plot::\n :context: close-figs\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 5, 200)\n sigmas = [1., 1., 2., 1.]\n nus = [.5, 1., 1., 30.]\n for sigma, nu in zip(sigmas, nus):\n pdf = st.t.pdf(x, df=nu, loc=0, scale=sigma)\n plt.plot(x, pdf, label=r'$\\sigma$ = {}, $\\nu$ = {}'.format(sigma, nu))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ========================\n Support :math:`x \\in [0, \\infty)`\n ======== ========================\n\n Parameters\n ----------\n nu : tensor_like of float, default 1\n Degrees of freedom, also known as normality parameter (nu > 0).\n sigma : tensor_like of float, optional\n Scale parameter (sigma > 0). Converges to the standard deviation as nu\n increases (only required if lam is not specified). Defaults to 1.\n lam : tensor_like of float, optional\n Scale parameter (lam > 0). Converges to the precision as nu\n increases (only required if sigma is not specified). Defaults to 1.\n\n Examples\n --------\n .. code-block:: python\n\n # Only pass in one of lam or sigma, but not both.\n with pm.Model():\n x = pm.HalfStudentT('x', sigma=10, nu=10)\n\n with pm.Model():\n x = pm.HalfStudentT('x', lam=4, nu=10)\n \"\"\"\n rv_op = halfstudentt\n\n @classmethod\n def dist(cls, nu=1, sigma=None, lam=None, sd=None, *args, **kwargs):\n\n if sd is not None:\n sigma = sd\n\n nu = at.as_tensor_variable(floatX(nu))\n lam, sigma = get_tau_sigma(lam, sigma)\n sigma = at.as_tensor_variable(sigma)\n\n assert_negative_support(nu, \"nu\", \"HalfStudentT\")\n assert_negative_support(lam, \"lam\", \"HalfStudentT\")\n assert_negative_support(sigma, \"sigma\", \"HalfStudentT\")\n\n return super().dist([nu, sigma], *args, **kwargs)\n\n def get_moment(rv, size, nu, sigma):\n sigma, _ = at.broadcast_arrays(sigma, nu)\n if not rv_size_is_none(size):\n sigma = at.full(size, sigma)\n return sigma\n\n def logp(value, nu, sigma):\n \"\"\"\n Calculate log-probability of HalfStudentT distribution at specified value.\n\n Parameters\n ----------\n value : tensor_like of float\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n\n res = (\n at.log(2)\n + gammaln((nu + 1.0) / 2.0)\n - gammaln(nu / 2.0)\n - 0.5 * at.log(nu * np.pi * sigma**2)\n - (nu + 1.0) / 2.0 * at.log1p(value**2 / (nu * sigma**2))\n )\n\n res = at.switch(\n at.lt(value, 0),\n -np.inf,\n res,\n )\n\n return check_parameters(res, sigma > 0, nu > 0, msg=\"sigma > 0, nu > 0\")\n\n\nclass ExGaussianRV(RandomVariable):\n name = \"exgaussian\"\n ndim_supp = 0\n ndims_params = [0, 0, 0]\n dtype = \"floatX\"\n _print_name = (\"ExGaussian\", \"\\\\operatorname{ExGaussian}\")\n\n @classmethod\n def rng_fn(cls, rng, mu, sigma, nu, size=None) -> np.ndarray:\n return np.asarray(rng.normal(mu, sigma, size=size) + rng.exponential(scale=nu, size=size))\n\n\nexgaussian = ExGaussianRV()\n\n\nclass ExGaussian(Continuous):\n r\"\"\"\n Exponentially modified Gaussian log-likelihood.\n\n Results from the convolution of a normal distribution with an exponential\n distribution.\n\n The pdf of this distribution is\n\n .. 
math::\n\n f(x \\mid \\mu, \\sigma, \\tau) =\n \\frac{1}{\\nu}\\;\n \\exp\\left\\{\\frac{\\mu-x}{\\nu}+\\frac{\\sigma^2}{2\\nu^2}\\right\\}\n \\Phi\\left(\\frac{x-\\mu}{\\sigma}-\\frac{\\sigma}{\\nu}\\right)\n\n where :math:`\\Phi` is the cumulative distribution function of the\n standard normal distribution.\n\n .. plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-6, 9, 200)\n mus = [0., -2., 0., -3.]\n sigmas = [1., 1., 3., 1.]\n nus = [1., 1., 1., 4.]\n for mu, sigma, nu in zip(mus, sigmas, nus):\n pdf = st.exponnorm.pdf(x, nu/sigma, loc=mu, scale=sigma)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\sigma$ = {}, $\\nu$ = {}'.format(mu, sigma, nu))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu + \\nu`\n Variance :math:`\\sigma^2 + \\nu^2`\n ======== ========================\n\n Parameters\n ----------\n mu: float\n Mean of the normal distribution.\n sigma: float\n Standard deviation of the normal distribution (sigma > 0).\n nu: float\n Mean of the exponential distribution (nu > 0).\n\n References\n ----------\n .. [Rigby2005] Rigby R.A. and Stasinopoulos D.M. (2005).\n \"Generalized additive models for location, scale and shape\"\n Applied Statististics., 54, part 3, pp 507-554.\n\n .. [Lacouture2008] Lacouture, Y. and Couseanou, D. (2008).\n \"How to use MATLAB to fit the ex-Gaussian and other probability\n functions to a distribution of response times\".\n Tutorials in Quantitative Methods for Psychology,\n Vol. 4, No. 1, pp 35-45.\n \"\"\"\n rv_op = exgaussian\n\n @classmethod\n def dist(cls, mu=0.0, sigma=None, nu=None, sd=None, *args, **kwargs):\n\n if sd is not None:\n sigma = sd\n\n mu = at.as_tensor_variable(floatX(mu))\n sigma = at.as_tensor_variable(floatX(sigma))\n nu = at.as_tensor_variable(floatX(nu))\n\n assert_negative_support(sigma, \"sigma\", \"ExGaussian\")\n assert_negative_support(nu, \"nu\", \"ExGaussian\")\n\n return super().dist([mu, sigma, nu], *args, **kwargs)\n\n def get_moment(rv, size, mu, sigma, nu):\n mu, nu, _ = at.broadcast_arrays(mu, nu, sigma)\n moment = mu + nu\n if not rv_size_is_none(size):\n moment = at.full(size, moment)\n return moment\n\n def logp(value, mu, sigma, nu):\n \"\"\"\n Calculate log-probability of ExGaussian distribution at specified value.\n\n Parameters\n ----------\n value: numeric\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n\n Returns\n -------\n TensorVariable\n \"\"\"\n\n # Alogithm is adapted from dexGAUS.R from gamlss\n res = at.switch(\n at.gt(nu, 0.05 * sigma),\n (\n -at.log(nu)\n + (mu - value) / nu\n + 0.5 * (sigma / nu) ** 2\n + normal_lcdf(mu + (sigma**2) / nu, sigma, value)\n ),\n log_normal(value, mean=mu, sigma=sigma),\n )\n return check_parameters(\n res,\n 0 < sigma,\n 0 < nu,\n msg=\"nu > 0, sigma > 0\",\n )\n\n def logcdf(value, mu, sigma, nu):\n \"\"\"\n Compute the log of the cumulative distribution function for ExGaussian distribution\n at the specified value.\n\n References\n ----------\n .. [Rigby2005] R.A. 
Rigby (2005).\n \"Generalized additive models for location, scale and shape\"\n https://doi.org/10.1111/j.1467-9876.2005.00510.x\n\n Parameters\n ----------\n value: numeric or np.ndarray or aesara.tensor\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n\n # Alogithm is adapted from pexGAUS.R from gamlss\n res = at.switch(\n at.gt(nu, 0.05 * sigma),\n logdiffexp(\n normal_lcdf(mu, sigma, value),\n (\n (mu - value) / nu\n + 0.5 * (sigma / nu) ** 2\n + normal_lcdf(mu + (sigma**2) / nu, sigma, value)\n ),\n ),\n normal_lcdf(mu, sigma, value),\n )\n\n return check_parameters(res, 0 < sigma, 0 < nu, msg=\"sigma > 0, nu > 0\")\n\n\nclass VonMises(CircularContinuous):\n r\"\"\"\n Univariate VonMises log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\mu, \\kappa) =\n \\frac{e^{\\kappa\\cos(x-\\mu)}}{2\\pi I_0(\\kappa)}\n\n where :math:`I_0` is the modified Bessel function of order 0.\n\n .. plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-np.pi, np.pi, 200)\n mus = [0., 0., 0., -2.5]\n kappas = [.01, 0.5, 4., 2.]\n for mu, kappa in zip(mus, kappas):\n pdf = st.vonmises.pdf(x, kappa, loc=mu)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\kappa$ = {}'.format(mu, kappa))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ==========================================\n Support :math:`x \\in [-\\pi, \\pi]`\n Mean :math:`\\mu`\n Variance :math:`1-\\frac{I_1(\\kappa)}{I_0(\\kappa)}`\n ======== ==========================================\n\n Parameters\n ----------\n mu: float\n Mean.\n kappa: float\n Concentration (\\frac{1}{kappa} is analogous to \\sigma^2).\n \"\"\"\n\n rv_op = vonmises\n\n @classmethod\n def dist(cls, mu=0.0, kappa=None, *args, **kwargs):\n mu = at.as_tensor_variable(floatX(mu))\n kappa = at.as_tensor_variable(floatX(kappa))\n assert_negative_support(kappa, \"kappa\", \"VonMises\")\n return super().dist([mu, kappa], *args, **kwargs)\n\n def get_moment(rv, size, mu, kappa):\n mu, _ = at.broadcast_arrays(mu, kappa)\n if not rv_size_is_none(size):\n mu = at.full(size, mu)\n return mu\n\n\nclass SkewNormalRV(RandomVariable):\n name = \"skewnormal\"\n ndim_supp = 0\n ndims_params = [0, 0, 0]\n dtype = \"floatX\"\n _print_name = (\"SkewNormal\", \"\\\\operatorname{SkewNormal}\")\n\n @classmethod\n def rng_fn(cls, rng, mu, sigma, alpha, size=None) -> np.ndarray:\n return np.asarray(\n stats.skewnorm.rvs(a=alpha, loc=mu, scale=sigma, size=size, random_state=rng)\n )\n\n\nskewnormal = SkewNormalRV()\n\n\nclass SkewNormal(Continuous):\n r\"\"\"\n Univariate skew-normal log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\mu, \\tau, \\alpha) =\n 2 \\Phi((x-\\mu)\\sqrt{\\tau}\\alpha) \\phi(x,\\mu,\\tau)\n\n .. 
plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-4, 4, 200)\n for alpha in [-6, 0, 6]:\n pdf = st.skewnorm.pdf(x, alpha, loc=0, scale=1)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\sigma$ = {}, $\\alpha$ = {}'.format(0, 1, alpha))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ==========================================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu + \\sigma \\sqrt{\\frac{2}{\\pi}} \\frac {\\alpha }{{\\sqrt {1+\\alpha ^{2}}}}`\n Variance :math:`\\sigma^2 \\left( 1-\\frac{2\\alpha^2}{(\\alpha^2+1) \\pi} \\right)`\n ======== ==========================================\n\n Skew-normal distribution can be parameterized either in terms of precision\n or standard deviation. The link between the two parametrizations is\n given by\n\n .. math::\n \\tau = \\dfrac{1}{\\sigma^2}\n\n Parameters\n ----------\n mu: float\n Location parameter.\n sigma: float\n Scale parameter (sigma > 0).\n tau: float\n Alternative scale parameter (tau > 0).\n alpha: float\n Skewness parameter.\n\n Notes\n -----\n When alpha=0 we recover the Normal distribution and mu becomes the mean,\n tau the precision and sigma the standard deviation. In the limit of alpha\n approaching plus/minus infinite we get a half-normal distribution.\n\n \"\"\"\n rv_op = skewnormal\n\n @classmethod\n def dist(cls, alpha=1, mu=0.0, sigma=None, tau=None, sd=None, *args, **kwargs):\n if sd is not None:\n sigma = sd\n\n tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)\n alpha = at.as_tensor_variable(floatX(alpha))\n mu = at.as_tensor_variable(floatX(mu))\n tau = at.as_tensor_variable(tau)\n sigma = at.as_tensor_variable(sigma)\n\n assert_negative_support(tau, \"tau\", \"SkewNormal\")\n assert_negative_support(sigma, \"sigma\", \"SkewNormal\")\n\n return super().dist([mu, sigma, alpha], *args, **kwargs)\n\n def get_moment(rv, size, mu, sigma, alpha):\n mean = mu + sigma * (2 / np.pi) ** 0.5 * alpha / (1 + alpha**2) ** 0.5\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logp(value, mu, sigma, alpha):\n \"\"\"\n Calculate log-probability of SkewNormal distribution at specified value.\n\n Parameters\n ----------\n value: numeric\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n\n Returns\n -------\n TensorVariable\n \"\"\"\n tau, _ = get_tau_sigma(sigma=sigma)\n\n res = (\n at.log(1 + at.erf(((value - mu) * at.sqrt(tau) * alpha) / at.sqrt(2)))\n + (-tau * (value - mu) ** 2 + at.log(tau / np.pi / 2.0)) / 2.0\n )\n\n return check_parameters(res, tau > 0, msg=\"tau > 0\")\n\n\nclass Triangular(BoundedContinuous):\n r\"\"\"\n Continuous Triangular log-likelihood\n\n The pdf of this distribution is\n\n .. math::\n\n \\begin{cases}\n 0 & \\text{for } x < a, \\\\\n \\frac{2(x-a)}{(b-a)(c-a)} & \\text{for } a \\le x < c, \\\\[4pt]\n \\frac{2}{b-a} & \\text{for } x = c, \\\\[4pt]\n \\frac{2(b-x)}{(b-a)(b-c)} & \\text{for } c < x \\le b, \\\\[4pt]\n 0 & \\text{for } b < x.\n \\end{cases}\n\n .. 
plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-2, 10, 500)\n lowers = [0., -1, 2]\n cs = [2., 0., 6.5]\n uppers = [4., 1, 8]\n for lower, c, upper in zip(lowers, cs, uppers):\n scale = upper - lower\n c_ = (c - lower) / scale\n pdf = st.triang.pdf(x, loc=lower, c=c_, scale=scale)\n plt.plot(x, pdf, label='lower = {}, c = {}, upper = {}'.format(lower,\n c,\n upper))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ============================================================================\n Support :math:`x \\in [lower, upper]`\n Mean :math:`\\dfrac{lower + upper + c}{3}`\n Variance :math:`\\dfrac{upper^2 + lower^2 +c^2 - lower*upper - lower*c - upper*c}{18}`\n ======== ============================================================================\n\n Parameters\n ----------\n lower: float\n Lower limit.\n c: float\n mode\n upper: float\n Upper limit.\n \"\"\"\n\n rv_op = triangular\n bound_args_indices = (3, 5) # lower, upper\n\n @classmethod\n def dist(cls, lower=0, upper=1, c=0.5, *args, **kwargs):\n lower = at.as_tensor_variable(floatX(lower))\n upper = at.as_tensor_variable(floatX(upper))\n c = at.as_tensor_variable(floatX(c))\n\n return super().dist([lower, c, upper], *args, **kwargs)\n\n def get_moment(rv, size, lower, c, upper):\n mean = (lower + upper + c) / 3\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logcdf(value, lower, c, upper):\n \"\"\"\n Compute the log of the cumulative distribution function for Triangular distribution\n at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or aesara.tensor\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = at.switch(\n at.le(value, lower),\n -np.inf,\n at.switch(\n at.le(value, c),\n at.log(((value - lower) ** 2) / ((upper - lower) * (c - lower))),\n at.switch(\n at.lt(value, upper),\n at.log1p(-((upper - value) ** 2) / ((upper - lower) * (upper - c))),\n 0,\n ),\n ),\n )\n\n return check_parameters(\n res,\n lower <= c,\n c <= upper,\n msg=\"lower <= c <= upper\",\n )\n\n\nclass Gumbel(Continuous):\n r\"\"\"\n Univariate Gumbel log-likelihood\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\mu, \\beta) = \\frac{1}{\\beta}e^{-(z + e^{-z})}\n\n where\n\n .. math::\n\n z = \\frac{x - \\mu}{\\beta}.\n\n .. 
plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-10, 20, 200)\n mus = [0., 4., -1.]\n betas = [2., 2., 4.]\n for mu, beta in zip(mus, betas):\n pdf = st.gumbel_r.pdf(x, loc=mu, scale=beta)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\beta$ = {}'.format(mu, beta))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n\n ======== ==========================================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu + \\beta\\gamma`, where :math:`\\gamma` is the Euler-Mascheroni constant\n Variance :math:`\\frac{\\pi^2}{6} \\beta^2`\n ======== ==========================================\n\n Parameters\n ----------\n mu: float\n Location parameter.\n beta: float\n Scale parameter (beta > 0).\n \"\"\"\n rv_op = gumbel\n\n @classmethod\n def dist(\n cls, mu: float = None, beta: float = None, no_assert: bool = False, **kwargs\n ) -> RandomVariable:\n\n mu = at.as_tensor_variable(floatX(mu))\n beta = at.as_tensor_variable(floatX(beta))\n\n if not no_assert:\n assert_negative_support(beta, \"beta\", \"Gumbel\")\n\n return super().dist([mu, beta], **kwargs)\n\n def get_moment(rv, size, mu, beta):\n mean = mu + beta * np.euler_gamma\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def _distr_parameters_for_repr(self):\n return [\"mu\", \"beta\"]\n\n def logcdf(\n value: Union[float, np.ndarray, TensorVariable],\n mu: Union[float, np.ndarray, TensorVariable],\n beta: Union[float, np.ndarray, TensorVariable],\n ) -> TensorVariable:\n \"\"\"\n Compute the log of the cumulative distribution function for Gumbel distribution\n at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or aesara.tensor\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = -at.exp(-(value - mu) / beta)\n\n return check_parameters(res, 0 < beta, msg=\"beta > 0\")\n\n\nclass RiceRV(RandomVariable):\n name = \"rice\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"Rice\", \"\\\\operatorname{Rice}\")\n\n @classmethod\n def rng_fn(cls, rng, b, sigma, size=None) -> np.ndarray:\n return np.asarray(stats.rice.rvs(b=b, scale=sigma, size=size, random_state=rng))\n\n\nrice = RiceRV()\n\n\nclass Rice(PositiveContinuous):\n r\"\"\"\n Rice distribution.\n\n .. math::\n\n f(x\\mid \\nu ,\\sigma )=\n {\\frac {x}{\\sigma ^{2}}}\\exp\n \\left({\\frac {-(x^{2}+\\nu ^{2})}{2\\sigma ^{2}}}\\right)I_{0}\\left({\\frac {x\\nu }{\\sigma ^{2}}}\\right),\n\n .. 
plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0, 8, 500)\n nus = [0., 0., 4., 4.]\n sigmas = [1., 2., 1., 2.]\n for nu, sigma in zip(nus, sigmas):\n pdf = st.rice.pdf(x, nu / sigma, scale=sigma)\n plt.plot(x, pdf, label=r'$\\nu$ = {}, $\\sigma$ = {}'.format(nu, sigma))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ==============================================================\n Support :math:`x \\in (0, \\infty)`\n Mean :math:`\\sigma {\\sqrt {\\pi /2}}\\,\\,L_{{1/2}}(-\\nu ^{2}/2\\sigma ^{2})`\n Variance :math:`2\\sigma ^{2}+\\nu ^{2}-{\\frac {\\pi \\sigma ^{2}}{2}}L_{{1/2}}^{2}\\left({\\frac {-\\nu ^{2}}{2\\sigma ^{2}}}\\right)`\n ======== ==============================================================\n\n\n Parameters\n ----------\n nu: float\n noncentrality parameter.\n sigma: float\n scale parameter.\n b: float\n shape parameter (alternative to nu).\n\n Notes\n -----\n The distribution :math:`\\mathrm{Rice}\\left(|\\nu|,\\sigma\\right)` is the\n distribution of :math:`R=\\sqrt{X^2+Y^2}` where :math:`X\\sim N(\\nu \\cos{\\theta}, \\sigma^2)`,\n :math:`Y\\sim N(\\nu \\sin{\\theta}, \\sigma^2)` are independent and for any\n real :math:`\\theta`.\n\n The distribution is defined with either nu or b.\n The link between the two parametrizations is given by\n\n .. math::\n\n b = \\dfrac{\\nu}{\\sigma}\n\n \"\"\"\n rv_op = rice\n\n @classmethod\n def dist(cls, nu=None, sigma=None, b=None, sd=None, *args, **kwargs):\n if sd is not None:\n sigma = sd\n\n nu, b, sigma = cls.get_nu_b(nu, b, sigma)\n b = at.as_tensor_variable(floatX(b))\n sigma = at.as_tensor_variable(floatX(sigma))\n\n return super().dist([b, sigma], *args, **kwargs)\n\n @classmethod\n def get_nu_b(cls, nu, b, sigma):\n if sigma is None:\n sigma = 1.0\n if nu is None and b is not None:\n nu = b * sigma\n return nu, b, sigma\n elif nu is not None and b is None:\n b = nu / sigma\n return nu, b, sigma\n raise ValueError(\"Rice distribution must specify either nu\" \" or b.\")\n\n def get_moment(rv, size, nu, sigma):\n nu_sigma_ratio = -(nu**2) / (2 * sigma**2)\n mean = (\n sigma\n * np.sqrt(np.pi / 2)\n * at.exp(nu_sigma_ratio / 2)\n * (\n (1 - nu_sigma_ratio) * at.i0(-nu_sigma_ratio / 2)\n - nu_sigma_ratio * at.i1(-nu_sigma_ratio / 2)\n )\n )\n\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logp(value, b, sigma):\n \"\"\"\n Calculate log-probability of Rice distribution at specified value.\n\n Parameters\n ----------\n value: numeric\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n\n Returns\n -------\n TensorVariable\n \"\"\"\n x = value / sigma\n\n res = at.switch(\n at.le(value, 0),\n -np.inf,\n at.log(x * at.exp((-(x - b) * (x - b)) / 2) * i0e(x * b) / sigma),\n )\n\n return check_parameters(\n res,\n sigma >= 0,\n b >= 0,\n msg=\"sigma >= 0, b >= 0\",\n )\n\n\nclass Logistic(Continuous):\n r\"\"\"\n Logistic log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\mu, s) =\n \\frac{\\exp\\left(-\\frac{x - \\mu}{s}\\right)}{s \\left(1 + \\exp\\left(-\\frac{x - \\mu}{s}\\right)\\right)^2}\n\n .. 
plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-5, 5, 200)\n mus = [0., 0., 0., -2.]\n ss = [.4, 1., 2., .4]\n for mu, s in zip(mus, ss):\n pdf = st.logistic.pdf(x, loc=mu, scale=s)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $s$ = {}'.format(mu, s))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ==========================================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu`\n Variance :math:`\\frac{s^2 \\pi^2}{3}`\n ======== ==========================================\n\n\n Parameters\n ----------\n mu: float\n Mean.\n s: float\n Scale (s > 0).\n \"\"\"\n\n rv_op = logistic\n\n @classmethod\n def dist(cls, mu=0.0, s=1.0, *args, **kwargs):\n mu = at.as_tensor_variable(floatX(mu))\n s = at.as_tensor_variable(floatX(s))\n return super().dist([mu, s], *args, **kwargs)\n\n def get_moment(rv, size, mu, s):\n mu, _ = at.broadcast_arrays(mu, s)\n if not rv_size_is_none(size):\n mu = at.full(size, mu)\n return mu\n\n def logcdf(value, mu, s):\n r\"\"\"\n Compute the log of the cumulative distribution function for Logistic distribution\n at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or aesara.tensor\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = -at.log1pexp(-(value - mu) / s)\n\n return check_parameters(\n res,\n 0 < s,\n msg=\"s > 0\",\n )\n\n\nclass LogitNormalRV(RandomVariable):\n name = \"logit_normal\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"logitNormal\", \"\\\\operatorname{logitNormal}\")\n\n @classmethod\n def rng_fn(cls, rng, mu, sigma, size=None) -> np.ndarray:\n return np.asarray(expit(stats.norm.rvs(loc=mu, scale=sigma, size=size, random_state=rng)))\n\n\nlogit_normal = LogitNormalRV()\n\n\nclass LogitNormal(UnitContinuous):\n r\"\"\"\n Logit-Normal log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n f(x \\mid \\mu, \\tau) =\n \\frac{1}{x(1-x)} \\sqrt{\\frac{\\tau}{2\\pi}}\n \\exp\\left\\{ -\\frac{\\tau}{2} (logit(x)-\\mu)^2 \\right\\}\n\n\n .. 
plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n from scipy.special import logit\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(0.0001, 0.9999, 500)\n mus = [0., 0., 0., 1.]\n sigmas = [0.3, 1., 2., 1.]\n for mu, sigma in zip(mus, sigmas):\n pdf = st.norm.pdf(logit(x), loc=mu, scale=sigma) * 1/(x * (1-x))\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\sigma$ = {}'.format(mu, sigma))\n plt.legend(loc=1)\n plt.show()\n\n ======== ==========================================\n Support :math:`x \\in (0, 1)`\n Mean no analytical solution\n Variance no analytical solution\n ======== ==========================================\n\n Parameters\n ----------\n mu: float\n Location parameter.\n sigma: float\n Scale parameter (sigma > 0).\n tau: float\n Scale parameter (tau > 0).\n \"\"\"\n rv_op = logit_normal\n\n @classmethod\n def dist(cls, mu=0, sigma=None, tau=None, sd=None, **kwargs):\n if sd is not None:\n sigma = sd\n mu = at.as_tensor_variable(floatX(mu))\n tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)\n sigma = sd = at.as_tensor_variable(sigma)\n tau = at.as_tensor_variable(tau)\n assert_negative_support(sigma, \"sigma\", \"LogitNormal\")\n assert_negative_support(tau, \"tau\", \"LogitNormal\")\n\n return super().dist([mu, sigma], **kwargs)\n\n def get_moment(rv, size, mu, sigma):\n median, _ = at.broadcast_arrays(invlogit(mu), sigma)\n if not rv_size_is_none(size):\n median = at.full(size, median)\n return median\n\n def logp(value, mu, sigma):\n \"\"\"\n Calculate log-probability of LogitNormal distribution at specified value.\n\n Parameters\n ----------\n value: numeric\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n\n Returns\n -------\n TensorVariable\n \"\"\"\n tau, _ = get_tau_sigma(sigma=sigma)\n\n res = at.switch(\n at.or_(at.le(value, 0), at.ge(value, 1)),\n -np.inf,\n (\n -0.5 * tau * (logit(value) - mu) ** 2\n + 0.5 * at.log(tau / (2.0 * np.pi))\n - at.log(value * (1 - value))\n ),\n )\n\n return check_parameters(\n res,\n tau > 0,\n msg=\"tau > 0\",\n )\n\n\ndef _interpolated_argcdf(p, pdf, cdf, x):\n index = np.searchsorted(cdf, p) - 1\n slope = (pdf[index + 1] - pdf[index]) / (x[index + 1] - x[index])\n\n return x[index] + np.where(\n np.abs(slope) <= 1e-8,\n np.where(np.abs(pdf[index]) <= 1e-8, np.zeros(index.shape), (p - cdf[index]) / pdf[index]),\n (-pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))) / slope,\n )\n\n\nclass InterpolatedRV(RandomVariable):\n name = \"interpolated\"\n ndim_supp = 0\n ndims_params = [1, 1, 1]\n dtype = \"floatX\"\n _print_name = (\"Interpolated\", \"\\\\operatorname{Interpolated}\")\n\n @classmethod\n def rng_fn(cls, rng, x, pdf, cdf, size=None) -> np.ndarray:\n p = rng.uniform(size=size)\n return np.asarray(_interpolated_argcdf(p, pdf, cdf, x))\n\n\ninterpolated = InterpolatedRV()\n\n\nclass Interpolated(BoundedContinuous):\n r\"\"\"\n Univariate probability distribution defined as a linear interpolation\n of probability density function evaluated on some lattice of points.\n\n The lattice can be uneven, so the steps between different points can have\n different size and it is possible to vary the precision between regions\n of the support.\n\n The probability density function values don not have to be normalized, as the\n interpolated density is any way normalized to make the total probability\n equal to $1$.\n\n Both parameters 
``x_points`` and values ``pdf_points`` are not variables, but\n plain array-like objects, so they are constant and cannot be sampled.\n\n .. plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import pymc as pm\n import arviz as az\n from scipy.stats import gamma\n plt.style.use('arviz-darkgrid')\n rv = gamma(1.99)\n x = np.linspace(rv.ppf(0.01),rv.ppf(0.99), 1000)\n points = np.linspace(x[0], x[-1], 50)\n pdf = rv.pdf(points)\n interpolated = pm.Interpolated.dist(points, pdf)\n fig, ax = plt.subplots(1, 1)\n ax.plot(x, rv.pdf(x), 'C0', linestyle = '--', label='Original Gamma pdf',alpha=0.8,lw=2)\n ax.plot(points, pdf, color='black', marker='o', label='Lattice Points',alpha=0.5,linestyle='')\n ax.plot(x, np.exp(interpolated.logp(x).eval()),'C1',label='Interpolated pdf',alpha=0.8,lw=3)\n r = interpolated.random(size=1000)\n ax.hist(r, density=True, alpha=0.4,align ='mid',color='grey')\n ax.legend(loc='best', frameon=False)\n plt.show()\n\n ======== ===========================================\n Support :math:`x \\in [x\\_points[0], x\\_points[-1]]`\n ======== ===========================================\n\n Parameters\n ----------\n x_points: array-like\n A monotonically growing list of values. Must be non-symbolic\n pdf_points: array-like\n Probability density function evaluated on lattice ``x_points``. Must\n be non-symbolic\n \"\"\"\n\n rv_op = interpolated\n\n def __new__(cls, *args, **kwargs):\n transform = kwargs.get(\"transform\", UNSET)\n if transform is UNSET:\n\n def transform_params(*params):\n _, _, _, x_points, _, _ = params\n return floatX(x_points[0]), floatX(x_points[-1])\n\n kwargs[\"transform\"] = transforms.interval(transform_params)\n return super().__new__(cls, *args, **kwargs)\n\n @classmethod\n def dist(cls, x_points, pdf_points, *args, **kwargs):\n\n interp = InterpolatedUnivariateSpline(x_points, pdf_points, k=1, ext=\"zeros\")\n\n Z = interp.integral(x_points[0], x_points[-1])\n cdf_points = interp.antiderivative()(x_points) / Z\n pdf_points = pdf_points / Z\n\n x_points = at.constant(floatX(x_points))\n pdf_points = at.constant(floatX(pdf_points))\n cdf_points = at.constant(floatX(cdf_points))\n\n # lower = at.as_tensor_variable(x_points[0])\n # upper = at.as_tensor_variable(x_points[-1])\n # median = _interpolated_argcdf(0.5, pdf_points, cdf_points, x_points)\n\n return super().dist([x_points, pdf_points, cdf_points], **kwargs)\n\n def get_moment(rv, size, x_points, pdf_points, cdf_points):\n # cdf_points argument is unused\n moment = at.sum(at.mul(x_points, pdf_points))\n\n if not rv_size_is_none(size):\n moment = at.full(size, moment)\n\n return moment\n\n def logp(value, x_points, pdf_points, cdf_points):\n \"\"\"\n Calculate log-probability of Interpolated distribution at specified value.\n\n Parameters\n ----------\n value: numeric\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n\n Returns\n -------\n TensorVariable\n \"\"\"\n # x_points and pdf_points are expected to be non-symbolic arrays wrapped\n # within a tensor.constant. 
We use the .data method to retrieve them\n interp = InterpolatedUnivariateSpline(x_points.data, pdf_points.data, k=1, ext=\"zeros\")\n Z = interp.integral(x_points.data[0], x_points.data[-1])\n\n # interp and Z are converted to symbolic variables here\n interp_op = SplineWrapper(interp)\n Z = at.constant(Z)\n\n return at.log(interp_op(value) / Z)\n\n def _distr_parameters_for_repr(self):\n return []\n\n\nclass MoyalRV(RandomVariable):\n name = \"moyal\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"Moyal\", \"\\\\operatorname{Moyal}\")\n\n @classmethod\n def rng_fn(cls, rng, mu, sigma, size=None) -> np.ndarray:\n return np.asarray(stats.moyal.rvs(mu, sigma, size=size, random_state=rng))\n\n\nmoyal = MoyalRV()\n\n\nclass Moyal(Continuous):\n r\"\"\"\n Moyal log-likelihood.\n\n The pdf of this distribution is\n\n .. math::\n\n f(x \\mid \\mu,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma}e^{-\\frac{1}{2}\\left(z + e^{-z}\\right)},\n\n where\n\n .. math::\n\n z = \\frac{x-\\mu}{\\sigma}.\n\n .. plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n import scipy.stats as st\n import arviz as az\n plt.style.use('arviz-darkgrid')\n x = np.linspace(-10, 20, 200)\n mus = [-1., 0., 4.]\n sigmas = [2., 2., 4.]\n for mu, sigma in zip(mus, sigmas):\n pdf = st.moyal.pdf(x, loc=mu, scale=sigma)\n plt.plot(x, pdf, label=r'$\\mu$ = {}, $\\sigma$ = {}'.format(mu, sigma))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== ==============================================================\n Support :math:`x \\in (-\\infty, \\infty)`\n Mean :math:`\\mu + \\sigma\\left(\\gamma + \\log 2\\right)`, where :math:`\\gamma` is the Euler-Mascheroni constant\n Variance :math:`\\frac{\\pi^{2}}{2}\\sigma^{2}`\n ======== ==============================================================\n\n Parameters\n ----------\n mu: float\n Location parameter.\n sigma: float\n Scale parameter (sigma > 0).\n \"\"\"\n rv_op = moyal\n\n @classmethod\n def dist(cls, mu=0, sigma=1.0, *args, **kwargs):\n mu = at.as_tensor_variable(floatX(mu))\n sigma = at.as_tensor_variable(floatX(sigma))\n\n assert_negative_support(sigma, \"sigma\", \"Moyal\")\n\n return super().dist([mu, sigma], *args, **kwargs)\n\n def get_moment(rv, size, mu, sigma):\n mean = mu + sigma * (np.euler_gamma + at.log(2))\n\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logp(value, mu, sigma):\n \"\"\"\n Calculate log-probability of Moyal distribution at specified value.\n\n Parameters\n ----------\n value: numeric\n Value(s) for which log-probability is calculated. If the log probabilities for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor\n\n Returns\n -------\n TensorVariable\n \"\"\"\n scaled = (value - mu) / sigma\n res = -(1 / 2) * (scaled + at.exp(-scaled)) - at.log(sigma) - (1 / 2) * at.log(2 * np.pi)\n return check_parameters(res, 0 < sigma, msg=\"sigma > 0\")\n\n def logcdf(value, mu, sigma):\n \"\"\"\n Compute the log of the cumulative distribution function for Moyal distribution\n at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or aesara.tensor\n Value(s) for which log CDF is calculated. 
If the log CDF for multiple\n values are desired the values must be provided in a numpy array or Aesara tensor.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n scaled = (value - mu) / sigma\n res = at.log(at.erfc(at.exp(-scaled / 2) * (2**-0.5)))\n return check_parameters(\n res,\n 0 < sigma,\n msg=\"sigma > 0\",\n )\n\n\nclass PolyaGammaRV(RandomVariable):\n \"\"\"Polya-Gamma random variable.\"\"\"\n\n name = \"polyagamma\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"PG\", \"\\\\operatorname{PG}\")\n\n def __call__(self, h=1.0, z=0.0, size=None, **kwargs):\n return super().__call__(h, z, size=size, **kwargs)\n\n @classmethod\n def rng_fn(cls, rng, h, z, size=None) -> np.ndarray:\n \"\"\"\n Generate a random sample from the distribution with the given parameters\n\n Parameters\n ----------\n rng : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}\n A seed to initialize the random number generator. If None, then fresh,\n unpredictable entropy will be pulled from the OS. If an ``int`` or\n ``array_like[ints]`` is passed, then it will be passed to\n `SeedSequence` to derive the initial `BitGenerator` state. One may also\n pass in a `SeedSequence` instance.\n Additionally, when passed a `BitGenerator`, it will be wrapped by\n `Generator`. If passed a `Generator`, it will be returned unaltered.\n h : scalar or sequence\n The shape parameter of the distribution.\n z : scalar or sequence\n The exponential tilting parameter.\n size : int or tuple of ints, optional\n The number of elements to draw from the distribution. If size is\n ``None`` (default) then a single value is returned. If a tuple of\n integers is passed, the returned array will have the same shape.\n If the element(s) of size is not an integer type, it will be truncated\n to the largest integer smaller than its value (e.g (2.1, 1) -> (2, 1)).\n This parameter only applies if `h` and `z` are scalars.\n \"\"\"\n # handle the kind of rng passed to the sampler\n bg = rng._bit_generator if isinstance(rng, np.random.RandomState) else rng\n return np.asarray(\n random_polyagamma(h, z, size=size, random_state=bg).astype(aesara.config.floatX)\n )\n\n\npolyagamma = PolyaGammaRV()\n\n\nclass _PolyaGammaLogDistFunc(Op):\n __props__ = (\"get_pdf\",)\n\n def __init__(self, get_pdf=False):\n self.get_pdf = get_pdf\n\n def make_node(self, x, h, z):\n x = at.as_tensor_variable(floatX(x))\n h = at.as_tensor_variable(floatX(h))\n z = at.as_tensor_variable(floatX(z))\n shape = broadcast_shape(x, h, z)\n broadcastable = [] if not shape else [False] * len(shape)\n return Apply(self, [x, h, z], [at.TensorType(aesara.config.floatX, broadcastable)()])\n\n def perform(self, node, ins, outs):\n x, h, z = ins[0], ins[1], ins[2]\n outs[0][0] = (\n polyagamma_pdf(x, h, z, return_log=True)\n if self.get_pdf\n else polyagamma_cdf(x, h, z, return_log=True)\n ).astype(aesara.config.floatX)\n\n\nclass PolyaGamma(PositiveContinuous):\n r\"\"\"\n The Polya-Gamma distribution.\n\n The distribution is parametrized by ``h`` (shape parameter) and ``z``\n (exponential tilting parameter). The pdf of this distribution is\n\n .. math::\n\n f(x \\mid h, z) = cosh^h(\\frac{z}{2})e^{-\\frac{1}{2}xz^2}f(x \\mid h, 0),\n where :math:`f(x \\mid h, 0)` is the pdf of a :math:`PG(h, 0)` variable.\n Notice that the pdf of this distribution is expressed as an alternating-sign\n sum of inverse-Gaussian densities.\n\n .. 
math::\n\n X = \\Sigma_{k=1}^{\\infty}\\frac{Ga(h, 1)}{d_k},\n\n where :math:`d_k = 2(k - 0.5)^2\\pi^2 + z^2/2`, :math:`Ga(h, 1)` is a gamma\n random variable with shape parameter ``h`` and scale parameter ``1``.\n\n .. plot::\n\n import matplotlib.pyplot as plt\n import numpy as np\n from polyagamma import polyagamma_pdf\n plt.style.use('seaborn-darkgrid')\n x = np.linspace(0.01, 5, 500);x.sort()\n hs = [1., 5., 10., 15.]\n zs = [0.] * 4\n for h, z in zip(hs, zs):\n pdf = polyagamma_pdf(x, h=h, z=z)\n plt.plot(x, pdf, label=r'$h$ = {}, $z$ = {}'.format(h, z))\n plt.xlabel('x', fontsize=12)\n plt.ylabel('f(x)', fontsize=12)\n plt.legend(loc=1)\n plt.show()\n\n ======== =============================\n Support :math:`x \\in (0, \\infty)`\n Mean :math:`dfrac{h}{4} if :math:`z=0`, :math:`\\dfrac{tanh(z/2)h}{2z}` otherwise.\n Variance :math:`0.041666688h` if :math:`z=0`, :math:`\\dfrac{h(sinh(z) - z)(1 - tanh^2(z/2))}{4z^3}` otherwise.\n ======== =============================\n\n Parameters\n ----------\n h: float, optional\n The shape parameter of the distribution (h > 0).\n z: float, optional\n The exponential tilting parameter of the distribution.\n\n Examples\n --------\n .. code-block:: python\n\n rng = np.random.default_rng()\n with pm.Model():\n x = pm.PolyaGamma('x', h=1, z=5.5)\n with pm.Model():\n x = pm.PolyaGamma('x', h=25, z=-2.3, rng=rng, size=(100, 5))\n\n References\n ----------\n .. [1] Polson, Nicholas G., James G. Scott, and Jesse Windle.\n \"Bayesian inference for logistic models using Pólya–Gamma latent\n variables.\" Journal of the American statistical Association\n 108.504 (2013): 1339-1349.\n .. [2] Windle, Jesse, Nicholas G. Polson, and James G. Scott.\n \"Sampling Polya-Gamma random variates: alternate and approximate\n techniques.\" arXiv preprint arXiv:1405.0506 (2014)\n .. [3] Luc Devroye. \"On exact simulation algorithms for some distributions\n related to Jacobi theta functions.\" Statistics & Probability Letters,\n Volume 79, Issue 21, (2009): 2251-2259.\n .. [4] Windle, J. (2013). Forecasting high-dimensional, time-varying\n variance-covariance matrices with high-frequency data and sampling\n Pólya-Gamma random variates for posterior distributions derived\n from logistic likelihoods.(PhD thesis). Retrieved from\n http://hdl.handle.net/2152/21842\n \"\"\"\n rv_op = polyagamma\n\n @classmethod\n def dist(cls, h=1.0, z=0.0, **kwargs):\n h = at.as_tensor_variable(floatX(h))\n z = at.as_tensor_variable(floatX(z))\n\n msg = f\"The variable {h} specified for PolyaGamma has non-positive \"\n msg += \"values, making it unsuitable for this parameter.\"\n Assert(msg)(h, at.all(at.gt(h, 0.0)))\n\n return super().dist([h, z], **kwargs)\n\n def get_moment(rv, size, h, z):\n mean = at.switch(at.eq(z, 0), h / 4, tanh(z / 2) * (h / (2 * z)))\n if not rv_size_is_none(size):\n mean = at.full(size, mean)\n return mean\n\n def logp(value, h, z):\n \"\"\"\n Calculate log-probability of Polya-Gamma distribution at specified value.\n\n Parameters\n ----------\n value: numeric\n Value(s) for which log-probability is calculated. 
If the log\n probabilities for multiple values are desired the values must be\n provided in a numpy array.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n\n res = at.switch(\n at.le(value, 0),\n -np.inf,\n _PolyaGammaLogDistFunc(get_pdf=True)(value, h, z),\n )\n return check_parameters(\n res,\n h > 0,\n msg=\"h > 0\",\n )\n\n def logcdf(value, h, z):\n \"\"\"\n Compute the log of the cumulative distribution function for the\n Polya-Gamma distribution at the specified value.\n\n Parameters\n ----------\n value: numeric or np.ndarray or `TensorVariable`\n Value(s) for which log CDF is calculated. If the log CDF for multiple\n values are desired the values must be provided in a numpy array.\n\n Returns\n -------\n TensorVariable\n \"\"\"\n res = at.switch(\n at.le(value, 0),\n -np.inf,\n _PolyaGammaLogDistFunc(get_pdf=False)(value, h, z),\n )\n\n return check_parameters(\n res,\n h > 0,\n msg=\"h > 0\",\n )\n"
] | [
[
"scipy.stats.norm.rvs",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.asarray",
"numpy.log",
"numpy.zeros",
"scipy.stats.t.rvs",
"scipy.stats.rice.rvs",
"numpy.abs",
"scipy.stats.skewnorm.rvs",
"numpy.all",
"numpy.searchsorted",
"numpy.sqrt",
"scipy.stats.truncnorm.rvs",
"scipy.stats.moyal.rvs"
]
] |
NimrodShabtay/KAIR | [
"d744959d75c22b016543dc65c04922ea4b59894f"
] | [
"main_train_psnr.py"
] | [
"import os.path\nimport math\nimport argparse\nimport time\nimport random\nimport numpy as np\nfrom collections import OrderedDict\n\nimport logging\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom utils import utils_logger\nfrom utils import utils_image as util\nfrom utils import utils_option as option\nfrom utils.utils_dist import get_dist_info, init_dist\n\nfrom data.select_dataset import define_Dataset\nfrom models.select_model import define_Model\n\nfrom utils.dip_utils import *\n'''\n# --------------------------------------------\n# training code for MSRResNet\n# --------------------------------------------\n# Kai Zhang ([email protected])\n# github: https://github.com/cszn/KAIR\n# --------------------------------------------\n# https://github.com/xinntao/BasicSR\n# --------------------------------------------\n'''\n\n\ndef main(json_path='options/train_msrresnet_psnr.json'):\n\n '''\n # ----------------------------------------\n # Step--1 (prepare opt)\n # ----------------------------------------\n '''\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--opt', type=str, default=json_path, help='Path to option JSON file.')\n parser.add_argument('--launcher', default='pytorch', help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--dist', default=False)\n\n opt = option.parse(parser.parse_args().opt, is_train=True)\n opt['dist'] = parser.parse_args().dist\n\n # ----------------------------------------\n # distributed settings\n # ----------------------------------------\n if opt['dist']:\n init_dist('pytorch')\n opt['rank'], opt['world_size'] = get_dist_info()\n \n if opt['rank'] == 0:\n util.mkdirs((path for key, path in opt['path'].items() if 'pretrained' not in key))\n\n # ----------------------------------------\n # update opt\n # ----------------------------------------\n # -->-->-->-->-->-->-->-->-->-->-->-->-->-\n init_iter_G, init_path_G = option.find_last_checkpoint(opt['path']['models'], net_type='G')\n init_iter_E, init_path_E = option.find_last_checkpoint(opt['path']['models'], net_type='E')\n opt['path']['pretrained_netG'] = init_path_G\n opt['path']['pretrained_netE'] = init_path_E\n init_iter_optimizerG, init_path_optimizerG = option.find_last_checkpoint(opt['path']['models'], net_type='optimizerG')\n opt['path']['pretrained_optimizerG'] = init_path_optimizerG\n current_step = max(init_iter_G, init_iter_E, init_iter_optimizerG)\n\n border = opt['scale']\n # --<--<--<--<--<--<--<--<--<--<--<--<--<-\n\n # ----------------------------------------\n # save opt to a '../option.json' file\n # ----------------------------------------\n if opt['rank'] == 0:\n option.save(opt)\n\n # ----------------------------------------\n # return None for missing key\n # ----------------------------------------\n opt = option.dict_to_nonedict(opt)\n\n # ----------------------------------------\n # configure logger\n # ----------------------------------------\n if opt['rank'] == 0:\n logger_name = 'train'\n utils_logger.logger_info(logger_name, os.path.join(opt['path']['log'], logger_name+'.log'))\n logger = logging.getLogger(logger_name)\n logger.info(option.dict2str(opt))\n\n # ----------------------------------------\n # seed\n # ----------------------------------------\n seed = opt['train']['manual_seed']\n if seed is None:\n seed = random.randint(1, 10000)\n print('Random seed: {}'.format(seed))\n random.seed(seed)\n 
np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n '''\n # ----------------------------------------\n # Step--2 (creat dataloader)\n # ----------------------------------------\n '''\n\n # ----------------------------------------\n # 1) create_dataset\n # 2) creat_dataloader for train and test\n # ----------------------------------------\n for phase, dataset_opt in opt['datasets'].items():\n if phase == 'train':\n train_set = define_Dataset(dataset_opt)\n train_size = int(math.ceil(len(train_set) / dataset_opt['dataloader_batch_size']))\n if opt['rank'] == 0:\n logger.info('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size))\n if opt['dist']:\n train_sampler = DistributedSampler(train_set, shuffle=dataset_opt['dataloader_shuffle'], drop_last=True, seed=seed)\n train_loader = DataLoader(train_set,\n batch_size=dataset_opt['dataloader_batch_size']//opt['num_gpu'],\n shuffle=False,\n num_workers=dataset_opt['dataloader_num_workers']//opt['num_gpu'],\n drop_last=True,\n pin_memory=True,\n sampler=train_sampler)\n else:\n # train_loader = DataLoader(train_set,\n # batch_size=dataset_opt['dataloader_batch_size'],\n # shuffle=dataset_opt['dataloader_shuffle'],\n # num_workers=dataset_opt['dataloader_num_workers'],\n # drop_last=True,\n # pin_memory=True)\n train_loader = DataLoader(train_set,\n batch_size=dataset_opt['dataloader_batch_size'],\n shuffle=dataset_opt['dataloader_shuffle'],\n num_workers=dataset_opt['dataloader_num_workers'])\n\n elif phase == 'test':\n test_set = define_Dataset(dataset_opt)\n test_loader = DataLoader(test_set, batch_size=1,\n shuffle=False, num_workers=1,\n drop_last=False)\n else:\n raise NotImplementedError(\"Phase [%s] is not recognized.\" % phase)\n\n '''\n # ----------------------------------------\n # Step--3 (initialize model)\n # ----------------------------------------\n '''\n\n model = define_Model(opt)\n model.init_train()\n if opt['rank'] == 0:\n logger.info(model.info_network())\n logger.info(model.info_params())\n\n '''\n # ----------------------------------------\n # Step--4 (main training)\n # ----------------------------------------\n '''\n\n psnr_gt_vals = []\n psnr_noise_vals = []\n loss_vals = []\n for epoch in range(1000000): # keep running\n for i, train_data in enumerate(train_loader):\n\n current_step += 1\n\n # -------------------------------\n # 1) update learning rate\n # ------------------------------\n model.update_learning_rate(current_step)\n\n # -------------------------------\n # 2) feed patch pairs\n # -------------------------------\n model.feed_data(train_data)\n\n # -------------------------------\n # 3) optimize parameters\n # -------------------------------\n model.optimize_parameters(current_step)\n\n # -------------------------------\n # 4) training information\n # -------------------------------\n if current_step % opt['train']['checkpoint_print'] == 0 and opt['rank'] == 0:\n logs = model.current_log() # such as loss\n message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(epoch, current_step, model.current_learning_rate())\n for k, v in logs.items(): # merge log information into message\n message += '{:s}: {:.3e} '.format(k, v)\n logger.info(message)\n\n # -------------------------------\n # 5) save model\n # -------------------------------\n # if current_step % opt['train']['checkpoint_save'] == 0 and opt['rank'] == 0:\n # logger.info('Saving the model.')\n # model.save(current_step)\n\n # -------------------------------\n # 6) testing\n # 
-------------------------------\n if current_step % opt['train']['checkpoint_test'] == 1 and opt['rank'] == 0:\n idx = 1\n image_name_ext = os.path.basename(train_data['L_path'][0])\n img_name, ext = os.path.splitext(image_name_ext)\n\n img_dir = os.path.join(opt['path']['images'], img_name)\n util.mkdir(img_dir)\n\n model.feed_data(train_data)\n model.test()\n\n visuals = model.current_visuals()\n E_img = util.tensor2uint(visuals['E'])\n H_img = util.tensor2uint(visuals['H'])\n GT_img = util.tensor2uint(visuals['GT'])\n # -----------------------\n # save estimated image E\n # -----------------------\n # save_img_path = os.path.join(img_dir, '{:s}_{:d}.png'.format(img_name, epoch))\n # util.imsave(E_img, save_img_path)\n\n # save_img_GT_path = os.path.join(img_dir, '{:s}.png'.format(img_name))\n # util.imsave(GT_img, save_img_GT_path)\n #\n # save_img_H_path = os.path.join(img_dir, '{:s}_noise.png'.format(img_name))\n # util.imsave(H_img, save_img_H_path)\n\n # -----------------------\n # calculate PSNR\n # -----------------------\n psnr_gt = util.calculate_psnr(E_img, GT_img, border=border)\n psnr_gt_noisy = util.calculate_psnr(E_img, H_img, border=border)\n\n logger.info('PSNR-GT: {:->4d}--> {:>10s} | {:<4.2f}dB'.format(idx, image_name_ext, psnr_gt))\n logger.info('PSNR-GT-NOISE: {:->4d}--> {:>10s} | {:<4.2f}dB'.format(idx, image_name_ext, psnr_gt_noisy))\n\n save_img_path = os.path.join(img_dir, '{:s}_{:d}_{:.2f}.png'.format(img_name, current_step, psnr_gt))\n util.imsave(E_img, save_img_path)\n\n psnr_gt_vals.append(psnr_gt)\n psnr_noise_vals.append(psnr_gt_noisy)\n loss_vals.append(model.current_log()['G_loss'])\n\n plot_training_curves(loss_vals, psnr_gt_vals, psnr_noise_vals, img_dir)\n\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
]
] |
qixiuai/FATE | [
"6d50af65b96b5b226afda30dfa8e4a1e5746952d"
] | [
"python/federatedml/evaluation/metrics/classification_metric.py"
] | [
"import copy\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\n\nROUND_NUM = 6\n\n\ndef neg_pos_count(labels: np.ndarray, pos_label: int):\n pos_num = ((labels == pos_label) + 0).sum()\n neg_num = len(labels) - pos_num\n return pos_num, neg_num\n\n\ndef sort_score_and_label(labels: np.ndarray, pred_scores: np.ndarray):\n labels = np.array(labels)\n pred_scores = np.array(pred_scores)\n\n sort_idx = np.flip(pred_scores.argsort())\n sorted_labels = labels[sort_idx]\n sorted_scores = pred_scores[sort_idx]\n\n return sorted_labels, sorted_scores\n\n\nclass ConfusionMatrix(object):\n\n @staticmethod\n def compute(sorted_labels: list, sorted_pred_scores: list, score_thresholds: list, ret: list, pos_label=1):\n\n for ret_type in ret:\n assert ret_type in ['tp', 'tn', 'fp', 'fn']\n\n sorted_labels = np.array(sorted_labels)\n sorted_scores = np.array(sorted_pred_scores)\n sorted_labels[sorted_labels != pos_label] = 0\n sorted_labels[sorted_labels == pos_label] = 1\n score_thresholds = np.array([score_thresholds]).transpose()\n pred_labels = (sorted_scores > score_thresholds) + 0\n\n ret_dict = {}\n if 'tp' in ret or 'tn' in ret:\n match_arr = (pred_labels + sorted_labels)\n if 'tp' in ret:\n tp_num = (match_arr == 2).sum(axis=-1)\n ret_dict['tp'] = tp_num\n if 'tn' in ret:\n tn_num = (match_arr == 0).sum(axis=-1)\n ret_dict['tn'] = tn_num\n\n if 'fp' in ret or 'fn' in ret:\n match_arr = (sorted_labels - pred_labels)\n if 'fp' in ret:\n fp_num = (match_arr == -1).sum(axis=-1)\n ret_dict['fp'] = fp_num\n if 'fn' in ret:\n fn_num = (match_arr == 1).sum(axis=-1)\n ret_dict['fn'] = fn_num\n\n return ret_dict\n\n\nclass ThresholdCutter(object):\n\n @staticmethod\n def cut_by_step(sorted_scores, steps=0.01):\n assert isinstance(steps, float) and (0 < steps < 1)\n thresholds = list(set(sorted_scores))\n thresholds, cuts = ThresholdCutter.__filt_threshold(thresholds, 0.01)\n score_threshold = thresholds\n\n return score_threshold, cuts\n\n @staticmethod\n def cut_by_index(sorted_scores):\n cuts = np.array([c / 100 for c in range(100)])\n data_size = len(sorted_scores)\n indexs = [int(data_size * cut) for cut in cuts]\n score_threshold = [sorted_scores[idx] for idx in indexs]\n return score_threshold, cuts\n\n @staticmethod\n def __filt_threshold(thresholds, step):\n cuts = list(map(float, np.arange(0, 1, step)))\n size = len(list(thresholds))\n thresholds.sort(reverse=True)\n index_list = [int(size * cut) for cut in cuts]\n new_thresholds = [thresholds[idx] for idx in index_list]\n\n return new_thresholds, cuts\n\n @staticmethod\n def cut_by_quantile(scores, quantile_list=None, interpolation='nearest', remove_duplicate=True):\n\n if quantile_list is None: # default is 20 intervals\n quantile_list = [round(i * 0.05, 3) for i in range(20)] + [1.0]\n quantile_val = np.quantile(scores, quantile_list, interpolation=interpolation)\n if remove_duplicate:\n quantile_val = sorted(list(set(quantile_val)))\n else:\n quantile_val = sorted(list(quantile_val))\n\n if len(quantile_val) == 1:\n quantile_val = [np.min(scores), np.max(scores)]\n\n return quantile_val\n\n\nclass KS(object):\n\n @staticmethod\n def compute(labels, pred_scores, pos_label=1):\n\n sorted_labels, sorted_scores = sort_score_and_label(labels, pred_scores)\n\n score_threshold, cuts = ThresholdCutter.cut_by_index(sorted_scores)\n\n confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores, score_threshold, 
ret=['tp', 'fp'],\n pos_label=pos_label)\n\n pos_num, neg_num = neg_pos_count(sorted_labels, pos_label=pos_label)\n\n assert pos_num > 0 and neg_num > 0, \"error when computing KS metric, pos sample number and neg sample number\" \\\n \"must be larger than 0\"\n\n tpr_arr = confusion_mat['tp'] / pos_num\n fpr_arr = confusion_mat['fp'] / neg_num\n\n tpr = np.append(tpr_arr, np.array([1.0]))\n fpr = np.append(fpr_arr, np.array([1.0]))\n cuts = np.append(cuts, np.array([1.0]))\n\n ks_curve = tpr[:-1] - fpr[:-1]\n ks_val = np.max(ks_curve)\n\n return ks_val, fpr, tpr, score_threshold, cuts\n\n\nclass BiClassMetric(object):\n\n def __init__(self, cut_method='step', remove_duplicate=False, pos_label=1):\n assert cut_method in ['step', 'quantile']\n self.cut_method = cut_method\n self.remove_duplicate = remove_duplicate # available when cut_method is quantile\n self.pos_label = pos_label\n\n def prepare_confusion_mat(self, labels, scores, add_to_end=True, ):\n sorted_labels, sorted_scores = sort_score_and_label(labels, scores)\n\n score_threshold, cuts = None, None\n\n if self.cut_method == 'step':\n score_threshold, cuts = ThresholdCutter.cut_by_step(sorted_scores, steps=0.01)\n if add_to_end:\n score_threshold.append(min(score_threshold) - 0.001)\n cuts.append(1)\n\n elif self.cut_method == 'quantile':\n score_threshold = ThresholdCutter.cut_by_quantile(sorted_scores, remove_duplicate=self.remove_duplicate)\n score_threshold = list(np.flip(score_threshold))\n\n confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores, score_threshold,\n ret=['tp', 'fp', 'fn', 'tn'], pos_label=self.pos_label)\n\n return confusion_mat, score_threshold, cuts\n\n def compute(self, labels, scores, ):\n confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, scores,)\n metric_scores = self.compute_metric_from_confusion_mat(confusion_mat)\n return list(metric_scores), score_threshold, cuts\n\n def compute_metric_from_confusion_mat(self, *args):\n raise NotImplementedError()\n\n\nclass Lift(BiClassMetric):\n\n \"\"\"\n Compute lift\n \"\"\"\n\n @staticmethod\n def _lift_helper(val):\n\n tp, fp, fn, tn, labels_num = val[0], val[1], val[2], val[3], val[4]\n\n lift_x_type, lift_y_type = [], []\n\n for label_type in ['1', '0']:\n\n if label_type == '0':\n tp, tn = tn, tp\n fp, fn = fn, fp\n\n if labels_num == 0:\n lift_x = 1\n denominator = 1\n else:\n lift_x = (tp + fp) / labels_num\n denominator = (tp + fn) / labels_num\n\n if tp + fp == 0:\n numerator = 1\n else:\n numerator = tp / (tp + fp)\n\n if denominator == 0:\n lift_y = sys.float_info.max\n else:\n lift_y = numerator / denominator\n\n lift_x_type.insert(0, lift_x)\n lift_y_type.insert(0, lift_y)\n\n return lift_x_type, lift_y_type\n\n def compute(self, labels, pred_scores, pos_label=1):\n\n confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, pred_scores, add_to_end=False, )\n\n lifts_y, lifts_x = self.compute_metric_from_confusion_mat(confusion_mat, len(labels),)\n\n return lifts_y, lifts_x, list(score_threshold)\n\n def compute_metric_from_confusion_mat(self, confusion_mat, labels_len,):\n\n labels_nums = np.zeros(len(confusion_mat['tp'])) + labels_len\n\n rs = map(self._lift_helper, zip(confusion_mat['tp'], confusion_mat['fp'],\n confusion_mat['fn'], confusion_mat['tn'], labels_nums))\n\n rs = list(rs)\n\n lifts_x, lifts_y = [i[0] for i in rs], [i[1] for i in rs]\n\n return lifts_y, lifts_x\n\n\nclass Gain(BiClassMetric):\n \"\"\"\n Compute Gain\n \"\"\"\n\n @staticmethod\n def _gain_helper(val):\n\n tp, fp, 
fn, tn, num_label = val[0], val[1], val[2], val[3], val[4]\n\n gain_x_type, gain_y_type = [], []\n\n for pos_label in ['1', '0']:\n\n if pos_label == '0':\n tp, tn = tn, tp\n fp, fn = fn, fp\n\n if num_label == 0:\n gain_x = 1\n else:\n gain_x = float((tp + fp) / num_label)\n\n num_positives = tp + fn\n if num_positives == 0:\n gain_y = 1\n else:\n gain_y = float(tp / num_positives)\n\n gain_x_type.insert(0, gain_x)\n gain_y_type.insert(0, gain_y)\n\n return gain_x_type, gain_y_type\n\n def compute(self, labels, pred_scores, pos_label=1):\n\n confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, pred_scores, add_to_end=False, )\n\n gain_y, gain_x = self.compute_metric_from_confusion_mat(confusion_mat, len(labels))\n\n return gain_y, gain_x, list(score_threshold)\n\n def compute_metric_from_confusion_mat(self, confusion_mat, labels_len):\n\n labels_nums = np.zeros(len(confusion_mat['tp'])) + labels_len\n\n rs = map(self._gain_helper, zip(confusion_mat['tp'], confusion_mat['fp'],\n confusion_mat['fn'], confusion_mat['tn'], labels_nums))\n\n rs = list(rs)\n\n gain_x, gain_y = [i[0] for i in rs], [i[1] for i in rs]\n\n return gain_y, gain_x\n\n\nclass BiClassPrecision(BiClassMetric):\n \"\"\"\n Compute binary classification precision\n \"\"\"\n\n def compute_metric_from_confusion_mat(self, confusion_mat, formatted=True, impute_val=1.0):\n numerator = confusion_mat['tp']\n denominator = (confusion_mat['tp'] + confusion_mat['fp'])\n zero_indexes = (denominator == 0)\n denominator[zero_indexes] = 1\n precision_scores = numerator / denominator\n precision_scores[zero_indexes] = impute_val # impute_val is for prettifying when drawing pr curves\n\n if formatted:\n score_formatted = [[0, i] for i in precision_scores]\n return score_formatted\n else:\n return precision_scores\n\n\nclass MultiClassPrecision(object):\n \"\"\"\n Compute multi-classification precision\n \"\"\"\n\n def compute(self, labels, pred_scores):\n all_labels = list(set(labels).union(set(pred_scores)))\n all_labels.sort()\n return precision_score(labels, pred_scores, average=None), all_labels\n\n\nclass BiClassRecall(BiClassMetric):\n \"\"\"\n Compute binary classification recall\n \"\"\"\n\n def compute_metric_from_confusion_mat(self, confusion_mat, formatted=True):\n recall_scores = confusion_mat['tp'] / (confusion_mat['tp'] + confusion_mat['fn'])\n\n if formatted:\n score_formatted = [[0, i] for i in recall_scores]\n return score_formatted\n else:\n return recall_scores\n\n\nclass MultiClassRecall(object):\n \"\"\"\n Compute multi-classification recall\n \"\"\"\n\n def compute(self, labels, pred_scores):\n all_labels = list(set(labels).union(set(pred_scores)))\n all_labels.sort()\n return recall_score(labels, pred_scores, average=None), all_labels\n\n\nclass BiClassAccuracy(BiClassMetric):\n\n \"\"\"\n Compute binary classification accuracy\n \"\"\"\n\n def compute(self, labels, scores, normalize=True):\n confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, scores)\n metric_scores = self.compute_metric_from_confusion_mat(confusion_mat, normalize=normalize)\n return list(metric_scores), score_threshold[: len(metric_scores)], cuts[: len(metric_scores)]\n\n def compute_metric_from_confusion_mat(self, confusion_mat, normalize=True):\n rs = (confusion_mat['tp'] + confusion_mat['tn']) / \\\n (confusion_mat['tp'] + confusion_mat['tn'] + confusion_mat['fn'] + confusion_mat['fp']) if normalize \\\n else (confusion_mat['tp'] + confusion_mat['tn'])\n return rs[:-1]\n\n\nclass 
MultiClassAccuracy(object):\n \"\"\"\n Compute multi-classification accuracy\n \"\"\"\n\n def compute(self, labels, pred_scores, normalize=True):\n return accuracy_score(labels, pred_scores, normalize)\n\n\nclass FScore(object):\n\n \"\"\"\n Compute F score from bi-class confusion mat\n \"\"\"\n @staticmethod\n def compute(labels, pred_scores, beta=1, pos_label=1):\n\n sorted_labels, sorted_scores = sort_score_and_label(labels, pred_scores)\n score_threshold, cuts = ThresholdCutter.cut_by_step(sorted_scores, steps=0.01)\n score_threshold.append(0)\n confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores,\n score_threshold,\n ret=['tp', 'fp', 'fn', 'tn'], pos_label=pos_label)\n\n precision_computer = BiClassPrecision()\n recall_computer = BiClassRecall()\n p_score = precision_computer.compute_metric_from_confusion_mat(confusion_mat, formatted=False)\n r_score = recall_computer.compute_metric_from_confusion_mat(confusion_mat, formatted=False)\n\n beta_2 = beta * beta\n denominator = (beta_2 * p_score + r_score)\n denominator[denominator == 0] = 1e-6 # in case denominator is 0\n numerator = (1 + beta_2) * (p_score * r_score)\n f_score = numerator / denominator\n\n return f_score, score_threshold, cuts\n\n\nclass PSI(object):\n\n def compute(self, train_scores: list, validate_scores: list, train_labels=None, validate_labels=None,\n debug=False, str_intervals=False, round_num=3, pos_label=1):\n\n \"\"\"\n train/validate scores: predicted scores on train/validate set\n train/validate labels: true labels\n debug: print debug message\n if train&validate labels are not None, count positive sample percentage in every interval\n pos_label: pos label\n round_num: round number\n str_intervals: return str intervals\n \"\"\"\n\n train_scores = np.array(train_scores)\n validate_scores = np.array(validate_scores)\n quantile_points = ThresholdCutter().cut_by_quantile(train_scores)\n\n train_count = self.quantile_binning_and_count(train_scores, quantile_points)\n validate_count = self.quantile_binning_and_count(validate_scores, quantile_points)\n\n train_pos_perc, validate_pos_perc = None, None\n if train_labels is not None and validate_labels is not None:\n assert len(train_labels) == len(train_scores) and len(validate_labels) == len(validate_scores)\n train_labels, validate_labels = np.array(train_labels), np.array(validate_labels)\n train_pos_count = self.quantile_binning_and_count(train_scores[train_labels == pos_label], quantile_points)\n validate_pos_count = self.quantile_binning_and_count(validate_scores[validate_labels == pos_label],\n quantile_points)\n\n train_pos_perc = np.array(list(train_pos_count['count'])) / np.array(train_count['count'])\n validate_pos_perc = np.array(list(validate_pos_count['count'])) / np.array(validate_count['count'])\n\n # handle special cases\n train_pos_perc[train_pos_perc == np.inf] = -1\n validate_pos_perc[validate_pos_perc == np.inf] = -1\n train_pos_perc[np.isnan(train_pos_perc)] = 0\n validate_pos_perc[np.isnan(validate_pos_perc)] = 0\n\n if debug:\n print(train_count)\n print(validate_count)\n\n assert (train_count['interval'] == validate_count['interval']).all()\n\n expected_interval = np.array(list(train_count['count']))\n actual_interval = np.array(list(validate_count['count']))\n\n expected_interval = expected_interval.astype(np.float)\n actual_interval = actual_interval.astype(np.float)\n\n psi_scores, total_psi, expected_interval, actual_interval, expected_percentage, actual_percentage \\\n = self.psi_score(expected_interval, 
actual_interval)\n\n intervals = train_count['interval'] if not str_intervals else PSI.intervals_to_str(train_count['interval'],\n round_num=round_num)\n\n if train_labels is None and validate_labels is None:\n return psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, actual_percentage, \\\n intervals\n else:\n return psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, actual_percentage, \\\n train_pos_perc, validate_pos_perc, intervals\n\n @staticmethod\n def quantile_binning_and_count(scores, quantile_points):\n\n \"\"\"\n left edge and right edge of last interval are closed\n \"\"\"\n\n assert len(quantile_points) >= 2\n\n left_bounds = copy.deepcopy(quantile_points[:-1])\n right_bounds = copy.deepcopy(quantile_points[1:])\n\n last_interval_left = left_bounds.pop()\n last_interval_right = right_bounds.pop()\n\n bin_result_1, bin_result_2 = None, None\n\n if len(left_bounds) != 0 and len(right_bounds) != 0:\n bin_result_1 = pd.cut(scores, pd.IntervalIndex.from_arrays(left_bounds, right_bounds, closed='left'))\n\n bin_result_2 = pd.cut(scores, pd.IntervalIndex.from_arrays([last_interval_left], [last_interval_right],\n closed='both'))\n\n count1 = None if bin_result_1 is None else bin_result_1.value_counts().reset_index()\n count2 = bin_result_2.value_counts().reset_index()\n\n rs = pd.concat([count1, count2], axis=0)\n rs.columns = ['interval', 'count']\n return rs\n\n @staticmethod\n def interval_psi_score(val):\n expected, actual = val[0], val[1]\n return (actual - expected) * np.log(actual / expected)\n\n @staticmethod\n def intervals_to_str(intervals, round_num=3):\n str_intervals = []\n for interval in list(intervals):\n left_bound, right_bound = '[', ']'\n if interval.closed == 'left':\n right_bound = ')'\n elif interval.closed == 'right':\n left_bound = '('\n str_intervals.append(\"{}{}, {}{}\".format(left_bound, round(interval.left, round_num),\n round(interval.right, round_num), right_bound))\n\n return str_intervals\n\n @ staticmethod\n def psi_score(expected_interval: np.ndarray, actual_interval: np.ndarray, debug=False):\n\n expected_sum = expected_interval.sum()\n expected_interval[expected_interval == 0] = 1e-6 # in case no overlap samples\n\n actual_sum = actual_interval.sum()\n actual_interval[actual_interval == 0] = 1e-6 # in case no overlap samples\n\n expected_percentage = expected_interval / expected_sum\n actual_percentage = actual_interval / actual_sum\n\n if debug:\n print(expected_interval)\n print(actual_interval)\n print(expected_percentage)\n print(actual_percentage)\n\n psi_scores = list(map(PSI.interval_psi_score, zip(expected_percentage, actual_percentage)))\n psi_scores = np.array(psi_scores)\n total_psi = psi_scores.sum()\n return psi_scores, total_psi, expected_interval, actual_interval, expected_percentage, actual_percentage\n\n"
] | [
[
"numpy.max",
"numpy.quantile",
"numpy.array",
"numpy.isnan",
"numpy.log",
"numpy.min",
"sklearn.metrics.accuracy_score",
"numpy.arange",
"pandas.concat",
"pandas.IntervalIndex.from_arrays",
"sklearn.metrics.precision_score",
"numpy.flip",
"sklearn.metrics.recall_score"
]
] |
morris-frank/unnamed-source-separation | [
"e23af1761e8fdd587a67b307eaee800b02f5bccf"
] | [
"thesis/train.py"
] | [
"import os\nimport time\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom statistics import mean\nfrom typing import Dict, List, Optional\n\nimport torch\nfrom colorama import Fore\nfrom torch import optim\nfrom torch.nn.utils import clip_grad_value_\nfrom torch.utils import data\nfrom torch import nn\n\nfrom functools import reduce\nfrom operator import add\nfrom .io import glob_remove\nfrom .nn.models import BaseModel\nfrom .utils import max_grad, any_invalid_grad, _LossLogger\n\nLAST_LOG = defaultdict(float)\nLAST_LOG[\"start\"] = True\n\n_wandb = None\n\n\ndef prepare_batch(batch, device):\n if isinstance(batch, list):\n if isinstance(batch[0], list) or isinstance(batch[0], tuple):\n (x1, x2), y = (\n (batch[0][0].to(device), batch[0][1].to(device)),\n batch[1].to(device),\n )\n batch = ((x1, x2), y)\n else:\n x, y = batch[0].to(device), batch[1].to(device)\n batch = (x, y)\n else:\n x = batch.to(device)\n batch = (x,)\n return batch\n\n\ndef print_log(LL, add_log: Dict, cat: str, step: Optional[int] = None):\n log = add_log.copy()\n\n if hasattr(LL, \"L\"):\n LL = LL.ℒ\n\n # Add new logs from ℒ logger\n if isinstance(LL, _LossLogger):\n for k, v in LL.log.items():\n if len(v):\n log[f\"{k}/{cat}\"] = reduce(add, v) / len(v)\n LL.log[k] = []\n\n # Print to console\n _step = step if step is not None else \"---\"\n print(f\"step {_step:>9} {Fore.YELLOW}[{cat}]\", end=\" \")\n for k, v in log.items():\n print(f\"{Fore.RESET}{'/'.join(k.split('/')[:-1])}=\", end=\"\")\n col = (\n Fore.CYAN\n if v == LAST_LOG[k] or LAST_LOG[\"start\"]\n else (Fore.GREEN if v < LAST_LOG[k] else Fore.RED)\n )\n print(f\"{col}{v:.3e}\", end=f\"{Fore.RESET}, \")\n LAST_LOG[k] = v\n print()\n LAST_LOG[\"start\"] = False\n\n if _wandb is not None:\n _wandb.log(log, step=step)\n\n\ndef train(\n model: BaseModel,\n gpu: List[int],\n train_loader: data.DataLoader,\n test_loader: data.DataLoader,\n iterations: int,\n wandb: bool = False,\n keep_checkpoints: bool = False,\n keep_optim: bool = False,\n base_lr: float = 1e-4,\n start_it: int = 0,\n optimizer_state_dict = None,\n scheduler_state_dict = None,\n):\n \"\"\"\n Args:\n model: the module to train\n gpu: list of GPUs to use (int indexes)\n train_loader: dataset loader for the training data\n test_loader: dataset loader for the test data\n iterations: number of iterations to train for\n wandb: Whether to log wandb\n keep_checkpoints: whether to keep all checkpoints not just the last one\n keep_optim: whether to also save the optimizer\n base_lr: the starting learing rate\n start_it\n optimizer_state_dict\n scheduler_state_dict\n \"\"\"\n model_id = f\"{datetime.today():%b%d-%H%M}_{type(model).__name__}_{model.name}\"\n params = model.params\n\n os.makedirs(\"./checkpoints/\", exist_ok=True)\n\n if wandb:\n global _wandb\n import wandb as __wandb\n\n _wandb = __wandb\n _wandb.init(\n name=model_id,\n config=params[\"kwargs\"],\n project=__name__.split(\".\")[0],\n )\n\n # Move model to device(s):\n device = f\"cuda:{gpu[0]}\" if gpu else \"cpu\"\n dataparallel = False\n if gpu:\n dataparallel = len(gpu) > 1\n if dataparallel:\n modelclass = model.__class__\n model = model.to(device)\n model = nn.DataParallel(model, device_ids=gpu)\n LL = _LossLogger()\n else:\n model = model.to(device)\n LL = model.ℒ\n\n # Setup optimizer and learning rate scheduler\n optimizer = optim.Adam(model.parameters(), eps=1e-8, lr=base_lr)\n if optimizer_state_dict is not None:\n optimizer.load_state_dict(optimizer_state_dict)\n lr_milestones = 
torch.linspace(iterations * 0.36, iterations, 5).tolist()\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, lr_milestones, gamma=0.6)\n if scheduler_state_dict is not None:\n scheduler.load_state_dict(scheduler_state_dict)\n\n losses, it_times = [], []\n train_iterator = iter(train_loader)\n it_timer = time.time()\n model.train()\n print(\n f\"\\n{Fore.YELLOW}This is {Fore.GREEN}{model_id}{Fore.RESET}\\n\"\n f\"{Fore.YELLOW}{f'{Fore.GREEN} Start training {Fore.YELLOW}'.center(80, '-')}{Fore.RESET}\"\n )\n for it in range(start_it, iterations):\n it_start_time = time.time()\n # Load next random batch\n try:\n batch = next(train_iterator)\n except StopIteration:\n train_iterator = iter(train_loader)\n batch = next(train_iterator)\n\n if dataparallel:\n ℒ = modelclass.test(model, *prepare_batch(batch, device), LL)\n else:\n ℒ = model.test(*prepare_batch(batch, device))\n model.zero_grad()\n\n if torch.isnan(ℒ) or torch.isinf(ℒ):\n print(\n Fore.RED + \"NaN Loss ℒ.\\n\"\n \"Try Again. I'm gonna try to continue…\" + Fore.RESET\n )\n exit()\n else:\n ℒ.backward()\n clip_grad_value_(model.parameters(), 30)\n if any_invalid_grad(model.parameters()):\n print(\n Fore.RED + \"There was a NaN or inf in one of the grads.\\n\"\n \"Saving everything……\" + Fore.RESET\n )\n save_point = {\n \"model_state_dict\": model.state_dict(),\n \"params\": params,\n \"batch\": batch,\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"it\": it,\n \"scheduler\": scheduler.state_dict(),\n }\n torch.save(\n save_point, f\"checkpoints/invalid_grad_{model_id}_{it:06}.pt\"\n )\n exit()\n optimizer.step()\n scheduler.step()\n\n losses.append(ℒ.detach().item())\n it_times.append(time.time() - it_start_time)\n\n # LOG INFO (every 10 mini batches)\n if it % 10 == 0 or it == iterations - 1:\n log = {\n \"Loss/train\": mean(losses),\n \"Time/train\": mean(it_times),\n \"LR/train\": optimizer.param_groups[0][\"lr\"],\n \"MaxGrad/train\": max_grad(model.parameters()),\n }\n print_log(LL if dataparallel else model, log, \"train\", step=it)\n losses, it_times = [], []\n\n # TEST AND SAVE THE MODEL (every 30min)\n if (time.time() - it_timer) > 1800 or it == iterations - 1:\n if not keep_checkpoints:\n glob_remove(f\"checkpoints/{model_id}_*.pt\")\n save_point = {\n \"it\": it,\n \"model_state_dict\": model.state_dict(),\n \"params\": params,\n }\n if keep_optim:\n save_point.update(\n {\"optimizer_state_dict\": optimizer.state_dict(),\n \"scheduler\": scheduler.state_dict(),\n \"test\": ℒ}\n )\n torch.save(save_point, f\"checkpoints/{model_id}_{it:06}.pt\")\n test_time, test_losses = time.time(), []\n model.eval()\n with torch.no_grad():\n for batch in test_loader:\n if dataparallel:\n ℒ = modelclass.test(model,\n *prepare_batch(batch, device), LL)\n else:\n ℒ = model.test(*prepare_batch(batch, device))\n test_losses.append(ℒ.detach().item())\n\n log = {\"Loss/test\": mean(test_losses),\n \"Time/test\": time.time() - test_time}\n\n print_log(LL if dataparallel else model, log, \"test\")\n it_timer = time.time()\n model.train()\n"
] | [
[
"torch.isnan",
"torch.save",
"torch.no_grad",
"torch.linspace",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.isinf",
"torch.nn.DataParallel"
]
] |
gbc-sid/MTMFI-VOS | [
"b26dbfad515ebf61b4f6316d166f11735b5dcb9a"
] | [
"lib/training_datasets.py"
] | [
"from pathlib import Path\nimport json\nfrom collections import OrderedDict as odict\nfrom easydict import EasyDict as edict\nfrom PIL import Image\nfrom tqdm import tqdm\nimport cv2\nimport numpy as np\nimport random\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset\n\n\nclass SampleSpec:\n\n def __init__(self, seq_name=None, obj_id=None, frames=None, frame0_id=None):\n self.seq_name = seq_name\n self.obj_id = obj_id\n self.frames = frames\n self.frame0_id = frame0_id\n\n def __repr__(self):\n return \"SampleSpec: \" + str(vars(self))\n\n def encoded(self):\n v = json.dumps(vars(self))\n return v\n\n @staticmethod\n def from_encoded(meta):\n specs = [SampleSpec(**json.loads(m)) for m in meta]\n return specs\n\n\nclass TrainingDataset(Dataset):\n\n def __init__(self, name, dset_path):\n super().__init__()\n self.dset_path = Path(dset_path)\n self.name = name\n\n def load_meta(self):\n\n meta_file = Path(__file__).resolve().parent / (self.name + \"_meta.pth\")\n if meta_file.exists():\n return torch.load(meta_file)\n\n print(\"Caching occlusions for %s, please wait.\" % self.anno_path)\n\n frame_names = dict()\n label_pixel_counts = dict()\n\n paths = [self.anno_path / seq for seq in sorted(self.sequences)]\n for k, p in enumerate(tqdm(paths)):\n\n frames = []\n num_objects = 0\n\n # Gather per-frame stats\n\n seq_lb_files = list(sorted(p.glob(\"*.png\")))\n for lb_path in seq_lb_files:\n\n lb = np.array(Image.open(lb_path))\n obj_ids, counts = np.unique(lb, return_counts=True)\n frames.append((obj_ids, counts))\n num_objects = max(num_objects, max(obj_ids))\n\n # Populate a matrix of object pixel counts\n\n px_counts = np.zeros((len(frames), num_objects + 1))\n\n for i, (obj_ids, counts) in enumerate(frames):\n for oid, cnt in zip(obj_ids, counts):\n px_counts[i, oid] = cnt\n\n frame_names[p.stem] = [f.stem for f in seq_lb_files]\n label_pixel_counts[p.stem] = (px_counts, np.max(px_counts, axis=0))\n\n # Generate object occlusions information and save\n\n occlusions = self._generate_occlusions(label_pixel_counts)\n meta = dict(frame_names=frame_names, occlusions=occlusions)\n torch.save(meta, meta_file)\n\n return meta\n\n def generate_samples(self, epoch_samples, epoch_repeats, min_seq_length, sample_size):\n\n d = self.load_meta()\n self.occlusions = d['occlusions']\n self.frame_names = d['frame_names']\n\n sequences = []\n for seq_name in self.sequences:\n if self.sequence_length(seq_name) < min_seq_length:\n continue\n for obj_id in self.object_ids(seq_name)[1:].tolist():\n sequences.append(edict(name=seq_name, obj_id=obj_id))\n\n if epoch_samples > 0:\n sequences = random.sample(sequences, epoch_samples)\n\n self.specs = []\n for seq in sequences:\n for rep in range(epoch_repeats):\n spec = self.sample_random_image_set(seq.name, obj_id=seq.obj_id, size=sample_size)\n self.specs.append(spec)\n\n def sample_random_image_set(self, seq_name, obj_id, size=3):\n \"\"\" Create a SampleSpec object representing a (random) set of frames from a sequence.\n :param seq_name: Sequence name\n :param obj_id: Object to track (int)\n :param size: Set size > 1\n :return: SampleSpec object\n \"\"\"\n object_visible = self.object_visibility(seq_name, [obj_id], merge_objects=True)\n\n possible_frames = np.where(object_visible)[0]\n frames = np.random.choice(possible_frames, size=1, replace=False).tolist()\n first_frame = frames[0]\n\n num_frames = self.sequence_length(seq_name)\n allframes = np.arange(num_frames)\n allframes = allframes[allframes != first_frame]\n 
frames = np.random.choice(allframes, size=size, replace=False).tolist()\n\n return SampleSpec(seq_name, obj_id, frames=[first_frame, *frames[1:]], frame0_id=first_frame)\n\n def object_ids(self, seq_name):\n \"\"\" Find the ids of objects seen in the sequence. id 0 == background \"\"\"\n assert self.occlusions is not None\n occlusions = self.occlusions[seq_name]\n always_occluded = occlusions.sum(axis=0) == occlusions.shape[0]\n object_ids = np.where(np.invert(always_occluded))[0]\n\n return object_ids\n\n def object_visibility(self, seq_name, obj_ids, merge_objects=False):\n \"\"\" Get boolean vector of per-frame object visibility in the named sequence.\n :param seq_name: Sequence name\n :param obj_ids: Zero (None), one (int) or more (list) object ids.\n If zero, all objects (except the background) are selected\n :param merge_objects: If true, the visibilities of multiple objects are merged.\n :return:\n \"\"\"\n assert self.occlusions is not None\n\n visible = np.invert(self.occlusions[seq_name])\n\n if obj_ids is None:\n visible = visible[:, 1:]\n else:\n visible = visible[:, obj_ids]\n\n if visible.ndim == 1:\n visible = np.expand_dims(visible, axis=1)\n\n if merge_objects:\n visible = visible.any(axis=1)\n\n if visible.ndim == 1:\n visible = np.expand_dims(visible, axis=1)\n\n return visible\n\n def sequence_length(self, seq_name):\n return self.occlusions[seq_name].shape[0]\n\n def __len__(self):\n return len(self.specs)\n\n def __getitem__(self, item):\n\n spec = self.specs[item]\n images = []\n labels = []\n\n frame_names = self.frame_names[spec.seq_name]\n for f in spec.frames:\n frame = frame_names[f]\n\n im = np.array(Image.open(self.jpeg_path / spec.seq_name / (frame + \".jpg\")))\n s = 480 / im.shape[0]\n im = cv2.resize(im, (854, 480), cv2.INTER_AREA if (s < 1.0) or (self.name == 'davis') else cv2.INTER_CUBIC)\n im = torch.from_numpy(im.transpose(2, 0, 1))\n images.append(im)\n\n lb = np.array(Image.open(self.anno_path / spec.seq_name / (frame + \".png\")))\n lb = (lb == spec.obj_id).astype(np.uint8) # Relabel selected object to id 1\n lb = torch.as_tensor(lb, dtype=torch.float32).view(1, 1, *lb.shape[:2])\n lb = F.interpolate(lb, (480, 854), mode='nearest').byte().squeeze(0)\n labels.append(lb)\n\n return images, labels, spec.encoded()\n\n# Davis数据集\nclass DAVISDataset(TrainingDataset):\n\n def __init__(self, dset_path: Path, epoch_repeats=1, epoch_samples=0, min_seq_length=4, sample_size=3):\n super().__init__(\"davis\", dset_path)\n\n self.jpeg_path = self.dset_path / \"JPEGImages\" / \"480p\"\n self.anno_path = self.dset_path / \"Annotations\" / \"480p\"\n self.sequences = [s.strip() for s in open(self.dset_path / \"ImageSets/2017/train.txt\").readlines()]\n\n self.generate_samples(epoch_samples, epoch_repeats, min_seq_length, sample_size)\n\n def _generate_occlusions(self, label_pixel_counts):\n \"\"\" Generate per-frame, per-object occlusion flags\n Each sequence is an (N, M) boolean array of N frames and M object ids. True/False if occluded/visible.\n object 0 is the background. 
\"\"\"\n\n occlusions = odict()\n\n min_px = 100 # Hard minimum\n # 应该是没有任何遮挡的序列\n never_occluded = ['bus', 'car-turn', 'drift-turn', 'kid-football', 'koala',\n 'mallard-fly', 'motocross-bumps', 'motorbike',\n 'rallye', 'snowboard', 'train', 'upside-down']\n\n for seq_name in tqdm(self.sequences):\n\n px_counts, max_counts = label_pixel_counts[seq_name]\n seq_length = len(list((self.jpeg_path / seq_name).glob(\"*.jpg\")))\n #print(seq_name, ': has', seq_length, 'frames')\n\n if seq_name in never_occluded:\n occ = np.zeros(shape=px_counts.shape, dtype=np.bool)\n else:\n\n # Pixel fraction\n\n if seq_name in ('bmx-bumps', 'disk-jockey'):\n occ_threshold = 0.5\n elif seq_name in ('boxing-fisheye', 'cat-girl', 'dog-gooses'):\n occ_threshold = 0.2\n elif seq_name in ('tractor-sand', 'drone'):\n occ_threshold = 0.1\n else:\n occ_threshold = 0.25\n\n occ = (px_counts / (max_counts + 0.001)) < occ_threshold\n occ = occ + (max_counts == 0)\n\n # Sequence specific tweaks\n\n if seq_name == 'classic-car':\n occ[:56, :] = False\n elif seq_name == 'drone':\n occ[:17, 1] = False # Red quad\n occ[24:60, 1] = False\n elif seq_name == 'night-race':\n occ[:29, :] = False\n occ[:, 2] = False # Green car\n\n occ = occ + (px_counts < min_px) # Apply a hard minimum\n\n occlusions[seq_name] = occ\n\n return occlusions\n\n\nclass YouTubeVOSDataset(TrainingDataset):\n\n def __init__(self, dset_path, epoch_samples=4000, epoch_repeats=1, min_seq_length=4, sample_size=3, year=2018):\n super().__init__(\"ytvos\" + str(year), dset_path)\n\n self.jpeg_path = self.dset_path / \"train\" / \"JPEGImages\"\n self.anno_path = self.dset_path / \"train\" / \"Annotations\"\n self.sequences = [s.strip() for s in open(Path(__file__).resolve().parent / \"ytvos_jjtrain.txt\").readlines()]\n\n self.generate_samples(epoch_samples, epoch_repeats, min_seq_length, sample_size)\n\n def _generate_occlusions(self, label_pixel_counts):\n \"\"\" Generate per-frame, per-object occlusion flags\n Each sequence is an (N, M) boolean array of N frames and M object ids. True/False if occluded/visible.\n object 0 is the background. \"\"\"\n\n occlusions = odict()\n for seq_name, (px_counts, max_counts) in label_pixel_counts.items():\n occlusions[seq_name] = (px_counts < 100)\n\n return occlusions\n\nclass AlibabaDataset(TrainingDataset):\n\n def __init__(self, dset_path: Path, epoch_repeats=1, epoch_samples=0, min_seq_length=4, sample_size=3):\n super().__init__(\"ali\", dset_path)\n\n self.jpeg_path = self.dset_path / \"JPEGImages\"\n self.anno_path = self.dset_path / \"Annotations\"\n self.sequences = [s.strip() for s in open(self.dset_path / \"ImageSets/train.txt\").readlines()]\n\n self.generate_samples(epoch_samples, epoch_repeats, min_seq_length, sample_size)\n\n def _generate_occlusions(self, label_pixel_counts):\n \"\"\" Generate per-frame, per-object occlusion flags\n Each sequence is an (N, M) boolean array of N frames and M object ids. True/False if occluded/visible.\n object 0 is the background. \"\"\"\n\n occlusions = odict()\n for seq_name, (px_counts, max_counts) in label_pixel_counts.items():\n occlusions[seq_name] = (px_counts < 100)\n\n return occlusions\n"
] | [
[
"numpy.max",
"numpy.random.choice",
"numpy.zeros",
"torch.save",
"torch.nn.functional.interpolate",
"numpy.where",
"numpy.invert",
"numpy.arange",
"torch.load",
"numpy.unique",
"torch.as_tensor",
"numpy.expand_dims"
]
] |
glisses/segmentation-and-uncertainty-estimation-on-CityScapes | [
"aed35e61383817579271b67748fb0e33866c09d6"
] | [
"UNet/CityScapes_epistemic_and_aleatoric.py"
] | [
"import tensorflow as tf\nimport datetime\nimport os\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\n\nimport tf_slim as slim\nimport Visualize\nimport utils\n\nimages = glob.glob('./Train_rgb/*_rgb.png') # tf.io.glob.glob\nlabels = glob.glob('./Train_seg/*_seg.png')\n\nimage_names = []\nfor paths in images:\n image_names.append(paths.split('/Train_rgb')[1].split('_rgb.png')[0])\n\nlabels = ['./Train_seg/' + name + '_seg.png' for name in image_names]\n\nindex = np.random.permutation(2975)\nimages = np.array(images)[index]\nlabels = np.array(labels)[index]\n\nval_img = glob.glob('./Test_rgb/*_rgb.png') # tf.io.glob.glob\nval_label = glob.glob('./Test_seg/*_seg.png')\n\nimage_names = []\nfor paths in val_img:\n image_names.append(paths.split('/Test_rgb')[1].split('_rgb.png')[0])\n\nval_label = ['./Test_seg/' + name + '_seg.png' for name in image_names]\n\ntrain_data = tf.data.Dataset.from_tensor_slices((images, labels))\nval_data = tf.data.Dataset.from_tensor_slices((val_img, val_label))\n\nBATCH_SIZE = 32\nBUFFER_SIZE = 300\nSTEPS_PER_EPOCH = 2975 // BATCH_SIZE\nVALIDATION_STEPS = 500 // BATCH_SIZE\nauto = tf.data.experimental.AUTOTUNE\n\ntrain_data = train_data.map(utils.load_img_train, num_parallel_calls=auto)\nval_data = val_data.map(utils.load_img_val, num_parallel_calls=auto)\n\ntrain_data = train_data.cache().repeat().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(auto)\nval_data = val_data.cache().batch(BATCH_SIZE)\n\n\n# Unet Model with dropout\n\ndef create_model():\n inputs = tf.keras.layers.Input(shape=(256, 256, 3))\n\n layer0 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(inputs)\n layer0 = tf.keras.layers.BatchNormalization()(layer0)\n layer0 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(layer0)\n layer0 = tf.keras.layers.BatchNormalization()(layer0) # 256*256*64\n\n layer1 = tf.keras.layers.MaxPooling2D(padding='same')(layer0) # 128*128*64\n\n layer1 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(layer1)\n layer1 = tf.keras.layers.BatchNormalization()(layer1)\n layer1 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(layer1)\n layer1 = tf.keras.layers.BatchNormalization()(layer1) # 128*128*128\n\n layer2 = tf.keras.layers.MaxPooling2D(padding='same')(layer1) # 64*64*128\n\n layer2 = tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu')(layer2)\n layer2 = tf.keras.layers.BatchNormalization()(layer2)\n layer2 = tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu')(layer2)\n layer2 = tf.keras.layers.BatchNormalization()(layer2) # 64*64*256\n\n layer3 = tf.keras.layers.MaxPooling2D(padding='same')(layer2) # 32*32*256\n\n layer3 = tf.keras.layers.Conv2D(512, 3, padding='same', activation='relu')(layer3)\n layer3 = tf.keras.layers.BatchNormalization()(layer3)\n layer3 = tf.keras.layers.Conv2D(512, 3, padding='same', activation='relu')(layer3)\n layer3 = tf.keras.layers.BatchNormalization()(layer3) # 32*32*512\n\n layer4 = tf.keras.layers.MaxPooling2D(padding='same')(layer3) # 16*16*512\n\n layer4 = tf.keras.layers.Conv2D(1024, 3, padding='same', activation='relu')(layer4)\n layer4 = tf.keras.layers.BatchNormalization()(layer4)\n layer4 = tf.keras.layers.Conv2D(1024, 3, padding='same', activation='relu')(layer4)\n layer4 = tf.keras.layers.BatchNormalization()(layer4) # 16*16*1024\n\n layer5 = tf.keras.layers.Conv2DTranspose(512, 2, strides=2, padding='same', activation='relu')(layer4)\n layer5 = tf.keras.layers.BatchNormalization()(layer5) # 
32*32*512\n\n layer6 = tf.concat([layer3, layer5], axis=-1) # 32*32*1024\n\n layer6 = tf.keras.layers.Conv2D(512, 3, padding='same', activation='relu')(layer6)\n layer6 = tf.keras.layers.BatchNormalization()(layer6)\n layer6 = tf.keras.layers.Conv2D(512, 3, padding='same', activation='relu')(layer6)\n layer6 = tf.keras.layers.BatchNormalization()(layer6) # 32*32*512\n\n layer7 = tf.keras.layers.Conv2DTranspose(256, 2, strides=2, padding='same', activation='relu')(layer6)\n layer7 = tf.keras.layers.BatchNormalization()(layer7) # 64*64*256\n\n layer8 = tf.concat([layer2, layer7], axis=-1) # 64*64*512\n\n layer8 = tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu')(layer8)\n layer8 = tf.keras.layers.BatchNormalization()(layer8)\n layer8 = tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu')(layer8)\n layer8 = tf.keras.layers.BatchNormalization()(layer8) # 64*64*256\n\n layer9 = tf.keras.layers.Conv2DTranspose(128, 2, strides=2, padding='same', activation='relu')(layer8)\n layer9 = tf.keras.layers.BatchNormalization()(layer9) # 128*128*128\n\n layer10 = tf.concat([layer1, layer9], axis=-1) # 128*128*256\n layer10 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(layer10)\n layer10 = tf.keras.layers.BatchNormalization()(layer10)\n layer10 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(layer10)\n layer10 = tf.keras.layers.BatchNormalization()(layer10) # 128*128*128\n layer10 = tf.keras.layers.Dropout(0.5, noise_shape=None)(layer10)\n\n layer11 = tf.keras.layers.Conv2DTranspose(64, 2, strides=2, padding='same', activation='relu')(layer10)\n layer11 = tf.keras.layers.BatchNormalization()(layer11) # 256*256*64\n\n layer12 = tf.concat([layer0, layer11], axis=-1) # 256*256*128\n\n layer12 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(layer12)\n layer12 = tf.keras.layers.BatchNormalization()(layer12)\n layer12 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(layer12)\n layer12 = tf.keras.layers.BatchNormalization()(layer12) # 256*256*64\n\n outputs = tf.keras.layers.Conv2D(34, 1, activation='softmax')(layer12) # 256*256*34\n\n return tf.keras.Model(inputs=inputs, outputs=outputs)\n\n\n# create the model\nmodel = create_model()\nmodel.summary()\n\n\nclass MeanIoU(tf.keras.metrics.MeanIoU):\n def __call__(self, y_true, y_pred, sample_weight=None):\n y_pred = tf.argmax(y_pred, axis=-1)\n return super().__call__(y_true, y_pred, sample_weight=sample_weight)\n\n\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['acc'])\n\nhistory = model.fit(train_data, steps_per_epoch=STEPS_PER_EPOCH, validation_data=val_data,\n validation_steps=VALIDATION_STEPS, epochs=10)\n\n# save the model\nmodel.save('./model.h5')\n"
] | [
[
"numpy.array",
"tensorflow.concat",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.layers.Input",
"tensorflow.argmax",
"numpy.random.permutation",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.Model",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.BatchNormalization"
]
] |
otiliastr/coarse-to-fine-curriculum | [
"00fe0ad58dd2a5871958307f7791d93003b6310b"
] | [
"src/data/dataset.py"
] | [
"import numpy as np\n\nfrom .preprocessing import no_preproc, split_train_val\n\n__author__ = 'Otilia Stretcu'\n\n\nclass Dataset(object):\n def __init__(self, name, features, labels, indices_train, indices_test, indices_val,\n num_classes=None, feature_preproc_fn=no_preproc, class_names=None):\n self.name = name\n self.features = features\n self.labels = labels\n\n self.indices_train = indices_train\n self.indices_val = indices_val\n self.indices_test = indices_test\n self.feature_preproc_fn = feature_preproc_fn\n\n self.num_val = len(self.indices_val)\n self.num_test = len(self.indices_test)\n\n self.num_samples = len(features)\n self.features_shape = features[0].shape\n self.num_features = np.prod(features[0].shape)\n self.num_classes = 1 + max(labels) if num_classes is None else num_classes\n self.class_names = class_names\n\n @staticmethod\n def build_from_splits(name,\n inputs_train,\n labels_train,\n inputs_test,\n labels_test,\n inputs_val=None,\n labels_val=None,\n ratio_val=0.1,\n num_classes=None,\n class_names=None,\n feature_preproc_fn=no_preproc):\n if inputs_val is None:\n indices_train, indices_val = split_train_val(\n inputs_train=np.arange(inputs_train.shape[0]),\n ratio_val=ratio_val)\n inputs_val = inputs_train[indices_val]\n inputs_train = inputs_train[indices_train]\n labels_val = labels_train[indices_val]\n labels_train = labels_train[indices_train]\n\n num_train = len(inputs_train)\n num_val = len(inputs_val)\n num_test = len(inputs_test)\n\n features = np.concatenate((inputs_train, inputs_val, inputs_test))\n labels = np.concatenate((labels_train, labels_val, labels_test))\n indices_train = np.arange(num_train)\n indices_val = np.arange(num_train, num_train+num_val)\n indices_test = np.arange(num_train+num_val, num_train+num_val+num_test)\n\n return Dataset(name=name,\n features=features,\n labels=labels,\n indices_train=indices_train,\n indices_test=indices_test,\n indices_val=indices_val,\n num_classes=num_classes,\n class_names=class_names,\n feature_preproc_fn=feature_preproc_fn)\n\n @staticmethod\n def build_from_features(name, features, labels, indices_train, indices_test,\n indices_val=None, ratio_val=0.2, seed=None,\n num_classes=None, class_names=None,\n feature_preproc_fn=lambda x: x):\n if indices_val is None:\n rng = np.random.RandomState(seed=seed)\n indices_train, indices_val = split_train_val(\n np.arange(indices_train.shape[0]), ratio_val, rng)\n\n return Dataset(name=name,\n features=features,\n labels=labels,\n indices_train=indices_train,\n indices_test=indices_test,\n indices_val=indices_val,\n num_classes=num_classes,\n class_names=class_names,\n feature_preproc_fn=feature_preproc_fn)\n\n def get_labels(self, indices):\n return self.labels[indices]\n\n def get_indices_train(self):\n return self.indices_train\n\n def get_indices_val(self):\n return self.indices_val\n\n def get_indices_test(self):\n return self.indices_test\n\n def get_features(self, indices, is_train=False, **kwargs):\n f = self.features[indices]\n f = self.feature_preproc_fn(f, is_train=is_train, **kwargs)\n return f\n\n def copy(self, name=None, features=None, labels=None, indices_train=None,\n indices_test=None, indices_val=None, num_classes=None,\n class_names=None, feature_preproc_fn=None):\n name = name if name is not None else self.name\n features = features if features is not None else self.features\n labels = labels if labels is not None else self.labels\n indices_train = indices_train if indices_train is not None else self.indices_train\n indices_test = indices_test if 
indices_test is not None else self.indices_test\n indices_val = indices_val if indices_val is not None else self.indices_val\n num_classes = num_classes if num_classes is not None else self.num_classes\n class_names = class_names if class_names is not None else self.class_names\n feature_preproc_fn = feature_preproc_fn if feature_preproc_fn is not None else self.feature_preproc_fn\n\n return self.__class__(name=name,\n features=features,\n labels=labels,\n indices_train=indices_train,\n indices_test=indices_test,\n indices_val=indices_val,\n num_classes=num_classes,\n class_names=class_names,\n feature_preproc_fn=feature_preproc_fn)\n"
] | [
[
"numpy.concatenate",
"numpy.prod",
"numpy.arange",
"numpy.random.RandomState"
]
] |
caryleo/self-critical.pytorch | [
"3f83a01e5cf75dfcdb6ca06602dfab363b626d51"
] | [
"tools/train.py"
] | [
"\"\"\"\n原始实现:\n 1. 初始化训练迭代信息,并在必要的时候从既有保存文件中加载\n 2. 初始化日志,实现文件日志和tensorboard日志,并在必要的时候从既有保存文件中加载\n 3. 初始化模型,并在必要的时候从既有保存文件中加载\n 4. 初始化并配置数据加载器,并在必要的时候加载迭代状态\n 5. 完成训练,在必要的时候保存当前模型迭代信息,以及在必要的时候完成模型的选择评估\n 6. 在合适的时候对现有模型进行保存\n新增实现:\n 1. 新增了训练阶段信息,1为经典训练阶段,2为Finetune阶段\n 2. 将既有的训练流程调整为经典训练阶段,新增Finetune阶段,该阶段内模型不进行评估选择\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport numpy as np\n\nimport time\nimport os\nfrom six.moves import cPickle\nimport traceback\nfrom collections import defaultdict\n\nimport captioning.utils.opts as opts\nimport captioning.models as models\nfrom captioning.data.dataloader import *\nimport skimage.io\nimport captioning.utils.eval_utils as eval_utils\nimport captioning.utils.misc as utils\nfrom captioning.utils.rewards import init_scorer, get_self_critical_reward\nfrom captioning.modules.loss_wrapper import LossWrapper\n\n\ndef add_summary_value(writer, key, value, iteration):\n if writer:\n writer.add_scalar(key, value, iteration)\n\n\ndef train(opt):\n ################################\n # 创建dataloader\n ################################\n loader = DataLoader(opt)\n opt.vocab_size = loader.vocab_size\n opt.seq_length = loader.seq_length\n\n ##########################\n # 初始化训练信息\n ##########################\n infos = {\n 'iter': 0,\n 'epoch': 0,\n 'loader_state_dict': None,\n 'vocab': loader.get_vocab(),\n 'stage': 1,\n 'stage_saved': 1 # 用于中断处理,记录了中断时的状态,用于判定是否重新加载最佳模型\n }\n\n if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'infos_' + opt.id + '.pkl')):\n with open(os.path.join(opt.start_from, 'infos_' + opt.id + '.pkl'), 'rb') as f:\n infos = utils.pickle_load(f)\n saved_model_opt = infos['opt']\n need_be_same = [\"caption_model\", \"rnn_type\", \"rnn_size\", \"num_layers\"]\n for checkme in need_be_same:\n assert getattr(saved_model_opt, checkme) == getattr(opt, checkme), \"Command line argument and saved model disagree on '%s' \" % checkme\n\n infos['opt'] = opt\n\n #########################\n # 创建logger\n #########################\n # 文件logger\n histories = defaultdict(dict)\n if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'histories_' + opt.id + '.pkl')):\n with open(os.path.join(opt.start_from, 'histories_' + opt.id + '.pkl'), 'rb') as f:\n histories.update(utils.pickle_load(f))\n\n # tensorboard logger\n tb_summary_writer = SummaryWriter(opt.checkpoint_path)\n\n ##########################\n # 创建模型\n ##########################\n opt.vocab = loader.get_vocab()\n model = models.setup(opt).cuda()\n del opt.vocab\n\n if opt.finetune_only == 1:\n if os.path.isfile(os.path.join(opt.start_from, 'model_best.pth')):\n model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model_best.pth')))\n else:\n if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'model.pth')):\n model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))\n\n # 作者注:面向模型的loss封装,便于将loss计算独立,便于多卡时减小No.0 GPU的负载\n lw_model = LossWrapper(model, opt)\n # 多GPU封装\n dp_model = torch.nn.DataParallel(model)\n dp_model.vocab = getattr(model, 'vocab', None)\n dp_lw_model = torch.nn.DataParallel(lw_model)\n\n model.set_stage(infos['stage'])\n\n ##########################\n # 创建优化器\n ##########################\n if opt.noamopt:\n assert opt.caption_model in ['transformer', 'bert', 'm2transformer'], 'noamopt can 
only work with transformer'\n optimizer = utils.get_std_opt(model, optim_func=opt.optim, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)\n elif opt.reduce_on_plateau:\n optimizer = utils.build_optimizer(model.parameters(), opt)\n optimizer = utils.ReduceLROnPlateau(optimizer,\n factor=opt.reduce_on_plateau_factor,\n patience=opt.reduce_on_plateau_patience)\n else:\n optimizer = utils.build_optimizer(model.parameters(), opt)\n\n if opt.finetune_only == 1:\n if os.path.isfile(os.path.join(opt.start_from, \"optimizer_best.pth\")):\n optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer_best.pth')))\n else:\n if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, \"optimizer.pth\")):\n optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer.pth')))\n\n #########################\n # 训练\n #########################\n\n # 准备阶段\n iteration = infos['iter']\n epoch = infos['epoch']\n loader.load_state_dict(infos['loader_state_dict'])\n if opt.load_best_score == 1:\n best_val_score = infos.get('best_val_score', None)\n if opt.noamopt:\n optimizer._step = iteration\n\n # 作者注:轮次完成标志量,用于新轮次可能的训练参数调整\n epoch_done = True\n eval_done = False\n\n dp_lw_model.train()\n\n # 开始训练啦!经典训练\n if infos['stage'] == 1 and opt.finetune_only != 1:\n try:\n while True:\n # 达到最大epoch限制,跳出经典训练\n if epoch >= opt.max_epochs_base != -1:\n if eval_done:\n break\n else:\n # 末尾再评估一次\n eval_kwargs = {'split': 'base_val', 'dataset': opt.input_json}\n eval_kwargs.update(vars(opt))\n val_loss, predictions, lang_stats, _ = eval_utils.eval_split(dp_model, lw_model.crit, loader, eval_kwargs)\n\n if opt.reduce_on_plateau:\n if 'CIDEr' in lang_stats:\n optimizer.scheduler_step(-lang_stats['CIDEr'])\n else:\n optimizer.scheduler_step(val_loss)\n\n # 将评估结果写入日志\n tb_summary_writer.add_scalar('validation loss', val_loss, iteration)\n if lang_stats is not None:\n for k, v in lang_stats.items():\n tb_summary_writer.add_scalar(k, v, iteration)\n\n histories['val_result_history'][iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}\n\n # 根据CIDEr指标选择最佳模型\n if opt.language_eval == 1:\n current_score = lang_stats['CIDEr']\n else:\n current_score = - val_loss\n\n best_flag = False\n\n if best_val_score is None or current_score > best_val_score:\n best_val_score = current_score\n best_flag = True\n\n infos['best_val_score'] = best_val_score\n\n utils.save_checkpoint(opt, model, infos, optimizer, histories)\n\n if opt.save_history_ckpt:\n utils.save_checkpoint(opt, model, infos, optimizer, append=str(epoch) if opt.save_every_epoch else str(iteration))\n\n if best_flag:\n utils.save_checkpoint(opt, model, infos, optimizer, append='best')\n\n break\n\n eval_done = False\n\n # 设置学习参数\n if epoch_done:\n # Transformer相关\n if not opt.noamopt and not opt.reduce_on_plateau:\n if epoch > opt.learning_rate_decay_start >= 0:\n frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every\n decay_factor = opt.learning_rate_decay_rate ** frac\n opt.current_lr = opt.learning_rate_base * decay_factor\n else:\n opt.current_lr = opt.learning_rate_base\n utils.set_lr(optimizer, opt.current_lr)\n\n # scheduled sampling\n if epoch > opt.scheduled_sampling_start >= 0:\n frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every\n opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)\n model.ss_prob = opt.ss_prob\n\n # SCST\n if opt.self_critical_after != -1 and epoch >= 
opt.self_critical_after:\n sc_flag = True\n init_scorer(opt.cached_tokens)\n else:\n sc_flag = False\n\n # 结构损失\n if opt.structure_after != -1 and epoch >= opt.structure_after:\n struc_flag = True\n init_scorer(opt.cached_tokens)\n else:\n struc_flag = False\n\n epoch_done = False\n\n # start = time.time()\n # Transformer Warmup\n if opt.use_warmup and (iteration < opt.noamopt_warmup):\n opt.current_lr = opt.learning_rate_base * (iteration + 1) / opt.noamopt_warmup\n utils.set_lr(optimizer, opt.current_lr)\n\n data = loader.get_batch('base_train')\n # print('\\r Read data:', time.time() - start, end=\"\")\n\n torch.cuda.synchronize()\n start = time.time()\n\n tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]\n tmp = [_ if _ is None else _.cuda() for _ in tmp]\n fc_feats, att_feats, labels, masks, att_masks = tmp\n\n optimizer.zero_grad()\n model_out = dp_lw_model(fc_feats, att_feats, labels, masks, att_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag, struc_flag)\n\n loss = model_out['loss'].mean()\n\n loss.backward()\n\n # 梯度截断\n if opt.grad_clip_value != 0:\n getattr(torch.nn.utils, 'clip_grad_{}_'.format(opt.grad_clip_mode))(model.parameters(), opt.grad_clip_value)\n\n optimizer.step()\n\n train_loss = loss.item()\n torch.cuda.synchronize()\n end = time.time()\n\n # 输出\n if struc_flag:\n print('Base Training:', \"iter {} (epoch {}), train_loss = {:.3f}, lm_loss = {:.3f}, struc_loss = {:.3f}, time/batch = {:.3f}\" \\\n .format(iteration, epoch, train_loss, model_out['lm_loss'].mean().item(), model_out['struc_loss'].mean().item(), end - start))\n elif not sc_flag:\n print('Base Training:', \"iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}\" \\\n .format(iteration, epoch, train_loss, end - start))\n else:\n print('Base Training:', \"iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}\" \\\n .format(iteration, epoch, model_out['reward'].mean(), end - start))\n\n # 更新迭代计数器,如果到达epoch边界,需要调整一些参数\n iteration += 1\n if data['bounds']['wrapped']:\n epoch += 1\n epoch_done = True\n\n # 将训练结构写入到日志中\n if iteration % opt.losses_log_every == 0:\n tb_summary_writer.add_scalar('train_loss', train_loss, iteration)\n if opt.noamopt:\n opt.current_lr = optimizer.rate()\n elif opt.reduce_on_plateau:\n opt.current_lr = optimizer.current_lr\n tb_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)\n tb_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)\n if sc_flag:\n tb_summary_writer.add_scalar('avg_reward', model_out['reward'].mean(), iteration)\n elif struc_flag:\n tb_summary_writer.add_scalar('lm_loss', model_out['lm_loss'].mean().item(), iteration)\n tb_summary_writer.add_scalar('struc_loss', model_out['struc_loss'].mean().item(), iteration)\n tb_summary_writer.add_scalar('reward', model_out['reward'].mean().item(), iteration)\n tb_summary_writer.add_scalar('reward_var', model_out['reward'].var(1).mean(), iteration)\n\n histories['loss_history'][iteration] = train_loss if not sc_flag else model_out['reward'].mean()\n histories['lr_history'][iteration] = opt.current_lr\n histories['ss_prob_history'][iteration] = model.ss_prob\n\n # 信息更新\n infos['iter'] = iteration\n infos['epoch'] = epoch\n infos['loader_state_dict'] = loader.state_dict()\n\n # 根据需要,在两个模式下评估模型\n if (iteration % opt.save_checkpoint_every == 0 and not opt.save_every_epoch) or (epoch_done and opt.save_every_epoch):\n eval_kwargs = {'split': 'base_val', 'dataset': opt.input_json}\n eval_kwargs.update(vars(opt))\n 
val_loss, predictions, lang_stats, _ = eval_utils.eval_split(dp_model, lw_model.crit, loader, eval_kwargs)\n\n if opt.reduce_on_plateau:\n if 'CIDEr' in lang_stats:\n optimizer.scheduler_step(-lang_stats['CIDEr'])\n else:\n optimizer.scheduler_step(val_loss)\n\n # 将评估结果写入日志\n tb_summary_writer.add_scalar('validation loss', val_loss, iteration)\n if lang_stats is not None:\n for k, v in lang_stats.items():\n tb_summary_writer.add_scalar(k, v, iteration)\n\n histories['val_result_history'][iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}\n\n # 根据CIDEr指标选择最佳模型\n if opt.language_eval == 1:\n current_score = lang_stats['CIDEr']\n else:\n current_score = - val_loss\n\n best_flag = False\n\n if best_val_score is None or current_score > best_val_score:\n best_val_score = current_score\n best_flag = True\n\n infos['best_val_score'] = best_val_score\n\n utils.save_checkpoint(opt, model, infos, optimizer, histories)\n\n if opt.save_history_ckpt:\n utils.save_checkpoint(opt, model, infos, optimizer, append=str(epoch) if opt.save_every_epoch else str(iteration))\n\n if best_flag:\n utils.save_checkpoint(opt, model, infos, optimizer, append='best')\n\n eval_done = True\n\n except (RuntimeError, KeyboardInterrupt):\n print('Save ckpt on exception ...')\n utils.save_checkpoint(opt, model, infos, optimizer)\n print('Save ckpt done.')\n stack_trace = traceback.format_exc()\n print(stack_trace)\n os._exit(0)\n\n infos['stage'] = 2\n\n # dummy配置下,不进行微调\n if opt.train_only == 1:\n # 微调训练\n infos['stage'] = 2\n epoch_done = True\n loader.reset_iterator('support')\n\n # 加载最佳模型,如果中断位置在第二阶段,则不进行模型加载\n if opt.start_from and infos['stage_saved'] == 2:\n pass\n else:\n # 否则加载stage 1的最佳模型进行微调\n print('Finetuning:', \"loading best model from stage 1\")\n model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model_best' + '.pth')))\n optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer_best' + '.pth')))\n\n lw_model = LossWrapper(model, opt)\n # 多GPU封装\n dp_model = torch.nn.DataParallel(model)\n dp_model.vocab = getattr(model, 'vocab', None)\n dp_lw_model = torch.nn.DataParallel(lw_model)\n\n model.set_stage(infos['stage'])\n infos['stage_saved'] = 2\n\n # 冻结除了最后一个logit层之外的所有参数\n for name, parameter in dp_lw_model.module.named_parameters():\n if 'logit' not in name:\n parameter.requires_grad = False\n else:\n parameter.requires_grad = True\n\n # 因为计数器没有清零,所以这里是直接加上去\n max_epochs_all = opt.max_epochs_base + opt.max_epochs_finetune\n\n # 提前准备:相关学习参数是否跟随\n if opt.learning_rate_decay_start_finetune < 0:\n opt.learning_rate_decay_start_finetune = opt.learning_rate_decay_start - opt.max_epochs_base\n\n if opt.learning_rate_finetune < 0:\n opt.learning_rate_finetune = opt.learning_rate_base\n\n if opt.scheduled_sampling_start_finetune < 0:\n opt.scheduled_sampling_start_finetune = opt.scheduled_sampling_start - opt.max_epochs_base\n\n try:\n while True:\n # 达到最大epoch限制,跳出\n if epoch >= max_epochs_all != -2:\n utils.save_checkpoint(opt, model, infos, optimizer, histories, append='finetune')\n break\n\n # 设置学习参数\n if epoch_done:\n # Transformer相关\n if not opt.noamopt and not opt.reduce_on_plateau:\n if epoch > opt.learning_rate_decay_start_finetune + opt.max_epochs_base >= 0:\n frac = (epoch - opt.learning_rate_decay_start_finetune - opt.max_epochs_base) // opt.learning_rate_decay_every_finetune\n decay_factor = opt.learning_rate_decay_rate_finetune ** frac\n opt.current_lr = opt.learning_rate_finetune * decay_factor\n else:\n opt.current_lr = 
opt.learning_rate_finetune\n\n utils.set_lr(optimizer, opt.current_lr)\n\n # scheduled sampling\n if epoch > opt.scheduled_sampling_start_finetune + opt.max_epochs_base >= 0:\n frac = (epoch - opt.scheduled_sampling_start_finetune - opt.max_epochs_base) // opt.scheduled_sampling_increase_every_finetune\n opt.ss_prob = min(opt.scheduled_sampling_increase_prob_finetune * frac, opt.scheduled_sampling_max_prob_finetune)\n model.ss_prob = opt.ss_prob\n\n # SCST\n if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:\n sc_flag = True\n init_scorer(opt.cached_tokens)\n else:\n sc_flag = False\n\n # 结构损失\n if opt.structure_after != -1 and epoch >= opt.structure_after:\n struc_flag = True\n init_scorer(opt.cached_tokens)\n else:\n struc_flag = False\n\n epoch_done = False\n\n # start = time.time()\n # Transformer Warmup\n # if opt.use_warmup and (iteration < opt.noamopt_warmup):\n # opt.current_lr = opt.learning_rate * (iteration + 1) / opt.noamopt_warmup\n # utils.set_lr(optimizer, opt.current_lr)\n\n data = loader.get_batch('support')\n\n torch.cuda.synchronize()\n start = time.time()\n\n tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]\n tmp = [_ if _ is None else _.cuda() for _ in tmp]\n fc_feats, att_feats, labels, masks, att_masks = tmp\n\n optimizer.zero_grad()\n model_out = dp_lw_model(fc_feats, att_feats, labels, masks, att_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag, struc_flag)\n\n loss = model_out['loss'].mean()\n\n loss.backward()\n\n # 梯度截断\n if opt.grad_clip_value != 0:\n getattr(torch.nn.utils, 'clip_grad_{}_'.format(opt.grad_clip_mode))(model.parameters(), opt.grad_clip_value)\n\n optimizer.step()\n\n train_loss = loss.item()\n torch.cuda.synchronize()\n end = time.time()\n\n # 输出\n if struc_flag:\n print('Finetuning:', \"iter {} (epoch {}), train_loss = {:.3f}, lm_loss = {:.3f}, struc_loss = {:.3f}, time/batch = {:.3f}\" \\\n .format(iteration, epoch, train_loss, model_out['lm_loss'].mean().item(), model_out['struc_loss'].mean().item(), end - start))\n elif not sc_flag:\n print('Finetuning:', \"iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}\" \\\n .format(iteration, epoch, train_loss, end - start))\n else:\n print('Finetuning:', \"iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}\" \\\n .format(iteration, epoch, model_out['reward'].mean(), end - start))\n\n # 更新迭代计数器,如果到达epoch边界,需要调整一些参数\n iteration += 1\n if data['bounds']['wrapped']:\n epoch += 1\n epoch_done = True\n\n # 将训练结构写入到日志中\n if iteration % opt.losses_log_every == 0:\n tb_summary_writer.add_scalar('train_loss', train_loss, iteration)\n if opt.noamopt:\n opt.current_lr = optimizer.rate()\n elif opt.reduce_on_plateau:\n opt.current_lr = optimizer.current_lr\n tb_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)\n tb_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)\n if sc_flag:\n tb_summary_writer.add_scalar('avg_reward', model_out['reward'].mean(), iteration)\n elif struc_flag:\n tb_summary_writer.add_scalar('lm_loss', model_out['lm_loss'].mean().item(), iteration)\n tb_summary_writer.add_scalar('struc_loss', model_out['struc_loss'].mean().item(), iteration)\n tb_summary_writer.add_scalar('reward', model_out['reward'].mean().item(), iteration)\n tb_summary_writer.add_scalar('reward_var', model_out['reward'].var(1).mean(), iteration)\n\n histories['loss_history'][iteration] = train_loss if not sc_flag else model_out['reward'].mean()\n 
histories['lr_history'][iteration] = opt.current_lr\n histories['ss_prob_history'][iteration] = model.ss_prob\n\n # 信息更新\n infos['iter'] = iteration\n infos['epoch'] = epoch\n infos['loader_state_dict'] = loader.state_dict()\n\n if (iteration % opt.save_checkpoint_every == 0 and not opt.save_every_epoch) or (epoch_done and opt.save_every_epoch):\n utils.save_checkpoint(opt, model, infos, optimizer, histories, append='finetune')\n\n if opt.save_history_ckpt:\n utils.save_checkpoint(opt, model, infos, optimizer, append=str(epoch) if opt.save_every_epoch else str(iteration))\n\n except (RuntimeError, KeyboardInterrupt):\n print('Save ckpt on exception ...')\n utils.save_checkpoint(opt, model, infos, optimizer)\n print('Save ckpt done.')\n stack_trace = traceback.format_exc()\n print(stack_trace)\n os._exit(0)\n\n\nopt = opts.parse_opt()\ntrain(opt)\n"
] | [
[
"torch.cuda.synchronize",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.DataParallel"
]
] |
faithlierheimer/sqlalchemy-challenge | [
"79d81ebe1159442486edf114adeffc03fba80de3"
] | [
"flaskapp.py"
] | [
"#Import flask\nfrom flask import Flask, jsonify, request\n\n#Import dependencies for queries to include in endpoints\n############################\nfrom matplotlib import style\nstyle.use('seaborn')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nimport pprint as pp\nfrom datetime import timedelta\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n###############################\n#Set up connection to sqlite database\n## Create engine\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n## Declare a base \nBase = automap_base()\n## Use base class to reflect Hawaii database tables\nBase.prepare(engine, reflect = True)\n## Double check connection brought in right tables\nTables = Base.classes.keys()\n#print(Tables)\n##Save measurement and station table refs to their own variables \nmeasurement = Base.classes.measurement\nstation = Base.classes.station\n##Create a session to manage transactions to sqlite db\nsession = Session(engine)\n#################################\n#Find last 12 months of precipitation data to store on appropriate flask app page\n## Start with finding last date in dataset\nlast = session.query(func.max(measurement.date))\n##Initialize list to store last date object when we find it\nlast_date = []\n##Add last date to its list\nfor l in last:\n last_date.append(l)\n##Check work\n#####print(last_date)\n##Turn last date into a datetime object\nbegin = dt.date(2017, 8, 23)\n##Find date 12 months before the last date to retrieve last 12 months of precip data & plot results\nyear_range = begin - dt.timedelta(days = 365)\n##Check work\n######print(year_range)\n##Query database for last 12 mo of precip data\ndate = session.query(measurement.date, measurement.prcp).filter(measurement.date >= year_range).all()\n##Put returned query object into a df, drop any duplicates, check work\nprecip = pd.DataFrame(date, columns=['date', 'precipitation'])\n##print(precip.head())\nprecip_dict = precip.to_dict('records')\n#print(precip_dict)\n\n#################################\n#Query database to find active stations & list in descending order of activity\nactiveStations = session.query(measurement.station, func.count(measurement.station)).group_by(measurement.station).order_by(measurement.station.desc())\nactiveStations_df = pd.DataFrame(activeStations, columns=['station', 'count'])\nactiveStations_df = activeStations_df.sort_values(by = ['count'], ascending = False)\n##Convert stations df to dictionary\nactiveStations_dict = activeStations_df.to_dict('records')\n\n###################################\n#Query database to find temperature observations for the previous 12 months\ntemps = session.query(measurement.date, measurement.tobs).filter(measurement.date >= year_range).all()\ntemps_df = pd.DataFrame(temps, columns = ['date', 'temperature'])\ntemps_dict = temps_df.to_dict('records')\n\n#Flask setup \n##Initialize Flask app\napp = Flask(__name__)\n\[email protected](\"/\")\ndef home():\n print(\"Server received request for homepage.\")\n return \"\"\"Available routes:<br/>\n <a href=/api/v1.0/precipitation>Precipitation</a>: Returns JSON version of precipitation data over last 12 months.<br/> \n <a href=/api/v1.0/stations>Stations</a>: Returns JSON version of stations in dataset. <br/> \n <a href=/api/v1.0/tobs>Temperature</a>: Returns JSON list of temperature observations for the previous year. 
<br/> \n <a href=/api/v1.0/start>Start Date</a>: and <br/>\n <a href=/api/v1.0/start/end>End Date</a>: Returns JSON list of min, avg, and max temp for a given start or start-end range. <br/>\n If start only, calculates min, avg, and max for all dates greater than and equal to the start date. <br/>\n When given start and end date, calculates min, avg, and max for dates between the start and end date inclusive. Dates MUST be in following format YYYYMMDD.\"\"\"\n##This endpoint works as far as I can tell, as of 8/23. \[email protected](\"/api/v1.0/precipitation\")\ndef precipitation():\n print(\"Server received request for precipitation page.\")\n return jsonify(precip_dict)\n ##Put session stuff inside relevant fxns then do session.close() after that\n##This endpoint works! as far as I can tell, as of 8/23.\[email protected](\"/api/v1.0/stations\")\ndef stations():\n print(\"Server received request for stations page.\")\n return jsonify(activeStations_dict)\n##This endpoint works! as far as I can tell, as of 8/23\[email protected](\"/api/v1.0/tobs\")\ndef temperature():\n print(\"Server received request for temperature page.\")\n return jsonify(temps_dict)\n#@app.route(#\"/api/v1.0/<variable for start date>/<variable for end date>\")\[email protected](\"/api/v1.0/start\")\ndef datesa():\n return \"\"\"\n <html><body>\n <h2>Thanks for choosing the date request page!</h2>\n <form action=\"/date\">\n What start date do you want to check the temperature for?<br>\n Your answer must be in the YYYYMMDD format.<br>\n You will receive the min, max, and avg temp for everything including and after that date.<br>\n <input type = 'text' name = 'startdate'><br>\n <input type = 'submit' value = 'Continue'>\n </form>\n </body></html>\n \"\"\"\[email protected](\"/date\")\ndef temp_getter():\n startdate = request.args['startdate']\n start = pd.to_datetime(str(startdate), format = '%Y%m%d').date()\n engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n Base = automap_base()\n Base.prepare(engine, reflect = True)\n Tables = Base.classes.keys()\n measurement = Base.classes.measurement\n station = Base.classes.station\n session = Session(engine)\n temps2 = session.query(measurement.date, func.min(measurement.tobs), func.max(measurement.tobs), func.avg(measurement.tobs)).group_by(measurement.date).filter(measurement.date >= start).all()\n temps2_df = pd.DataFrame(temps2, columns = ['date', 'min_temp', 'max_temp', 'avg_temp'])\n temps2_dict = temps2_df.to_dict('records')\n return jsonify(temps2_dict)\[email protected](\"/api/v1.0/start/end\")\ndef datesb():\n return \"\"\"\n <html><body>\n <h2>Thanks for choosing the date request page!</h2>\n <form action=\"/date2\">\n What start and end date do you want to check the temperature in between?<br>\n Your answers must be in the YYYYMMDD format.<br>\n You will receive the min, max, and avg temp for everything including and after that date.<br>\n Your start date must go in the first box, and your end date must go in the second box. 
<br>\n <input type = 'text' name = 'startdate'><br>\n <input type = 'text' name = 'enddate'><br>\n <input type = 'submit' value = 'Continue'>\n </form>\n </body></html>\n \"\"\"\[email protected](\"/date2\")\ndef temp_getter2():\n startdate = request.args['startdate']\n enddate = request.args['enddate']\n start = pd.to_datetime(str(startdate), format = '%Y%m%d').date()\n end = pd.to_datetime(str(enddate), format = '%Y%m%d').date()\n engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n Base = automap_base()\n Base.prepare(engine, reflect = True)\n Tables = Base.classes.keys()\n measurement = Base.classes.measurement\n station = Base.classes.station\n session = Session(engine)\n temp_range = session.query(measurement.date, func.min(measurement.tobs), func.max(measurement.tobs), func.avg(measurement.tobs)).group_by(measurement.date).filter(measurement.date >= start).filter(measurement.date <= end).all()\n temp_range_df = pd.DataFrame(temp_range, columns = ['date', 'min_temp', 'max_temp', 'avg_temp'])\n temp_range_dict = temp_range_df.to_dict('records')\n return jsonify(temp_range_dict)\nif __name__ == \"__main__\":\n app.run(debug = True)\n "
] | [
[
"pandas.DataFrame",
"matplotlib.style.use"
]
] |
ucl-tbr-group-project/regression | [
"ab02f62cdf83ffc43d9b8e88b0c1833190d65b95"
] | [
"tbr_reg/metrics/mae.py"
] | [
"from sklearn.metrics import mean_absolute_error\n\nfrom .basic_metric import RegressionMetric\n\n\nclass MAEFactory:\n def __init__(self):\n self.id = MAE().id\n\n def init_metric(self):\n return MAE()\n\n\nclass MAE(RegressionMetric):\n '''Mean absolute error, implemented by SciKit.'''\n\n def __init__(self):\n RegressionMetric.__init__(self, 'MAE', '{\\\\rm MAE}', 'mae')\n\n def evaluate(self, X, y_test, y_pred):\n return mean_absolute_error(y_test, y_pred)\n\n def rank(self, values):\n return values\n"
] | [
[
"sklearn.metrics.mean_absolute_error"
]
] |