repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
bryant1410/Emotion-FAN
|
[
"8a4ea4f0eacced38e8f4c50ad37515e84c781ab8"
] |
[
"Code/util.py"
] |
[
"import os\nimport torch\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True) # first position is score; second position is pred.\n pred = pred.t() # .t() is T of matrix (256 * 1) -> (1 * 256)\n correct = pred.eq(target.view(1, -1).expand_as(pred)) # target.view(1,2,2,-1): (256,) -> (1, 2, 2, 64)\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res\n\n\ndef adjust_learning_rate(optimizer, epoch, learning_rate, end_epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n if epoch in [round(end_epoch * 0.333), round(end_epoch * 0.666)]:\n for param_group in optimizer.param_groups:\n param_group['lr'] *= 0.2\n\n learning_rate = learning_rate* 0.2\n print('Adjust_learning_rate ' + str(epoch))\n print('New_LearningRate: {}'.format(learning_rate))\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef save_checkpoint(state, at_type=''):\n\n if not os.path.exists('./model'):\n os.makedirs('./model')\n\n epoch = state['epoch']\n save_dir = './model/'+at_type+'_' + str(epoch) + '_' + str(round(float(state['prec1']), 4))\n torch.save(state, save_dir)\n print(save_dir)\n"
] |
[
[
"torch.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KanazawaAIMeetup/TradePython
|
[
"b6fc93a42a2d85078167a9a5bf712a11f24391ca",
"b6fc93a42a2d85078167a9a5bf712a11f24391ca"
] |
[
"TrainExample/PredictPrice/SimplePricePrediction/main_price_prediction.py",
"ColaboratoryCode/trade_class.py"
] |
[
"#coding: utf-8\n'''\nMITライセンス このプログラムについては、改変・再配布可能です\n著作者: Tomohiro Ueno ([email protected])\n\nUsage: ddqn-multiple-inputディレクトリから実行する。\npython main_price_prediction.py\n\n注意:評価する場合は、正解データのリークが起きないようにする。Train,Validation,Testの分割方法に気をつける\nこのプログラムは、リークを厳密に回避していません!\n実行を早くするため、test_term=120000 epochs=1 となっていますが、実際に評価する場合はtest_term=20000、 epocsも20などに直して下さい。\n詳しくは、このソースコード内を「TODO」で検索して下さい。\n'''\nimport sys, os\nsys.path.append(\"..\")\nsys.path.append(\"../..\")\nsys.path.append(os.getcwd())\nsys.path.append(os.pardir)\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.layers.recurrent import LSTM\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping\n\nimport numpy as np\nimport random\nimport sys,os,copy,traceback\nfrom sklearn.utils import shuffle\nfrom trade_class import TradeClass\nfrom sklearn import preprocessing\nss = preprocessing.StandardScaler()\n\nprint(os.path.basename(__file__))\n\ntradecl=TradeClass()\nprice_data = tradecl.ReadPoloniexCSV()\nnp.set_printoptions(threshold=np.inf)\nprint(\"price_data idx 0-10\"+str(price_data[0:10]))\nprint(\"price_data idx last 10\"+str(price_data[-1]))\n\ninput_price_len=400\ninput_discrete_value_size=3\ntotal_input_size = input_price_len+input_discrete_value_size\nn_actions=3\n\n#obs_size = input_len+n_actions#shape#env.observation_space.shape[0]\n#データを標準化して、ディープラーニングで学習しやすくする。\ndef standarization(x, axis = None):\n x = np.array(x)\n x2 = x - x.mean()\n xstd = np.std(x2, axis=axis, keepdims=True)\n zscore = x2/(3*xstd)\n return zscore.tolist()\n\ntest_term=180000#TODO 20000に直す。\nvalid_term = 4000\nX_train = []\ny_train = []\nX_valid = []\ny_valid = []\nfor idx in range(input_price_len, len(price_data)-test_term-valid_term):\n X_train.append(standarization(price_data[idx - input_price_len:idx]))#idx番目の価格がトレーニングデータに入らないのが重要。教師がデータに含まれてしまう。\n y_train.append(price_data[idx])\n if np.array(X_train[-1]).shape != (400,):\n print(np.array(X_train[-1]).shape)\n\nfor idx in range(len(price_data)-test_term-valid_term,len(price_data)-test_term):\n X_valid.append(standarization(price_data[idx - input_price_len:idx]))#idx番目の価格がトレーニングデータに入らないのが重要。教師がデータに含まれてしまう。\n y_valid.append(price_data[idx])\n\nX_test = []\ny_test = []\nfor idx in range(len(price_data)-test_term,len(price_data)):\n X_test.append(standarization(price_data[idx - input_price_len:idx]))\n y_test.append(price_data[idx])\n\nX_train, y_train = shuffle(X_train, y_train, random_state=1234)\n\n#取引を何もしなくても価格の変化に応じて資産が増減するようにする\ndef reset_info():\n money = 300\n before_money = money\n cripto = 0.01\n total_money = money + np.float64(y_train[0] * cripto)\n first_total_money = total_money\n pass_count = 0\n buy_sell_count = 0\n pass_renzoku_count = 0\n before_action = 2 \n\n return money,before_money,cripto,total_money,first_total_money,pass_count,buy_sell_count,pass_renzoku_count, before_action\n\ndef action_if(action,buy_sell_count,pass_count,money,cripto,total_money,current_price):\n #buy_simple, sell_simple, pass_simple関数は一階層上のtrade_class.py参照。\n ratio = 0.5\n if action == 0:\n #Buy\n buy_sell_count += 1\n money, cripto, total_money = tradecl.buy_simple(money, cripto, total_money, current_price,ratio)\n elif action == 1:\n #Sell\n buy_sell_count -= 1\n money, cripto, total_money = tradecl.sell_simple(money, cripto, total_money, current_price,ratio)\n elif action == 2:\n #PASS\n money, cripto, total_money = tradecl.pass_simple(money, cripto, total_money, current_price,ratio)\n pass_count += 1\n\n 
total_money=money+cripto*current_price\n\n return buy_sell_count, pass_count, money, cripto, total_money\n\n# Kerasでモデルを定義\n#model = Sequential()\n# 1つの学習データのStep数(今回は25)\nprint(\"Model Define\")\nlength_of_sequence = input_price_len \nin_out_neurons = 1\nn_hidden = 300\n\nmodel = Sequential()\nmodel.add(LSTM(n_hidden, batch_input_shape=(None, length_of_sequence, in_out_neurons), return_sequences=False))\nmodel.add(Dense(in_out_neurons))\nmodel.add(Activation(\"linear\"))\noptimizer = Adam(lr=0.001)\nmodel.compile(loss=\"mean_absolute_error\", optimizer=optimizer)\n\n# 教師データを正規化して、スケールを合わせる\ny_train_normalized = np.array(y_train) / 10000\ny_test_normalized = np.array(y_test) / 10000\n\n'''\n#X_trainなど入力はLSTMを使う場合、以下のような形に変形する必要がある。\narray([[1],\n [2],\n [3],\n [4]])\n'''\nX_train = np.array(X_train).reshape(len(X_train),input_price_len,1)\nX_valid = np.array(X_valid).reshape(len(X_valid),input_price_len,1)\nX_test = np.array(X_test).reshape(len(X_test),input_price_len,1)\n\nprint(X_train.shape)\nprint(X_train[0:3])\n\nprint(\"Training Starts\")\nmodel.fit(X_train, y_train_normalized,\n batch_size=64,\n validation_data=(X_valid, y_valid),\n verbose=1,\n epochs=1)\n\n'''\nreward: 強化学習のトレーニングに必要な報酬\nmoney: 現在の総資産(スタート時は300ドルなど任意の値で初期化)\nbefore_money: 1ステップ前の資産\ncripto: 資産として保持している仮想通貨の量\ntotal_money: 法定通貨と仮想通貨両方を合計した現在の総資産\nfirst_total_money: スタート時の総資産 運用成績を評価するために使用\npass_count: 何回売買をせずに見送ったか。passをした合計回数を記録\n#buy_sell_count: 今までの取引の中でBuyとSellにどれだけ偏りがあるかを表す数。Buyされる度に+1,Sellされる度に-1される。つまり、正数の場合はBuyばかりされていて、負数の場合はSellばかりされている。\npass_renzoku_count: 取引せずに見送るPassを何回連続で行なったか。学習の状況や取引を可視化するために作成した。\n'''\n\n#TODO モデルの保存\n\nmoney, before_money, cripto, total_money, first_total_money, pass_count, buy_sell_count, pass_renzoku_count, before_action = reset_info()\ntradecl.reset_trading_view()#グラフの描画をリセットする\nbefore_price = y_test[0]\nbefore_pred = y_test[0]\n\n'''\n一つの入力データについてだけ予測したい場合\n#pred_array = model.predict(np.array([input_data])) # 教師が入力に入らないように。\n#pred = pred_array.tolist()[0][0]#出力がnumpy型のa=np.array([[0.5467384]])のようになっている\n'''\npred_array = model.predict(X_test)#TODO X_test[:2000]を、X_testに変更する。 \nprint(pred_array.shape)\nprint(pred_array[0:2])\n\nmoney, before_money, cripto, total_money, first_total_money, pass_count, buy_sell_count,pass_renzoku_count, before_action = reset_info()\nfor idx in range(0, len(pred_array.tolist())-1):#TODO 配列の長さを元に戻す。\n current_price = y_test[idx]#添字間違えないように\n\n pred = pred_array[idx][0]\n\n if pred - before_pred > 0.00005:\n action = 0\n elif pred - before_pred < -0.00005:\n action = 1\n else:\n action = 2\n\n tradecl.update_trading_view(current_price, action)\n buy_sell_count, pass_count, money, cripto, total_money = \\\n action_if(action,buy_sell_count,pass_count,money,cripto,total_money,current_price)\n before_money = total_money\n before_price = current_price\n before_pred = pred\n before_action = action\n\nprint(\"====================TEST======================\")\nprint(\"START MONEY\" + str(first_total_money))\nprint(\"FINAL MONEY:\" + str(total_money))\nprint(\"pass_count:\" + str(pass_count))\nprint(\"buy_sell_count(at the end of TEST):\" + str(buy_sell_count))\n\n\n#matploblibでトレードの結果をグラフで可視化\ntry:\n tradecl.draw_trading_view(\"main_price_prediction.png\")\nexcept:\n print(traceback.format_exc()) \n print(\"tradecl.draw_trading_view FAILED!!\")\n\n",
"#coding: utf-8\n'''\nMITライセンス このプログラムについては、改変・再配布可能です\n著作者: Tomohiro Ueno ([email protected])\n'''\n\nimport numpy as np\nimport poloniex\nimport datetime\nimport time\nimport json\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass TradeClass(object):\n def __init__(self):\n self.trade_history = []\n self.price_history = []\n self.transaction_fee = 0.0001 #取引手数料。この場合0.01%としたが、自由に変えて良い。\n\n def ReadPoloniexCSV(self):\n import csv\n history_data=[]\n with open('../DATA/USDT_BTC_LATEST.csv', 'r') as f:\n reader=csv.reader(f,delimiter=',')\n next(reader)\n for row in reader:\n history_data.append(float(row[1]))\n #print(float(row[1]))\n return history_data\n\n def ReadBitflyerJson(self):\n import csv\n history_data=[]\n import csv\n with open(os.environ['HOME']+'/bitcoin/bitflyerJPY_convert.csv', 'r') as f:\n reader=csv.reader(f,delimiter=',')\n next(reader) # ヘッダーを読み飛ばしたい時\n for row in reader:\n history_data.append(float(row[1]))\n return history_data\n\n def GetDataPoloniex(self):\n polo = poloniex.Poloniex()\n polo.timeout = 10\n chartUSDT_BTC = polo.returnChartData('USDT_ETH', period=300, start=time.time() - 1440*60 * 500, end=time.time())#1440(min)*60(sec)=DAY\n tmpDate = [chartUSDT_BTC[i]['date'] for i in range(len(chartUSDT_BTC))]\n date = [datetime.datetime.fromtimestamp(tmpDate[i]) for i in range(len(tmpDate))]\n data = [float(chartUSDT_BTC[i]['open']) for i in range(len(chartUSDT_BTC))]\n return date ,data\n\n def PercentageLabel(self,Xtrain,yTrain):\n X=[]\n Y=[]\n for i in range(0,len(yTrain)):\n original=Xtrain[i][-1]\n X.append([float(val/original) for val in Xtrain[i]])\n Y.append(float(float(yTrain[i]/Xtrain[i][-1])-1)*100*100)#%*100\n return X,Y\n\n def TestPercentageLabel(self,Xtrain):\n X=[]\n for i in range(0,len(Xtrain)):\n original = Xtrain[-1]\n X.append([float(val/original) for val in Xtrain])\n return X\n\n def buy_simple(self,money, ethereum, total_money, current_price):\n first_money, first_ethereum, first_total_money = money, ethereum, total_money\n spend = money * 0.1#いくら分取引に使うか\n money -= spend * (1+self.transaction_fee)\n if money <= 0.0:\n return first_money,first_ethereum,first_total_money\n\n ethereum += float(spend / current_price)\n total_money = money + ethereum * current_price\n\n return money, ethereum, total_money\n\n def sell_simple(self,money, ethereum, total_money, current_price):\n first_money, first_ethereum, first_total_money = money, ethereum, total_money\n spend = ethereum * 0.1 #いくら分取引に使うか\n ethereum -= spend * (1+self.transaction_fee)\n if ethereum <= 0.0:\n return first_money,first_ethereum,first_total_money\n\n money += float(spend * current_price)\n total_money = money + float(ethereum * current_price)\n\n return money, ethereum, total_money\n def pass_simple(self,money,ethereum,total_money,current_price):\n total_money = money + float(ethereum * current_price)\n return money,ethereum,total_money\n\n def SellAndCalcAmoutUsingPrediction(self,pred,money, ethereum, total_money, current_price):\n first_money, first_ethereum, first_total_money = money, ethereum, total_money\n spend = ethereum * 0.5 * (abs(pred)*0.1)\n ethereum -= spend * (1.0+self.transaction_fee)#取引手数料も含めていくら分購入したかを計算。\n if ethereum < 0.0 or abs(pred) < 0.5:##資産がマイナスになる or 予測に自信がない場合\n #何もしない\n return first_money,first_ethereum,first_total_money\n\n money += float(spend * current_price)#仮想通貨を売却した分、法定通貨が増える。\n total_money = money + float(ethereum * current_price)#仮想通貨と法定通貨両方を合計した資産を計算\n\n return money, ethereum, total_money\n \n def 
BuyAndCalcAmoutUsingPrediction(self,pred,money, ethereum, total_money, current_price):\n first_money, first_ethereum, first_total_money = money, ethereum, total_money\n spend = money * (abs(pred)*0.05)#資産全体の何割を一回の取引に使うか abs(pred)にすると便利\n money -= spend * (1.0+self.transaction_fee)#取引手数料も含めていくら分購入したかを計算。\n if money < 0.0 or abs(pred) < 0.5:#資産がマイナスになる or 予測に自信がない場合\n #何もしない\n return first_money,first_ethereum,first_total_money\n\n ethereum += float(spend / current_price)#法定通貨を消費した分、仮想通貨が増える。\n total_money = money + ethereum * current_price#仮想通貨と法定通貨両方を合計した資産を計算\n\n return money, ethereum, total_money\n\n def PassUsingPrediction(self, pred, money, ethereum, total_money, current_price):\n first_money, first_ethereum, first_total_money = money, ethereum, total_money\n return first_money,first_ethereum,first_total_money\n\n def buy_using_ratio(self,money, ethereum, total_money, current_price, ratio):\n first_money, first_ethereum, first_total_money = money, ethereum, total_money\n spend = money * ratio#いくら分取引に使うか\n money -= spend * (1+self.transaction_fee)\n if money <= 0.0:\n return first_money,first_ethereum,first_total_money\n\n ethereum += float(spend / current_price)\n total_money = money + ethereum * current_price\n\n return money, ethereum, total_money\n\n def sell_using_ratio(self,money, ethereum, total_money, current_price, ratio):\n first_money, first_ethereum, first_total_money = money, ethereum, total_money\n spend = ethereum * ratio #いくら分取引に使うか\n ethereum -= spend * (1+self.transaction_fee)\n if ethereum <= 0.0:\n return first_money,first_ethereum,first_total_money\n\n money += float(spend * current_price)\n total_money = money + float(ethereum * current_price)\n\n return money, ethereum, total_money\n def pass_using_ratio(self,money,ethereum,total_money,current_price, ratio):\n total_money = money + float(ethereum * current_price)\n return money,ethereum,total_money\n\n # 配列の長さに気をつける。\n #実験結果:何割の資産を取引に使うかについて、0.01%だけだと+30ドル 0.1%*pred(予測値によって取引量を変える)で+200ドル\n def simulate_trade(self,price, X_test, model):\n money = 300\n ethereum = 0.01\n total_money = money + np.float64(price[0] * ethereum)\n first_total_money = total_money\n\n for i in range(0, len(price)):\n print(i)\n current_price = price[i]\n prediction = model.predict(X_test[i])\n pred = prediction[0]\n if pred > 0.0001: #1.0で100% 一つ前の予測と比較して1%増えたら、と書きたい場合は > 0.01\n print(\"buy\")\n money, ethereum, total_money = self.BuyAndCalcAmoutUsingPrediction(pred,money, ethereum, total_money, current_price)\n print(\"money\"+str(money))\n elif pred <= 0.0001:# 一つ前の予測と比較して1%減ったら、と書きたい場合は < 0.01\n print(\"sell\")\n money, ethereum, total_money = self.SellAndCalcAmoutUsingPrediction(pred,money, ethereum, total_money, current_price)\n print(\"money\"+str(money))\n print(\"FIRST\"+str(first_total_money))\n print(\"FINAL\" + str(total_money))\n return total_money\n\n def update_trading_view(self, current_price, action):\n self.price_history.append(current_price)\n self.trade_history.append(action)\n\n def reset_trading_view(self):\n self.price_history=[]\n self.trade_history=[]\n\n def draw_trading_view(self,filename):\n data, date = np.array(self.price_history), np.array([idx for idx in range(0, len(self.price_history))])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(date, data)#,marker='o'\n ax.plot()\n\n for num in range(0,len(self.price_history)):\n if self.trade_history[num] == 0:\n plt.scatter(date[num], data[num], marker=\"^\", color=\"green\")\n elif self.trade_history[num] == 1:\n plt.scatter(date[num],data[num], 
marker=\"v\", color=\"red\")\n\n ax.set_title(\"Cripto Price\")\n ax.set_xlabel(\"Day\")\n ax.set_ylabel(\"Price[$]\")\n plt.grid(fig)\n #print(\"===Show Figure===\")\n #plt.show(fig)\n \n print(\"Save Figure :\"+ filename)\n plt.savefig(filename)\n \n self.price_history=[]\n self.trade_history=[]\n"
] |
[
[
"sklearn.utils.shuffle",
"numpy.set_printoptions",
"numpy.std",
"numpy.float64",
"sklearn.preprocessing.StandardScaler",
"numpy.array"
],
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.savefig",
"numpy.float64",
"matplotlib.pyplot.grid",
"numpy.array",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qdmy/detectron2
|
[
"0a74634d804f64409770bab082b6501f2ac57641",
"0a74634d804f64409770bab082b6501f2ac57641"
] |
[
"detectron2/data/samplers/distributed_sampler.py",
"codebase/third_party/spos_ofa/ofa/imagenet_classification/elastic_nn/modules/dynamic_single_path_layers.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport itertools\nimport math\nfrom collections import defaultdict\nfrom typing import Optional\nimport torch\nfrom torch.utils.data.sampler import Sampler\n\nfrom detectron2.utils import comm\n\n\nclass TrainingSampler(Sampler):\n \"\"\"\n In training, we only care about the \"infinite stream\" of training data.\n So this sampler produces an infinite stream of indices and\n all workers cooperate to correctly shuffle the indices and sample different indices.\n\n The samplers in each worker effectively produces `indices[worker_id::num_workers]`\n where `indices` is an infinite stream of indices consisting of\n `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)\n or `range(size) + range(size) + ...` (if shuffle is False)\n \"\"\"\n\n def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n self._size = size\n assert size > 0\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n if self._shuffle:\n yield from torch.randperm(self._size, generator=g).tolist()\n else:\n yield from torch.arange(self._size).tolist()\n\n\nclass RepeatFactorTrainingSampler(Sampler):\n \"\"\"\n Similar to TrainingSampler, but a sample may appear more times than others based\n on its \"repeat factor\". This is suitable for training on class imbalanced datasets like LVIS.\n \"\"\"\n\n def __init__(self, repeat_factors, *, shuffle=True, seed=None):\n \"\"\"\n Args:\n repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's\n full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n # Split into whole number (_int_part) and fractional (_frac_part) parts.\n self._int_part = torch.trunc(repeat_factors)\n self._frac_part = repeat_factors - self._int_part\n\n @staticmethod\n def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):\n \"\"\"\n Compute (fractional) per-image repeat factors based on category frequency.\n The repeat factor for an image is a function of the frequency of the rarest\n category labeled in that image. 
The \"frequency of category c\" in [0, 1] is defined\n as the fraction of images in the training set (without repeats) in which category c\n appears.\n See :paper:`lvis` (>= v2) Appendix B.2.\n\n Args:\n dataset_dicts (list[dict]): annotations in Detectron2 dataset format.\n repeat_thresh (float): frequency threshold below which data is repeated.\n If the frequency is half of `repeat_thresh`, the image will be\n repeated twice.\n\n Returns:\n torch.Tensor:\n the i-th element is the repeat factor for the dataset image at index i.\n \"\"\"\n # 1. For each category c, compute the fraction of images that contain it: f(c)\n category_freq = defaultdict(int)\n for dataset_dict in dataset_dicts: # For each image (without repeats)\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n for cat_id in cat_ids:\n category_freq[cat_id] += 1\n num_images = len(dataset_dicts)\n for k, v in category_freq.items():\n category_freq[k] = v / num_images\n\n # 2. For each category c, compute the category-level repeat factor:\n # r(c) = max(1, sqrt(t / f(c)))\n category_rep = {\n cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))\n for cat_id, cat_freq in category_freq.items()\n }\n\n # 3. For each image I, compute the image-level repeat factor:\n # r(I) = max_{c in I} r(c)\n rep_factors = []\n for dataset_dict in dataset_dicts:\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)\n rep_factors.append(rep_factor)\n\n return torch.tensor(rep_factors, dtype=torch.float32)\n\n def _get_epoch_indices(self, generator):\n \"\"\"\n Create a list of dataset indices (with repeats) to use for one epoch.\n\n Args:\n generator (torch.Generator): pseudo random number generator used for\n stochastic rounding.\n\n Returns:\n torch.Tensor: list of dataset indices to use in one epoch. 
Each index\n is repeated based on its calculated repeat factor.\n \"\"\"\n # Since repeat factors are fractional, we use stochastic rounding so\n # that the target repeat factor is achieved in expectation over the\n # course of training\n rands = torch.rand(len(self._frac_part), generator=generator)\n rep_factors = self._int_part + (rands < self._frac_part).float()\n # Construct a list of indices in which we repeat images as specified\n indices = []\n for dataset_index, rep_factor in enumerate(rep_factors):\n indices.extend([dataset_index] * int(rep_factor.item()))\n return torch.tensor(indices, dtype=torch.int64)\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n # Sample indices with repeats determined by stochastic rounding; each\n # \"epoch\" may have a slightly different size due to the rounding.\n indices = self._get_epoch_indices(g)\n if self._shuffle:\n randperm = torch.randperm(len(indices), generator=g)\n yield from indices[randperm].tolist()\n else:\n yield from indices.tolist()\n\n\nclass InferenceSampler(Sampler):\n \"\"\"\n Produce indices for inference across all workers.\n Inference needs to run on the __exact__ set of samples,\n therefore when the total number of samples is not divisible by the number of workers,\n this sampler produces different number of samples on different workers.\n \"\"\"\n\n def __init__(self, size: int):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n \"\"\"\n self._size = size\n assert size > 0\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n shard_size = (self._size - 1) // self._world_size + 1\n begin = shard_size * self._rank\n end = min(shard_size * (self._rank + 1), self._size)\n self._local_indices = range(begin, end)\n\n def __iter__(self):\n yield from self._local_indices\n\n def __len__(self):\n return len(self._local_indices)\n\n\nclass InferenceSampler_controller(Sampler):\n \"\"\"\n Produce indices for inference across all workers.\n Inference needs to run on the __exact__ set of samples,\n therefore when the total number of samples is not divisible by the number of workers,\n this sampler produces different number of samples on different workers.\n \"\"\"\n\n def __init__(self, size: list):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n \"\"\"\n self.superclass_id = 0\n self._local_indices_per_superclass = []\n for superclass_size in size:\n _size = superclass_size\n assert _size > 0\n _rank = comm.get_rank()\n _world_size = comm.get_world_size()\n\n shard_size = (_size - 1) // _world_size + 1\n begin = shard_size * _rank\n end = min(shard_size * (_rank + 1), _size)\n _local_indices = range(begin, end)\n self._local_indices_per_superclass.append(_local_indices)\n\n def __iter__(self):\n yield from self._local_indices_per_superclass[self.superclass_id]\n\n def __len__(self):\n return len(self._local_indices_per_superclass[self.superclass_id])\n \n def set_superclass_id(self, superclass_index: int):\n self.superclass_id = superclass_index",
"import copy\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\n\nfrom codebase.third_party.spos_ofa.ofa.imagenet_classification.elastic_nn.modules.dynamic_layers import copy_bn, \\\n adjust_bn_according_to_idx\nfrom codebase.third_party.spos_ofa.ofa.utils import MyModule, val2list, get_net_device, build_activation, \\\n make_divisible, SEModule, MyNetwork\nfrom codebase.third_party.spos_ofa.ofa.utils import get_same_padding, MyConv2d\nfrom codebase.third_party.spos_ofa.ofa.utils.layers import MBConvLayer, IdentityLayer, set_layer_from_config\nfrom codebase.third_party.spos_ofa.ofa.utils.layers import PreResNetBasicBlock, ZeroLayer\nfrom .dynamic_op import DynamicConv2d, DynamicBatchNorm2d, DynamicSE, DynamicGroupNorm, get_dynamic_norm\n\n\nclass DynamicPreResNetSinglePathBasicBlock(MyModule):\n\n def __init__(self, in_channel_list, out_channel_list,\n kernel_size=3, stride=1, act_func='relu',\n downsample_mode='conv', block_type=\"both_preact\"):\n super(DynamicPreResNetSinglePathBasicBlock, self).__init__()\n\n self.in_channel_list = in_channel_list\n self.out_channel_list = out_channel_list\n self.delta_out_channel_list = [self.out_channel_list[i] - self.out_channel_list[i - 1] for i in\n range(1, len(self.out_channel_list))]\n self.delta_out_channel_list.insert(0, self.out_channel_list[0])\n\n self.kernel_size = kernel_size\n self.stride = stride\n self.act_func = act_func\n self.downsample_mode = downsample_mode\n self.block_type = block_type\n\n self.max_output_channel = max(self.out_channel_list)\n\n # build modules\n self.conv1 = nn.Sequential(OrderedDict([\n ('bn', DynamicBatchNorm2d(max(in_channel_list))),\n ('act', build_activation(self.act_func, inplace=True)),\n ('conv', DynamicConv2d(max(self.in_channel_list), self.max_output_channel, kernel_size, stride)),\n ]))\n\n self.conv2 = nn.Sequential(OrderedDict([\n ('bn', DynamicBatchNorm2d(self.max_output_channel)),\n ('act', build_activation(self.act_func, inplace=True)),\n ('conv', DynamicConv2d(self.max_output_channel, self.max_output_channel, kernel_size)),\n ]))\n\n if self.stride == 1 and self.in_channel_list == self.out_channel_list:\n self.downsample = IdentityLayer(max(self.in_channel_list), max(self.out_channel_list))\n elif self.downsample_mode == 'conv':\n self.downsample = nn.Sequential(OrderedDict([\n ('conv', DynamicConv2d(max(self.in_channel_list), max(self.out_channel_list), stride=stride)),\n ]))\n else:\n raise NotImplementedError\n\n max_out_channels = max(self.out_channel_list)\n self.active_out_channel = max_out_channels\n\n channel_masks = []\n prev_out_channels = None\n for out_channels in self.out_channel_list:\n channel_mask = torch.ones(max_out_channels)\n channel_mask *= nn.functional.pad(torch.ones(out_channels), [0, max_out_channels - out_channels], value=0)\n if prev_out_channels:\n channel_mask *= nn.functional.pad(torch.zeros(prev_out_channels),\n [0, max_out_channels - prev_out_channels], value=1)\n channel_mask = channel_mask.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n prev_out_channels = out_channels\n channel_masks.append(channel_mask)\n self.register_buffer(\"channel_masks\", torch.stack(channel_masks, dim=0))\n self.register_buffer(\"delta_out_channels\", torch.FloatTensor(self.delta_out_channel_list))\n\n def forward(self, x, cum_indicator):\n self.conv1.conv.active_out_channel = self.max_output_channel\n self.conv2.conv.active_out_channel = self.max_output_channel\n if not 
isinstance(self.downsample, IdentityLayer):\n self.downsample.conv.active_out_channel = self.max_output_channel\n\n cum_indicator = cum_indicator.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)\n current_channel_mask = (cum_indicator * self.channel_masks).sum(0)\n\n if self.block_type == \"half_preact\":\n x = self.conv1.bn(x)\n x = self.conv1.act(x)\n residual = x\n x = self.conv1.conv(x)\n x = self.conv2.bn(x)\n x = self.conv2.act(x)\n x = x * current_channel_mask\n x = self.conv2.conv(x)\n elif self.block_type == \"both_preact\":\n residual = x\n x = self.conv1.bn(x)\n x = self.conv1.act(x)\n x = self.conv1.conv(x)\n x = self.conv2.bn(x)\n x = self.conv2.act(x)\n x = x * current_channel_mask\n x = self.conv2.conv(x)\n\n residual = self.downsample(residual)\n x = x + residual\n return x\n\n @property\n def module_str(self):\n return '(%s, %s)' % (\n '%dx%d_PreResNetBasicBlockConv_in->%d_S%d' % (\n self.kernel_size, self.kernel_size, self.active_out_channel, self.stride\n ),\n 'Identity' if isinstance(self.downsample, IdentityLayer) else self.downsample_mode,\n )\n\n @property\n def config(self):\n return {\n 'name': DynamicPreResNetSinglePathBasicBlock.__name__,\n 'in_channel_list': self.in_channel_list,\n 'out_channel_list': self.out_channel_list,\n 'kernel_size': self.kernel_size,\n 'stride': self.stride,\n 'act_func': self.act_func,\n 'downsample_mode': self.downsample_mode,\n 'block_type': self.block_type,\n }\n\n def extra_repr(self):\n s = super().extra_repr()\n s += \", block_type={}\".format(self.block_type)\n return s\n\n @staticmethod\n def build_from_config(config):\n return DynamicPreResNetSinglePathBasicBlock(**config)\n\n ############################################################################################\n\n @property\n def in_channels(self):\n return max(self.in_channel_list)\n\n @property\n def out_channels(self):\n return max(self.out_channel_list)\n\n ############################################################################################\n\n def get_active_subnet(self, in_channel, preserve_weight=True):\n # build the new layer\n sub_layer = set_layer_from_config(self.get_active_subnet_config(in_channel))\n sub_layer = sub_layer.to(get_net_device(self))\n if not preserve_weight:\n return sub_layer\n\n # copy weight from current layer\n sub_layer.conv1.conv.weight.data.copy_(\n self.conv1.conv.get_active_filter(self.active_out_channel, in_channel).data)\n copy_bn(sub_layer.conv1.bn, self.conv1.bn.bn)\n\n sub_layer.conv2.conv.weight.data.copy_(\n self.conv2.conv.get_active_filter(self.active_out_channel, self.active_out_channel).data)\n copy_bn(sub_layer.conv2.bn, self.conv2.bn.bn)\n\n if not isinstance(self.downsample, IdentityLayer):\n sub_layer.downsample.conv.weight.data.copy_(\n self.downsample.conv.get_active_filter(self.active_out_channel, in_channel).data)\n\n return sub_layer\n\n def get_active_subnet_config(self, in_channel):\n return {\n 'name': PreResNetBasicBlock.__name__,\n 'in_channels': in_channel,\n 'out_channels': self.active_out_channel,\n 'max_out_channels': max(self.out_channel_list),\n 'kernel_size': self.kernel_size,\n 'stride': self.stride,\n 'act_func': self.act_func,\n 'groups': 1,\n 'downsample_mode': self.downsample_mode,\n 'block_type': self.block_type,\n }\n\n\nclass DynamicSinglePathSeparableConv2d(nn.Module):\n KERNEL_TRANSFORM_MODE = 1 # None or 1\n\n def __init__(self, max_in_channels, kernel_size_list, stride=1, dilation=1):\n super(DynamicSinglePathSeparableConv2d, self).__init__()\n\n self.max_in_channels = 
max_in_channels\n self.kernel_size_list = kernel_size_list\n self.stride = stride\n self.dilation = dilation\n\n self.conv = nn.Conv2d(\n self.max_in_channels, self.max_in_channels, max(self.kernel_size_list), self.stride,\n groups=self.max_in_channels, bias=False,\n )\n\n self._ks_set = list(set(self.kernel_size_list))\n self._ks_set.sort() # e.g., [3, 5, 7]\n if self.KERNEL_TRANSFORM_MODE is not None:\n # register scaling parameters\n # 7to5_matrix, 5to3_matrix\n scale_params = {}\n for i in range(len(self._ks_set) - 1):\n ks_small = self._ks_set[i]\n ks_larger = self._ks_set[i + 1]\n param_name = '%dto%d' % (ks_larger, ks_small)\n # noinspection PyArgumentList\n scale_params['%s_matrix' % param_name] = Parameter(torch.eye(ks_small ** 2))\n for name, param in scale_params.items():\n self.register_parameter(name, param)\n\n self.delta_kernel_size_list = [self.kernel_size_list[i] - self.kernel_size_list[i - 1] for i in\n range(1, len(self.kernel_size_list))]\n self.delta_kernel_size_list.insert(0, self.kernel_size_list[0])\n\n max_kernel_size = max(kernel_size_list)\n kernel_masks = []\n prev_kernel_size = None\n for kernel_size in kernel_size_list:\n kernel_mask = torch.ones(max_kernel_size, max_kernel_size)\n kernel_mask *= nn.functional.pad(torch.ones(kernel_size, kernel_size),\n [(max_kernel_size - kernel_size) // 2] * 4, value=0)\n if prev_kernel_size:\n kernel_mask *= nn.functional.pad(torch.zeros(prev_kernel_size, prev_kernel_size),\n [(max_kernel_size - prev_kernel_size) // 2] * 4, value=1)\n kernel_mask = kernel_mask.unsqueeze(0).unsqueeze(0)\n prev_kernel_size = kernel_size\n kernel_masks.append(kernel_mask)\n self.register_buffer(\"kernel_masks\", torch.stack(kernel_masks, dim=0) if kernel_size_list else None)\n self.register_buffer(\"delta_kernel_sizes\", torch.FloatTensor(self.delta_kernel_size_list))\n\n self.active_kernel_size = max(self.kernel_size_list)\n\n def forward(self, x, kernel_cum_indicator):\n in_channel = x.size(1)\n\n kernel_size = max(self.kernel_size_list)\n kernel_cum_indicator = kernel_cum_indicator[0].unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)\n current_kernel_mask = (kernel_cum_indicator * self.kernel_masks).sum(0)\n\n filters = current_kernel_mask * self.conv.weight\n\n padding = get_same_padding(kernel_size)\n filters = self.conv.weight_standardization(filters) if isinstance(self.conv, MyConv2d) else filters\n y = F.conv2d(\n x, filters, None, self.stride, padding, self.dilation, in_channel\n )\n return y\n\n\nclass DynamicSinglePathMBConvLayer(MyModule):\n\n def __init__(self, in_channel_list, out_channel_list,\n kernel_size_list=3, expand_ratio_list=6, stride=1, act_func='relu6', use_se=False, norm=\"BN\"):\n super(DynamicSinglePathMBConvLayer, self).__init__()\n\n self.in_channel_list = in_channel_list\n self.out_channel_list = out_channel_list\n\n self.kernel_size_list = val2list(kernel_size_list)\n self.expand_ratio_list = val2list(expand_ratio_list)\n\n self.stride = stride\n self.act_func = act_func\n self.use_se = use_se\n\n # build modules\n max_middle_channel = make_divisible(\n round(max(self.in_channel_list) * max(self.expand_ratio_list)), MyNetwork.CHANNEL_DIVISIBLE)\n self.middle_channel_list = [\n make_divisible(round(max(self.in_channel_list)) * expand_ratio, MyNetwork.CHANNEL_DIVISIBLE) for\n expand_ratio in self.expand_ratio_list]\n self.delta_middle_channel_list = [self.middle_channel_list[i] - self.middle_channel_list[i - 1] for i in\n range(1, len(self.middle_channel_list))]\n self.delta_middle_channel_list.insert(0, 
self.middle_channel_list[0])\n\n if max(self.expand_ratio_list) == 1:\n self.inverted_bottleneck = None\n else:\n self.inverted_bottleneck = nn.Sequential(OrderedDict([\n ('conv', DynamicConv2d(max(self.in_channel_list), max_middle_channel)),\n ('bn', get_dynamic_norm(norm, max_middle_channel)), # DynamicBatchNorm2d(max_middle_channel)),\n ('act', build_activation(self.act_func)),\n ]))\n\n self.depth_conv = nn.Sequential(OrderedDict([\n ('conv', DynamicSinglePathSeparableConv2d(max_middle_channel, self.kernel_size_list, self.stride)),\n ('bn', get_dynamic_norm(norm, max_middle_channel)), # DynamicBatchNorm2d(max_middle_channel)),\n ('act', build_activation(self.act_func))\n ]))\n if self.use_se:\n self.depth_conv.add_module('se', DynamicSE(max_middle_channel))\n\n self.point_linear = nn.Sequential(OrderedDict([\n ('conv', DynamicConv2d(max_middle_channel, max(self.out_channel_list))),\n ('bn', get_dynamic_norm(norm, max(self.out_channel_list))), # DynamicBatchNorm2d(max(self.out_channel_list))),\n ]))\n\n channel_masks = []\n prev_out_channels = None\n for mid_channels in self.middle_channel_list:\n channel_mask = torch.ones(max_middle_channel)\n channel_mask *= nn.functional.pad(torch.ones(mid_channels), [0, max_middle_channel - mid_channels], value=0)\n if prev_out_channels:\n channel_mask *= nn.functional.pad(torch.zeros(prev_out_channels),\n [0, max_middle_channel - prev_out_channels], value=1)\n channel_mask = channel_mask.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n prev_out_channels = mid_channels\n channel_masks.append(channel_mask)\n self.register_buffer(\"channel_masks\", torch.stack(channel_masks, dim=0))\n self.register_buffer(\"delta_middle_channels\", torch.FloatTensor(self.delta_middle_channel_list))\n\n self.active_kernel_size = max(self.kernel_size_list)\n self.active_expand_ratio = max(self.expand_ratio_list)\n self.active_out_channel = max(self.out_channel_list)\n\n def forward(self, x, ratio_cum_indicator, kernel_size_cum_indicator):\n in_channel = x.size(1)\n\n ratio_cum_indicator = ratio_cum_indicator[0].unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)\n current_channel_mask = (ratio_cum_indicator * self.channel_masks).sum(0)\n\n if self.inverted_bottleneck is not None:\n x = self.inverted_bottleneck(x)\n x = self.depth_conv.conv(x, kernel_size_cum_indicator)\n x = self.depth_conv.bn(x)\n x = self.depth_conv.act(x)\n if self.use_se:\n x = self.depth_conv.se(x)\n x = x * current_channel_mask\n x = self.point_linear(x)\n return x\n\n @property\n def module_str(self):\n if self.use_se:\n return 'SE(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)\n else:\n return '(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)\n\n @property\n def config(self):\n return {\n 'name': DynamicSinglePathMBConvLayer.__name__,\n 'in_channel_list': self.in_channel_list,\n 'out_channel_list': self.out_channel_list,\n 'kernel_size_list': self.kernel_size_list,\n 'expand_ratio_list': self.expand_ratio_list,\n 'stride': self.stride,\n 'act_func': self.act_func,\n 'use_se': self.use_se,\n }\n\n @staticmethod\n def build_from_config(config):\n return DynamicSinglePathMBConvLayer(**config)\n\n ############################################################################################\n\n @property\n def in_channels(self):\n return max(self.in_channel_list)\n\n @property\n def out_channels(self):\n return max(self.out_channel_list)\n\n def active_middle_channel(self, in_channel):\n return 
make_divisible(round(in_channel * self.active_expand_ratio), MyNetwork.CHANNEL_DIVISIBLE)\n\n ############################################################################################\n\n def get_active_subnet(self, in_channel, preserve_weight=True):\n # build the new layer\n sub_layer = set_layer_from_config(self.get_active_subnet_config(in_channel))\n sub_layer = sub_layer.to(get_net_device(self))\n if not preserve_weight:\n return sub_layer\n\n middle_channel = self.active_middle_channel(in_channel)\n # copy weight from current layer\n if sub_layer.inverted_bottleneck is not None:\n sub_layer.inverted_bottleneck.conv.weight.data.copy_(\n self.inverted_bottleneck.conv.get_active_filter(middle_channel, in_channel).data,\n )\n copy_bn(sub_layer.inverted_bottleneck.bn, self.inverted_bottleneck.bn.bn)\n\n sub_layer.depth_conv.conv.weight.data.copy_(\n self.depth_conv.conv.get_active_filter(middle_channel, self.active_kernel_size).data\n )\n copy_bn(sub_layer.depth_conv.bn, self.depth_conv.bn.bn)\n\n if self.use_se:\n se_mid = make_divisible(middle_channel // SEModule.REDUCTION, divisor=MyNetwork.CHANNEL_DIVISIBLE)\n sub_layer.depth_conv.se.fc.reduce.weight.data.copy_(\n self.depth_conv.se.get_active_reduce_weight(se_mid, middle_channel).data\n )\n sub_layer.depth_conv.se.fc.reduce.bias.data.copy_(\n self.depth_conv.se.get_active_reduce_bias(se_mid).data\n )\n\n sub_layer.depth_conv.se.fc.expand.weight.data.copy_(\n self.depth_conv.se.get_active_expand_weight(se_mid, middle_channel).data\n )\n sub_layer.depth_conv.se.fc.expand.bias.data.copy_(\n self.depth_conv.se.get_active_expand_bias(middle_channel).data\n )\n\n sub_layer.point_linear.conv.weight.data.copy_(\n self.point_linear.conv.get_active_filter(self.active_out_channel, middle_channel).data\n )\n copy_bn(sub_layer.point_linear.bn, self.point_linear.bn.bn)\n\n return sub_layer\n\n def get_active_subnet_config(self, in_channel):\n return {\n 'name': MBConvLayer.__name__,\n 'in_channels': in_channel,\n 'out_channels': self.active_out_channel,\n 'kernel_size': self.active_kernel_size,\n 'stride': self.stride,\n 'expand_ratio': self.active_expand_ratio,\n 'mid_channels': self.active_middle_channel(in_channel),\n 'act_func': self.act_func,\n 'use_se': self.use_se,\n }\n\n def re_organize_middle_weights(self, expand_ratio_stage=0):\n importance = torch.sum(torch.abs(self.point_linear.conv.conv.weight.data), dim=(0, 2, 3))\n if isinstance(self.depth_conv.bn, DynamicGroupNorm):\n channel_per_group = self.depth_conv.bn.channel_per_group\n importance_chunks = torch.split(importance, channel_per_group)\n for chunk in importance_chunks:\n chunk.data.fill_(torch.mean(chunk))\n importance = torch.cat(importance_chunks, dim=0)\n if expand_ratio_stage > 0:\n sorted_expand_list = copy.deepcopy(self.expand_ratio_list)\n sorted_expand_list.sort(reverse=True)\n target_width_list = [\n make_divisible(round(max(self.in_channel_list) * expand), MyNetwork.CHANNEL_DIVISIBLE)\n for expand in sorted_expand_list\n ]\n\n right = len(importance)\n base = - len(target_width_list) * 1e5\n for i in range(expand_ratio_stage + 1):\n left = target_width_list[i]\n importance[left:right] += base\n base += 1e5\n right = left\n\n sorted_importance, sorted_idx = torch.sort(importance, dim=0, descending=True)\n self.point_linear.conv.conv.weight.data = torch.index_select(\n self.point_linear.conv.conv.weight.data, 1, sorted_idx\n )\n\n adjust_bn_according_to_idx(self.depth_conv.bn.bn, sorted_idx)\n self.depth_conv.conv.conv.weight.data = torch.index_select(\n 
self.depth_conv.conv.conv.weight.data, 0, sorted_idx\n )\n\n if self.use_se:\n # se expand: output dim 0 reorganize\n se_expand = self.depth_conv.se.fc.expand\n se_expand.weight.data = torch.index_select(se_expand.weight.data, 0, sorted_idx)\n se_expand.bias.data = torch.index_select(se_expand.bias.data, 0, sorted_idx)\n # se reduce: input dim 1 reorganize\n se_reduce = self.depth_conv.se.fc.reduce\n se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 1, sorted_idx)\n # middle weight reorganize\n se_importance = torch.sum(torch.abs(se_expand.weight.data), dim=(0, 2, 3))\n se_importance, se_idx = torch.sort(se_importance, dim=0, descending=True)\n\n se_expand.weight.data = torch.index_select(se_expand.weight.data, 1, se_idx)\n se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 0, se_idx)\n se_reduce.bias.data = torch.index_select(se_reduce.bias.data, 0, se_idx)\n\n if self.inverted_bottleneck is not None:\n adjust_bn_according_to_idx(self.inverted_bottleneck.bn.bn, sorted_idx)\n self.inverted_bottleneck.conv.conv.weight.data = torch.index_select(\n self.inverted_bottleneck.conv.conv.weight.data, 0, sorted_idx\n )\n return None\n else:\n return sorted_idx\n\n\nclass SinglePathResidualBlock(MyModule):\n\n def __init__(self, conv, shortcut):\n super(SinglePathResidualBlock, self).__init__()\n\n self.conv = conv\n self.shortcut = shortcut\n\n def forward(self, x, ratio_cum_indicator, kernel_size_cum_indicator):\n if self.conv is None or isinstance(self.conv, ZeroLayer):\n res = x\n elif self.shortcut is None or isinstance(self.shortcut, ZeroLayer):\n res = self.conv(x, ratio_cum_indicator, kernel_size_cum_indicator)\n else:\n res = self.conv(x, ratio_cum_indicator, kernel_size_cum_indicator) + self.shortcut(x)\n return res\n\n @property\n def module_str(self):\n return '(%s, %s)' % (\n self.conv.module_str if self.conv is not None else None,\n self.shortcut.module_str if self.shortcut is not None else None\n )\n\n @property\n def config(self):\n return {\n 'name': SinglePathResidualBlock.__name__,\n 'conv': self.conv.config if self.conv is not None else None,\n 'shortcut': self.shortcut.config if self.shortcut is not None else None,\n }\n\n @staticmethod\n def build_from_config(config):\n conv_config = config['conv'] if 'conv' in config else config['mobile_inverted_conv']\n conv = set_layer_from_config(conv_config)\n shortcut = set_layer_from_config(config['shortcut'])\n return SinglePathResidualBlock(conv, shortcut)\n\n @property\n def mobile_inverted_conv(self):\n return self.conv\n"
] |
[
[
"torch.Generator",
"torch.randperm",
"torch.trunc",
"torch.tensor",
"torch.arange"
],
[
"torch.abs",
"torch.mean",
"torch.ones",
"torch.cat",
"torch.zeros",
"torch.nn.functional.conv2d",
"torch.eye",
"torch.FloatTensor",
"torch.sort",
"torch.split",
"torch.stack",
"torch.index_select"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yushan111/analytics-zoo
|
[
"cf63e52e1dc2969a10fce56740a1fecb510a46d2",
"cf63e52e1dc2969a10fce56740a1fecb510a46d2"
] |
[
"pyzoo/zoo/xshard/pandas/preprocessing.py",
"pyzoo/zoo/tfpark/tf_optimizer.py"
] |
[
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\n\nimport ray\nfrom pyspark.context import SparkContext\n\nfrom zoo.ray import RayContext\nfrom zoo.xshard.shard import RayDataShards, RayPartition\nfrom zoo.xshard.utils import *\n\n\ndef read_csv(file_path, context):\n \"\"\"\n Read csv files to DataShards\n :param file_path: could be a csv file, multiple csv file paths separated by comma,\n a directory containing csv files.\n Supported file systems are local file system, hdfs, and s3.\n :param context: SparkContext or RayContext\n :return: DataShards\n \"\"\"\n if isinstance(context, RayContext):\n return read_file_ray(context, file_path, \"csv\")\n elif isinstance(context, SparkContext):\n pass\n else:\n raise Exception(\"Context type should be RayContext or SparkContext\")\n\n\ndef read_json(file_path, context):\n \"\"\"\n Read json files to DataShards\n :param file_path: could be a json file, multiple json file paths separated by comma,\n a directory containing json files.\n Supported file systems are local file system, hdfs, and s3.\n :param context: SparkContext or RayContext\n :return: DataShards\n \"\"\"\n if isinstance(context, RayContext):\n return read_file_ray(context, file_path, \"json\")\n elif isinstance(context, SparkContext):\n pass\n else:\n raise Exception(\"Context type should be RayContext or SparkContext\")\n\n\ndef read_file_ray(context, file_path, file_type):\n file_path_splits = file_path.split(',')\n if len(file_path_splits) == 1:\n # only one file\n if os.path.splitext(file_path)[-1] == \".\" + file_type:\n file_paths = [file_path]\n # directory\n else:\n file_url_splits = file_path.split(\"://\")\n prefix = file_url_splits[0]\n if prefix == \"hdfs\":\n server_address = file_url_splits[1].split('/')[0]\n import pyarrow as pa\n fs = pa.hdfs.connect()\n files = fs.ls(file_path)\n # only get json/csv files\n files = [file for file in files if os.path.splitext(file)[1] == \".\" + file_type]\n file_paths = [\"hdfs://\" + server_address + file for file in files]\n elif prefix == \"s3\":\n import boto3\n path_parts = file_url_splits[1].split('/')\n bucket = path_parts.pop(0)\n key = \"/\".join(path_parts)\n env = context.ray_service.env\n access_key_id = env[\"AWS_ACCESS_KEY_ID\"]\n secret_access_key = env[\"AWS_SECRET_ACCESS_KEY\"]\n s3_client = boto3.Session(\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key,\n ).client('s3', verify=False)\n keys = []\n resp = s3_client.list_objects_v2(Bucket=bucket,\n Prefix=key)\n for obj in resp['Contents']:\n keys.append(obj['Key'])\n files = list(dict.fromkeys(keys))\n # only get json/csv files\n files = [file for file in files if os.path.splitext(file)[1] == \".\" + file_type]\n file_paths = [os.path.join(\"s3://\" + bucket, file) for file in files]\n else:\n # only get json/csv files\n file_paths = [os.path.join(file_path, file)\n for file in os.listdir(file_path)\n if os.path.splitext(file)[1] == \".\" + file_type]\n else:\n file_paths = 
file_path_splits\n\n num_executors = context.num_ray_nodes\n num_cores = context.ray_node_cpu_cores\n num_partitions = num_executors * num_cores\n # remove empty partitions\n file_partition_list = [partition for partition\n in list(chunk(file_paths, num_partitions)) if partition]\n # create shard actor to read data\n shards = [RayPandasShard.remote() for i in range(len(file_partition_list))]\n done_ids, undone_ids = \\\n ray.wait([shard.read_file_partitions.remote(file_partition_list[i], file_type)\n for i, shard in enumerate(shards)], num_returns=len(shards))\n assert len(undone_ids) == 0\n\n # create initial partition\n partitions = [RayPartition([shard]) for shard in shards]\n data_shards = RayDataShards(partitions)\n return data_shards\n\n\[email protected]\nclass RayPandasShard(object):\n \"\"\"\n Actor to read csv/json file to Pandas DataFrame and manipulate data\n \"\"\"\n\n def __init__(self, data=None):\n self.data = data\n\n def read_file_partitions(self, paths, file_type):\n df_list = []\n import pandas as pd\n prefix = paths[0].split(\"://\")[0]\n if prefix == \"hdfs\":\n import pyarrow as pa\n fs = pa.hdfs.connect()\n print(\"Start loading files\")\n for path in paths:\n with fs.open(path, 'rb') as f:\n if file_type == \"json\":\n df = pd.read_json(f, orient='columns', lines=True)\n elif file_type == \"csv\":\n df = pd.read_csv(f)\n else:\n raise Exception(\"Unsupported file type\")\n df_list.append(df)\n elif prefix == \"s3\":\n import boto3\n access_key_id = os.environ[\"AWS_ACCESS_KEY_ID\"]\n secret_access_key = os.environ[\"AWS_SECRET_ACCESS_KEY\"]\n s3_client = boto3.Session(\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key,\n ).client('s3', verify=False)\n for path in paths:\n path_parts = path.split(\"://\")[1].split('/')\n bucket = path_parts.pop(0)\n key = \"/\".join(path_parts)\n obj = s3_client.get_object(Bucket=bucket, Key=key)\n if file_type == \"json\":\n df = pd.read_json(obj['Body'], orient='columns', lines=True)\n elif file_type == \"csv\":\n df = pd.read_csv(obj['Body'])\n else:\n raise Exception(\"Unsupported file type\")\n df_list.append(df)\n else:\n for path in paths:\n if file_type == \"json\":\n df = pd.read_json(path, orient='columns', lines=True)\n elif file_type == \"csv\":\n df = pd.read_csv(path)\n else:\n raise Exception(\"Unsupported file type\")\n df_list.append(df)\n self.data = pd.concat(df_list)\n return 0\n\n def apply(self, func, *args):\n self.data = func(self.data, *args)\n return 0\n\n def get_data(self):\n return self.data\n",
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport json\nimport logging\nimport os\nimport sys\nimport tempfile\n\nfrom bigdl.nn.criterion import Criterion\nfrom bigdl.nn.layer import Layer\nfrom bigdl.optim.optimizer import MaxEpoch, EveryEpoch\nfrom bigdl.util.common import to_list, JavaValue\n\nfrom zoo.tfpark.zoo_optimizer import FakeOptimMethod\nfrom zoo.common.utils import callZooFunc\nfrom zoo.pipeline.api.keras.engine.topology import to_bigdl_metric, Loss, OptimMethod\nfrom zoo.pipeline.api.net.utils import find_placeholders, to_bigdl_optim_method, find_tensors\nfrom zoo.pipeline.estimator import Estimator\nfrom zoo.util import nest\n\nimport tensorflow as tf\nfrom tensorflow import gfile\n\nif sys.version >= '3':\n long = int\n unicode = str\n\n\nclass IdentityCriterion(Criterion):\n def __init__(self):\n super(IdentityCriterion, self).__init__(None, \"float\")\n\n\nclass TFValidationMethod(JavaValue):\n def __init__(self, val_method, name, output_indices, label_indices):\n JavaValue.__init__(self, None, \"float\",\n val_method, name, output_indices, label_indices)\n\n\nclass StatelessMetric(JavaValue):\n def __init__(self, metric_name, idx):\n self.name = metric_name\n self.idx = idx\n JavaValue.__init__(self, None, \"float\", metric_name, idx)\n\n\nclass BigDLMetric(object):\n def __init__(self, val_method, outputs, labels):\n self.val_method = val_method\n self.outputs = outputs\n self.labels = labels\n\n\nclass TFTrainingHelper(Layer):\n def __init__(self, path, config_proto, saver, meta, sess):\n self.saver = saver\n self.meta = meta\n self.export_dir = path\n self.sess = sess\n\n if config_proto is not None:\n import tensorflow as tf\n assert isinstance(config_proto, tf.ConfigProto), \\\n \"session_config should be a tf.ConfigProto\"\n config_proto.use_per_session_threads = True\n byte_arr = bytearray(config_proto.SerializeToString())\n else:\n byte_arr = None\n\n super(TFTrainingHelper, self).__init__(None, \"float\", path, byte_arr)\n\n def save_checkpoint(self):\n callZooFunc(self.bigdl_type, \"saveCheckpoint\",\n self.value)\n\n def get_weights_to_python(self):\n self.save_checkpoint()\n self.saver.restore(self.sess, os.path.join(self.export_dir, \"model\"))\n\n def load_checkpoint(self, path):\n callZooFunc(self.bigdl_type, \"loadZooCheckpoint\", self.value, path)\n\n\ndef _to_operation_name(name):\n return name.split(\":\")[0]\n\n\ndef _to_floats(vs):\n return [float(v) for v in vs]\n\n\nclass TFModel(object):\n def __init__(self, training_helper_layer, criterion, val_methods):\n\n self.training_helper_layer = training_helper_layer\n self.criterion = criterion\n self.val_methods = val_methods\n\n @staticmethod\n def _expand_inputs(inputs, tensors_with_value, loss):\n additional_inputs = []\n additional_values = []\n all_required_inputs = find_placeholders([loss])\n all_required_inputs_names = [v.name for v in all_required_inputs]\n if tensors_with_value:\n for t, v in tensors_with_value.items():\n if t.name in 
all_required_inputs_names:\n additional_inputs.append(t)\n additional_values.append(v)\n\n if not isinstance(inputs, list):\n inputs = nest.flatten(inputs)\n\n return inputs, additional_inputs, additional_values\n\n @staticmethod\n def _process_session_config(session_config):\n if session_config is not None:\n\n assert isinstance(session_config, tf.ConfigProto), \\\n \"session_config should be a tf.ConfigProto\"\n session_config.use_per_session_threads = True\n return session_config\n\n @staticmethod\n def _process_grads(graph, grads):\n\n with graph.as_default():\n from zoo.util.tf import process_grad\n grads = [process_grad(grad) for grad in grads]\n return grads\n\n @staticmethod\n def _process_metrics(graph, metrics):\n\n outputs = []\n val_methods = None\n if metrics is not None:\n idx = 0\n val_methods = []\n for metric_name in metrics:\n metric = metrics[metric_name]\n if tf.is_numeric_tensor(metric):\n outputs.append(metric)\n val_methods.append(StatelessMetric(metric_name, idx))\n idx += 1\n else:\n outputs += metric.outputs\n with graph.as_default():\n val_labels = [tf.identity(v) for v in metric.labels]\n outputs += val_labels\n method = TFValidationMethod(metric.val_method,\n metric_name,\n list(range(idx, idx + len(metric.outputs))),\n list(range(idx + len(metric.outputs),\n idx + len(metric.outputs)\n + len(val_labels))))\n val_methods.append(method)\n idx += len(metric.outputs) + len(val_labels)\n\n outputs = [tf.to_float(output) for output in outputs]\n return outputs, val_methods\n\n @staticmethod\n def _process_variables(graph, variables, updates):\n\n all_trainable_variables = variables\n\n name2idx = dict([(v.name, idx) for idx, v in enumerate(all_trainable_variables)])\n\n all_variables = graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n\n update_ops = graph.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n if updates is not None:\n update_ops += updates\n\n trainable_variables = [0] * len(all_trainable_variables)\n trainable_assigns = [0] * len(all_trainable_variables)\n trainable_variable_placeholders = [0] * len(all_trainable_variables)\n extra_variables = []\n extra_variable_assigns = []\n extra_variable_assign_placeholders = []\n for v in all_variables:\n p = tf.placeholder(dtype=v.dtype, shape=v.shape)\n a = tf.assign(v, p)\n\n # special treatment for ResourceVariable\n if v.op.type == \"VarHandleOp\":\n v_float_value = tf.to_float(v.read_value())\n else:\n v_float_value = tf.to_float(v)\n\n if v.name in name2idx:\n trainable_variables[name2idx[v.name]] = v_float_value\n trainable_assigns[name2idx[v.name]] = a\n trainable_variable_placeholders[name2idx[v.name]] = p\n else:\n extra_variables.append(v_float_value)\n extra_variable_assigns.append(a)\n extra_variable_assign_placeholders.append(p)\n\n extra_variable_assign = tf.group(*extra_variable_assigns)\n trainable_assign = tf.group(*trainable_assigns)\n update_op = tf.group(update_ops)\n\n return trainable_variables, trainable_variable_placeholders, trainable_assign, \\\n extra_variables, extra_variable_assign_placeholders, \\\n extra_variable_assign, update_op\n\n @staticmethod\n def _save_to_dir(folder, sess, graph,\n metric_tensors,\n batch_size_tensor,\n loss_tensor, inputs, labels, predictions,\n trainable_variables,\n trainable_variable_placeholders,\n trainable_assign,\n extra_variables,\n extra_variable_assign_placeholders,\n extra_variable_assign,\n grads, update_op, train_op,\n additional_inputs,\n additional_values):\n saver = tf.train.Saver()\n if not os.path.isdir(folder):\n os.makedirs(folder)\n 
saver.save(sess, os.path.join(folder, \"model\"), write_meta_graph=False)\n\n meta = {\n \"inputs\": [i.name for i in inputs],\n \"input_types\": [i.dtype.as_datatype_enum for i in inputs],\n \"additional_inputs\": [i.name for i in additional_inputs],\n \"additional_input_types\": [i.dtype.as_datatype_enum for i in additional_inputs],\n \"labels\": [l.name for l in labels],\n \"label_types\": [i.dtype.as_datatype_enum for i in labels],\n \"predictions\": [t.name for t in predictions] if predictions else [],\n \"metric_tensors\": [t.name for t in metric_tensors],\n \"batch_size_tensor\": batch_size_tensor.name,\n \"loss_tensor\": loss_tensor.name,\n \"variables\": [v.name for v in trainable_variables],\n \"variable_types\": [v.dtype.as_datatype_enum for v in trainable_variable_placeholders],\n \"variable_assign_placeholders\": [v.name for v in trainable_variable_placeholders],\n \"assign_variable_op\": trainable_assign.name,\n \"extra_variables\": [v.name for v in extra_variables],\n \"extra_variable_types\": [v.dtype.as_datatype_enum for v\n in extra_variable_assign_placeholders],\n \"extra_variable_assign_placeholders\": [p.name for p in\n extra_variable_assign_placeholders],\n \"assign_extra_variable_op\": extra_variable_assign.name,\n \"grad_variables\": [g.name for g in grads],\n \"update_op\": update_op.name,\n \"restore_op\": saver.saver_def.restore_op_name,\n \"restore_path_placeholder\": saver.saver_def.filename_tensor_name,\n \"save_op\": _to_operation_name(saver.saver_def.save_tensor_name),\n \"save_path_placeholder\": saver.saver_def.filename_tensor_name,\n \"default_tensor_value\": [_to_floats(v) for v in additional_values],\n \"init_op\": tf.tables_initializer().name\n }\n\n if train_op is not None:\n meta[\"train_op\"] = train_op.name\n\n with open(os.path.join(folder, \"training_meta.json\"), \"w\") as f:\n f.write(json.dumps(meta))\n\n with gfile.GFile(os.path.join(folder, \"model.meta\"), \"wb\") as f:\n f.write(graph.as_graph_def().SerializeToString())\n\n return meta, saver\n\n @staticmethod\n def export(model_dir, loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,\n tensors_with_value, metrics, updates, train_op=None):\n inputs, additional_inputs, additional_values = \\\n TFModel._expand_inputs(inputs, tensors_with_value, loss_tensor)\n metric_tensors, val_methods = TFModel._process_metrics(graph, metrics)\n grads = TFModel._process_grads(graph, grads)\n\n with graph.as_default():\n batch_size_tensor = tf.to_float(tf.shape(inputs[0])[0])\n\n trainable_variables, trainable_variable_placeholders, trainable_assign, \\\n extra_variables, extra_variable_assign_placeholders, \\\n extra_variable_assign, update_op = \\\n TFModel._process_variables(graph, variables, updates)\n\n meta, saver = \\\n TFModel._save_to_dir(model_dir, sess, graph,\n metric_tensors,\n batch_size_tensor,\n loss_tensor, inputs, labels, predictions,\n trainable_variables,\n trainable_variable_placeholders,\n trainable_assign,\n extra_variables,\n extra_variable_assign_placeholders,\n extra_variable_assign,\n grads, update_op, train_op,\n additional_inputs,\n additional_values)\n return meta, saver, val_methods\n\n @staticmethod\n def create(loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,\n tensors_with_value, session_config, metrics, updates,\n model_dir, train_op=None):\n\n if model_dir is None:\n model_dir = tempfile.mkdtemp()\n else:\n if not os.path.isdir(model_dir):\n os.makedirs(model_dir)\n\n meta, saver, val_methods = TFModel.export(model_dir, 
loss_tensor, sess,\n inputs, labels, predictions, grads, variables,\n graph, tensors_with_value, metrics, updates,\n train_op)\n\n training_helper_layer = TFTrainingHelper(model_dir,\n session_config, saver, meta, sess)\n\n criterion = IdentityCriterion()\n\n return TFModel(training_helper_layer, criterion, val_methods)\n\n\nclass TFOptimizer:\n def __init__(self, tf_model, optim_method,\n sess=None, dataset=None,\n val_split=0.0,\n clip_norm=None, clip_value=None,\n model_dir=None):\n \"\"\"\n TFOptimizer is used for distributed training of TensorFlow\n on Spark/BigDL.\n\n Note that if grads and variables are not None, then they need to be sorted by name\n if you want to use multiple optimization methods for a TensorFlow model according to\n variable names.\n\n :param loss: The loss tensor of the TensorFlow model, should be a scalar\n :param optim_method: the optimization method to be used, such as bigdl.optim.optimizer.Adam\n :param sess: the current TensorFlow Session, if you want to used a pre-trained model, you\n should use the Session to load the pre-trained variables and pass it to TFOptimizer.\n \"\"\"\n\n self.optim_method = optim_method\n self.sess = sess\n self.dataset = dataset\n\n self.clip_norm = clip_norm\n if clip_value is not None and not isinstance(clip_value, tuple):\n raise ValueError(\"The clip_value argument should be a tuple (min_value, max_value)\")\n self.clip_constant = clip_value\n\n if self.dataset.batch_size <= 0:\n raise ValueError(\"You should set batch_size instead of batch_per_thread for training\")\n\n self.model_dir = model_dir\n\n self.tf_model = tf_model\n\n batch_size = self.dataset.batch_size\n\n sample_rdd = self.dataset.get_training_data()\n\n if val_split != 0.0:\n training_rdd, val_rdd = sample_rdd.randomSplit([1 - val_split, val_split])\n else:\n training_rdd = sample_rdd\n val_rdd = self.dataset.get_validation_data()\n\n self.training_rdd = training_rdd\n self.val_rdd = val_rdd\n self.batch_size = batch_size\n\n self.estimator = Estimator(self.tf_model.training_helper_layer,\n self.optim_method,\n self.model_dir)\n\n if self.clip_norm:\n self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)\n if self.clip_constant:\n min_value, max_value = self.clip_constant\n self.estimator.set_constant_gradient_clipping(min_value, max_value)\n\n def load_checkpoint(self, path, version):\n # todo make version optional\n model_path = os.path.join(path, \"model.{}\".format(version))\n optim_method_path = os.path.join(path, \"optimMethod-TFParkTraining.{}\".format(version))\n self.tf_model.training_helper_layer.load_checkpoint(model_path)\n self.optim_method = OptimMethod.load(optim_method_path)\n self.estimator = Estimator(self.tf_model.training_helper_layer,\n self.optim_method,\n self.model_dir)\n if self.clip_norm:\n self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)\n if self.clip_constant:\n min_value, max_value = self.clip_constant\n self.estimator.set_constant_gradient_clipping(min_value, max_value)\n\n @staticmethod\n def _get_or_create_session(session):\n if session is None:\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n else:\n sess = session\n return sess\n\n @staticmethod\n def _get_dataset_from_loss(loss):\n all_required_inputs = find_placeholders([loss])\n dataset = tf.get_collection(all_required_inputs[0].name)[0]\n return dataset\n\n @staticmethod\n def _get_vars_grads(loss):\n\n grads_vars = tf.train.GradientDescentOptimizer(0).compute_gradients(loss)\n grads_vars.sort(key=lambda grad_var: 
grad_var[1].name)\n variables = []\n grads = []\n for (grad, var) in grads_vars:\n if grad is not None:\n variables.append(var)\n grads.append(grad)\n return grads, variables\n\n @staticmethod\n def _get_vars_grads_from_train_op(train_op):\n def predicate(t):\n return t.name.split(\"/\")[-1].startswith(\"zoo_identity_op_for_grad\")\n\n grads = find_tensors([train_op], predicate)\n grad_ops = [grad.op for grad in grads]\n variables = []\n for grad in grad_ops:\n var = list(grad.control_inputs)[0]\n if var.name == \"VarHandleOp\":\n variables.append(var)\n else:\n variables.append(list(var.outputs)[0])\n # variables = [grad.op.control_inputs[0].outputs[0] for grad in grads]\n return grads, variables\n\n @classmethod\n def from_train_op(cls, train_op, loss, metrics=None, updates=None, sess=None, dataset=None,\n tensor_with_value=None, session_config=None, model_dir=None):\n sess = TFOptimizer._get_or_create_session(sess)\n grads, variables = TFOptimizer._get_vars_grads_from_train_op(train_op)\n if dataset is None:\n dataset = TFOptimizer._get_dataset_from_loss(loss)\n inputs = dataset._original_tensors\n if isinstance(inputs, tuple) and len(inputs) == 2:\n inputs, labels = inputs\n else:\n labels = []\n\n inputs = nest.flatten(inputs)\n labels = nest.flatten(labels)\n return TFOptimizer._from_grads(loss=loss, sess=sess, inputs=inputs, labels=labels,\n grads=grads,\n variables=variables, dataset=dataset, metrics=metrics,\n tensor_with_value=tensor_with_value,\n optim_method=FakeOptimMethod(),\n session_config=session_config, updates=updates,\n model_dir=model_dir, train_op=train_op)\n\n @classmethod\n def _from_grads(cls, loss, sess, inputs, labels, grads, variables, dataset, optim_method=None,\n val_split=0.0, clip_norm=None, clip_value=None,\n metrics=None, tensor_with_value=None, session_config=None,\n model_dir=None, updates=None, train_op=None):\n graph = loss.graph\n if metrics is None:\n metrics = {}\n\n tf_model = TFModel.create(loss, sess, inputs, labels, [], grads, variables, graph,\n tensor_with_value, session_config, metrics,\n updates, model_dir=None, train_op=train_op)\n return cls(tf_model, optim_method, sess=sess, dataset=dataset, val_split=val_split,\n clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)\n\n @classmethod\n def from_loss(cls, loss, optim_method, session=None, val_outputs=None,\n val_labels=None, val_method=None, val_split=0.0,\n clip_norm=None, clip_value=None, metrics=None,\n tensor_with_value=None, session_config=None, model_dir=None, updates=None):\n \"\"\"\n Create a TFOptimizer from a TensorFlow loss tensor.\n The loss tensor must come from a TensorFlow graph that only takes TFDataset.tensors and\n the tensors in `tensor_with_value` as inputs.\n :param loss: The loss tensor of the TensorFlow model, should be a scalar\n :param optim_method: the optimization method to be used, such as bigdl.optim.optimizer.Adam\n :param session: the current TensorFlow Session, if you want to used a pre-trained model,\n you should use the Session to load the pre-trained variables and pass it to TFOptimizer.\n :param val_outputs: the validation output TensorFlow tensor to be used by val_methods\n :param val_labels: the validation label TensorFlow tensor to be used by val_methods\n :param val_method: the BigDL val_method(s) to be used.\n :param val_split: Float between 0 and 1. Fraction of the training data to be used as\n validation data.\n :param clip_norm: float >= 0. 
Gradients will be clipped when their L2 norm exceeds\n this value.\n :param clip_value: float >= 0. Gradients will be clipped when their absolute value\n exceeds this value.\n :param metrics: a dictionary. The key should be a string representing the metric's name\n and the value should be the corresponding TensorFlow tensor, which should be a scalar.\n :param tensor_with_value: a dictionary. The key is TensorFlow tensor, usually a\n placeholder, the value of the dictionary is a tuple of two elements. The first one of\n the tuple is the value to feed to the tensor in training phase and the second one\n is the value to feed to the tensor in validation phase.\n :return: a TFOptimizer\n \"\"\"\n sess = TFOptimizer._get_or_create_session(session)\n grads, variables = TFOptimizer._get_vars_grads(loss)\n dataset = TFOptimizer._get_dataset_from_loss(loss)\n inputs = dataset._original_tensors\n if isinstance(inputs, tuple) and len(inputs) == 2:\n inputs, labels = inputs\n else:\n labels = []\n\n inputs = nest.flatten(inputs)\n labels = nest.flatten(labels)\n\n if clip_value is not None:\n if isinstance(clip_value, float) or isinstance(clip_value, int):\n if clip_value <= 0:\n ValueError(\"The clip_value argument should be positive number\")\n clip_value = (-float(clip_value), float(clip_value))\n\n if not isinstance(clip_value, tuple):\n raise ValueError(\"The clip_value argument should be\" +\n \" a positive float/int which clips to\" +\n \" (-clip_value, clip_value); \" +\n \"or a tuple which clips to (min_value, max_value)\")\n\n if val_method is not None:\n val_methods = to_list(val_method)\n if metrics is None:\n metrics = {}\n\n for i, method in enumerate(val_methods):\n metrics['bigdl_metirc_' + str(i)] = BigDLMetric(method, val_outputs, val_labels)\n\n return TFOptimizer._from_grads(loss, sess, inputs, labels, grads, variables, dataset,\n optim_method, val_split, clip_norm, clip_value,\n metrics, tensor_with_value, session_config,\n model_dir, updates)\n\n @staticmethod\n def export_training_model(export_dir, loss, sess, inputs, labels=None, predictions=None,\n metrics=None, tensor_with_value=None, updates=None):\n\n grads, variables = TFOptimizer._get_vars_grads(loss)\n\n TFModel.export(export_dir, loss, sess, inputs, labels, predictions, grads, variables,\n loss.graph, tensor_with_value, metrics, updates)\n logging.info(\"Exported TensorFlow model in {} for training\".format(export_dir))\n\n @classmethod\n def from_keras(cls, keras_model, dataset, optim_method=None, val_split=0.0,\n session_config=None, model_dir=None):\n \"\"\"\n Create a TFOptimizer from a tensorflow.keras model. The model must be compiled.\n :param keras_model: the tensorflow.keras model, which must be compiled.\n :param dataset: a TFDataset\n :param optim_method: the optimization method to be used, such as bigdl.optim.optimizer.Adam\n :param val_split: Float between 0 and 1. Fraction of the training data to be used as\n validation data.\n :return:\n \"\"\"\n import tensorflow.keras.backend as K\n\n model_inputs = keras_model.inputs\n if hasattr(keras_model, \"targets\"):\n model_targets = keras_model.targets\n else:\n model_targets = keras_model._targets\n\n loss = keras_model.total_loss\n variables = keras_model._collected_trainable_weights\n variables.sort(key=lambda variable: variable.name)\n keras_optimizer = keras_model.optimizer\n\n grads = K.gradients(loss, variables)\n if None in grads:\n raise ValueError('An operation has `None` for gradient. 
'\n 'Please make sure that all of your ops have a '\n 'gradient defined (i.e. are differentiable). '\n 'Common ops without gradient: '\n 'K.argmax, K.round, K.eval.')\n clip_norm = None\n clip_value = None\n if hasattr(keras_optimizer, 'clipnorm'):\n clip_norm = keras_optimizer.clipnorm\n if hasattr(keras_optimizer, 'clipvalue'):\n clip_value = (-keras_optimizer.clipvalue, keras_optimizer.clipvalue)\n\n sess = K.get_session()\n if optim_method is None:\n optim_method = keras_optimizer\n optim_method = to_bigdl_optim_method(optim_method)\n\n if keras_model.metrics and (dataset.get_validation_data() is not None or val_split != 0.0):\n if isinstance(keras_model.metrics, dict):\n raise ValueError(\n \"different metrics for different outputs are not supported right now\")\n\n if dataset.get_validation_data() is None and val_split == 0.0:\n raise ValueError(\"Validation data is not specified. Please set \" +\n \"val_rdd in TFDataset, or set val_split larger than zero\")\n\n if len(keras_model.outputs) > 1:\n if not all([name.endswith(\"loss\") for name in keras_model.metrics_names]):\n raise ValueError(\"metrics (except loss) for multi-head model is not supported\")\n else:\n bigdl_val_methods = [Loss()]\n val_outputs = keras_model.outputs\n val_labels = model_targets\n else:\n bigdl_val_methods = \\\n [to_bigdl_metric(m, keras_model.loss) for m in keras_model.metrics_names]\n val_outputs = keras_model.outputs\n val_labels = model_targets\n else:\n val_outputs = None\n val_labels = None\n bigdl_val_methods = None\n\n tensor_with_value = {\n K.learning_phase(): [True, False]\n }\n\n updates = keras_model.updates\n\n metrics = None\n\n if bigdl_val_methods is not None:\n val_methods = to_list(bigdl_val_methods)\n metrics = {}\n for i, method in enumerate(val_methods):\n metrics['bigdl_metirc_' + str(i)] = BigDLMetric(method, val_outputs, val_labels)\n\n tf_model = TFModel.create(loss, sess, model_inputs, model_targets, keras_model.outputs,\n grads, variables, loss.graph,\n tensor_with_value, session_config, metrics,\n updates, model_dir=None)\n\n return cls(tf_model, optim_method, sess=sess, dataset=dataset, val_split=val_split,\n clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)\n\n def set_constant_gradient_clipping(self, min_value, max_value):\n \"\"\"\n Configure constant clipping settings.\n\n :param min_value: the minimum value to clip by\n :param max_value: the maxmimum value to clip by\n \"\"\"\n self.estimator.set_constant_gradient_clipping(min_value, max_value)\n\n def set_gradient_clipping_by_l2_norm(self, clip_norm):\n \"\"\"\n Configure L2 norm clipping settings.\n :param clip_norm: gradient L2-Norm threshold\n \"\"\"\n self.estimator.set_l2_norm_gradient_clipping(clip_norm)\n\n def optimize(self, end_trigger=None, checkpoint_trigger=None):\n \"\"\"\n Run the training loop of the this optimizer\n :param end_trigger: BigDL's Trigger to indicate when to stop the training.\n :param checkpoint_trigger: When to save a checkpoint and evaluate model.\n \"\"\"\n if end_trigger is None:\n end_trigger = MaxEpoch(1)\n\n if checkpoint_trigger is None:\n checkpoint_trigger = EveryEpoch()\n\n if self.tf_model.val_methods and self.val_rdd is not None:\n self.estimator.train_minibatch(train_set=self.training_rdd,\n criterion=self.tf_model.criterion,\n end_trigger=end_trigger,\n checkpoint_trigger=checkpoint_trigger,\n validation_set=self.val_rdd,\n validation_method=self.tf_model.val_methods)\n else:\n self.estimator.train_minibatch(train_set=self.training_rdd,\n 
criterion=self.tf_model.criterion,\n end_trigger=end_trigger)\n\n self.tf_model.training_helper_layer.get_weights_to_python()\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.read_json"
],
[
"tensorflow.shape",
"tensorflow.get_collection",
"tensorflow.keras.backend.get_session",
"tensorflow.keras.backend.learning_phase",
"tensorflow.keras.backend.gradients",
"tensorflow.assign",
"tensorflow.placeholder",
"tensorflow.is_numeric_tensor",
"tensorflow.identity",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.to_float",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.group",
"tensorflow.tables_initializer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
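Illustrative aside (not part of the dataset row above): the `_get_vars_grads` helper in the Analytics Zoo `TFOptimizer` source pairs gradients with variables by enumerating them through a zero-learning-rate optimizer. Below is a minimal sketch of that pattern in plain TensorFlow 1.x, assuming a TF 1.x graph-mode environment; the toy loss and variable names are placeholders, not values from the record.

```python
# Sketch only: reproduce the (grad, variable) pairing trick used by
# TFOptimizer._get_vars_grads in the record above, with a toy TF 1.x graph.
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3])
w = tf.Variable(tf.zeros([3, 1]), name="w")
b = tf.Variable(tf.zeros([1]), name="b")
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) + b))

# A throwaway GradientDescentOptimizer with lr=0 is used only to enumerate
# (gradient, variable) pairs; the pairs are then sorted by variable name,
# as in the record, and split into parallel lists.
grads_vars = tf.train.GradientDescentOptimizer(0).compute_gradients(loss)
grads_vars.sort(key=lambda gv: gv[1].name)
grads = [g for g, v in grads_vars if g is not None]
variables = [v for g, v in grads_vars if g is not None]
print([v.name for v in variables])
```

Sorting by name mirrors the note in the `TFOptimizer` docstring that grads and variables must be name-sorted when several optimization methods are applied to one model.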
YanivAvrahami/AI_is_Math
|
[
"d34add0b5ec5f7efeefe856d67f3ff502a6a4b01"
] |
[
"c_02b_filtering_and_resampling/fft.py"
] |
[
"# %%\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport warnings\n\ndef fftPlot(sig, dt=None, block=False, plot=True):\n # here it's assumes analytic signal (real signal...)- so only half of the axis is required\n\n if dt is None:\n dt = 1\n t = np.arange(0, sig.shape[-1])\n xLabel = 'samples'\n else:\n t = np.arange(0, sig.shape[-1]) * dt\n xLabel = 'freq [Hz]'\n\n if sig.shape[0] % 2 != 0:\n warnings.warn(\"signal prefered to be even in size, autoFixing it...\")\n t = t[0:-1]\n sig = sig[0:-1]\n\n sigFFT = np.fft.fft(sig) / t.shape[0] # divided by size t for coherent magnitude\n\n freq = np.fft.fftfreq(t.shape[0], d=dt)\n\n # plot analytic signal - right half of freq axis needed only...\n firstNegInd = np.argmax(freq < 0)\n freqAxisPos = freq[0:firstNegInd]\n sigFFTPos = 2 * sigFFT[0:firstNegInd] # *2 because of magnitude of analytic signal\n\n if plot:\n plt.figure()\n plt.plot(freqAxisPos, np.abs(sigFFTPos))\n plt.xlabel(xLabel)\n plt.ylabel('mag')\n plt.title('Analytic FFT plot')\n plt.show(block=block)\n\n return sigFFTPos, freqAxisPos\n\n\n# %%\ndt = 1 / 1000\nf0 = 10 # 1 / dt / 4\n\nt = np.arange(0, 1 + dt, dt)\nsig = np.sin(2 * np.pi * f0 * t+100) + 10 * np.sin(2 * np.pi * f0 / 2 * t)\nsig = np.sin(2 * np.pi * f0 * t) #+ 10 * np.sin(2 * np.pi * f0 / 2 * t)\n\nplt.figure()\nplt.plot(t,sig)\n# %%\nfftPlot(sig, dt=dt, block=True)\n\n\n# %%\n"
] |
[
[
"numpy.abs",
"numpy.fft.fft",
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"numpy.argmax",
"numpy.fft.fftfreq",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.6",
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.21",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
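Illustrative aside (not part of the dataset row above): the `fftPlot` helper in the record computes a single-sided, coherently scaled magnitude spectrum. Here is a standalone NumPy sketch of the same computation with an arbitrary 10 Hz test tone; it skips the plotting and only reads off the dominant frequency.

```python
# Minimal standalone sketch of the spectrum computed by fftPlot above:
# divide the FFT by the sample count for coherent magnitude, keep the
# non-negative frequencies, and double them for a real-valued signal.
import numpy as np

dt = 1 / 1000            # sample spacing [s]
f0 = 10                  # tone frequency [Hz]
t = np.arange(0, 1, dt)
sig = np.sin(2 * np.pi * f0 * t)

spectrum = np.fft.fft(sig) / t.shape[0]
freq = np.fft.fftfreq(t.shape[0], d=dt)
pos = freq >= 0
mag = 2 * np.abs(spectrum[pos])

print("dominant frequency [Hz]:", freq[pos][np.argmax(mag)])  # ~10 Hz
```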
ICME2022/R-S-R
|
[
"0e938082a0ea381cff7f0348deb65826848c43e4",
"0e938082a0ea381cff7f0348deb65826848c43e4"
] |
[
"selfsupervision/MirrorSSB/JXWithChannel.py",
"ops/tdn_net.py"
] |
[
"import torch\nimport random\n\n\ndef create5Dimages(images):\n # images : 4D tensor with shape [BT,C,H,W]\n T = 16\n B = images.size(0) // T\n C = images.size(1)\n H = images.size(2)\n W = images.size(3)\n image = torch.tensor([]).cuda()\n for b in range(B):\n bimage = images[b * T:(b + 1) * T, :, :, :].float().view(1, T, C, H, W).cuda()\n image = torch.cat([image, bimage], dim=0)\n return image # [B,T,C,H,W]\n\ndef create4Dimages(images):\n # images : 5D tensor with shape [B,T,C,H,W]\n B,T,C,H,W = images.size()\n image = torch.tensor([]).cuda()\n for b in range(B):\n image = torch.cat([image,images[b]],dim=0)\n return image\n\n\ndef TmirrorZong(image):\n #image[C,H,W] W->L\n C,H,L = image.size()\n index = list(range(0,L,1))\n index.reverse()\n return image[:,:,index]\n\ndef TmirrorHeng(image):\n #image[C,H,W] \n C,H,W = image.size()\n index = list(range(0,H,1))\n index.reverse()\n return image[:,index]\n\n\n\n\ndef mirror(images,labels):\n #images[T,C,H,W]\n #labels[T]\n\n T,C,H,W = images.size()\n result = torch.Tensor().cuda()\n for l in range(len(labels)):\n if(labels[l]==0):\n image = TmirrorZong(images[l]).view(1,C,H,W)\n image_c = torch.tensor([]).cuda()\n index = list(range(0,C,1))\n index.reverse()\n image_c = image[:,index]\n\n\n #image = mirror3DZong(images[l]).view(1,C,H,W)\n result = torch.cat([result,image_c],0)\n elif(labels[l]==1):\n image = TmirrorHeng(images[l]).view(1,C,H,W)\n #image = mirror3DHeng(images[l]).view(1,C,H,W)\n\n image_c = torch.tensor([]).cuda()\n index = list(range(0,C,1))\n index.reverse()\n image_c = image[:,index]\n\n result = torch.cat([result,image_c],0)\n \n return result \n \ndef getLabel():\n T = 16\n for t in range(T):\n\n label_T = [random.randint(0,1) for _ in range(T)]\n return torch.tensor(label_T).float().cuda()\n\n\ndef Mirror_Self_Supervision(images):\n #images [BT,C,H,W]\n Bt,C,H,W = images.size()\n T = 16\n B = Bt//T\n label_domain = (0,1)\n\n image = create5Dimages(images) #[B,T,C,H,W]\n mirrorImage = torch.Tensor().cuda()\n mirrorLabel = torch.Tensor().cuda()\n for b in range(B):\n label_T = getLabel() #[T]\n mirror_image_T = mirror(image[b],label_T) # image[b]:[T,C,H,W] label_T:[T]\n mirrorLabel = torch.cat([mirrorLabel,label_T],0)\n mirrorImage = torch.cat([mirrorImage,mirror_image_T.view(1,T,C,H,W)],0)\n #5D->4D\n mirrorImage = create4Dimages(mirrorImage)\n return mirrorImage,mirrorLabel\n\n\n\n\n\n\n\n\n\n\n",
"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F \nfrom torch.nn.init import normal_, constant_\nimport torch.nn.functional as F\nfrom ops.base_module import *\nimport torch.nn.functional as F\nimport random\n\nimport selfsupervision.MirrorSSB.RBRF_2 as rbrf\nimport selfsupervision.shuffle.reverse_random_batch_channel as rbrc\n\nimport ops.Strongly_constrained_self_attention as sattention\n\n\nclass TDN_Net(nn.Module):\n\n def __init__(self,resnet_model,resnet_model1,apha,belta):\n super(TDN_Net, self).__init__()\n\n self.conv1 = list(resnet_model.children())[0]\n self.bn1 = list(resnet_model.children())[1]\n self.relu = nn.ReLU(inplace=True)\n \n # implement conv1_5 and inflate weight \n self.conv1_temp = list(resnet_model1.children())[0]\n params = [x.clone() for x in self.conv1_temp.parameters()]\n kernel_size = params[0].size()\n new_kernel_size = kernel_size[:1] + (3 * 4,) + kernel_size[2:]\n new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n self.conv1_5 = nn.Sequential(nn.Conv2d(12,64,kernel_size=7,stride=2,padding=3,bias=False),nn.BatchNorm2d(64),nn.ReLU(inplace=True))\n self.conv1_5[0].weight.data = new_kernels\n\n self.maxpool_diff = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n self.resnext_layer1 =nn.Sequential(*list(resnet_model1.children())[4])\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n self.layer1_bak = nn.Sequential(*list(resnet_model.children())[4])\n self.layer2_bak = nn.Sequential(*list(resnet_model.children())[5])\n self.layer3_bak = nn.Sequential(*list(resnet_model.children())[6])\n self.layer4_bak = nn.Sequential(*list(resnet_model.children())[7])\n\n self.avgpool = nn.AvgPool2d(7, stride=1)\n\n\n self.avg_diff = nn.AvgPool2d(kernel_size=2,stride=2)\n self.fc = list(resnet_model.children())[8]\n \n self.apha = apha\n self.belta = belta\n\n self.fc_shuffle_rbrf = nn.Linear(2048,2048,bias = True)\n self.fc_shuffle_rbrc = nn.Linear(1,2048,bias = True)\n\n\n\n\n self.avgpool_rbrf = nn.AvgPool2d(7,stride=1)\n self.avgpool_rbrc = nn.AvgPool2d(7,stride=1)\n\n \n self.scsattention = sattention.SCSAttention(2048,4,8,7,7,256)\n \n\n\n\n\n def forward(self, x):\n x1, x2, x3, x4, x5 = x[:,0:3,:,:], x[:,3:6,:,:], x[:,6:9,:,:], x[:,9:12,:,:], x[:,12:15,:,:]\n x_c5 = self.conv1_5(self.avg_diff(torch.cat([x2-x1,x3-x2,x4-x3,x5-x4],1).view(-1,12,x2.size()[2],x2.size()[3])))\n x_diff = self.maxpool_diff(1.0/1.0*x_c5) \n temp_out_diff1 = x_diff \n x_diff = self.resnext_layer1(x_diff)\n x = self.conv1(x3)\n x = self.bn1(x)\n x = self.relu(x)\n #fusion layer1\n x = self.maxpool(x)\n temp_out_diff1 = F.interpolate(temp_out_diff1, x.size()[2:])\n x = self.apha*x + self.belta*temp_out_diff1\n #fusion layer2\n x = self.layer1_bak(x)\n x_diff = F.interpolate(x_diff, x.size()[2:])\n x = self.apha*x + self.belta*x_diff\n x = self.layer2_bak(x)\n x = self.layer3_bak(x) \n x = self.layer4_bak(x) #[64,2048,7,7]\n\n '''images,labels = revbc.reverse_B_channel(x)'''\n '''images,labels = DCnC.Mirror_Self_Supervision(x)'''\n\n #images,labels = JXnC.Mirror_Self_Supervision(x)\n #images,labels = JXWC.Mirror_Self_Supervision(x)\n images_rbrf,labels_rbrf = rbrf.RBRF(x)\n images_rbrc,labels_rbrc = rbrc.RBRC(x) #[64,1,7,7]\n \n \n \n labels_rbrf = labels_rbrf.long()\n labels_rbrc = labels_rbrc.long()\n\n x = self.scsattention(x,images_rbrf,images_rbrc)\n\n ##########################################################\n\n\n images_rbrf = 
self.avgpool_rbrf(images_rbrf) #[64,1,1,1]\n images_rbrc = self.avgpool_rbrc(images_rbrc) #[64,1,1,1]\n\n\n x = self.avgpool(x)\n x = x.view(x.size(0),-1)\n\n\n y = images_rbrf.view(32,2048) #[64,1]\n z = images_rbrc.view(32,1)\n \n \n \n\n\n x = self.fc(x) #[64,2048]\n #mx = self.fc(mx)\n #print(mx.shape,'fc')[64,2048]\n \n\n y = self.fc_shuffle_rbrf(y)#[64,2048]\n z = self.fc_shuffle_rbrc(z)\n\n \n return x,y,labels_rbrf,z,labels_rbrc\n\ndef tdn_net(base_model=None,num_segments=8,pretrained=True, **kwargs):\n if(\"50\" in base_model):\n resnet_model = fbresnet50(num_segments, pretrained)\n resnet_model1 = fbresnet50(num_segments, pretrained)\n else:\n resnet_model = fbresnet101(num_segments, pretrained)\n resnet_model1 = fbresnet101(num_segments, pretrained)\n\n if(num_segments is 8):\n model = TDN_Net(resnet_model,resnet_model1,apha=0.5,belta=0.5)\n else:\n model = TDN_Net(resnet_model,resnet_model1,apha=0.75,belta=0.25)\n return model\n\n"
] |
[
[
"torch.tensor",
"torch.Tensor",
"torch.cat"
],
[
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
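Illustrative aside (not part of the dataset row above): `TmirrorZong` / `TmirrorHeng` in the record mirror a frame by indexing the width or height dimension with a reversed index list. A CPU-only sketch of that indexing, checked against `torch.flip`; the CUDA transfers and channel reversal from the record are deliberately left out, and the tensor shape is a toy choice.

```python
# Sketch only: flipping a [C, H, W] tensor by reversed-index selection,
# as done by TmirrorZong (width) and TmirrorHeng (height) in the record.
import torch

image = torch.arange(2 * 3 * 4, dtype=torch.float32).view(2, 3, 4)  # [C, H, W]

w_index = list(range(image.size(2)))[::-1]
h_index = list(range(image.size(1)))[::-1]

flipped_w = image[:, :, w_index]   # mirror along width  (TmirrorZong)
flipped_h = image[:, h_index]      # mirror along height (TmirrorHeng)

assert torch.equal(flipped_w, torch.flip(image, dims=[2]))
assert torch.equal(flipped_h, torch.flip(image, dims=[1]))
```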
ahhuhtal/scikit-fem
|
[
"84ad97bfd2a92d28694f54f6897d97966bda31df",
"84ad97bfd2a92d28694f54f6897d97966bda31df"
] |
[
"docs/examples/ex05.py",
"tests/test_assembly.py"
] |
[
"r\"\"\"Integral condition.\n\nThis short example demonstrates the implementation of an integral boundary\n condition\n\n.. math::\n \n \\int_\\Gamma \\nabla u \\cdot \\boldsymbol{n} \\, \\mathrm{d}s = 1\n\non a part of the boundary of the domain :math:`\\Gamma \\subset \\partial \\Omega`\n for the Laplace operator. In this example, :math:`\\Gamma` is the right\n boundary of the unit square and the solution satisfies :math:`u=0` on the\n bottom boundary and :math:`\\nabla u \\cdot \\boldsymbol{n} = 0` on the rest of\n the boundaries. The constraint is introduced via a Lagrange multiplier leading\n to a saddle point system.\n\n\"\"\"\n\nfrom skfem import *\nfrom skfem.helpers import dot, grad\nfrom skfem.models.poisson import laplace\n\nm = MeshTri()\nm.refine(5)\n\ne = ElementTriP1()\n\nib = InteriorBasis(m, e)\nfb = FacetBasis(m, e)\n\n\n@BilinearForm\ndef facetbilinf(u, v, w):\n n = w.n\n x = w.x\n return -dot(grad(u), n) * v * (x[0] == 1.0)\n\n\n@LinearForm\ndef facetlinf(v, w):\n n = w.n\n x = w.x\n return -dot(grad(v), n) * (x[0] == 1.0)\n\n\nA = asm(laplace, ib)\nB = asm(facetbilinf, fb)\n\nb = asm(facetlinf, fb)\n\nD = ib.find_dofs({'plate': m.facets_satisfying(lambda x: (x[1] == 0.0))})\nI = ib.complement_dofs(D)\n\nimport scipy.sparse\nb = scipy.sparse.csr_matrix(b)\nK = scipy.sparse.bmat([[A+B, b.T], [b, None]], 'csr')\n\nimport numpy as np\nf = np.concatenate((np.zeros(A.shape[0]), -1.0*np.ones(1)))\n\nI = np.append(I, K.shape[0] - 1)\n\nx = solve(*condense(K, f, I=I))\n\nif __name__ == \"__main__\":\n from os.path import splitext\n from sys import argv\n from skfem.visuals.matplotlib import plot, savefig\n plot(m, x[:-1], colorbar=True)\n savefig(splitext(argv[0])[0] + '_solution.png')\n",
"import unittest\n\nimport numpy as np\n\nfrom skfem import (BilinearForm, LinearForm, Functional, asm, bilinear_form,\n linear_form, solve, functional)\nfrom skfem.element import (ElementQuad1, ElementQuadS2, ElementHex1,\n ElementHexS2, ElementTetP0, ElementTetP1,\n ElementTetP2, ElementTriP1, ElementQuad2,\n ElementTriMorley, ElementVectorH1)\nfrom skfem.mesh import MeshQuad, MeshHex, MeshTet, MeshTri\nfrom skfem.assembly import FacetBasis, InteriorBasis\n\n\nclass IntegrateOneOverBoundaryQ1(unittest.TestCase):\n elem = ElementQuad1()\n\n def createBasis(self):\n m = MeshQuad()\n m.refine(6)\n self.fbasis = FacetBasis(m, self.elem)\n self.boundary_area = 4.0000\n\n def runTest(self):\n self.createBasis()\n\n @BilinearForm\n def uv(u, v, w):\n return u * v\n\n B = asm(uv, self.fbasis)\n\n @LinearForm\n def gv(v, w):\n return 1.0 * v\n\n g = asm(gv, self.fbasis)\n\n ones = np.ones(g.shape)\n\n self.assertAlmostEqual(ones @ g, self.boundary_area, places=4)\n self.assertAlmostEqual(ones @ (B @ ones), self.boundary_area, places=4)\n\n\nclass IntegrateOneOverBoundaryS2(IntegrateOneOverBoundaryQ1):\n elem = ElementQuadS2()\n\n\nclass IntegrateOneOverBoundaryHex1(IntegrateOneOverBoundaryQ1):\n\n def createBasis(self):\n m = MeshHex()\n m.refine(3)\n self.fbasis = FacetBasis(m, ElementHex1())\n self.boundary_area = 6.000\n\n\nclass IntegrateOneOverBoundaryHex1_2(IntegrateOneOverBoundaryQ1):\n\n def createBasis(self):\n m = MeshHex()\n m.refine(3)\n self.fbasis = FacetBasis(m, ElementHexS2())\n self.boundary_area = 6.000\n\n\nclass IntegrateFuncOverBoundary(unittest.TestCase):\n\n def runTest(self):\n cases = [(MeshHex, ElementHex1),\n (MeshTet, ElementTetP1),\n (MeshTet, ElementTetP0)]\n\n for (mtype, etype) in cases:\n m = mtype()\n m.refine(3)\n fb = FacetBasis(m, etype())\n\n @BilinearForm\n def uv(u, v, w):\n x, y, z = w.x\n return x ** 2 * y ** 2 * z ** 2 * u * v\n\n B = asm(uv, fb)\n\n ones = np.ones(B.shape[0])\n\n self.assertAlmostEqual(ones @ (B @ ones), 0.3333333333, places=5)\n\n\nclass IntegrateFuncOverBoundaryPart(unittest.TestCase):\n case = (MeshHex, ElementHex1)\n\n def runTest(self):\n mtype, etype = self.case\n m = mtype()\n m.refine(3)\n bnd = m.facets_satisfying(lambda x: x[0] == 1.0)\n fb = FacetBasis(m, etype(), facets=bnd)\n\n @BilinearForm\n def uv(u, v, w):\n x, y, z = w.x\n return x ** 2 * y ** 2 * z ** 2 * u * v\n\n B = asm(uv, fb)\n ones = np.ones(B.shape[0])\n\n self.assertAlmostEqual(ones @ (B @ ones), 0.11111111, places=5)\n\n\nclass IntegrateFuncOverBoundaryPartHexS2(IntegrateFuncOverBoundaryPart):\n case = (MeshHex, ElementHexS2)\n\n\nclass IntegrateFuncOverBoundaryPartTetP1(IntegrateFuncOverBoundaryPart):\n case = (MeshTet, ElementTetP1)\n\n\nclass IntegrateFuncOverBoundaryPartTetP2(IntegrateFuncOverBoundaryPart):\n case = (MeshTet, ElementTetP2)\n\n\nclass IntegrateFuncOverBoundaryPartTetP0(IntegrateFuncOverBoundaryPart):\n case = (MeshTet, ElementTetP0)\n\n\nclass BasisInterpolator(unittest.TestCase):\n case = (MeshTri, ElementTriP1)\n\n def initOnes(self, basis):\n return np.ones(basis.N)\n\n def runTest(self):\n mtype, etype = self.case\n m = mtype()\n m.refine(3)\n e = etype()\n ib = InteriorBasis(m, e)\n\n x = self.initOnes(ib)\n f = ib.interpolator(x)\n\n X = np.array([np.sin(m.p[0, :]), np.sin(3. 
* m.p[1, :])])\n self.assertTrue(np.sum(f(X) - 1.0) < 1.0e-10)\n\n\nclass BasisInterpolatorTriP2(BasisInterpolator):\n case = (MeshQuad, ElementQuad1)\n\n\nclass BasisInterpolatorQuad1(BasisInterpolator):\n case = (MeshQuad, ElementQuad1)\n\n\nclass BasisInterpolatorQuad2(BasisInterpolator):\n case = (MeshQuad, ElementQuad2)\n\n\nclass BasisInterpolatorQuadS2(BasisInterpolator):\n case = (MeshQuad, ElementQuadS2)\n\n\nclass BasisInterpolatorMorley(BasisInterpolator):\n case = (MeshTri, ElementTriMorley)\n\n def initOnes(self, basis):\n @bilinear_form\n def mass(u, du, ddu, v, dv, ddv, w):\n return u * v\n\n @linear_form\n def ones(v, dv, ddv, w):\n return 1.0 * v\n\n M = asm(mass, basis)\n f = asm(ones, basis)\n\n return solve(M, f)\n\n\nclass NormalVectorTestTri(unittest.TestCase):\n case = (MeshTri(), ElementTriP1())\n test_integrate_volume = True\n intorder = None\n\n def runTest(self):\n self.case[0].refine()\n\n if self.intorder is not None:\n basis = FacetBasis(*self.case, intorder=self.intorder)\n else:\n basis = FacetBasis(*self.case)\n\n @linear_form\n def linf(v, dv, w):\n return np.sum(w.n ** 2, axis=0) * v\n\n b = asm(linf, basis)\n m = self.case[0]\n self.assertAlmostEqual(b @ np.ones(b.shape),\n 2 * m.p.shape[0],\n places=10)\n\n if self.test_integrate_volume:\n # by Gauss theorem this integrates to one\n for itr in range(m.p.shape[0]):\n @linear_form\n def linf(v, dv, w):\n return w.n[itr] * v\n\n b = asm(linf, basis)\n self.assertAlmostEqual(b @ m.p[itr, :], 1.0, places=5)\n\n\nclass NormalVectorTestTet(NormalVectorTestTri):\n case = (MeshTet(), ElementTetP1())\n\n\nclass NormalVectorTestTetP2(NormalVectorTestTri):\n case = (MeshTet(), ElementTetP2())\n test_integrate_volume = False\n\n\nclass NormalVectorTestQuad(NormalVectorTestTri):\n case = (MeshQuad(), ElementQuad1())\n\n\nclass NormalVectorTestHex(NormalVectorTestTri):\n case = (MeshHex(), ElementHex1())\n intorder = 3\n\n\nclass NormalVectorTestHexS2(NormalVectorTestTri):\n case = (MeshHex(), ElementHexS2())\n intorder = 3\n test_integrate_volume = False\n\n\nclass EvaluateFunctional(unittest.TestCase):\n\n def runTest(self):\n m = MeshQuad()\n m.refine(3)\n e = ElementQuad1()\n basis = InteriorBasis(m, e)\n\n @functional\n def x_squared(w):\n return w.x[0] ** 2\n\n y = asm(x_squared, basis)\n\n self.assertAlmostEqual(y, 1. 
/ 3.)\n self.assertEqual(len(x_squared.elemental(basis)),\n m.t.shape[1])\n\n\nclass TestRefinterp(unittest.TestCase):\n\n def runTest(self):\n m = MeshQuad()\n m.refine(2)\n e = ElementQuad1()\n basis = InteriorBasis(m, e)\n\n M, X = basis.refinterp(m.p[0], 3)\n\n self.assertEqual(M.p.shape[1], len(X))\n\n\nclass TestCompositeAssembly(unittest.TestCase):\n\n def runTest(self):\n\n m = MeshHex()\n # check that these assemble to the same matrix\n ec = ElementHex1() * ElementHex1() * ElementHex1()\n ev = ElementVectorH1(ElementHex1())\n basisc = InteriorBasis(m, ec)\n basisv = InteriorBasis(m, ev)\n\n @BilinearForm\n def bilinf_ev(u, v, w):\n from skfem.helpers import dot\n return dot(u, v)\n\n @BilinearForm\n def bilinf_ec(ux, uy, uz, vx, vy, vz, w):\n return ux * vx + uy * vy + uz * vz\n\n Kv = asm(bilinf_ev, basisv)\n Kc = asm(bilinf_ec, basisc)\n\n self.assertAlmostEqual(np.sum(np.sum((Kv - Kc).todense())), 0.)\n\n\nclass TestFieldInterpolation(unittest.TestCase):\n\n def runTest(self):\n\n m = MeshTri()\n e = ElementTriP1()\n basis = InteriorBasis(m, e)\n\n @Functional\n def feqx(w):\n from skfem.helpers import grad\n f = w['func'] # f(x) = x\n return grad(f)[0] # f'(x) = 1\n\n func = basis.interpolate(m.p[0])\n\n # integrate f'(x) = 1 over [0, 1]^2\n self.assertAlmostEqual(feqx.assemble(basis, func=func), 1.)\n\n\nclass TestFieldInterpolation_2(unittest.TestCase):\n\n def runTest(self):\n\n m = MeshTri()\n e = ElementTriP1()\n basis = InteriorBasis(m, e)\n\n @Functional\n def feqx(w):\n from skfem.helpers import grad\n f = w['func'] # f(x, y) = x\n g = w['gunc'] # g(x, y) = y\n return grad(f)[0] + grad(g)[1]\n\n func = basis.interpolate(m.p[0])\n gunc = basis.interpolate(m.p[1])\n\n self.assertAlmostEqual(feqx.assemble(basis, func=func, gunc=gunc), 2.)\n\n\nclass TestComplexValuedAssembly(unittest.TestCase):\n\n def runTest(self):\n\n m = MeshTri()\n e = ElementTriP1()\n basis = InteriorBasis(m, e)\n self.interior_area = 1\n\n @BilinearForm(dtype=np.complex64)\n def complexmass(u, v, w):\n return 1j*u*v\n\n @LinearForm(dtype=np.complex64)\n def complexfun(v, w):\n return 1j*v\n\n M = asm(complexmass, basis)\n f = asm(complexfun, basis)\n ones = np.ones(M.shape[1])\n\n self.assertAlmostEqual(np.dot(ones, M @ ones), 1j*self.interior_area)\n self.assertAlmostEqual(np.dot(ones, f), 1j*self.interior_area)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.append",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.dot",
"numpy.sum",
"numpy.sin",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
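Illustrative aside (not part of the dataset row above): ex05 in the record enforces an integral boundary condition through a Lagrange multiplier, which turns the linear system into a saddle-point block matrix assembled with `scipy.sparse.bmat`. Below is a toy sketch of that block layout; `A`, `B` and `b` are random placeholders standing in for the FEM matrices, not values from the record.

```python
# Toy sketch of the saddle-point assembly pattern from ex05 above:
# K = [[A + B, b^T], [b, 0]], built as one sparse matrix with bmat.
import numpy as np
import scipy.sparse

n = 5
A = scipy.sparse.random(n, n, density=0.5, format="csr")
B = scipy.sparse.random(n, n, density=0.5, format="csr")
b = scipy.sparse.csr_matrix(np.ones((1, n)))   # one integral constraint row

K = scipy.sparse.bmat([[A + B, b.T], [b, None]], "csr")
f = np.concatenate((np.zeros(n), -1.0 * np.ones(1)))

print(K.shape, f.shape)   # (6, 6) (6,)
```

The single constraint adds exactly one row and column, so the system grows by one unknown (the multiplier), which matches the `-1.0*np.ones(1)` appended to the load vector in the example.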
dwervin/pyhampel
|
[
"84c3617d5f6f7ee518b45d7bd4856235d7b87617"
] |
[
"pyhampel/src/HampelFiltering.py"
] |
[
"import numpy as np\nimport pandas as pd\n\n\ndef hampel_filter_df(df: pd.DataFrame, vals_col: str, time_col=None, win_size=30, num_dev=3, center_win=True) -> pd.DataFrame:\n \"\"\"\n This function takes in dataframe containing time series of values, applies Hampel filter on\n these values, and returns dataframe consisting of original values columns along with\n the Hampel filtered data, outlier values and boolean flags where outliers found.\n\n Parameters\n ----------\n df: pd.DataFrame\n data from containing time series that needs to be Hampel filtered\n vals_col: str\n Single column name that contains values that need to be filtered.\n time_col: str\n Name of column that contains dates or timestamps\n win_size: int\n Size of sliding window for filtering. Essentially the number of time steps to be considered when filtering.\n num_dev: int\n Number of standard deviations to consider when detecting values that would be considered outliers.\n center_win: Boolean\n Boolean value that determines whether the window is centered about the point being filtered? Default=True.\n If False, point is at the leading edge (i.e. right side) of window calculation.\n\n Returns\n -------\n Function returns a full dataframe consisting of original values columns along with\n the Hampel filtered data, outlier values and boolean flags where outliers found.\n \"\"\"\n #print(\"IN HAMPEL_FILTER_DF\")\n\n if (time_col != None):\n if (time_col not in list(df.columns)):\n raise Exception(\"Timestamp column '{}' is missing!\".format(time_col))\n elif (time_col in list(df.columns)):\n if (not np.issubdtype(df[time_col].dtype, np.datetime64)):\n if (not np.issubdtype(pd.to_datetime(df[time_col]).dtype, np.datetime64)):\n raise Exception(\"Timestamp column '{}' is not np.datetime64\".format(time_col))\n else:\n df[time_col] = pd.to_datetime(df[time_col])\n drop_cols = set(df.columns) - set([time_col, vals_col])\n # Not really filtered at this point. Just naming appropriately ahead of time.\n orig_vals = df.sort_values(time_col, ascending=True).set_index(time_col).copy()\n filtered = orig_vals.drop(columns=drop_cols).copy()\n else:\n df[time_col] = pd.to_datetime(df[time_col])\n drop_cols = set(df.columns) - set([time_col, vals_col])\n # Not really filtered at this point. 
Just naming appropriately ahead of time.\n orig_vals = df.sort_values(time_col, ascending=True).set_index(time_col).copy()\n filtered = orig_vals.drop(columns=drop_cols).copy()\n\n elif (time_col == None):\n if (not isinstance(df.index, pd.DatetimeIndex)):\n raise Exception(\"DataFrame index is not pd.DatetimeIndex\")\n else:\n df.sort_index(inplace=True)\n drop_cols = set(df.columns) - set([vals_col])\n orig_vals = df.copy()\n filtered = orig_vals.drop(columns=drop_cols).copy()\n\n # Scale factor for estimating standard deviation based upon median value\n L = 1.4826\n\n # Calculate rolling median for the series\n rolling_median = filtered.rolling(window=int(win_size), center=center_win, min_periods=1).median()\n\n # Define a lambda function to apply to the series to calculate Median Absolute Deviation\n MAD = lambda x: np.median(np.abs(x - np.median(x)))\n\n # Calculate rolling MAD series\n rolling_MAD = filtered.rolling(window=(win_size), center=center_win, min_periods=1).apply(MAD)\n\n # Calculate threshold level for filtering based upon the number of standard deviation and\n # constant scaling factor L.\n threshold = int(num_dev) * L * rolling_MAD\n\n # Difference between original values and rolling median\n # Again, \"filtered\" not yet filtered at this point.\n difference = np.abs(filtered - rolling_median)\n\n '''\n # TODO: Look at logic here to possibly not mark as an outlier if threshold value\n is 0.0\n '''\n\n # Flag outliers\n outlier_idx = difference > threshold\n\n # Now it's filtered. This should replace original values with filtered values from the rolling_median\n # dataframe where outliers were found.\n filtered[outlier_idx] = rolling_median[outlier_idx]\n filtered.rename(columns={vals_col: 'FLTRD_VAL'}, inplace=True)\n\n # Capture outliers column\n outliers = orig_vals[outlier_idx].rename(columns={vals_col: 'OUTLIER_VAL'}).drop(columns=drop_cols)\n # Capture outlier IS_OUTLIER column\n outlier_idx.rename(columns={vals_col: 'IS_OUTLIER'}, inplace=True)\n\n # The following returns a full dataframe consisting of original values columns\n # along with the Hampel filtered data, outlier values and boolean flags where outliers found.\n return pd.concat([orig_vals, filtered, outliers, outlier_idx], axis=1)\n"
] |
[
[
"pandas.concat",
"pandas.to_datetime",
"numpy.abs",
"numpy.issubdtype",
"numpy.median"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
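Illustrative aside (not part of the dataset row above): the `hampel_filter_df` docstring describes the filter as a rolling median plus a MAD-based threshold scaled by L = 1.4826. Here is a minimal pandas Series sketch of that recipe; the window size, deviation count and toy signal are arbitrary choices, not values from the record.

```python
# Sketch only: the Hampel recipe described in the docstring above, applied
# to a plain pandas Series instead of the record's DataFrame interface.
import numpy as np
import pandas as pd

s = pd.Series(np.sin(np.linspace(0, 6 * np.pi, 200)))
s.iloc[[25, 110]] += 5.0                      # inject two artificial spikes

win_size, num_dev, L = 15, 3, 1.4826          # L: MAD-to-sigma scale factor
rolling_median = s.rolling(win_size, center=True, min_periods=1).median()
mad = lambda x: np.median(np.abs(x - np.median(x)))
rolling_mad = s.rolling(win_size, center=True, min_periods=1).apply(mad, raw=True)

is_outlier = (s - rolling_median).abs() > num_dev * L * rolling_mad
filtered = s.where(~is_outlier, rolling_median)   # replace flagged points

print("flagged indices:", list(s[is_outlier].index))
```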
BlackHC/uncertainty-baselines
|
[
"1a28be3e41e14d8ab74dfa1e3eed15f113718f03",
"1a28be3e41e14d8ab74dfa1e3eed15f113718f03",
"1a28be3e41e14d8ab74dfa1e3eed15f113718f03"
] |
[
"baselines/jft/deterministic.py",
"baselines/jft/begp.py",
"experimental/language_structure/vrnn/utils.py"
] |
[
"# coding=utf-8\n# Copyright 2022 The Uncertainty Baselines Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Deterministic ViT.\"\"\"\n\nimport functools\nimport itertools\nimport multiprocessing\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom clu import metric_writers\nfrom clu import parameter_overview\nfrom clu import periodic_actions\nfrom clu import preprocess_spec\nimport flax\nimport jax\nimport jax.numpy as jnp\nimport ml_collections.config_flags\nimport numpy as np\nimport robustness_metrics as rm\n\nimport tensorflow as tf\nimport uncertainty_baselines as ub\nimport checkpoint_utils # local file import from baselines.jft\nimport data_uncertainty_utils # local file import from baselines.jft\nimport input_utils # local file import from baselines.jft\nimport ood_utils # local file import from baselines.jft\nimport preprocess_utils # local file import from baselines.jft\nimport train_utils # local file import from baselines.jft\n\n# TODO(dusenberrymw): Open-source remaining imports.\nfewshot = None\n\nml_collections.config_flags.DEFINE_config_file(\n 'config', None, 'Training configuration.', lock_config=True)\nflags.DEFINE_string('output_dir', default=None, help='Work unit directory.')\nflags.DEFINE_integer(\n 'num_cores', default=None, help='Unused. How many devices being used.')\nflags.DEFINE_boolean(\n 'use_gpu', default=None, help='Unused. Whether or not running on GPU.')\nflags.DEFINE_string('tpu', None,\n 'Unused. Name of the TPU. 
Only used if use_gpu is False.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(config, output_dir):\n\n seed = config.get('seed', 0)\n rng = jax.random.PRNGKey(seed)\n tf.random.set_seed(seed)\n\n if config.get('data_dir'):\n logging.info('data_dir=%s', config.data_dir)\n logging.info('Output dir: %s', output_dir)\n tf.io.gfile.makedirs(output_dir)\n\n save_checkpoint_path = None\n if config.get('checkpoint_steps'):\n save_checkpoint_path = os.path.join(output_dir, 'checkpoint.npz')\n\n # Create an asynchronous multi-metric writer.\n writer = metric_writers.create_default_writer(\n output_dir, just_logging=jax.process_index() > 0)\n\n # The pool is used to perform misc operations such as logging in async way.\n pool = multiprocessing.pool.ThreadPool()\n\n def write_note(note):\n if jax.process_index() == 0:\n logging.info('NOTE: %s', note)\n\n write_note('Initializing...')\n\n # Verify settings to make sure no checkpoints are accidentally missed.\n if config.get('keep_checkpoint_steps'):\n assert config.get('checkpoint_steps'), 'Specify `checkpoint_steps`.'\n assert config.keep_checkpoint_steps % config.checkpoint_steps == 0, (\n f'`keep_checkpoint_steps` ({config.checkpoint_steps}) should be'\n f'divisible by `checkpoint_steps ({config.checkpoint_steps}).`')\n\n batch_size = config.batch_size\n batch_size_eval = config.get('batch_size_eval', batch_size)\n if (batch_size % jax.device_count() != 0 or\n batch_size_eval % jax.device_count() != 0):\n raise ValueError(f'Batch sizes ({batch_size} and {batch_size_eval}) must '\n f'be divisible by device number ({jax.device_count()})')\n\n local_batch_size = batch_size // jax.process_count()\n local_batch_size_eval = batch_size_eval // jax.process_count()\n logging.info(\n 'Global batch size %d on %d hosts results in %d local batch size. 
'\n 'With %d devices per host (%d devices total), that\\'s a %d per-device '\n 'batch size.', batch_size, jax.process_count(), local_batch_size,\n jax.local_device_count(), jax.device_count(),\n local_batch_size // jax.local_device_count())\n\n write_note('Initializing train dataset...')\n rng, train_ds_rng = jax.random.split(rng)\n train_ds_rng = jax.random.fold_in(train_ds_rng, jax.process_index())\n train_ds = input_utils.get_data(\n dataset=config.dataset,\n split=config.train_split,\n rng=train_ds_rng,\n process_batch_size=local_batch_size,\n preprocess_fn=preprocess_spec.parse(\n spec=config.pp_train, available_ops=preprocess_utils.all_ops()),\n shuffle_buffer_size=config.shuffle_buffer_size,\n prefetch_size=config.get('prefetch_to_host', 2),\n data_dir=config.get('data_dir'))\n\n # Start prefetching already.\n train_iter = input_utils.start_input_pipeline(\n train_ds, config.get('prefetch_to_device', 1))\n\n write_note('Initializing val dataset(s)...')\n\n def _get_val_split(dataset, split, pp_eval, data_dir=None):\n # We do ceil rounding such that we include the last incomplete batch.\n nval_img = input_utils.get_num_examples(\n dataset,\n split=split,\n process_batch_size=local_batch_size_eval,\n drop_remainder=False,\n data_dir=data_dir)\n val_steps = int(np.ceil(nval_img / batch_size_eval))\n logging.info('Running validation for %d steps for %s, %s', val_steps,\n dataset, split)\n\n if isinstance(pp_eval, str):\n pp_eval = preprocess_spec.parse(\n spec=pp_eval, available_ops=preprocess_utils.all_ops())\n\n val_ds = input_utils.get_data(\n dataset=dataset,\n split=split,\n rng=None,\n process_batch_size=local_batch_size_eval,\n preprocess_fn=pp_eval,\n cache=config.get('val_cache', 'batched'),\n num_epochs=1,\n repeat_after_batching=True,\n shuffle=False,\n prefetch_size=config.get('prefetch_to_host', 2),\n drop_remainder=False,\n data_dir=data_dir)\n\n return val_ds\n\n val_ds_splits = {\n 'val':\n _get_val_split(\n config.dataset,\n split=config.val_split,\n pp_eval=config.pp_eval,\n data_dir=config.get('data_dir'))\n }\n\n if config.get('test_split'):\n val_ds_splits.update({\n 'test':\n _get_val_split(\n config.dataset,\n split=config.test_split,\n pp_eval=config.pp_eval,\n data_dir=config.get('data_dir'))\n })\n\n if config.get('eval_on_cifar_10h'):\n cifar10_to_cifar10h_fn = data_uncertainty_utils.create_cifar10_to_cifar10h_fn(\n config.get('data_dir', None))\n preprocess_fn = preprocess_spec.parse(\n spec=config.pp_eval_cifar_10h, available_ops=preprocess_utils.all_ops())\n pp_eval = lambda ex: preprocess_fn(cifar10_to_cifar10h_fn(ex))\n val_ds_splits['cifar_10h'] = _get_val_split(\n 'cifar10',\n split=config.get('cifar_10h_split') or 'test',\n pp_eval=pp_eval,\n data_dir=config.get('data_dir'))\n elif config.get('eval_on_imagenet_real'):\n imagenet_to_real_fn = data_uncertainty_utils.create_imagenet_to_real_fn()\n preprocess_fn = preprocess_spec.parse(\n spec=config.pp_eval_imagenet_real,\n available_ops=preprocess_utils.all_ops())\n pp_eval = lambda ex: preprocess_fn(imagenet_to_real_fn(ex))\n val_ds_splits['imagenet_real'] = _get_val_split(\n 'imagenet2012_real',\n split=config.get('imagenet_real_split') or 'validation',\n pp_eval=pp_eval,\n data_dir=config.get('data_dir'))\n\n ood_ds = {}\n if config.get('ood_datasets') and config.get('ood_methods'):\n if config.get('ood_methods'): # config.ood_methods is not a empty list\n logging.info('loading OOD dataset = %s', config.get('ood_datasets'))\n ood_ds, ood_ds_names = ood_utils.load_ood_datasets(\n config.dataset,\n 
config.ood_datasets,\n config.ood_split,\n config.pp_eval,\n config.pp_eval_ood,\n config.ood_methods,\n config.train_split,\n config.get('data_dir'),\n _get_val_split,\n )\n\n ntrain_img = input_utils.get_num_examples(\n config.dataset,\n split=config.train_split,\n process_batch_size=local_batch_size,\n data_dir=config.get('data_dir'))\n steps_per_epoch = ntrain_img // batch_size\n\n if config.get('num_epochs'):\n total_steps = int(config.num_epochs * steps_per_epoch)\n assert not config.get('total_steps'), 'Set either num_epochs or total_steps'\n else:\n total_steps = config.total_steps\n\n logging.info('Total train data points: %d', ntrain_img)\n logging.info(\n 'Running for %d steps, that means %f epochs and %d steps per epoch',\n total_steps, total_steps * batch_size / ntrain_img, steps_per_epoch)\n\n write_note('Initializing model...')\n logging.info('config.model = %s', config.model)\n model = ub.models.vision_transformer(\n num_classes=config.num_classes, **config.model)\n\n # We want all parameters to be created in host RAM, not on any device, they'll\n # be sent there later as needed, otherwise we already encountered two\n # situations where we allocate them twice.\n @functools.partial(jax.jit, backend='cpu')\n def init(rng):\n image_size = tuple(train_ds.element_spec['image'].shape[2:])\n logging.info('image_size = %s', image_size)\n dummy_input = jnp.zeros((local_batch_size,) + image_size, jnp.float32)\n params = flax.core.unfreeze(model.init(rng, dummy_input,\n train=False))['params']\n\n # Set bias in the head to a low value, such that loss is small initially.\n params['head']['bias'] = jnp.full_like(params['head']['bias'],\n config.get('init_head_bias', 0))\n\n # init head kernel to all zeros for fine-tuning\n if config.get('model_init'):\n params['head']['kernel'] = jnp.full_like(params['head']['kernel'], 0)\n\n return params\n\n rng, rng_init = jax.random.split(rng)\n params_cpu = init(rng_init)\n\n if jax.process_index() == 0:\n num_params = sum(p.size for p in jax.tree_flatten(params_cpu)[0])\n parameter_overview.log_parameter_overview(params_cpu)\n writer.write_scalars(step=0, scalars={'num_params': num_params})\n\n @functools.partial(jax.pmap, axis_name='batch')\n def evaluation_fn(params, images, labels, mask):\n \"\"\"Copy to deterministic_utils.py whenever changes are made!\"\"\"\n # Ignore the entries with all zero labels for evaluation.\n mask *= labels.max(axis=1)\n logits, out = model.apply({'params': flax.core.freeze(params)},\n images,\n train=False)\n label_indices = config.get('label_indices')\n logging.info('!!! mask %s, label_indices %s', mask, label_indices)\n if label_indices:\n logits = logits[:, label_indices]\n\n # Note that logits and labels are usually of the shape [batch,num_classes].\n # But for OOD data, when num_classes_ood > num_classes_ind, we need to\n # adjust labels to labels[:, :config.num_classes] to match the shape of\n # logits. That is just to avoid shape mismatch. 
The output losses does not\n # have any meaning for OOD data, because OOD not belong to any IND class.\n losses = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(\n logits=logits,\n labels=labels[:, :(len(label_indices) if label_indices\n else config.num_classes)], reduction=False)\n loss = jax.lax.psum(losses * mask, axis_name='batch')\n\n top1_idx = jnp.argmax(logits, axis=1)\n # Extracts the label at the highest logit index for each image.\n top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]\n ncorrect = jax.lax.psum(top1_correct * mask, axis_name='batch')\n n = jax.lax.psum(mask, axis_name='batch')\n\n metric_args = jax.lax.all_gather([logits, labels, out['pre_logits'], mask],\n axis_name='batch')\n return ncorrect, loss, n, metric_args\n\n @functools.partial(jax.pmap, axis_name='batch')\n def cifar_10h_evaluation_fn(params, images, labels, mask):\n logits, out = model.apply({'params': flax.core.freeze(params)},\n images,\n train=False)\n label_indices = config.get('label_indices')\n if label_indices:\n logits = logits[:, label_indices]\n\n losses = getattr(train_utils, config.get('loss', 'softmax_xent'))(\n logits=logits, labels=labels, reduction=False)\n loss = jax.lax.psum(losses, axis_name='batch')\n\n top1_idx = jnp.argmax(logits, axis=1)\n # Extracts the label at the highest logit index for each image.\n one_hot_labels = jnp.eye(10)[jnp.argmax(labels, axis=1)]\n\n top1_correct = jnp.take_along_axis(\n one_hot_labels, top1_idx[:, None], axis=1)[:, 0]\n ncorrect = jax.lax.psum(top1_correct, axis_name='batch')\n n = jax.lax.psum(one_hot_labels, axis_name='batch')\n\n metric_args = jax.lax.all_gather([logits, labels, out['pre_logits'], mask],\n axis_name='batch')\n return ncorrect, loss, n, metric_args\n\n # Setup function for computing representation.\n @functools.partial(jax.pmap, axis_name='batch')\n def representation_fn(params, images, labels, mask):\n _, outputs = model.apply({'params': flax.core.freeze(params)},\n images,\n train=False)\n representation = outputs[config.fewshot.representation_layer]\n representation = jax.lax.all_gather(representation, 'batch')\n labels = jax.lax.all_gather(labels, 'batch')\n mask = jax.lax.all_gather(mask, 'batch')\n return representation, labels, mask\n\n # Load the optimizer from flax.\n opt_name = config.get('optim_name')\n write_note(f'Initializing {opt_name} optimizer...')\n opt_def = getattr(flax.optim, opt_name)(**config.get('optim', {}))\n\n # We jit this, such that the arrays that are created are created on the same\n # device as the input is, in this case the CPU. Else they'd be on device[0].\n opt_cpu = jax.jit(opt_def.create)(params_cpu)\n\n weight_decay_rules = config.get('weight_decay', []) or []\n rescale_value = config.lr.base if config.get('weight_decay_decouple') else 1.\n weight_decay_fn = train_utils.get_weight_decay_fn(\n weight_decay_rules=weight_decay_rules, rescale_value=rescale_value)\n\n @functools.partial(jax.pmap, axis_name='batch', donate_argnums=(0,))\n def update_fn(opt, lr, images, labels, rng):\n \"\"\"Update step. 
Copy to deterministic_utils.py whenever changes are made!\"\"\"\n measurements = {}\n\n # Split rng and return next_rng for the following step.\n rng, next_rng = jax.random.split(rng, 2)\n rng_local = jax.random.fold_in(rng, jax.lax.axis_index('batch'))\n\n def loss_fn(params, images, labels):\n logits, _ = model.apply(\n {'params': flax.core.freeze(params)}, images,\n train=True, rngs={'dropout': rng_local})\n label_indices = config.get('label_indices')\n if label_indices:\n logits = logits[:, label_indices]\n loss = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(\n logits=logits, labels=labels)\n return loss, logits\n # Implementation considerations compared and summarized at\n # https://docs.google.com/document/d/1g3kMEvqu1DOawaflKNyUsIoQ4yIVEoyE5ZlIPkIl4Lc/edit?hl=en#\n (l, logits), g = train_utils.accumulate_gradient(\n jax.value_and_grad(loss_fn, has_aux=True), opt.target, images, labels,\n config.get('grad_accum_steps'))\n l, g = jax.lax.pmean((l, g), axis_name='batch')\n measurements['training_loss'] = l\n\n # Log the gradient norm only if we need to compute it anyways (clipping)\n # or if we don't use grad_accum_steps, as they interact badly.\n if config.get('grad_accum_steps', 1) == 1 or config.get('grad_clip_norm'):\n grads, _ = jax.tree_flatten(g)\n l2_g = jnp.sqrt(sum([jnp.vdot(p, p) for p in grads]))\n measurements['l2_grads'] = l2_g\n\n # Optionally resize the global gradient to a maximum norm. We found this\n # useful in some cases across optimizers, hence it's in the main loop.\n if config.get('grad_clip_norm'):\n g_factor = jnp.minimum(1.0, config.grad_clip_norm / l2_g)\n g = jax.tree_util.tree_map(lambda p: g_factor * p, g)\n opt = opt.apply_gradient(g, learning_rate=lr)\n\n opt = opt.replace(target=weight_decay_fn(opt.target, lr))\n\n params, _ = jax.tree_flatten(opt.target)\n measurements['l2_params'] = jnp.sqrt(sum([jnp.vdot(p, p) for p in params]))\n\n top1_idx = jnp.argmax(logits, axis=1)\n top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]\n prec1 = jax.lax.psum(jnp.sum(top1_correct), axis_name='batch') / batch_size\n measurements['training_prec@1'] = prec1\n measurements['learning_rate'] = lr\n return opt, next_rng, measurements\n\n reint_params = ('head/kernel', 'head/bias')\n if config.get('only_eval', False) or not config.get('reint_head', True):\n reint_params = []\n checkpoint_data = checkpoint_utils.maybe_load_checkpoint(\n train_loop_rngs=rng,\n save_checkpoint_path=save_checkpoint_path,\n init_optimizer=opt_cpu,\n init_params=params_cpu,\n init_fixed_model_states=None,\n default_reinit_params=reint_params,\n config=config)\n train_loop_rngs = checkpoint_data.train_loop_rngs\n opt_cpu = checkpoint_data.optimizer\n accumulated_train_time = checkpoint_data.accumulated_train_time\n\n write_note('Kicking off misc stuff...')\n first_step = int(opt_cpu.state.step) # Might be a DeviceArray type.\n if first_step == 0 and jax.process_index() == 0:\n writer.write_hparams(dict(config))\n chrono = train_utils.Chrono(\n first_step, total_steps, batch_size, accumulated_train_time)\n # Note: switch to ProfileAllHosts() if you need to profile all hosts.\n # (Xprof data become much larger and take longer to load for analysis)\n profiler = periodic_actions.Profile(\n # Create profile after every restart to analyze pre-emption related\n # problems and assure we get similar performance in every run.\n logdir=output_dir, first_profile=first_step + 10)\n\n # Prepare the learning-rate and pre-fetch it to device to avoid delays.\n lr_fn = 
train_utils.create_learning_rate_schedule(total_steps,\n **config.get('lr', {}))\n # TODO(dusenberrymw): According to flax docs, prefetching shouldn't be\n # necessary for TPUs.\n lr_iter = train_utils.prefetch_scalar(\n map(lr_fn, range(total_steps)), config.get('prefetch_to_device', 1))\n\n write_note(f'Replicating...\\n{chrono.note}')\n opt_repl = flax.jax_utils.replicate(opt_cpu)\n\n write_note(f'Initializing few-shotters...\\n{chrono.note}')\n fewshotter = None\n if 'fewshot' in config and fewshot is not None:\n fewshotter = fewshot.FewShotEvaluator(\n representation_fn, config.fewshot,\n config.fewshot.get('batch_size') or batch_size_eval)\n\n checkpoint_writer = None\n\n # Note: we return the train loss, val loss, and fewshot best l2s for use in\n # reproducibility unit tests.\n train_loss = -jnp.inf\n val_loss = {val_name: -jnp.inf for val_name, _ in val_ds_splits.items()}\n fewshot_results = {'dummy': {(0, 1): -jnp.inf}}\n\n write_note(f'First step compilations...\\n{chrono.note}')\n logging.info('first_step = %s', first_step)\n # Advance the iterators if we are restarting from an earlier checkpoint.\n # TODO(dusenberrymw): Look into checkpointing dataset state instead.\n if first_step > 0:\n write_note('Advancing iterators after resuming from a checkpoint...')\n lr_iter = itertools.islice(lr_iter, first_step, None)\n train_iter = itertools.islice(train_iter, first_step, None)\n\n # Using a python integer for step here, because opt.state.step is allocated\n # on TPU during replication.\n for step, train_batch, lr_repl in zip(\n range(first_step + 1, total_steps + 1), train_iter, lr_iter):\n\n with jax.profiler.TraceAnnotation('train_step', step_num=step, _r=1):\n if not config.get('only_eval', False):\n opt_repl, train_loop_rngs, extra_measurements = update_fn(\n opt_repl,\n lr_repl,\n train_batch['image'],\n train_batch['labels'],\n rng=train_loop_rngs)\n\n if jax.process_index() == 0:\n profiler(step)\n\n # Checkpoint saving\n if not config.get('only_eval', False) and train_utils.itstime(\n step, config.get('checkpoint_steps'), total_steps, process=0):\n write_note('Checkpointing...')\n chrono.pause()\n train_utils.checkpointing_timeout(checkpoint_writer,\n config.get('checkpoint_timeout', 1))\n accumulated_train_time = chrono.accum_train_time\n # We need to transfer the weights over now or else we risk keeping them\n # alive while they'll be updated in a future step, creating hard to debug\n # memory errors (see b/160593526). Also, takes device 0's params only.\n opt_cpu = jax.tree_util.tree_map(lambda x: np.array(x[0]), opt_repl)\n\n # Check whether we want to keep a copy of the current checkpoint.\n copy_step = None\n if train_utils.itstime(step, config.get('keep_checkpoint_steps'),\n total_steps):\n write_note('Keeping a checkpoint copy...')\n copy_step = step\n\n # Checkpoint should be a nested dictionary or FLAX datataclasses from\n # `flax.struct`. 
Both can be present in a checkpoint.\n checkpoint_data = checkpoint_utils.CheckpointData(\n train_loop_rngs=train_loop_rngs,\n optimizer=opt_cpu,\n accumulated_train_time=accumulated_train_time)\n\n checkpoint_writer = pool.apply_async(\n checkpoint_utils.checkpoint_trained_model,\n (checkpoint_data, save_checkpoint_path, copy_step))\n chrono.resume()\n\n # Report training progress\n if not config.get('only_eval', False) and train_utils.itstime(\n step, config.log_training_steps, total_steps, process=0):\n write_note('Reporting training progress...')\n timing_measurements, note = chrono.tick(step)\n write_note(note)\n train_measurements = {}\n train_measurements.update(flax.jax_utils.unreplicate(extra_measurements))\n train_measurements.update(timing_measurements)\n writer.write_scalars(step, train_measurements)\n # Keep train_loss to return for reproducibility tests.\n train_loss = train_measurements['training_loss']\n\n # Report validation performance\n if config.get('only_eval', False) or train_utils.itstime(\n step, config.log_eval_steps, total_steps):\n write_note('Evaluating on the validation set...')\n chrono.pause()\n for val_name, val_ds in val_ds_splits.items():\n # Sets up evaluation metrics.\n ece_num_bins = config.get('ece_num_bins', 15)\n auc_num_bins = config.get('auc_num_bins', 1000)\n ece = rm.metrics.ExpectedCalibrationError(num_bins=ece_num_bins)\n calib_auc = rm.metrics.CalibrationAUC(correct_pred_as_pos_label=False)\n oc_auc_0_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.005,\n num_bins=auc_num_bins)\n oc_auc_1 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.01,\n num_bins=auc_num_bins)\n oc_auc_2 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.02,\n num_bins=auc_num_bins)\n oc_auc_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.05,\n num_bins=auc_num_bins)\n label_diversity = tf.keras.metrics.Mean()\n sample_diversity = tf.keras.metrics.Mean()\n ged = tf.keras.metrics.Mean()\n\n # Runs evaluation loop.\n val_iter = input_utils.start_input_pipeline(\n val_ds, config.get('prefetch_to_device', 1))\n ncorrect, loss, nseen = 0, 0, 0\n for batch in val_iter:\n if val_name == 'cifar_10h':\n batch_ncorrect, batch_losses, batch_n, batch_metric_args = (\n cifar_10h_evaluation_fn(opt_repl.target, batch['image'],\n batch['labels'], batch['mask']))\n else:\n batch_ncorrect, batch_losses, batch_n, batch_metric_args = (\n evaluation_fn(opt_repl.target, batch['image'],\n batch['labels'], batch['mask']))\n # All results are a replicated array shaped as follows:\n # (local_devices, per_device_batch_size, elem_shape...)\n # with each local device's entry being identical as they got psum'd.\n # So let's just take the first one to the host as numpy.\n ncorrect += np.sum(np.array(batch_ncorrect[0]))\n loss += np.sum(np.array(batch_losses[0]))\n nseen += np.sum(np.array(batch_n[0]))\n if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':\n # Here we parse batch_metric_args to compute uncertainty metrics.\n # (e.g., ECE or Calibration AUC).\n logits, labels, _, masks = batch_metric_args\n masks = np.array(masks[0], dtype=np.bool)\n logits = np.array(logits[0])\n probs = jax.nn.softmax(logits)\n # From one-hot to integer labels, as required by ECE.\n int_labels = np.argmax(np.array(labels[0]), axis=-1)\n int_preds = np.argmax(logits, axis=-1)\n confidence = np.max(probs, axis=-1)\n for p, c, l, d, m, label in zip(probs, confidence, int_labels,\n int_preds, masks, labels[0]):\n ece.add_batch(p[m, :], label=l[m])\n calib_auc.add_batch(d[m], label=l[m], 
confidence=c[m])\n # TODO(jereliu): Extend to support soft multi-class probabilities.\n oc_auc_0_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n oc_auc_1.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n oc_auc_2.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n oc_auc_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n\n if val_name == 'cifar_10h' or val_name == 'imagenet_real':\n num_classes = config.num_classes\n if config.get('label_indices'):\n num_classes = len(config.get('label_indices'))\n batch_label_diversity, batch_sample_diversity, batch_ged = data_uncertainty_utils.generalized_energy_distance(\n label[m], p[m, :], num_classes)\n label_diversity.update_state(batch_label_diversity)\n sample_diversity.update_state(batch_sample_diversity)\n ged.update_state(batch_ged)\n\n val_loss[val_name] = loss / nseen # Keep for reproducibility tests.\n val_measurements = {\n f'{val_name}_prec@1': ncorrect / nseen,\n f'{val_name}_loss': val_loss[val_name],\n }\n if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':\n val_measurements[f'{val_name}_ece'] = ece.result()['ece']\n val_measurements[f'{val_name}_calib_auc'] = calib_auc.result()[\n 'calibration_auc']\n val_measurements[f'{val_name}_oc_auc_0.5%'] = oc_auc_0_5.result()[\n 'collaborative_auc']\n val_measurements[f'{val_name}_oc_auc_1%'] = oc_auc_1.result()[\n 'collaborative_auc']\n val_measurements[f'{val_name}_oc_auc_2%'] = oc_auc_2.result()[\n 'collaborative_auc']\n val_measurements[f'{val_name}_oc_auc_5%'] = oc_auc_5.result()[\n 'collaborative_auc']\n writer.write_scalars(step, val_measurements)\n\n if val_name == 'cifar_10h' or val_name == 'imagenet_real':\n cifar_10h_measurements = {\n f'{val_name}_label_diversity': label_diversity.result(),\n f'{val_name}_sample_diversity': sample_diversity.result(),\n f'{val_name}_ged': ged.result(),\n }\n writer.write_scalars(step, cifar_10h_measurements)\n\n # OOD eval\n # Entries in the ood_ds dict include:\n # (ind_dataset, ood_dataset1, ood_dataset2, ...).\n # OOD metrics are computed using ind_dataset paired with each of the\n # ood_dataset. 
When Mahalanobis distance method is applied, train_ind_ds\n # is also included in the ood_ds.\n if ood_ds and config.ood_methods:\n ood_measurements = ood_utils.eval_ood_metrics(\n ood_ds,\n ood_ds_names,\n config.ood_methods,\n evaluation_fn,\n opt_repl.target,\n n_prefetch=config.get('prefetch_to_device', 1))\n writer.write_scalars(step, ood_measurements)\n chrono.resume()\n\n if 'fewshot' in config and fewshotter is not None:\n # Compute few-shot on-the-fly evaluation.\n if config.get('only_eval', False) or train_utils.itstime(\n step, config.fewshot.log_steps, total_steps):\n chrono.pause()\n write_note(f'Few-shot evaluation...\\n{chrono.note}')\n # Keep `results` to return for reproducibility tests.\n fewshot_results, best_l2 = fewshotter.run_all(opt_repl.target,\n config.fewshot.datasets)\n\n # TODO(dusenberrymw): Remove this once fewshot.py is updated.\n def make_writer_measure_fn(step):\n\n def writer_measure(name, value):\n writer.write_scalars(step, {name: value})\n\n return writer_measure\n\n fewshotter.walk_results(\n make_writer_measure_fn(step), fewshot_results, best_l2)\n chrono.resume()\n\n if config.get('only_eval', False):\n break\n elif config.get('testing_failure_step'):\n # Break early to simulate infra failures in test cases.\n if config.testing_failure_step == step:\n break\n\n write_note(f'Done!\\n{chrono.note}')\n pool.close()\n pool.join()\n writer.close()\n\n # Return final training loss, validation loss, and fewshot results for\n # reproducibility test cases.\n return train_loss, val_loss, fewshot_results\n\n\nif __name__ == '__main__':\n # Adds jax flags to the program.\n jax.config.config_with_absl()\n\n # TODO(dusenberrymw): Refactor `main` such that there is a `train_eval`\n # function that returns values for tests and does not directly access flags,\n # and then have `main` return None.\n\n def _main(argv):\n del argv\n config = FLAGS.config\n output_dir = FLAGS.output_dir\n main(config, output_dir)\n\n app.run(_main) # Ignore the returned values from `main`.\n",
"# coding=utf-8\n# Copyright 2022 The Uncertainty Baselines Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"BatchEnsemble of GP version of Vision Transformer.\"\"\"\n\nimport functools\nimport itertools\nimport multiprocessing\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom clu import metric_writers\nfrom clu import parameter_overview\nfrom clu import periodic_actions\nfrom clu import preprocess_spec\nimport flax\nimport jax\nimport jax.numpy as jnp\nimport ml_collections.config_flags\nimport numpy as np\nimport robustness_metrics as rm\n\nimport tensorflow as tf\nimport uncertainty_baselines as ub\nimport batchensemble_utils # local file import from baselines.jft\nimport checkpoint_utils # local file import from baselines.jft\nimport data_uncertainty_utils # local file import from baselines.jft\nimport input_utils # local file import from baselines.jft\nimport ood_utils # local file import from baselines.jft\nimport preprocess_utils # local file import from baselines.jft\nimport train_utils # local file import from baselines.jft\n\n# TODO(dusenberrymw): Open-source remaining imports.\nfewshot = None\n\nml_collections.config_flags.DEFINE_config_file(\n 'config', None, 'Training configuration.', lock_config=True)\nflags.DEFINE_string('output_dir', default=None, help='Work unit directory.')\nflags.DEFINE_integer(\n 'num_cores', default=None, help='Unused. How many devices being used.')\nflags.DEFINE_boolean(\n 'use_gpu', default=None, help='Unused. Whether or not running on GPU.')\nflags.DEFINE_string('tpu', None,\n 'Unused. Name of the TPU. Only used if use_gpu is False.')\n\nFLAGS = flags.FLAGS\n\n\ndef get_gp_kwargs(gp_config):\n \"\"\"Extract keyword argument parameters for the Gaussian process layer.\"\"\"\n covmat_momentum = gp_config.get('covmat_momentum', 0.999)\n\n # Extracts model parameter.\n logging.info('gp_config.covmat_momentum = %s', covmat_momentum)\n covmat_momentum = None if covmat_momentum < 0. 
else covmat_momentum\n covmat_kwargs = dict(momentum=covmat_momentum)\n\n # Assembles into kwargs dictionary.\n gp_layer_kwargs = dict(covmat_kwargs=covmat_kwargs)\n\n return gp_layer_kwargs\n\n\ndef main(config, output_dir):\n\n seed = config.get('seed', 0)\n rng = jax.random.PRNGKey(seed)\n tf.random.set_seed(seed)\n\n if config.get('data_dir'):\n logging.info('data_dir=%s', config.data_dir)\n logging.info('Output dir: %s', output_dir)\n tf.io.gfile.makedirs(output_dir)\n\n save_checkpoint_path = None\n if config.get('checkpoint_steps'):\n save_checkpoint_path = os.path.join(output_dir, 'checkpoint.npz')\n\n # Create an asynchronous multi-metric writer.\n writer = metric_writers.create_default_writer(\n output_dir, just_logging=jax.process_index() > 0)\n\n # The pool is used to perform misc operations such as logging in async way.\n pool = multiprocessing.pool.ThreadPool()\n\n def write_note(note):\n if jax.process_index() == 0:\n logging.info('NOTE: %s', note)\n\n write_note('Initializing...')\n\n # Verify settings to make sure no checkpoints are accidentally missed.\n if config.get('keep_checkpoint_steps'):\n assert config.get('checkpoint_steps'), 'Specify `checkpoint_steps`.'\n assert config.keep_checkpoint_steps % config.checkpoint_steps == 0, (\n f'`keep_checkpoint_steps` ({config.checkpoint_steps}) should be'\n f'divisible by `checkpoint_steps ({config.checkpoint_steps}).`')\n\n batch_size = config.batch_size\n batch_size_eval = config.get('batch_size_eval', batch_size)\n if (batch_size % jax.device_count() != 0 or\n batch_size_eval % jax.device_count() != 0):\n raise ValueError(f'Batch sizes ({batch_size} and {batch_size_eval}) must '\n f'be divisible by device number ({jax.device_count()})')\n\n local_batch_size = batch_size // jax.process_count()\n local_batch_size_eval = batch_size_eval // jax.process_count()\n logging.info(\n 'Global batch size %d on %d hosts results in %d local batch size. 
'\n 'With %d devices per host (%d devices total), that\\'s a %d per-device '\n 'batch size.', batch_size, jax.process_count(), local_batch_size,\n jax.local_device_count(), jax.device_count(),\n local_batch_size // jax.local_device_count())\n\n write_note('Initializing train dataset...')\n rng, train_ds_rng = jax.random.split(rng)\n train_ds_rng = jax.random.fold_in(train_ds_rng, jax.process_index())\n\n train_ds = input_utils.get_data(\n dataset=config.dataset,\n split=config.train_split,\n rng=train_ds_rng,\n process_batch_size=local_batch_size,\n preprocess_fn=preprocess_spec.parse(\n spec=config.pp_train, available_ops=preprocess_utils.all_ops()),\n shuffle_buffer_size=config.shuffle_buffer_size,\n prefetch_size=config.get('prefetch_to_host', 2),\n data_dir=config.get('data_dir'))\n\n # Start prefetching already.\n train_iter = input_utils.start_input_pipeline(\n train_ds, config.get('prefetch_to_device', 1))\n\n write_note('Initializing val dataset(s)...')\n\n def _get_val_split(dataset, split, pp_eval, data_dir=None):\n # We do ceil rounding such that we include the last incomplete batch.\n nval_img = input_utils.get_num_examples(\n dataset,\n split=split,\n process_batch_size=local_batch_size_eval,\n drop_remainder=False,\n data_dir=data_dir)\n val_steps = int(np.ceil(nval_img / batch_size_eval))\n logging.info('Running validation for %d steps for %s, %s', val_steps,\n dataset, split)\n\n if isinstance(pp_eval, str):\n pp_eval = preprocess_spec.parse(\n spec=pp_eval, available_ops=preprocess_utils.all_ops())\n\n val_ds = input_utils.get_data(\n dataset=dataset,\n split=split,\n rng=None,\n process_batch_size=local_batch_size_eval,\n preprocess_fn=pp_eval,\n cache=config.get('val_cache', 'batched'),\n num_epochs=1,\n repeat_after_batching=True,\n shuffle=False,\n prefetch_size=config.get('prefetch_to_host', 2),\n drop_remainder=False,\n data_dir=data_dir)\n\n return val_ds\n\n val_ds_splits = {\n 'val':\n _get_val_split(\n config.dataset,\n split=config.val_split,\n pp_eval=config.pp_eval,\n data_dir=config.get('data_dir'))\n }\n\n if config.get('test_split'):\n val_ds_splits.update({\n 'test':\n _get_val_split(\n config.dataset,\n split=config.test_split,\n pp_eval=config.pp_eval,\n data_dir=config.get('data_dir'))\n })\n\n if config.get('eval_on_cifar_10h'):\n cifar10_to_cifar10h_fn = data_uncertainty_utils.create_cifar10_to_cifar10h_fn(\n config.get('data_dir', None))\n preprocess_fn = preprocess_spec.parse(\n spec=config.pp_eval_cifar_10h, available_ops=preprocess_utils.all_ops())\n pp_eval = lambda ex: preprocess_fn(cifar10_to_cifar10h_fn(ex))\n val_ds_splits['cifar_10h'] = _get_val_split(\n 'cifar10',\n split=config.get('cifar_10h_split') or 'test',\n pp_eval=pp_eval,\n data_dir=config.get('data_dir'))\n\n elif config.get('eval_on_imagenet_real'):\n imagenet_to_real_fn = data_uncertainty_utils.create_imagenet_to_real_fn()\n preprocess_fn = preprocess_spec.parse(\n spec=config.pp_eval_imagenet_real,\n available_ops=preprocess_utils.all_ops())\n pp_eval = lambda ex: preprocess_fn(imagenet_to_real_fn(ex))\n val_ds_splits['imagenet_real'] = _get_val_split(\n 'imagenet2012_real',\n split=config.get('imagenet_real_split') or 'validation',\n pp_eval=pp_eval,\n data_dir=config.get('data_dir'))\n\n ood_ds = {}\n if config.get('ood_datasets') and config.get('ood_methods'):\n if config.get('ood_methods'): # config.ood_methods is not a empty list\n logging.info('loading OOD dataset = %s', config.get('ood_datasets'))\n ood_ds, ood_ds_names = ood_utils.load_ood_datasets(\n config.dataset,\n 
config.ood_datasets,\n config.ood_split,\n config.pp_eval,\n config.pp_eval_ood,\n config.ood_methods,\n config.train_split,\n config.get('data_dir'),\n _get_val_split,\n )\n\n ntrain_img = input_utils.get_num_examples(\n config.dataset,\n split=config.train_split,\n process_batch_size=local_batch_size,\n data_dir=config.get('data_dir'))\n steps_per_epoch = ntrain_img // batch_size\n\n if config.get('num_epochs'):\n total_steps = int(config.num_epochs * steps_per_epoch)\n assert not config.get('total_steps'), 'Set either num_epochs or total_steps'\n else:\n total_steps = config.total_steps\n\n logging.info('Total train data points: %d', ntrain_img)\n logging.info(\n 'Running for %d steps, that means %f epochs and %d steps per epoch',\n total_steps, total_steps * batch_size / ntrain_img, steps_per_epoch)\n\n write_note('Initializing model...')\n logging.info('config.model = %s', config.model)\n\n # Specify Gaussian process layer configs.\n use_gp_layer = config.get('use_gp_layer', True)\n gp_config = config.get('gp_layer', {})\n gp_layer_kwargs = get_gp_kwargs(gp_config)\n\n # Process ViT backbone model configs.\n vit_kwargs = config.get('model')\n model = ub.models.PatchTransformerBEGP(\n num_classes=config.num_classes,\n use_gp_layer=use_gp_layer,\n gp_layer_kwargs=gp_layer_kwargs,\n **vit_kwargs)\n ens_size = config.model.transformer.ens_size\n\n # We want all parameters to be created in host RAM, not on any device, they'll\n # be sent there later as needed, otherwise we already encountered two\n # situations where we allocate them twice.\n @functools.partial(jax.jit, backend='cpu')\n def init(rng):\n image_size = tuple(train_ds.element_spec['image'].shape[2:])\n logging.info('image_size = %s', image_size)\n dummy_input = jnp.zeros((local_batch_size,) + image_size, jnp.float32)\n variables = model.init(rng, dummy_input, train=False)\n\n if use_gp_layer:\n # Split model parameters into trainable and untrainable collections.\n states, params = variables.pop('params')\n del variables\n params = flax.core.unfreeze(params)\n # Set bias in the head to a low value, such that loss is small initially.\n # Modify the head parameter in the GP head.\n params['head']['output_layer']['bias'] = jnp.full_like(\n params['head']['output_layer']['bias'],\n config.get('init_head_bias', 0))\n else:\n params = variables.pop('params')\n params = flax.core.unfreeze(params)\n params['batchensemble_head']['bias'] = jnp.full_like(\n params['batchensemble_head']['bias'], config.get('init_head_bias', 0))\n states = {}\n\n # init head kernel to all zeros for fine-tuning\n if config.get('model_init'):\n params['batchensemble_head']['kernel'] = jnp.full_like(\n params['batchensemble_head']['kernel'], 0)\n\n return params, states\n\n rng, rng_init = jax.random.split(rng)\n params_cpu, states_cpu = init(rng_init)\n\n if jax.process_index() == 0:\n num_params = sum(p.size for p in jax.tree_flatten(params_cpu)[0])\n parameter_overview.log_parameter_overview(params_cpu)\n writer.write_scalars(step=0, scalars={'num_params': num_params})\n\n @functools.partial(jax.pmap, axis_name='batch')\n def evaluation_fn(params, states, images, labels, mask):\n # Ignore the entries with all zero labels for evaluation.\n mask *= labels.max(axis=1)\n variable_dict = {'params': flax.core.freeze(params), **states}\n tiled_logits, out = model.apply(\n variable_dict,\n images,\n train=False,\n mean_field_factor=gp_config.get('mean_field_factor', -1.))\n\n label_indices = config.get('label_indices')\n logging.info('!!! 
mask %s, label_indices %s', mask, label_indices)\n if label_indices:\n tiled_logits = tiled_logits[:, label_indices]\n\n loss_name = config.get('loss', 'sigmoid_xent')\n # TODO(dusenberrymw,zmariet): Clean up and generalize this.\n if loss_name == 'sigmoid_xent':\n ens_logits = batchensemble_utils.log_average_sigmoid_probs(\n jnp.asarray(jnp.split(tiled_logits, ens_size)))\n else: # softmax\n ens_logits = batchensemble_utils.log_average_softmax_probs(\n jnp.asarray(jnp.split(tiled_logits, ens_size)))\n pre_logits = jnp.concatenate(\n jnp.split(out['pre_logits'], ens_size), axis=-1)\n\n losses = getattr(train_utils, loss_name)(\n logits=ens_logits,\n labels=labels[:, :(len(label_indices) if label_indices\n else config.num_classes)],\n reduction=False)\n loss = jax.lax.psum(losses * mask, axis_name='batch')\n\n top1_idx = jnp.argmax(ens_logits, axis=1)\n top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]\n ncorrect = jax.lax.psum(top1_correct * mask, axis_name='batch')\n n = jax.lax.psum(mask, axis_name='batch')\n\n metric_args = jax.lax.all_gather(\n [ens_logits, labels, pre_logits, mask],\n axis_name='batch')\n return ncorrect, loss, n, metric_args\n\n @functools.partial(jax.pmap, axis_name='batch')\n def cifar_10h_evaluation_fn(params, states, images, labels, mask):\n variable_dict = {'params': flax.core.freeze(params), **states}\n tiled_logits, out = model.apply(\n variable_dict,\n images,\n train=False,\n mean_field_factor=gp_config.get('mean_field_factor', -1.))\n loss_name = config.get('loss', 'softmax_xent')\n if loss_name == 'sigmoid_xent':\n ens_logits = batchensemble_utils.log_average_sigmoid_probs(\n jnp.asarray(jnp.split(tiled_logits, ens_size)))\n else: # softmax\n ens_logits = batchensemble_utils.log_average_softmax_probs(\n jnp.asarray(jnp.split(tiled_logits, ens_size)))\n pre_logits = jnp.concatenate(\n jnp.split(out['pre_logits'], ens_size), axis=-1)\n\n label_indices = config.get('label_indices')\n if label_indices:\n ens_logits = ens_logits[:, label_indices]\n\n losses = getattr(train_utils, config.get('loss', 'softmax_xent'))(\n logits=ens_logits, labels=labels, reduction=False)\n loss = jax.lax.psum(losses, axis_name='batch')\n\n top1_idx = jnp.argmax(ens_logits, axis=1)\n # Extracts the label at the highest logit index for each image.\n one_hot_labels = jnp.eye(10)[jnp.argmax(labels, axis=1)]\n\n top1_correct = jnp.take_along_axis(\n one_hot_labels, top1_idx[:, None], axis=1)[:, 0]\n ncorrect = jax.lax.psum(top1_correct, axis_name='batch')\n n = jax.lax.psum(one_hot_labels, axis_name='batch')\n\n metric_args = jax.lax.all_gather([ens_logits, labels, pre_logits, mask],\n axis_name='batch')\n return ncorrect, loss, n, metric_args\n\n # Setup function for computing representation.\n @functools.partial(jax.pmap, axis_name='batch')\n def representation_fn(params, states, images, labels, mask):\n # Return shape [batch_size, representation_size * ensemble_size]. 
During\n # few-shot eval, a single linear regressor is applied over all dimensions.\n variable_dict = {'params': flax.core.freeze(params), **states}\n _, outputs = model.apply(\n variable_dict,\n images,\n train=False,\n mean_field_factor=gp_config.get('mean_field_factor', -1.))\n representation = outputs[config.fewshot.representation_layer]\n representation = jnp.concatenate(\n jnp.split(representation, ens_size), axis=-1)\n representation = jax.lax.all_gather(representation, 'batch')\n labels = jax.lax.all_gather(labels, 'batch')\n mask = jax.lax.all_gather(mask, 'batch')\n return representation, labels, mask\n\n opt_name = config.get('optim_name')\n write_note(f'Initializing {opt_name} optimizer...')\n opt_def = getattr(flax.optim, opt_name)(**config.get('optim', {}))\n\n # We jit this, such that the arrays that are created are created on the same\n # device as the input is, in this case the CPU. Else they'd be on device[0].\n opt_cpu = jax.jit(opt_def.create)(params_cpu)\n\n weight_decay_rules = config.get('weight_decay', []) or []\n rescale_value = config.lr.base if config.get('weight_decay_decouple') else 1.\n weight_decay_fn = train_utils.get_weight_decay_fn(\n weight_decay_rules=weight_decay_rules, rescale_value=rescale_value)\n\n @functools.partial(jax.pmap, axis_name='batch', donate_argnums=(0,))\n def update_fn(opt, states, lr, reset_covmat, images, labels, rng):\n \"\"\"Update step.\"\"\"\n measurements = {}\n\n # Split rng and return next_rng for the following step.\n rng, next_rng = jax.random.split(rng, 2)\n rng_local = jax.random.fold_in(rng, jax.lax.axis_index('batch'))\n\n def loss_fn(params, states, images, labels):\n if use_gp_layer:\n (logits, _), updated_states = model.apply(\n {'params': flax.core.freeze(params), **states},\n images,\n train=True,\n rngs={'dropout': rng_local},\n # Specify mutable collection to update untrainable GP parameters.\n mutable=list(states.keys()))\n else:\n updated_states = {}\n logits, _ = model.apply(\n {'params': flax.core.freeze(params), **states},\n images,\n train=True,\n rngs={'dropout': rng_local})\n\n ens_size = config.model.transformer.ens_size\n labels = jnp.tile(labels, (ens_size, 1))\n loss_fn = getattr(train_utils, config.get('loss', 'sigmoid_xent'))\n loss = loss_fn(logits=logits, labels=labels)\n aux = {'logits': logits, 'states': updated_states}\n return loss, aux\n\n # Performs exact covariance update (i.e., reset precision matrix resetting\n # at begining of new epoch) if covmat_momentum is a null value.\n if use_gp_layer and config.get('gp_layer.covmat_momentum', -1.) < 0:\n # Resets precision matrix to Identity * ridge_penalty if at the begining\n # of a new epoch. This should be done before accumulate gradient.\n ridge_penalty = config.get('gp_layer.ridge_penalty', 1.)\n prec_mat_old = states['laplace_covariance']['head']['covmat_layer'][\n 'precision_matrix']\n prec_mat_new = (\n (1. 
- reset_covmat) * prec_mat_old +\n reset_covmat * jnp.eye(prec_mat_old.shape[0]) * ridge_penalty)\n\n states = flax.core.unfreeze(states)\n states['laplace_covariance']['head']['covmat_layer'][\n 'precision_matrix'] = prec_mat_new\n states = flax.core.freeze(states)\n\n # Implementation considerations compared and summarized at\n # https://docs.google.com/document/d/1g3kMEvqu1DOawaflKNyUsIoQ4yIVEoyE5ZlIPkIl4Lc/edit?hl=en#\n (l, aux), g = train_utils.accumulate_gradient_with_states(\n jax.value_and_grad(loss_fn, has_aux=True), opt.target, states, images,\n labels, config.get('grad_accum_steps'))\n l, g = jax.lax.pmean((l, g), axis_name='batch')\n measurements['training_loss'] = l\n\n # Log the gradient norm only if we need to compute it anyways (clipping)\n # or if we don't use grad_accum_steps, as they interact badly.\n if config.get('grad_accum_steps', 1) == 1 or config.get('grad_clip_norm'):\n grads, _ = jax.tree_flatten(g)\n l2_g = jnp.sqrt(sum([jnp.vdot(p, p) for p in grads]))\n measurements['l2_grads'] = l2_g\n\n # Optionally resize the global gradient to a maximum norm. We found this\n # useful in some cases across optimizers, hence it's in the main loop.\n if config.get('grad_clip_norm'):\n g_factor = jnp.minimum(1.0, config.grad_clip_norm / l2_g)\n g = jax.tree_map(lambda p: g_factor * p, g)\n opt = opt.apply_gradient(g, learning_rate=lr)\n opt = opt.replace(target=weight_decay_fn(opt.target, lr))\n\n params, _ = jax.tree_flatten(opt.target)\n measurements['l2_params'] = jnp.sqrt(sum([jnp.vdot(p, p) for p in params]))\n\n # Compute training accuracy by the ensemble members independently to save\n # compute.\n top1_idx = jnp.argmax(aux['logits'], axis=1)\n ens_size = config.model.transformer.ens_size\n tiled_labels = jnp.tile(labels, (ens_size, 1))\n top1_correct = jnp.take_along_axis(tiled_labels,\n top1_idx[:, None], axis=1)[:, 0]\n prec1 = jax.lax.psum(jnp.sum(top1_correct), axis_name='batch') / (\n batch_size * ens_size)\n measurements['training_prec@1'] = prec1\n measurements['learning_rate'] = lr\n return opt, aux['states'], next_rng, measurements\n\n reint_params = []\n checkpoint_data = checkpoint_utils.maybe_load_checkpoint(\n train_loop_rngs=rng,\n save_checkpoint_path=save_checkpoint_path,\n init_optimizer=opt_cpu,\n init_params=params_cpu,\n init_fixed_model_states=states_cpu,\n default_reinit_params=reint_params,\n config=config)\n\n train_loop_rngs = checkpoint_data.train_loop_rngs\n opt_cpu = checkpoint_data.optimizer\n states_cpu = checkpoint_data.fixed_model_states\n accumulated_train_time = checkpoint_data.accumulated_train_time\n\n write_note('Kicking off misc stuff...')\n first_step = int(opt_cpu.state.step) # Might be a DeviceArray type.\n if first_step == 0 and jax.process_index() == 0:\n writer.write_hparams(dict(config))\n chrono = train_utils.Chrono(\n first_step, total_steps, batch_size, accumulated_train_time)\n\n # Note: switch to ProfileAllHosts() if you need to profile all hosts.\n # (Xprof data become much larger and take longer to load for analysis)\n profiler = periodic_actions.Profile(\n # Create profile after every restart to analyze pre-emption related\n # problems and assure we get similar performance in every run.\n logdir=output_dir, first_profile=first_step + 10)\n\n # Prepare the learning-rate and pre-fetch it to device to avoid delays.\n lr_fn = train_utils.create_learning_rate_schedule(total_steps,\n **config.get('lr', {}))\n # TODO(dusenberrymw): According to flax docs, prefetching shouldn't be\n # necessary for TPUs.\n lr_iter = 
train_utils.prefetch_scalar(\n map(lr_fn, range(total_steps)), config.get('prefetch_to_device', 1))\n\n # Prepare the precision matrix resetting schedule, and pre-fetch it to device.\n reset_steps = steps_per_epoch * 1\n reset_covmat_fn = lambda step: float(step % reset_steps == 0)\n reset_covmat_iter = train_utils.prefetch_scalar(\n map(reset_covmat_fn, range(first_step, total_steps)),\n nprefetch=config.get('prefetch_to_device', 1))\n\n write_note(f'Replicating...\\n{chrono.note}')\n opt_repl = flax.jax_utils.replicate(opt_cpu)\n states_repl = flax.jax_utils.replicate(states_cpu)\n\n write_note(f'Initializing few-shotters...\\n{chrono.note}')\n fewshotter = None\n if 'fewshot' in config and fewshot is not None:\n fewshotter = fewshot.FewShotEvaluator(\n representation_fn, config.fewshot,\n config.fewshot.get('batch_size') or batch_size_eval)\n\n checkpoint_writer = None\n\n # Note: we return the train loss, val loss, and fewshot best l2s for use in\n # reproducibility unit tests.\n train_loss = -jnp.inf\n val_loss = {val_name: -jnp.inf for val_name, _ in val_ds_splits.items()}\n fewshot_results = {'dummy': {(0, 1): -jnp.inf}}\n\n write_note(f'First step compilations...\\n{chrono.note}')\n logging.info('first_step = %s', first_step)\n # Make sure log_eval_steps is same as steps_per_epoch. This is because\n # the precision matrix needs to be updated fully (at the end of each epoch)\n # when eval takes place.\n log_eval_steps = config.log_eval_steps\n if use_gp_layer:\n log_eval_steps = max(steps_per_epoch, 2)\n # Advance the iterators if we are restarting from an earlier checkpoint.\n # TODO(dusenberrymw): Look into checkpointing dataset state instead.\n if first_step > 0:\n write_note('Advancing iterators after resuming from a checkpoint...')\n lr_iter = itertools.islice(lr_iter, first_step, None)\n train_iter = itertools.islice(train_iter, first_step, None)\n\n # Using a python integer for step here, because opt.state.step is allocated\n # on TPU during replication.\n for step, train_batch, lr_repl, reset_covmat_repl in zip(\n range(first_step + 1, total_steps + 1), train_iter, lr_iter,\n reset_covmat_iter):\n with jax.profiler.TraceAnnotation('train_step', step_num=step, _r=1):\n if not config.get('only_eval', False):\n opt_repl, states_repl, train_loop_rngs, extra_measurements = update_fn(\n opt_repl,\n states_repl,\n lr_repl,\n reset_covmat_repl,\n train_batch['image'],\n train_batch['labels'],\n rng=train_loop_rngs)\n\n if jax.process_index() == 0:\n profiler(step)\n\n # Checkpoint saving\n if not config.get('only_eval', False) and train_utils.itstime(\n step, config.get('checkpoint_steps'), total_steps, process=0):\n write_note('Checkpointing...')\n chrono.pause()\n train_utils.checkpointing_timeout(checkpoint_writer,\n config.get('checkpoint_timeout', 1))\n accumulated_train_time = chrono.accum_train_time\n # We need to transfer the weights over now or else we risk keeping them\n # alive while they'll be updated in a future step, creating hard to debug\n # memory errors (see b/160593526). Also, takes device 0's params only.\n # For GP layer, we will also do the same for untrainable parameters\n # (`states`). 
This is ok since `random features` are frozen throughout\n # pre-training, and `precision matrix` is a finetuning-specific parameters\n # that will be re-learned in the finetuning task.\n opt_cpu = jax.tree_map(lambda x: np.array(x[0]), opt_repl)\n states_cpu = jax.tree_map(lambda x: np.array(x[0]), states_repl)\n\n # Check whether we want to keep a copy of the current checkpoint.\n copy_step = None\n if train_utils.itstime(step, config.get('keep_checkpoint_steps'),\n total_steps):\n write_note('Keeping a checkpoint copy...')\n copy_step = step\n\n # Checkpoint should be a nested dictionary or FLAX datataclasses from\n # `flax.struct`. Both can be present in a checkpoint.\n checkpoint_data = checkpoint_utils.CheckpointData(\n optimizer=opt_cpu,\n fixed_model_states=states_cpu,\n train_loop_rngs=train_loop_rngs,\n accumulated_train_time=accumulated_train_time)\n checkpoint_writer = pool.apply_async(\n checkpoint_utils.checkpoint_trained_model,\n (checkpoint_data, save_checkpoint_path, copy_step))\n chrono.resume()\n\n # Report training progress\n if not config.get('only_eval', False) and train_utils.itstime(\n step, config.log_training_steps, total_steps, process=0):\n write_note('Reporting training progress...')\n timing_measurements, note = chrono.tick(step)\n write_note(note)\n train_measurements = {}\n train_measurements.update(flax.jax_utils.unreplicate(extra_measurements))\n train_measurements.update(timing_measurements)\n writer.write_scalars(step, train_measurements)\n # Keep to return for reproducibility tests.\n train_loss = train_measurements['training_loss']\n\n # Report validation performance\n if config.get('only_eval', False) or train_utils.itstime(\n step, log_eval_steps, total_steps):\n write_note('Evaluating on the validation set...')\n chrono.pause()\n for val_name, val_ds in val_ds_splits.items():\n # Sets up evaluation metrics.\n ece_num_bins = config.get('ece_num_bins', 15)\n auc_num_bins = config.get('auc_num_bins', 1000)\n ece = rm.metrics.ExpectedCalibrationError(num_bins=ece_num_bins)\n calib_auc = rm.metrics.CalibrationAUC(correct_pred_as_pos_label=False)\n oc_auc_0_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.005,\n num_bins=auc_num_bins)\n oc_auc_1 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.01,\n num_bins=auc_num_bins)\n oc_auc_2 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.02,\n num_bins=auc_num_bins)\n oc_auc_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.05,\n num_bins=auc_num_bins)\n label_diversity = tf.keras.metrics.Mean()\n sample_diversity = tf.keras.metrics.Mean()\n ged = tf.keras.metrics.Mean()\n\n # Runs evaluation loop.\n val_iter = input_utils.start_input_pipeline(\n val_ds, config.get('prefetch_to_device', 1))\n ncorrect, loss, nseen = 0, 0, 0\n for batch in val_iter:\n if val_name == 'cifar_10h':\n batch_ncorrect, batch_losses, batch_n, batch_metric_args = (\n cifar_10h_evaluation_fn(opt_repl.target,\n states_repl,\n batch['image'],\n batch['labels'],\n batch['mask']))\n else:\n batch_ncorrect, batch_losses, batch_n, batch_metric_args = (\n evaluation_fn(opt_repl.target,\n states_repl,\n batch['image'],\n batch['labels'],\n batch['mask']))\n # All results are a replicated array shaped as follows:\n # (local_devices, per_device_batch_size, elem_shape...)\n # with each local device's entry being identical as they got psum'd.\n # So let's just take the first one to the host as numpy.\n ncorrect += np.sum(np.array(batch_ncorrect[0]))\n loss += np.sum(np.array(batch_losses[0]))\n nseen += 
np.sum(np.array(batch_n[0]))\n if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':\n # Here we parse batch_metric_args to compute uncertainty metrics.\n # (e.g., ECE or Calibration AUC).\n logits, labels, _, masks = batch_metric_args\n masks = np.array(masks[0], dtype=np.bool)\n logits = np.array(logits[0])\n probs = jax.nn.softmax(logits)\n # From one-hot to integer labels, as required by ECE.\n int_labels = np.argmax(np.array(labels[0]), axis=-1)\n int_preds = np.argmax(logits, axis=-1)\n confidence = np.max(probs, axis=-1)\n for p, c, l, d, m, label in zip(probs, confidence, int_labels,\n int_preds, masks, labels[0]):\n ece.add_batch(p[m, :], label=l[m])\n calib_auc.add_batch(d[m], label=l[m], confidence=c[m])\n # TODO(jereliu): Extend to support soft multi-class probabilities.\n oc_auc_0_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n oc_auc_1.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n oc_auc_2.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n oc_auc_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n\n if val_name == 'cifar_10h' or val_name == 'imagenet_real':\n num_classes = config.num_classes\n if config.get('label_indices'):\n num_classes = len(config.get('label_indices'))\n batch_label_diversity, batch_sample_diversity, batch_ged = data_uncertainty_utils.generalized_energy_distance(\n label[m], p[m, :], num_classes)\n label_diversity.update_state(batch_label_diversity)\n sample_diversity.update_state(batch_sample_diversity)\n ged.update_state(batch_ged)\n\n val_loss[val_name] = loss / nseen # Keep for reproducibility tests.\n val_measurements = {\n f'{val_name}_prec@1': ncorrect / nseen,\n f'{val_name}_loss': val_loss[val_name],\n }\n if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':\n val_measurements[f'{val_name}_ece'] = ece.result()['ece']\n val_measurements[f'{val_name}_calib_auc'] = calib_auc.result()[\n 'calibration_auc']\n val_measurements[f'{val_name}_oc_auc_0.5%'] = oc_auc_0_5.result()[\n 'collaborative_auc']\n val_measurements[f'{val_name}_oc_auc_1%'] = oc_auc_1.result()[\n 'collaborative_auc']\n val_measurements[f'{val_name}_oc_auc_2%'] = oc_auc_2.result()[\n 'collaborative_auc']\n val_measurements[f'{val_name}_oc_auc_5%'] = oc_auc_5.result()[\n 'collaborative_auc']\n writer.write_scalars(step, val_measurements)\n\n if val_name == 'cifar_10h' or val_name == 'imagenet_real':\n cifar_10h_measurements = {\n f'{val_name}_label_diversity': label_diversity.result(),\n f'{val_name}_sample_diversity': sample_diversity.result(),\n f'{val_name}_ged': ged.result(),\n }\n writer.write_scalars(step, cifar_10h_measurements)\n\n # OOD eval\n # Entries in the ood_ds dict include:\n # (ind_dataset, ood_dataset1, ood_dataset2, ...).\n # OOD metrics are computed using ind_dataset paired with each of the\n # ood_dataset. 
When Mahalanobis distance method is applied, train_ind_ds\n # is also included in the ood_ds.\n if ood_ds and config.ood_methods:\n\n def make_sngp_eval_fn(states):\n\n def sngp_eval_fn(params, images, labels, mask):\n return evaluation_fn(\n params=params,\n states=states,\n images=images,\n labels=labels,\n mask=mask)\n\n return sngp_eval_fn\n\n ood_measurements = ood_utils.eval_ood_metrics(\n ood_ds,\n ood_ds_names,\n config.ood_methods,\n make_sngp_eval_fn(states_repl),\n opt_repl.target,\n n_prefetch=config.get('prefetch_to_device', 1))\n writer.write_scalars(step, ood_measurements)\n\n chrono.resume()\n\n if 'fewshot' in config and fewshotter is not None:\n # Compute few-shot on-the-fly evaluation.\n if config.get('only_eval', False) or train_utils.itstime(\n step, config.fewshot.log_steps, total_steps):\n chrono.pause()\n write_note(f'Few-shot evaluation...\\n{chrono.note}')\n # Keep `results` to return for reproducibility tests.\n fewshot_results, best_l2 = fewshotter.run_all(opt_repl.target,\n config.fewshot.datasets)\n\n # TODO(dusenberrymw): Remove this once fewshot.py is updated.\n def make_writer_measure_fn(step):\n\n def writer_measure(name, value):\n writer.write_scalars(step, {name: value})\n\n return writer_measure\n\n fewshotter.walk_results(\n make_writer_measure_fn(step), fewshot_results, best_l2)\n chrono.resume()\n\n if config.get('only_eval', False):\n break\n\n write_note(f'Done!\\n{chrono.note}')\n pool.close()\n pool.join()\n writer.close()\n\n # Return final training loss, validation loss, and fewshot results for\n # reproducibility test cases.\n return train_loss, val_loss, fewshot_results\n\nif __name__ == '__main__':\n # Adds jax flags to the program.\n jax.config.parse_flags_with_absl()\n\n def _main(argv):\n del argv\n config = FLAGS.config\n output_dir = FLAGS.output_dir\n main(config, output_dir)\n\n app.run(_main) # Ignore the returned values from `main`.\n",
"# coding=utf-8\n# Copyright 2022 The Uncertainty Baselines Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"utils methods/classes.\"\"\"\n\nfrom typing import Any, Dict, Optional, Sequence\n\nimport numpy as np\nimport sklearn.metrics\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nPADDING_VALUE = 0\n\n\ndef state_is_tuple(cell_type):\n return cell_type == 'lstm'\n\n\ndef create_mask(inputs: tf.Tensor,\n masking_prob: Dict[Any, float],\n seed: Optional[int] = None) -> tf.Tensor:\n \"\"\"Creates mask by the masking probability of each element in the inputs.\"\"\"\n threshold = tf.zeros_like(inputs, dtype=tf.float32)\n for element, ratio in masking_prob.items():\n threshold += tf.where(tf.equal(inputs, element), ratio, 0.0)\n prob = tf.random.uniform(inputs.shape, minval=0, maxval=1, seed=seed)\n return tf.cast(prob < threshold, tf.int32)\n\n\ndef value_in_tensor(inputs: tf.Tensor, tensor: tf.Tensor) -> tf.Tensor:\n \"\"\"Checks if each element in `inputs` is in `tensor`.\"\"\"\n tile_multiples = tf.concat(\n [tf.ones(tf.rank(inputs), dtype=tf.int32),\n tf.shape(tensor)], axis=0)\n inputs = tf.tile(tf.expand_dims(inputs, -1), tile_multiples)\n return tf.reduce_any(tf.equal(inputs, tensor), -1)\n\n\ndef create_rebalanced_sample_weights(\n labels: tf.Tensor,\n dtype: Optional[tf.dtypes.DType] = tf.float32,\n mask_padding: Optional[bool] = True) -> tf.Tensor:\n \"\"\"Creates the sample weights by inverse of label counts.\"\"\"\n unique_label, _, count = tf.unique_with_counts(tf.reshape(labels, [-1]))\n weights = tf.reduce_min(count) / count\n sample_weights = tf.map_fn(\n fn=lambda t: tf.where(labels == tf.cast(t[0], dtype=labels.dtype), t[1], 0\n ),\n elems=tf.stack([tf.cast(unique_label, dtype=weights.dtype), weights],\n axis=1))\n sample_weights = tf.cast(tf.reduce_sum(sample_weights, axis=0), dtype=dtype)\n if mask_padding:\n sample_weights *= tf.cast(tf.sign(labels), dtype=dtype)\n sample_weights /= tf.reduce_mean(sample_weights)\n return sample_weights\n\n\ndef get_rnn_cls(cell_type: str):\n if cell_type == 'lstm':\n return tf.keras.layers.LSTM\n elif cell_type == 'gru':\n return tf.keras.layers.GRU\n else:\n return tf.keras.layers.SimpleRNN\n\n\ndef get_rnn_cell(cell_type: str):\n if cell_type == 'lstm':\n return tf.keras.layers.LSTMCell\n elif cell_type == 'gru':\n return tf.keras.layers.GRUCell\n else:\n return tf.keras.layers.SimpleRNNCell\n\n\ndef to_one_hot(x) -> tf.Tensor:\n \"\"\"Returns the argmax of the input tensor in one-hot format.\"\"\"\n indices = tf.math.argmax(x, axis=1)\n depth = x.shape.as_list()[-1]\n x_hard = tf.one_hot(indices, depth, dtype=x.dtype)\n return tf.stop_gradient(x_hard - x) + x\n\n\ndef get_last_step(inputs: tf.Tensor, seq_length: tf.Tensor) -> tf.Tensor:\n \"\"\"Returns the last step of inputs by the sequence length.\n\n If the sequence length is zero, it will return the zero tensor.\n\n Args:\n inputs: tensor of [batch_size, max_seq_length, hidden_size].\n seq_length: tensor of [batch_size] recording the actual length of 
inputs.\n\n Returns:\n tensor of [batch_size, hidden_size], where tensor[i, :] = inputs[i,\n seq_length[i], :]\n \"\"\"\n batch_range = tf.range(tf.shape(seq_length)[0])\n\n non_empty_seq = tf.sign(seq_length)\n safe_indices = tf.cast((seq_length - non_empty_seq), dtype=tf.int32)\n indices = tf.stack([batch_range, safe_indices], axis=1)\n result = tf.gather_nd(inputs, indices)\n # Expand axis to broadcast to the second dimension (hidden size).\n result *= tf.expand_dims(tf.cast(non_empty_seq, dtype=result.dtype), axis=1)\n return result\n\n\n# thanks for the implementation at\n# https://blog.evjang.com/2016/11/tutorial-categorical-variational.html\ndef sample_gumbel(shape, eps=1e-20):\n \"\"\"Sample from Gumbel 0 to 1.\"\"\"\n uniform = tf.random.uniform(shape, minval=0, maxval=1)\n return -tf.math.log(-tf.math.log(uniform + eps) + eps)\n\n\ndef gumbel_softmax_sample(logits, temperature):\n \"\"\"Draw a sample from the Gumbel-Softmax distribution.\"\"\"\n y = logits + sample_gumbel(tf.shape(logits))\n y_adjusted = y / temperature\n return tf.nn.softmax(y_adjusted), y_adjusted\n\n\nclass GumbelSoftmaxSampler(tf.keras.layers.Layer):\n \"\"\"Gumbel-Softmax sampler.\n\n Sample from the Gumbel-Softmax distribution and optionally discretize.\n \"\"\"\n\n def __init__(self,\n temperature,\n hard: bool = False,\n trainable_temperature: bool = True):\n \"\"\"GumbelSoftmaxSampler constructor.\n\n Args:\n temperature: non-negative scalar\n hard: if True, take argmax, but differentiate w.r.t. soft sample y\n trainable_temperature: whether temperature is trainable\n \"\"\"\n self._trainable_temperature = trainable_temperature\n self._initial_temperature = temperature\n self._hard = hard\n\n super(GumbelSoftmaxSampler, self).__init__()\n\n def build(self, input_shape):\n self._temperature = self.add_weight(\n 'temperature',\n initializer=tf.keras.initializers.Constant(self._initial_temperature),\n trainable=self._trainable_temperature)\n super().build(input_shape)\n\n def call(self, logits: tf.Tensor, return_logits: bool = False):\n \"\"\"Sample from the Gumbel-Softmax distribution and optionally discretize.\n\n Args:\n logits: [batch_size, n_class] unnormalized log-probs.\n return_logits: whether to also return logits tensor.\n\n Returns:\n A [batch_size, n_class] sample from the Gumbel-Softmax distribution.\n If self._hard=True, then the returned sample will be one-hot, otherwise it\n will be a probabilitiy distribution that sums to 1 across classes.\n \"\"\"\n y, logits = gumbel_softmax_sample(logits, self._temperature)\n if self._hard:\n y = to_one_hot(y)\n if return_logits:\n return y, logits\n return y\n\n\nclass MLP(tf.keras.Model):\n \"\"\"Multilayer perceptron.\"\"\"\n\n def __init__(self,\n output_sizes: Sequence[int],\n use_bias: bool = True,\n dropout: float = 0.5,\n hidden_activation: Optional[Any] = None,\n final_activation: Optional[Any] = None):\n super(MLP, self).__init__()\n\n self._layers = []\n for output_size in output_sizes:\n self._layers.append(\n tf.keras.layers.Dense(\n output_size, activation=hidden_activation, use_bias=use_bias))\n if dropout not in (None, 0):\n self._layers.append(tf.keras.layers.Dropout(dropout))\n if final_activation:\n self._layers.append(final_activation)\n\n def call(self, inputs):\n outputs = inputs\n for layer in self._layers:\n outputs = layer(outputs)\n return outputs\n\n\nclass SequentialWordLoss(tf.keras.losses.SparseCategoricalCrossentropy):\n \"\"\"Cross entropy loss of the word id sequences.\"\"\"\n\n def __init__(self, *args, word_weights: 
Optional[Any] = None, **kwargs):\n \"\"\"SequentialWordLoss constructor.\n\n Args:\n *args: optional arguments passed to\n tf.keras.losses.SparseCategoricalCrossentropy.\n word_weights: of shape [vocab_size], the weights of each token, used to\n rescale loss. word_weights[0] should be the weight of the padding token\n id 0.\n **kwargs: optional arguments passed to\n tf.keras.losses.SparseCategoricalCrossentropy.\n \"\"\"\n # Disable reduction to be able to apply sequence mask and (optional) word\n # weights.\n super(SequentialWordLoss, self).__init__(\n reduction=tf.keras.losses.Reduction.NONE, *args, **kwargs)\n self._word_weights = word_weights\n\n def call(self, y_true, y_pred, sample_weight: Optional[tf.Tensor] = None):\n loss = super().call(y_true=y_true, y_pred=y_pred)\n if sample_weight:\n sample_weight = tf.cast(sample_weight, dtype=loss.dtype)\n loss *= sample_weight\n if self._word_weights is not None:\n word_idx = tf.cast(y_true, tf.int32)\n weights = tf.gather(self._word_weights, word_idx)\n loss *= tf.cast(weights, dtype=loss.dtype)\n return loss\n\n\nclass BowLoss(SequentialWordLoss):\n \"\"\"Bag-of-word loss [1].\n\n Reference:\n [1]: Zhao et al. Learning Discourse-level Diversity for Neural Dialog Models\n using Conditional Variational Autoencoders. https://arxiv.org/abs/1703.10960\n \"\"\"\n\n def __init__(self, *args, sequence_axis: Optional[int] = 1, **kwargs):\n \"\"\"BowLoss Constructor.\n\n Args:\n *args: arguments passed to super class SequentialWordLoss.\n sequence_axis: the axis of the sequence dimension bow logits to be\n repeated.\n **kwargs: arguments passed to super class SequentialWordLoss.\n \"\"\"\n super(BowLoss, self).__init__(*args, **kwargs)\n self._sequence_axis = sequence_axis\n\n def call(self, y_true, bow_pred, sample_weight: Optional[tf.Tensor] = None):\n \"\"\"Computes bow loss.\n\n Args:\n y_true: the label tensor, of shape [d0, d1, ..., dN] where dN =\n self._sequence_axis.\n bow_pred: the bow prediction logits, of shape [d0, d1, ..., d_{N-1}, H].\n It will be repeated to [d0, d1, ..., d_{N-1}, dN, H] and compute\n SequentialWordLoss with y_true.\n sample_weight: the optional tensor of shape [d0, d1, ..., dN] specifying\n the weight to rescale the loss.\n\n Returns:\n loss: tensor of shape [d0, d1, ..., dN].\n \"\"\"\n y_true_shape = tf.shape(y_true)\n y_true_rank = len(y_true.shape)\n axis = self._sequence_axis\n if y_true_rank <= axis:\n raise ValueError(\n 'Expected sequence axis {}, but y_true has a lower rank {}: {}'\n .format(axis, y_true_rank, y_true_shape))\n\n # Step 1/2: construct the multiples for tf.tile; insert the max_seq_length\n # multiple in the sequence axis. It's equivalent to:\n # multiples = [1] * y_true_rank\n # multiples.insert(axis, y_true_shape[axis])\n multiples = tf.concat([[1] * axis, [y_true_shape[axis]], [1] *\n (y_true_rank - axis)],\n axis=0)\n # Step 2/2: repeat `bow_pred` to match `y_true` on the sequence axis.\n y_pred = tf.tile(tf.expand_dims(bow_pred, axis=axis), multiples)\n loss = super().call(y_true, y_pred, sample_weight)\n return loss\n\n\nclass KlLoss(tf.keras.losses.KLDivergence):\n \"\"\"KL divergence with Batch Prior Regularization support [1].\n\n Reference:\n [1]: Zhao et al. Learning Discourse-level Diversity for Neural Dialog Models\n using Conditional Variational Autoencoders. 
https://arxiv.org/abs/1703.10960\n \"\"\"\n\n def __init__(self,\n bpr: bool,\n *args,\n from_logits: Optional[bool] = False,\n **kwargs):\n super(KlLoss, self).__init__(*args, **kwargs)\n self._bpr = bpr\n self._from_logits = from_logits\n\n def call(self, p_z, q_z):\n if self._from_logits:\n p_z = tf.nn.softmax(p_z)\n q_z = tf.nn.softmax(q_z)\n\n if self._bpr:\n if not p_z.shape.is_compatible_with(q_z.shape):\n raise ValueError(\n 'Inconsistent shape between p_z_logits {} and q_z_logits {}'.format(\n p_z.shape, q_z.shape))\n batch_size = tf.shape(p_z)[0]\n p_z = tf.reduce_mean(p_z, axis=0)\n q_z = tf.reduce_mean(q_z, axis=0)\n loss = super().call(q_z, p_z) * tf.cast(batch_size, p_z.dtype)\n else:\n loss = super().call(q_z, p_z)\n return loss\n\n\nclass BertPreprocessor(tf.keras.Model):\n \"\"\"Preprocessor converting text into BERT input formats.\"\"\"\n\n def __init__(self, tfhub_url: str, max_seq_length: int):\n super(BertPreprocessor, self).__init__()\n\n self._tfhub_url = tfhub_url\n self._max_seq_length = max_seq_length\n\n preprocess = hub.load(self._tfhub_url)\n self._special_tokens_dict = preprocess.tokenize.get_special_tokens_dict()\n\n self.tokenizer = hub.KerasLayer(preprocess.tokenize, name='tokenizer')\n self.packer = hub.KerasLayer(\n preprocess.bert_pack_inputs,\n arguments=dict(seq_length=self._max_seq_length),\n name='packer')\n\n def call(self, inputs: Sequence[tf.Tensor], concat: Optional[bool] = False):\n segments = [self.tokenizer(input) for input in inputs]\n truncated_segments = [\n segment[:, :self._max_seq_length] for segment in segments\n ]\n if concat:\n return self.packer(truncated_segments)\n return [self.packer([segment]) for segment in truncated_segments]\n\n @property\n def vocab_size(self) -> int:\n return self._special_tokens_dict['vocab_size'].numpy().item()\n\n\ndef _get_flatten_non_padding_value(\n tensors: Sequence[tf.Tensor],\n mask_gen_tensor: tf.Tensor) -> Sequence[tf.Tensor]:\n \"\"\"Returns the flatten tensors with the padding filtered.\"\"\"\n mask_gen_tensor = tf.reshape(mask_gen_tensor, [-1])\n padding_mask = mask_gen_tensor != PADDING_VALUE\n outputs = []\n for tensor in tensors:\n tensor = tf.reshape(tensor, [-1])\n outputs.append(tf.boolean_mask(tensor, padding_mask))\n return outputs\n\n\ndef adjusted_mutual_info(y_true: tf.Tensor, y_pred: tf.Tensor) -> float:\n \"\"\"Computes adjusted mutual information of non-padded prediction and label.\"\"\"\n # pylint: disable=unbalanced-tuple-unpacking\n y_pred, y_true = _get_flatten_non_padding_value([y_pred, y_true],\n mask_gen_tensor=y_true)\n return sklearn.metrics.adjusted_mutual_info_score(y_true, y_pred)\n\n\ndef cluster_purity(y_true: tf.Tensor, y_pred: tf.Tensor) -> float:\n \"\"\"Computes cluster purity of non-padded prediction and label.\"\"\"\n # pylint: disable=unbalanced-tuple-unpacking\n y_pred, y_true = _get_flatten_non_padding_value([y_pred, y_true],\n mask_gen_tensor=y_true)\n contingency_matrix = sklearn.metrics.cluster.contingency_matrix(\n y_true, y_pred)\n return np.sum(np.amax(contingency_matrix,\n axis=0)) / np.sum(contingency_matrix)\n"
] |
[
[
"tensorflow.random.set_seed",
"tensorflow.io.gfile.makedirs",
"numpy.ceil",
"numpy.max",
"numpy.argmax",
"numpy.array",
"tensorflow.keras.metrics.Mean"
],
[
"tensorflow.random.set_seed",
"tensorflow.io.gfile.makedirs",
"numpy.ceil",
"numpy.max",
"numpy.argmax",
"numpy.array",
"tensorflow.keras.metrics.Mean"
],
[
"numpy.amax",
"tensorflow.sign",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.rank",
"tensorflow.math.argmax",
"tensorflow.boolean_mask",
"tensorflow.stop_gradient",
"tensorflow.gather",
"tensorflow.gather_nd",
"tensorflow.shape",
"tensorflow.keras.layers.Dense",
"tensorflow.random.uniform",
"tensorflow.zeros_like",
"tensorflow.one_hot",
"numpy.sum",
"tensorflow.keras.initializers.Constant",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.math.log",
"tensorflow.reduce_min",
"tensorflow.keras.layers.Dropout"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
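The `KlLoss` class quoted in the row above applies Batch Prior Regularization (BPR): instead of one KL term per example, the posterior and prior distributions are averaged over the batch and a single KL term is taken, rescaled by the batch size. A minimal NumPy sketch of that idea (the `softmax`/`kl` helpers and the array shapes are illustrative assumptions, not code from this row):

```python
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def kl(q, p, eps=1e-12):
    # KL(q || p) for two categorical distributions given as probability vectors.
    return np.sum(q * (np.log(q + eps) - np.log(p + eps)))

rng = np.random.default_rng(0)
batch, latent = 4, 8
p = softmax(rng.normal(size=(batch, latent)))  # prior, one distribution per example
q = softmax(rng.normal(size=(batch, latent)))  # posterior, one distribution per example

# Plain KL: one term per example, summed over the batch.
plain_kl = sum(kl(q[i], p[i]) for i in range(batch))

# BPR: average the distributions over the batch, then a single KL term,
# rescaled by the batch size (mirroring the `* batch_size` factor in the class above).
bpr_kl = kl(q.mean(axis=0), p.mean(axis=0)) * batch

print(plain_kl, bpr_kl)
```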
YueYANG1996/merlot_reserve
|
[
"76bc38d4b97e447200b4d6b097c6b68d5235c1e6"
] |
[
"finetune/tvqa/submit_to_leaderboard.py"
] |
[
"\"\"\"\nSubmit an existing TVQA model on the leaderboard\n\nipython -i submit_to_leaderboard.py -- ../../pretrain/configs/base.yaml ${ckpt}\nipython -i submit_to_leaderboard.py -- ../../pretrain/configs/large.yaml ${ckpt}\n\"\"\"\n\nimport sys\n\nsys.path.append('../../')\nimport yaml\nfrom datetime import datetime\nimport pytz\nimport jax\nimport jax.numpy as jnp\nfrom pretrain.dataloader import input_fn_builder, MASK, encoder, AUDIOSPAN\nfrom finetune.common_dataloader import finetune_input_fn_builder, finetune_val_input_fn_builder\nfrom mreserve.modeling import MerlotReserve\n\nfrom flax.training import train_state\nfrom flax import jax_utils\nimport flax.linen as nn\nfrom finetune.optimization import construct_finetuning_train_state, finetune_train_step\nfrom mreserve.checkpoint import save_checkpoint, load_checkpoint, bf16_to_f32, f32_to_bf16\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom flax.core.frozen_dict import freeze\nfrom copy import deepcopy\nimport clu.parameter_overview\nimport functools\nimport time\nimport os\nimport optax\nfrom tqdm import tqdm\nimport json\n\njax.config.update('jax_log_compiles', True)\nis_on_gpu = any([x.platform == 'gpu' for x in jax.local_devices()])\nprint('JAX process: {} / {}. Local devices {}. Using {}'.format(\n jax.process_index(), jax.process_count(), jax.local_devices(), 'GPU' if is_on_gpu else 'TPU'), flush=True)\n\nparser = argparse.ArgumentParser(description='Train model!')\n\n# '../../pretrain/configs/ytt180m_base_v4_bsize=1024.yaml'\nparser.add_argument(\n 'pretrain_config_file',\n help='Where the config.yaml is located',\n type=str,\n)\nparser.add_argument(\n 'ckpt',\n help='checkpoint to use',\n type=str,\n)\nargs = parser.parse_args()\n\n# print(f\"Loading from {args.config_file}\", flush=True)\nwith open(args.pretrain_config_file, 'r') as f:\n config = yaml.load(f, yaml.FullLoader)\n\nconfig['data']['val_fns'] = \"${path_to_tvqa}/test{:03d}of008.tfrecord\"\nconfig['data']['num_val_files'] = 8\nconfig['data']['num_answers'] = 5\nconfig['data']['do_random_scale'] = False\n\nconfig['data']['num_segments'] = 7\n\nconfig['device']['batch_size'] = 8\nconfig['device']['prefetch_size'] = 0\nconfig['device']['n_fns_per_cycle'] = 256\n\nTRAIN_SIZE = 122112\n\nconfig['data']['lang_seq_len'] = 256\ncfg_name = args.pretrain_config_file.split('/')[-1]\nseattle_time = pytz.utc.localize(datetime.utcnow()).astimezone(pytz.timezone('America/Los_Angeles'))\nseattle_time = seattle_time.strftime(\"%Y-%m-%d-%H:%M.%S\")\n\nnp.random.seed(123456)\nconfig['model']['output_grid'] = [18, 32]\n\n\nconfig['_ckpt'] = args.ckpt\n\nclass MerlotReserveTVQA(MerlotReserve):\n def setup(self):\n super().setup()\n self.proj = nn.Dense(features=1, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=0.02), name='proj',\n use_bias=False)\n\n def __call__(self, batch):\n\n # Encode images (twice)\n batch_size, images_per_batch, seq_size, img_dim = batch['images'].shape\n imgs_enc = self.vision_encoder(batch['images'].reshape(batch_size * images_per_batch, seq_size, img_dim))['seq_attnpool']\n imgs_enc = imgs_enc.reshape(batch_size, images_per_batch, seq_size // 4, self.hidden_size)\n\n # Add the \"first image\"\n imgs_enc = jnp.concatenate([\n jnp.zeros([batch_size, 1, seq_size // 4, self.hidden_size], dtype=imgs_enc.dtype),\n imgs_enc,\n ], 1)\n\n # duplicate so that we have one per answer\n images_per_batch += 1\n batch_size, num_ans_per, joint_seq_len, two_ = batch['textonly_seqs'].shape\n imgs_enc = imgs_enc.reshape(batch_size, 
images_per_batch * seq_size // 4, self.hidden_size).repeat(num_ans_per, axis=0)\n\n #########################\n text_toks = batch['textonly_seqs'][..., 0].reshape(batch_size * num_ans_per, joint_seq_len)\n textonly_inputs = self.prepare_multimodal_inputs(\n tokens=text_toks,\n token_segment_idx=batch['textonly_seqs'][..., 1].reshape(batch_size * num_ans_per, joint_seq_len),\n vision_input=imgs_enc,\n )\n\n # Encode audio\n # Audio clips are provided as [batch_size, num_segments, num_audio_subsegments, audio_seq_len, num_mels]\n batch_size, num_segments, num_audio_subsegments, audio_seq_len, num_mels = batch['audio_clips'].shape\n audio_enc = self.audio_encoder(batch['audio_clips'].reshape(-1, audio_seq_len, num_mels))['seq_attnpool']\n\n _, audio_token_len, hidden_size = audio_enc.shape\n num_audio_spans = num_segments * num_audio_subsegments\n\n audio_enc = audio_enc.reshape(batch_size, num_audio_spans, audio_token_len, hidden_size)\n audio_enc = audio_enc.repeat(num_ans_per, axis=0)\n\n audio_toks = batch['audio_seqs'][..., 0].reshape(batch_size * num_ans_per, joint_seq_len)\n audio_pointers = (jnp.cumsum((audio_toks == AUDIOSPAN).astype(jnp.int32), -1) - 1) // audio_token_len\n audio_pointers = audio_pointers % num_audio_spans\n\n audio_inputs = self.prepare_multimodal_inputs(\n tokens=batch['audio_seqs'][..., 0].reshape(batch_size * num_ans_per, joint_seq_len),\n token_segment_idx=batch['audio_seqs'][..., 1].reshape(batch_size * num_ans_per, joint_seq_len),\n vision_input=imgs_enc,\n audio_spans=audio_enc,\n audio_pointers=audio_pointers,\n )\n # hack: remove 'first img' from sequence lengths\n start_imgs = joint_seq_len + seq_size // 4\n for k in ['x', 'rotary_coords', 'attention_mask']:\n textonly_inputs[k] = jnp.concatenate([textonly_inputs[k][:, :joint_seq_len],\n textonly_inputs[k][:, start_imgs:]], 1)\n\n audio_inputs[k] = jnp.concatenate([audio_inputs[k][:, :joint_seq_len],\n audio_inputs[k][:, start_imgs:]], 1)\n\n textonly_inputs['attention_mask'] = jnp.concatenate([textonly_inputs['attention_mask'][:, :, :joint_seq_len],\n textonly_inputs['attention_mask'][:, :, start_imgs:]], 2)\n\n audio_inputs['attention_mask'] = jnp.concatenate([audio_inputs['attention_mask'][:, :, :joint_seq_len],\n audio_inputs['attention_mask'][:, :, start_imgs:]], 2)\n #############################################################################################################\n x = jnp.concatenate([textonly_inputs['x'], audio_inputs['x']], 0)\n coords = jnp.concatenate([textonly_inputs['rotary_coords'], audio_inputs['rotary_coords']], 0)\n attnmask = jnp.concatenate([textonly_inputs['attention_mask'], audio_inputs['attention_mask']], 0)\n\n joint_enc = self.joint_transformer(x, rotary_coords=coords, attention_mask=attnmask)['seq']\n joint_enc = joint_enc[:, :joint_seq_len].reshape(batch_size * 2 * num_ans_per, joint_seq_len, self.hidden_size)\n\n # Pool from the right tokens\n pool_idx = jnp.argmax((jnp.concatenate([text_toks, audio_toks], 0) == MASK).astype(jnp.float32), 1)\n pooled_h = joint_enc[jnp.arange(batch_size * 2 * num_ans_per), pool_idx]\n joint_enc = jnp.squeeze(self.proj(pooled_h), -1)\n\n logits_from_audio, logits_from_text = jnp.split(joint_enc, 2, axis=0)\n logits_from_audio = logits_from_audio.reshape(batch_size, num_ans_per)\n logits_from_text = logits_from_text.reshape(batch_size, num_ans_per)\n\n return logits_from_audio, logits_from_text\n\n\nmodel = MerlotReserveTVQA.from_config(config)\n\nparams = freeze(load_checkpoint(args.ckpt))['params']\nparams = f32_to_bf16(params)\nstate 
= train_state.TrainState.create(apply_fn=model.apply, params=params, tx=optax.identity())\nstate = jax_utils.replicate(state)\n\ndef pred_step(state: train_state.TrainState, batch):\n logits_from_audio, logits_from_text = state.apply_fn({'params': state.params}, batch)\n\n out = {'logprobs_audio': jax.nn.log_softmax(logits_from_audio, axis=-1),\n 'preds_audio': jnp.argmax(logits_from_audio, -1),\n 'logprobs_text': jax.nn.log_softmax(logits_from_text, axis=-1),\n 'preds_text': jnp.argmax(logits_from_text, -1),\n }\n softmax_joint = jax.nn.softmax(logits_from_audio, axis=-1) + jax.nn.softmax(logits_from_text, axis=-1)\n out['preds_joint'] = jnp.argmax(softmax_joint, -1)\n return out\np_pred_step = jax.pmap(pred_step, axis_name='batch', donate_argnums=(1,))\n\nout = {}\nfor split in ['val', 'test']:\n config['data']['val_fns'] = f\"path_to_tvqa/{split}\" + '{:03d}of008.tfrecord'\n val_iter = finetune_val_input_fn_builder(config, 'tvqa')\n\n for ids, batch in tqdm(val_iter):\n val_pred = p_pred_step(state, batch)\n preds_joint = val_pred['preds_joint'].reshape(-1).tolist()\n preds_audio = val_pred['preds_audio'].reshape(-1).tolist()\n preds_text = val_pred['preds_text'].reshape(-1).tolist()\n for (id_i, p_j, p_a, p_t) in zip(ids, preds_joint, preds_audio, preds_text):\n if id_i == 'pad':\n continue\n id_i = id_i.split('~')[0]\n out[(split, id_i)] = (p_t, p_a, p_j)\n\nfor sub_i, submission in enumerate(['text', 'audio', 'joint']):\n os.makedirs(submission, exist_ok=True)\n\n # Make prediction_val.json\n pred_dict = {id_idx: v[sub_i] for (split, id_idx), v in out.items() if split == 'val'}\n with open(os.path.join(submission, 'prediction_val.json'), 'w') as f:\n json.dump(pred_dict, f)\n\n # Make prediction_val.json\n pred_dict = {id_idx: v[sub_i] for (split, id_idx), v in out.items() if split == 'test'}\n with open(os.path.join(submission, 'prediction_test_public.json'), 'w') as f:\n json.dump(pred_dict, f)\n\n model_size = 'Base' if 'base' in args.pretrain_config_file.lower() else 'Large'\n model_suffix = {'text': '(subtitles)', 'audio': '(audio)', 'joint': '(subtitles and audio)'}[submission]\n meta = {'model_name': f'MerlotReserve-{model_size} {model_suffix}',\n 'is_ensemble': False,\n 'with_ts': True,\n 'show_on_leaderboard': True,\n 'author': 'Anonymous',\n 'institution': 'Anonymous',\n 'description': 'A {}-sized model, given {} at test time'.format(model_size, model_suffix.strip('()')),\n 'paper_link': '', 'code_link': ''}\n with open(os.path.join(submission, 'meta.json'), 'w') as f:\n json.dump(meta, f)\n\n os.system(f'cd {submission} && zip ../{submission}.zip prediction_val.json prediction_test_public.json meta.json')"
] |
[
[
"numpy.random.seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gao-lab/Cell_BLAST
|
[
"45b14bbd3385b8a7be0b48ef5ab42bc946f3558f"
] |
[
"Notebooks/Case/HSC/prep_spring.py"
] |
[
"#!/usr/bin/env python\n# Run with SPRING environment\n\nimport sys\nimport argparse\nimport numpy as np\nimport scipy.sparse\n\nsys.path.append(\"./SPRING\")\nimport preprocessing_python\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-e\", dest=\"expr\", type=str, required=True)\nparser.add_argument(\"-d\", dest=\"dist\", type=str, required=True)\nparser.add_argument(\"-g\", dest=\"gene\", type=str, required=True)\nparser.add_argument(\"-k\", dest=\"k\", type=int, required=True)\nparser.add_argument(\"-o\", dest=\"output\", type=str, required=True)\ncmd_args = parser.parse_args()\n\nexpr = np.load(cmd_args.expr, allow_pickle=True)\ndist = np.load(cmd_args.dist, allow_pickle=True)\ngene = np.load(cmd_args.gene, allow_pickle=True)\n\npreprocessing_python.save_spring_dir(expr, dist, cmd_args.k, gene.tolist(), cmd_args.output)\n"
] |
[
[
"numpy.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
didrif/megatrond
|
[
"b6e680f1c78f20132c14b09abbf676e5354c7e14"
] |
[
"megatrond_perception/scripts/Aruco2navgoal.py"
] |
[
"#! /usr/bin/python\n\nimport numpy as np\nimport rospy\nimport roslib\nfrom std_msgs.msg import String, Int32, Float32, Float64\nfrom fiducial_msgs.msg import FiducialTransform, FiducialTransformArray, FiducialArray\nfrom geometry_msgs.msg import Transform, Quaternion, Vector3\nfrom nav_msgs.msg import Odometry\nimport math as m\nimport actionlib\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nimport rosservice\n\ntransform = FiducialTransform()\nimg_seq = Int32()\n\nname = String()\n\ntx = 0\nty = 0\ntz = 0\n\nrz = 0\n\nx0 = 0\ny0 = 0\nx1 = 0\ny1 = 0\nx2 = 0\ny2 = 0\nx3 = 0\ny3 = 0\n\npose_x = 0\npose_y = 0\npose_z = 0\n\ngoal_x = 0\ngoal_y = 0\nglobal yaw\nyaw = 0\n\n\nmarkerLength = 0.2\n\nf = FiducialTransform()\n \ndef fiducial_callback(msg):\n\n global img_seq, transform, tx, ty, tz, rz, name\n header = msg.header\n img_seq = msg.image_seq\n transform = msg.transforms\n name = header.frame_id\n\n for f in transform:\n tx = f.transform.translation.x\n ty = f.transform.translation.y\n tz = f.transform.translation.z\n rz = f.transform.rotation.z\n\ndef vertecies_callback(msg):\n\n global x0, y0, x1, y1, x2, y2, x3, y3\n \n fiducials = msg.fiducials\n for n in fiducials:\n \n x0 = n.x0\n y0 = n.y0\n x1 = n.x1\n y1 = n.y1\n x2 = n.x2\n y2 = n.y2\n x3 = n.x3\n y3 = n.y3\n\ndef odom_callback(msg):\n global pose_x, pose_y, pose_z, yaw\n\n pose_x = msg.pose.pose.position.x\n pose_y = msg.pose.pose.position.y\n pose_z = msg.pose.pose.position.z\n\n yaw = msg.pose.pose.orientation.z\n\ndef movebase_client(target_x, target_y, target_r_z, target_r_w):\n\n client = actionlib.SimpleActionClient('move_base',MoveBaseAction)\n \n client.wait_for_server()\n\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = \"map\"\n goal.target_pose.header.stamp = rospy.Time.now()\n \n goal.target_pose.pose.position.x = target_x\n goal.target_pose.pose.position.y = target_y\n \n #goal.target_pose.pose.orientation.x = 0.0\n #goal.target_pose.pose.orientation.y = 0.0\n goal.target_pose.pose.orientation.z = target_r_z\n goal.target_pose.pose.orientation.w = target_r_w\n\n client.send_goal(goal)\n\n\n# wait = client.wait_for_result()\n# if not wait:\n# rospy.logerr(\"Action server not available!\")\n# rospy.signal_shutdown(\"Action server not available!\")\n# else:\n# rospy.loginfo(\"Goal finished, send new one\")\n# return \n# rospy.loginfo(\"Goal Sent with (1,0)\")\n \n \n\ndef main():\n #rospy.sleep(.25)\n \n # Initialize node\n rospy.init_node(\"aruco_2_navgoal\", anonymous=True)\n rospy.loginfo(\"node Initialize\")\n d = rospy.Duration(0,25)\n rospy.sleep(d)\n # Subsribers\n aruco_t_sub = rospy.Subscriber(\"fiducial_transforms\", FiducialTransformArray, fiducial_callback)\n aruco_vetecies_sub = rospy.Subscriber(\"fiducial_vertices\", FiducialArray, vertecies_callback)\n odom_pose_sub = rospy.Subscriber(\"odom\", Odometry, odom_callback)\n \n #rospy.sleep(1)\n\n rate = rospy.Rate(10)\n \n rospy.loginfo(\">> calculating navigation goal\")\n \n if transform != [] and x0 != 0.0 and tx != 0.0:\n if name == \"frnt_cam_opt\":\n #beta = yaw\n iden = 1\n #print(1)\n elif name == \"rear_cam_opt\":\n #beta = yaw + m.pi\n iden = 2\n #print(2)\n elif name == \"left_cam_opt\":\n #beta = yaw + m.pi/2\n iden = 3\n #print(3)\n elif name == \"right_cam_opt\":\n #beta = yaw - m.pi/2\n iden = 4\n #print(4)\n else:\n print(\"No\")\n \n else:\n return\n print(\"This is x0: \" + str(x0))\n rospy.loginfo(\">> Sending Navigation goal\")\n \n if x0 != 0.0 and tx != 0.0:\n #print(tx, ty, tz)\n t = np.array([tx, ty, 
tz])\n aruco_x = np.linalg.norm(t)-1\n a_cx = ((x0 + x1 + x2 + x3)/4)\n \n goal_r_z = m.sin(yaw/2)\n goal_r_w = m.cos(yaw/2)\n ratio = markerLength/(((x1 - x0)+(x2 - x3))/2)\n\n aruco_y = (256 - a_cx) * ratio\n\n angle = m.atan(aruco_y/aruco_x)\n\n alpha = yaw + angle\n if iden == 1:\n if alpha >= 0 and alpha < m.pi/2:\n goal_x = pose_x - aruco_x*m.cos((alpha)) \n goal_y = pose_y - aruco_y*m.sin((alpha))\n elif alpha > m.pi/2:\n goal_x = pose_x + aruco_y*m.sin((alpha))\n goal_y = pose_y - aruco_x*m.cos((alpha)) \n elif alpha < 0 and alpha > -m.pi/2:\n goal_x = pose_x - aruco_x*m.cos((alpha)) \n goal_y = pose_y + aruco_y*m.sin((alpha))\n elif alpha < -m.pi/2 :\n goal_x = pose_x + aruco_x*m.cos((alpha)) \n goal_y = pose_y + aruco_y*m.sin((alpha))\n else:\n goal_x = 0\n goal_y = 0\n elif iden == 2:\n if alpha >= 0 and alpha < m.pi/2:\n goal_x = pose_x + aruco_x*m.cos((alpha)) \n goal_y = pose_y + aruco_y*m.sin((alpha))\n elif alpha > m.pi/2:\n goal_x = pose_x - aruco_y*m.sin((alpha))\n goal_y = pose_y + aruco_x*m.cos((alpha)) \n elif alpha < 0 and alpha > -m.pi/2:\n goal_x = pose_x + aruco_x*m.cos((alpha)) \n goal_y = pose_y - aruco_y*m.sin((alpha))\n elif alpha < -m.pi/2 :\n goal_x = pose_x - aruco_x*m.cos((alpha)) \n goal_y = pose_y - aruco_y*m.sin((alpha))\n else:\n goal_x = 0\n goal_y = 0\n elif iden == 3:\n if alpha >= 0 and alpha < m.pi/2:\n goal_y = pose_y - aruco_x*m.cos((alpha)) \n goal_x = pose_x + aruco_y*m.sin((alpha))\n elif alpha > m.pi/2:\n goal_y = pose_y + aruco_y*m.sin((alpha))\n goal_x = pose_x + aruco_x*m.cos((alpha)) \n elif alpha < 0 and alpha > -m.pi/2:\n goal_y = pose_y - aruco_x*m.cos((alpha)) \n goal_x = pose_x - aruco_y*m.sin((alpha))\n elif alpha < -m.pi/2 :\n goal_y = pose_y + aruco_x*m.cos((alpha)) \n goal_x = pose_x - aruco_y*m.sin((alpha))\n else:\n goal_x = 0\n goal_y = 0\n elif iden == 4:\n if alpha >= 0 and alpha < m.pi/2:\n goal_y = pose_y - aruco_x*m.cos((alpha)) \n goal_x = pose_x - aruco_y*m.sin((alpha))\n elif alpha > m.pi/2:\n goal_y = pose_y - aruco_y*m.sin((alpha))\n goal_x = pose_x + aruco_x*m.cos((alpha)) \n elif alpha < 0 and alpha > -m.pi/2:\n goal_y = pose_y + aruco_x*m.cos((alpha)) \n goal_x = pose_x - aruco_y*m.sin((alpha))\n elif alpha < -m.pi/2 :\n goal_y = pose_y + aruco_x*m.cos((alpha)) \n goal_x = pose_x + aruco_y*m.sin((alpha))\n else:\n goal_x = 0\n goal_y = 0\n else:\n goal_x = 0\n goal_y = 0\n\n rospy.loginfo(goal_x)\n rospy.loginfo(goal_y)\n print(\"This is yaw goal: \" + str(goal_r_z))\n movebase_client(goal_x, goal_y, 0, 1)\n\n \n else: \n movebase_client(pose_x, pose_y, 0, 1)\n print(\"No aruco detected\")\n\n\n\n\nif __name__ == \"__main__\":\n\n while True:\n main()\n \n"
] |
[
[
"numpy.array",
"numpy.linalg.norm"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lucasb-eyer/DeepFried
|
[
"b1a8c8c9c39748a2c5505025b54bb279c9208b2c"
] |
[
"optim.py"
] |
[
"#!/usr/bin/env python3\n\nimport DeepFried.util as _u\n\nimport numpy as _np\nimport theano as _th\nimport theano.tensor as _T\n\n\nclass StreaMiniOptimizer(object):\n \"\"\"\n This is an optimizer that works through minibatches of the dataset, each\n minibatch being uploaded onto the GPU each time.\n\n This is slower than moving the whole dataset on the GPU once and addressing\n each slices of it, but it allows for larger datasets to fit on the GPU as\n well as \"infinite\" data augmentation.\n \"\"\"\n\n\n def __init__(self, batchsize, model, cost, extra_outs=None, Xnames=[], tnames=[]):\n \"\"\"\n Initializes the things that are common amongst all streaming minibatch\n optimizers.\n\n - `batchsize`: The number of samples in a minibatch.\n - `model`: The model. This should be an object with at least:\n - `make_inputs(basename='X')`: a function which returns a list of\n as many symbolic variables of the correct dimensions as the\n model takes as inputs. That's usually just one.\n - `train_exprs(*Xs)`: a function which returns the symbolic\n output(s) of the model, during training, given symbolic model\n input(s) `X`.\n - `params`: an iterable containing all trainable parameters.\n - `cost`: The cost. This should be an object with at least:\n - `make_target(name='')`: a function which returns a symbolic\n variable of the correct dimensions for serving as target.\n - `out_expr(Y, t)`: a function which returns the symbolic cost\n of the output `Y` wrt. the targets `t`.\n - `aggregate_batches(costs)`: a function which returns the\n aggregation of the `costs` of each minibatch.\n - `extra_outs`: A single or a list of extra outputs to compute along\n the way. Each such extra should be an object with both `out_expr`\n and `aggregate_batches` just like described for `cost` above.\n - `Xnames`: Optional list of names to use for input variables. Note\n that this must be exactly as many names as the model has inputs,\n then these names may be used as keyword arguments to `fit_epoch`.\n - `tnames`: The same as `Xnames`, but for target variables.\n \"\"\"\n self.model = model\n self.cost = cost\n self.batchsize = batchsize\n\n self.Xs = _u.tuplize(self.model.make_inputs(*Xnames))\n self.targets = _u.tuplize(self.cost.make_target(*tnames))\n self.xtras = _u.tuplize(extra_outs, tuplize_none=True)\n\n # These two will collect any additional updates that layers may have,\n # for example batch-normalization's statistics collection.\n self.fwd_updates = []\n self.fin_updates = []\n\n train_expr = _u.tuplize(self.model.train_expr(*self.Xs, fwd_updates=self.fwd_updates, fin_updates=self.fin_updates))\n self.cost_expr = self.cost.out_expr(self.model, train_expr, self.targets)\n self.outs = (self.cost_expr,) + tuple(\n x.out_expr(self.model, train_expr, self.targets) for x in self.xtras\n )\n\n\n def _mk_train_fn(self, name, updates, extra_in=None, extra_out=None):\n \"\"\" To be used by specializations only. 
\"\"\"\n self.fn_train = _th.function(\n inputs=self.Xs + self.targets + _u.tuplize(extra_in, tuplize_none=True),\n outputs=self.outs + _u.tuplize(extra_out, tuplize_none=True),\n updates=updates + self.fwd_updates,\n name=name\n )\n\n if len(self.fin_updates):\n # Because targets might or might not be used by the layers in the\n # extra update rules, we'll just allow for unused inputs.\n self.fn_finalize = _th.function(\n inputs=self.Xs + self.targets,\n updates=self.fin_updates,\n name=name + \" finalize\",\n on_unused_input='ignore'\n )\n\n\n def reinit(self):\n \"\"\"\n This will reinitialize any state (such as momentum) that may be kept by\n this optimizer.\n \"\"\"\n pass\n\n\n def fit_epoch(self, X, t, aug=None, batchsize=None, shuf=False, **kwargs):\n \"\"\"\n Trains the model for one full epoch by iterating through minibatches.\n\n - `X`: A numpy array or a list of numpy arrays containing the model input(s).\n The first dimension of an input should be the datapoints,\n i.e. X.shape[0] == ndata,\n and any remaining dimensions should fit the model's expected input shape(s).\n - `t`: The target values where the first dimension should be the\n datapoints, just like for `X`.\n - `aug`: An optional data augmentation pipeline that can transform each\n sample in the minibatch individually.\n - `batchsize`: Optionally override the batchsize given at construction.\n - `shuf`: If not False, go through `X` and `t` in lockstep-random order.\n Use `shuf` as rng or seed for the shuffling.\n\n Any remaining arguments will be passed on to the optimization function;\n this can be used to pass values such as learning-rate, momentum etc.\n \"\"\"\n self.model.pre_epoch()\n\n costs = []\n xtras = []\n\n # Sanitize inputs for more flexibility.\n Xs = _u.tuplize(X)\n ts = _u.tuplize(t)\n bs = batchsize or self.batchsize\n N = Xs[0].shape[0]\n\n assert all(X.shape[0] == N for X in Xs), \"All inputs to fit_epoch should contain the same amount of datapoints.\"\n assert all(t.shape[0] == N for t in ts), \"All targets to fit_epoch should contain the same amount of datapoints.\"\n\n # Keyword arguments for `batched`, for conciseness.\n if shuf is False:\n bxkw = btkw = {}\n else:\n common_seed = _u.check_random_state(shuf).randint(2**31)\n bxkw = dict(shuf=_np.random.RandomState(common_seed))\n btkw = dict(shuf=_np.random.RandomState(common_seed))\n\n # Go through the training in minibatches. Note that the last batch\n # may be smaller than the batchsize.\n for bxs, bts in zip(_u.batched(bs, *Xs, **bxkw), _u.batched(bs, *ts, **btkw)):\n # Possibly need to re-tuplize them because `batched` tries to be\n # smart and not return a tuple if batching a single array.\n bxs = _u.tuplize(bxs)\n bts = _u.tuplize(bts)\n\n # Potentially generate a new augmentation on-the-fly.\n if aug is not None:\n assert len(bxs) == 1, \"Augmentation with multiple inputs not implemented yet. 
Please open an issue describing the use-case!\"\n bx, bts = aug.augbatch_train(bxs[0], *bts)\n bxs = (bx,)\n\n self.model.pre_minibatch()\n\n # Uploads to the GPU, does the forward pass,\n # the backward pass *and* the weight updates!\n cost, *xtra = self.fn_train(*bxs+bts, **kwargs)\n\n # Collect stats over the batches, so we can aggregate.\n costs.append(cost)\n xtras.append(xtra)\n\n self.model.post_minibatch()\n\n self.model.post_epoch()\n\n # Average the stats over the batches.\n return _u.maybetuple((self.cost.aggregate_batches(costs),)\n + tuple(x.aggregate_batches(b) for x, b in zip(self.xtras, zip(*xtras))))\n # The above zip transposes from minibatches of extras to extras of minibatches.\n\n\n def finalize(self, X, t, batchsize=None, aug=None, fast=False, **kwargs):\n \"\"\"\n A forward-pass through the training data, but using only the\n `fin_updates` of layers such as batch-normalization.\n\n The call is just like that of `fit_epoch`, but a few parameters as well\n as most comments have been omitted.\n \"\"\"\n # Early-exit if unnecessary.\n if len(self.fin_updates) == 0:\n return\n\n bs = batchsize or self.batchsize\n\n # Ignore that one.\n kwargs.pop('shuf', None)\n\n self.model.pre_finalize()\n for bxs, bts in zip(_u.batched(bs, *_u.tuplize(X)), _u.batched(bs, *_u.tuplize(t))):\n if aug is not None:\n for bxs_aug in aug.augbatch_pred(*_u.tuplize(bxs), fast=fast):\n self.model.finalize_pre_minibatch()\n self.fn_finalize(*_u.tuplize(bxs_aug)+_u.tuplize(bts), **kwargs)\n self.model.finalize_post_minibatch()\n else:\n self.model.finalize_pre_minibatch()\n self.fn_finalize(*_u.tuplize(bxs)+_u.tuplize(bts), **kwargs)\n self.model.finalize_post_minibatch()\n self.model.post_finalize()\n\n\nclass StreaMiniSGD(StreaMiniOptimizer):\n \"\"\"\n Vanilla Stochastic Gradient Descent on minibatches. The training is quite\n simple:\n\n p_{e+1} = p_e - lr * ∇p_e\n\n Additional parameters added to `fit_epoch`:\n\n - `lrate`: The learning-rate.\n \"\"\"\n\n def __init__(self, batchsize, model, cost, *args, **kwargs):\n \"\"\"\n See `StreaMiniOptimizer` for details on the arguments.\n \"\"\"\n super(StreaMiniSGD, self).__init__(batchsize, model, cost, *args, **kwargs)\n\n self.sh_learningrate = _T.scalar('lrate')\n\n g = _T.grad(cost=self.cost_expr, wrt=self.model.params)\n\n self._mk_train_fn(\"StreaMiniSGD train\",\n [(p, p - self.sh_learningrate * gp) for p, gp in zip(self.model.params, g)],\n extra_in=self.sh_learningrate)\n\n\nclass StreaMiniMomentum(StreaMiniOptimizer):\n \"\"\"\n TL;DR: Nesterov allows for larger momentum to be used, making it better.\n Very finicky parameter-selection.\n\n Implements both the \"Classical Momentum (CM)\" and \"Nesterov's\n Accelerated Gradient (NAG)\" which are explained in further detail in\n\n \"On the importance of initialization and momentum in deep learning\"\n\n But the equation for NAG has been reshuffled by Nicolas Boulanger in\n\n https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617\n\n for easier implementation in Theano. 
The updates are:\n\n v_{e+1} = mom * v_e - lr * ∇p_e\n p_{e+1} = p_e + v_{e+1}\n\n for CM, and\n\n p_{e+1} = p_e + mom * v_{e+1} - lr * ∇p_e\n\n for Nicolas' reformulated NAG.\n\n Additional parameters added to `fit_epoch`:\n\n - `lrate`: The learning-rate.\n - `momentum`: The momentum, defaulting to the one passed at construction.\n \"\"\"\n\n def __init__(self, batchsize, model, cost, momentum, nesterov=False, *args, **kwargs):\n \"\"\"\n See `StreaMiniOptimizer` for details on the arguments.\n\n - `momentum`: The amount of momentum to use, typically something around\n 0.9, 0.95 or 0.99. This value sets the default, but it can also\n be overridden in each individual call to `fit_epoch`.\n - `nesterov`: If `True`, Nesterov's momentum (NAG) is used instead\n of classical momentum (CM).\n \"\"\"\n super(StreaMiniMomentum, self).__init__(batchsize, model, cost, *args, **kwargs)\n\n self.sh_learningrate = _T.scalar('lrate')\n self.sh_momentum = _T.scalar('momentum')\n\n # For momentum, we need a \"mirror\" of each parameter, which keeps track\n # of the \"velocity\" of that parameter during training.\n self.sh_v = [\n _th.shared(_np.zeros_like(p.get_value()), broadcastable=p.broadcastable, name='v_'+p.name)\n for p in model.params\n ]\n\n g = _T.grad(cost=self.cost_expr, wrt=self.model.params)\n\n updates = []\n for sh_p, gp, sh_v in zip(self.model.params, g, self.sh_v):\n v = self.sh_momentum * sh_v - self.sh_learningrate * gp\n updates.append((sh_v, v))\n\n if not nesterov:\n updates.append((sh_p, sh_p + v))\n else:\n updates.append((sh_p, sh_p + self.sh_momentum * v - self.sh_learningrate * gp))\n\n self._mk_train_fn(\"StreaMiniMomentum train\",\n updates,\n extra_in=(self.sh_learningrate, _th.Param(self.sh_momentum, momentum))\n )\n\n\n def reinit(self):\n for sh_v in self.sh_v:\n sh_v.set_value(_np.zeros_like(sh_v.get_value()))\n\n\nclass StreaMiniAdaGrad(StreaMiniOptimizer):\n \"\"\"\n Implements Duchi's \"Adaptive Subgradient\" method, aka AdaGrad.\n Chris Dyer's \"Notes on AdaGrad\" are pretty awesome for practical purposes.\n\n TL;DR: AdaGrad doesn't need additional parameters (a lie) and makes the\n optimization much less sensitive to the learning-rate!\n\n The updates are:\n\n g²_{e+1} = g²_e + ∇(p_e)²\n p_{e+1} = p_e - (lr / √g²_{e+1}) * ∇p_e\n\n that is, divide the learning-rate by a running square of the gradients.\n\n Note that this would lead to division by 0 in the beginning for those\n weights which don't receive a gradient (might be many with ReLUs), so we\n initialize g² with a small value.\n\n Additional parameters added to `fit_epoch`:\n\n - `lrate`: The learning-rate.\n \"\"\"\n\n def __init__(self, batchsize, model, cost, eps=1e-5, *args, **kwargs):\n \"\"\"\n See `StreaMiniOptimizer` for details on the arguments.\n\n - `eps`: A regularization-factor, should be smaller than the\n square of the weight gradients.\n \"\"\"\n super(StreaMiniAdaGrad, self).__init__(batchsize, model, cost, *args, **kwargs)\n\n self.sh_learningrate = _T.scalar('lrate')\n\n # Adagrad needs to accumulate the square gradient of each parameter.\n # I wonder if this won't explode at some point? Probably should fully\n # read the original paper!\n # Edit: RMSProp fixes exactly that.\n # Edit: Matt Zeiler seems to agree cf. 
AdaDelta.\n self.eps = eps\n self.sh_g2 = [\n _th.shared(_np.full_like(p.get_value(), eps), broadcastable=p.broadcastable, name='g2_'+p.name)\n for p in model.params\n ]\n\n g = _T.grad(cost=self.cost_expr, wrt=self.model.params)\n\n updates = []\n for sh_p, gp, sh_g2 in zip(self.model.params, g, self.sh_g2):\n g2 = sh_g2 + gp*gp\n updates.append((sh_g2, g2))\n updates.append((sh_p, sh_p - self.sh_learningrate/_T.sqrt(g2) * gp))\n # Instead of adding eps inside the square-root like most\n # implementations do, I just initialize `g2` to eps, that should\n # have the same effect, but cheaper.\n\n self._mk_train_fn(\"StreaMiniAdaGrad train\",\n updates,\n extra_in=self.sh_learningrate\n )\n\n\n def reinit(self):\n for sh_g2 in self.sh_g2:\n sh_g2.set_value(_np.full_like(sh_g2.get_value(), self.eps))\n\n\nclass StreaMiniRMSProp(StreaMiniOptimizer):\n \"\"\"\n Implements Hinton's \"RMSProp\" method presented in his Coursera lecture 6.5.\n Essentially, it sits right in-between AdaGrad and AdaDelta by being a\n windowed version of AdaGrad.\n\n The updates are:\n\n g²_{e+1} = ρ * g²_e + (1-ρ) * ∇p_e²\n p_{e+1} = p_e - (lr / √g²_{e+1}) * ∇p_e\n\n Note that in this case just initializing with epsilon is not enough anymore\n as we could get zero-gradient for some units long enough as to completely\n dominate the window.\n\n Additional parameters added to `fit_epoch`:\n\n - `lrate`: The learning-rate.\n - `rho`: The momentum for square-gradient accumulation, defaulting to the\n one passed at construction.\n \"\"\"\n\n def __init__(self, batchsize, model, cost, rho=0.95, eps=1e-5, *args, **kwargs):\n \"\"\"\n See `StreaMiniOptimizer` for details on the arguments.\n\n - `rho`: The \"momentum\" to use for averaging past gradients.\n - `eps`: A regularization-factor, should be smaller than the\n square of the weight gradients.\n \"\"\"\n super(StreaMiniRMSProp, self).__init__(batchsize, model, cost, *args, **kwargs)\n\n self.sh_learningrate = _T.scalar('lrate')\n self.sh_rho = _T.scalar('rho')\n\n # This too needs to accumulate the square gradient of each parameter.\n self.sh_g2 = [\n _th.shared(_np.zeros_like(p.get_value()), broadcastable=p.broadcastable, name='g2_'+p.name)\n for p in model.params\n ]\n\n g = _T.grad(cost=self.cost_expr, wrt=self.model.params)\n\n updates = []\n for sh_p, gp, sh_g2 in zip(self.model.params, g, self.sh_g2):\n g2 = self.sh_rho*sh_g2 + (1-self.sh_rho)*gp*gp\n updates.append((sh_g2, g2))\n updates.append((sh_p, sh_p - self.sh_learningrate/_T.sqrt(eps+g2) * gp))\n\n self._mk_train_fn(\"StreaMiniRMSProp train\",\n updates,\n extra_in=(self.sh_learningrate, _th.Param(self.sh_rho, rho))\n )\n\n\n def reinit(self):\n for sh_g2 in self.sh_g2:\n sh_g2.set_value(_np.zeros_like(sh_g2.get_value()))\n\n\nclass StreaMiniAdaDelta(StreaMiniOptimizer):\n \"\"\"\n Implements Matt Zeiler's \"Adaptive Learningrate\" method, aka. AdaDelta.\n The paper itself is really neat, and both very convincing and practical.\n\n TL;DR: 1. AdaGrad quickly anneals, AdaDelta doesn't. (No proof.)\n 2. AdaGrad *is* sensitive to learning-rate, AdaGrad not so much. (Table 1.)\n 3. AdaGrad includes 2nd-order approximation. (3.2)\n\n The updates are:\n\n g²_{e+1} = ρ * g²_e + (1-ρ) * ∇p_e²\n up_{e+1} = √(d²_e / g²_{e+1}) * ∇p_e\n d²_{e+1} = ρ * d²_e + (1-ρ) * up²\n p_{e+1} = p_e - up_{e+1}\n\n As in RMSProp, we need to add epsilons in order to create stability.\n\n It turns out that the effective learning-rate will converge to 1 as the\n gradients decrease (and thus learning grinds to a halt). 
This could be used\n to check for convergence by a specialized trainer.\n \"\"\"\n\n def __init__(self, batchsize, model, cost, rho=0.95, eps=1e-5, *args, **kwargs):\n \"\"\"\n See `StreaMiniOptimizer` for details on the arguments.\n\n - `rho`: The \"momentum decay\" of AdaDelta. The paper tests three values\n on MNIST: 0.9, 0.95 and 0.99, they don't change the score much.\n The paper also uses the same values for a speech task.\n - `eps`: A regularization term only used to avoid singularities. The\n paper tests four values on MNIST: 1e-2, 1e-4, 1e-6, 1e-8;\n all of them work pretty well.\n \"\"\"\n super(StreaMiniAdaDelta, self).__init__(batchsize, model, cost, *args, **kwargs)\n\n self.sh_rho = _T.scalar('rho')\n\n # Similarly to Adagrad, AdaDelta accumulates the square gradient of\n # each parameter, it just exponentially decays the old value,\n # effectively only summing over a recent window.\n self.sh_g2 = [\n _th.shared(_np.zeros_like(p.get_value()), broadcastable=p.broadcastable, name='g2_'+p.name)\n for p in model.params\n ]\n\n # Similarly to momentum, AdaDelta accumulates previous update values.\n # This also happens in a decaying fashion, so as to cover a window.\n self.sh_delta2 = [\n _th.shared(_np.zeros_like(p.get_value()), broadcastable=p.broadcastable, name='d2_'+p.name)\n for p in model.params\n ]\n\n g = _T.grad(cost=self.cost_expr, wrt=self.model.params)\n\n updates = []\n for sh_p, gp, sh_g2, sh_d2 in zip(self.model.params, g, self.sh_g2, self.sh_delta2):\n g2 = self.sh_rho*sh_g2 + (1-self.sh_rho)*gp*gp\n up = _T.sqrt((sh_d2+eps) / (g2+eps)) * gp\n d2 = self.sh_rho*sh_d2 + (1-self.sh_rho)*up*up\n updates.append((sh_g2, g2))\n updates.append((sh_p, sh_p - up))\n updates.append((sh_d2, d2))\n\n self._mk_train_fn(\"StreaMiniAdaDelta train\",\n updates,\n extra_in=_th.Param(self.sh_rho, rho)\n )\n\n\n def reinit(self):\n for sh_g2 in self.sh_g2:\n sh_g2.set_value(_np.zeros_like(sh_g2.get_value()))\n for sh_delta2 in self.sh_delta2:\n sh_delta2.set_value(_np.zeros_like(sh_delta2.get_value()))\n"
] |
[
[
"numpy.random.RandomState"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
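The `StreaMiniAdaDelta` docstring in the row above spells out the AdaDelta update: decayed accumulators of squared gradients and squared updates, and no explicit learning rate. A minimal NumPy sketch of those equations, using an assumed toy quadratic objective rather than the Theano machinery in the quoted file:

```python
import numpy as np

def adadelta_step(p, grad, g2, d2, rho=0.95, eps=1e-5):
    # g2: decayed accumulator of squared gradients; d2: of squared updates.
    g2 = rho * g2 + (1 - rho) * grad ** 2
    up = np.sqrt((d2 + eps) / (g2 + eps)) * grad  # adaptive step, no learning rate
    d2 = rho * d2 + (1 - rho) * up ** 2
    return p - up, g2, d2

# Toy objective 0.5 * ||p||^2, whose gradient is p itself.
p = np.array([5.0, -3.0])
g2 = np.zeros_like(p)
d2 = np.zeros_like(p)
for _ in range(500):
    p, g2, d2 = adadelta_step(p, grad=p, g2=g2, d2=d2)
print(p)  # should have moved toward the minimum at the origin
```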
ishine/DeepPhonemizer
|
[
"b8f170764c7648fe2acb552b787099ab4f941e58"
] |
[
"dp/training/trainer.py"
] |
[
"import math\nfrom collections import Counter\nfrom pathlib import Path\nfrom typing import List, Dict, Any, Tuple\n\nimport torch\nimport tqdm\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom dp.model.model import Model\nfrom dp.model.utils import _trim_util_stop\nfrom dp.preprocessing.text import Preprocessor\nfrom dp.training.dataset import new_dataloader\nfrom dp.training.decorators import ignore_exception\nfrom dp.training.losses import CrossEntropyLoss, CTCLoss\nfrom dp.training.evaluation import evaluate_samples\nfrom dp.utils.io import to_device, unpickle_binary\n\n\nclass Trainer:\n\n \"\"\" Performs model training. \"\"\"\n\n def __init__(self, checkpoint_dir: Path, loss_type='ctc') -> None:\n \"\"\"\n Initializes a Trainer object.\n\n Args:\n checkpoint_dir (Path): Directory to store the model checkpoints.\n loss_type (str): Type of loss: 'ctc' for forward transformer models\n and 'cross_entropy' for autoregressive models.\n \"\"\"\n\n self.checkpoint_dir = checkpoint_dir\n self.checkpoint_dir.mkdir(parents=True, exist_ok=True)\n self.writer = SummaryWriter(log_dir=str(self.checkpoint_dir / 'logs'))\n self.loss_type = loss_type\n if loss_type == 'ctc':\n self.criterion = CTCLoss()\n elif loss_type == 'cross_entropy':\n self.criterion = CrossEntropyLoss()\n else:\n raise ValueError(f'Loss not supported: {loss_type}')\n\n def train(self,\n model: Model,\n checkpoint: Dict[str, Any],\n store_phoneme_dict_in_model: bool = True) -> None:\n \"\"\"\n Performs training of a transformer model.\n\n Args:\n model (Model): Model to be trained (can be a fresh model or restored from a checkpoint).\n checkpoint (Dict[str, Any]): Dictionary with entries 'optimizer': optimizer state dict,\n 'preprocessor': Preprocessor and 'config': Dict.\n store_phoneme_dict_in_model (bool): Whether to store a dictionary of word-phoneme mappings\n in the model checkpoint so that it can be automatically\n loaded by a Phonemizer object.\n\n Returns:\n None: the checkpoints will be stored in a folder provided when instantiating a Trainer.\n \"\"\"\n\n config = checkpoint['config']\n data_dir = Path(config['paths']['data_dir'])\n\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n model = model.to(device)\n model.train()\n\n criterion = self.criterion.to(device)\n\n optimizer = Adam(model.parameters())\n if 'optimizer' in checkpoint:\n optimizer.load_state_dict(checkpoint['optimizer'])\n for g in optimizer.param_groups:\n g['lr'] = config['training']['learning_rate']\n\n train_loader = new_dataloader(dataset_file=data_dir / 'train_dataset.pkl',\n drop_last=True, batch_size=config['training']['batch_size'])\n val_loader = new_dataloader(dataset_file=data_dir / 'val_dataset.pkl',\n drop_last=False, batch_size=config['training']['batch_size_val'])\n if store_phoneme_dict_in_model:\n phoneme_dict = unpickle_binary(data_dir / 'phoneme_dict.pkl')\n checkpoint['phoneme_dict'] = phoneme_dict\n\n val_batches = sorted([b for b in val_loader], key=lambda x: -x['text_len'][0])\n\n scheduler = ReduceLROnPlateau(optimizer,\n factor=config['training']['scheduler_plateau_factor'],\n patience=config['training']['scheduler_plateau_patience'],\n mode='min')\n losses = []\n best_per = math.inf\n if 'step' not in checkpoint:\n checkpoint['step'] = 0\n start_epoch = checkpoint['step'] // len(train_loader)\n\n for epoch in range(start_epoch + 1, config['training']['epochs'] + 1):\n pbar = 
tqdm.tqdm(enumerate(train_loader, 1), total=len(train_loader))\n for i, batch in pbar:\n checkpoint['step'] += 1\n step = checkpoint['step']\n self._set_warmup_lr(optimizer=optimizer, step=step,\n config=config)\n batch = to_device(batch, device)\n avg_loss = sum(losses) / len(losses) if len(losses) > 0 else math.inf\n pbar.set_description(desc=f'Epoch: {epoch} | Step {step} '\n f'| Loss: {avg_loss:#.4}', refresh=True)\n pred = model(batch)\n loss = criterion(pred, batch)\n\n if not (torch.isnan(loss) or torch.isinf(loss)):\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n optimizer.step()\n losses.append(loss.item())\n\n self.writer.add_scalar('Loss/train', loss.item(), global_step=step)\n self.writer.add_scalar('Params/batch_size', config['training']['batch_size'],\n global_step=step)\n self.writer.add_scalar('Params/learning_rate', [g['lr'] for g in optimizer.param_groups][0],\n global_step=step)\n\n if step % config['training']['validate_steps'] == 0:\n val_loss = self._validate(model, val_batches)\n self.writer.add_scalar('Loss/val', val_loss, global_step=step)\n\n if step % config['training']['generate_steps'] == 0:\n lang_samples = self._generate_samples(model=model,\n preprocessor=checkpoint['preprocessor'],\n val_batches=val_batches)\n eval_result = evaluate_samples(lang_samples=lang_samples)\n self._write_summaries(lang_samples=lang_samples,\n eval_result=eval_result,\n n_generate_samples=config['training']['n_generate_samples'],\n step=step)\n if eval_result['mean_per'] is not None and eval_result['mean_per'] < best_per:\n self._save_model(model=model, optimizer=optimizer, checkpoint=checkpoint,\n path=self.checkpoint_dir / f'best_model.pt')\n self._save_model(model=model, optimizer=None, checkpoint=checkpoint,\n path=self.checkpoint_dir / f'best_model_no_optim.pt')\n scheduler.step(eval_result['mean_per'])\n\n if step % config['training']['checkpoint_steps'] == 0:\n step = step // 1000\n self._save_model(model=model, optimizer=optimizer, checkpoint=checkpoint,\n path=self.checkpoint_dir / f'model_step_{step}k.pt')\n\n losses = []\n self._save_model(model=model, optimizer=optimizer, checkpoint=checkpoint,\n path=self.checkpoint_dir / 'latest_model.pt')\n\n def _validate(self, model: Model, val_batches: List[dict]) -> float:\n device = next(model.parameters()).device\n criterion = self.criterion.to(device)\n model.eval()\n val_losses = []\n for batch in val_batches:\n batch = to_device(batch, device)\n with torch.no_grad():\n pred = model(batch)\n loss = criterion(pred, batch)\n if not (torch.isnan(loss) or torch.isinf(loss)):\n val_losses.append(loss.item())\n model.train()\n return sum(val_losses) / len(val_losses)\n\n @ignore_exception\n def _generate_samples(self,\n model: Model,\n preprocessor: Preprocessor,\n val_batches: List[dict]) -> Dict[str, List[Tuple[List[str], List[str], List[str]]]]:\n\n \"\"\" Returns a dictionary with entries lang: Tuple of (word, generated, target) \"\"\"\n\n device = next(model.parameters()).device\n model.eval()\n text_tokenizer = preprocessor.text_tokenizer\n phoneme_tokenizer = preprocessor.phoneme_tokenizer\n lang_tokenizer = preprocessor.lang_tokenizer\n lang_prediction_result = dict()\n\n for batch in val_batches:\n batch = to_device(batch, device)\n generated_batch, _ = model.generate(batch)\n for i in range(batch['text'].size(0)):\n text_len = batch['text_len'][i]\n text = batch['text'][i, :text_len]\n target = batch['phonemes'][i, :]\n lang = batch['language'][i]\n lang = 
lang_tokenizer.decode(lang.detach().cpu().item())\n generated = generated_batch[i, :].cpu()\n generated = _trim_util_stop(generated, phoneme_tokenizer.end_index)\n text, target = text.detach().cpu(), target.detach().cpu()\n text = text_tokenizer.decode(text, remove_special_tokens=True)\n generated = phoneme_tokenizer.decode(generated, remove_special_tokens=True)\n target = phoneme_tokenizer.decode(target, remove_special_tokens=True)\n lang_prediction_result[lang] = lang_prediction_result.get(lang, []) + [(text, generated, target)]\n\n model.train()\n\n return lang_prediction_result\n\n @ignore_exception\n def _write_summaries(self,\n lang_samples: Dict[str, List[Tuple[List[str], List[str], List[str]]]],\n eval_result: Dict[str, Any],\n n_generate_samples: int,\n step: int) -> None:\n\n self.writer.add_scalar(f'Phoneme_Error_Rate/mean',\n eval_result['mean_per'], global_step=step)\n self.writer.add_scalar(f'Word_Error_Rate/mean',\n eval_result['mean_wer'], global_step=step)\n\n for lang in lang_samples.keys():\n result = eval_result[lang]\n self.writer.add_scalar(f'Phoneme_Error_Rate/{lang}',\n result['per'], global_step=step)\n self.writer.add_scalar(f'Word_Error_Rate/{lang}',\n result['wer'], global_step=step)\n\n for lang, samples in lang_samples.items():\n samples = [(''.join(w), ''.join(p), ''.join(t)) for w, p, t in samples]\n word_counts = Counter([word for word, _, _ in samples])\n samples_dedup = [(w, p, t) for w, p, t in samples if word_counts[w] == 1]\n log_texts = dict()\n for word, pred, target in samples_dedup:\n log_texts[word] = f' {word:<30} {pred:<30} {target:<30}'\n log_text_items = sorted(log_texts.items(), key=lambda x: -len(x[0]))\n log_text_list = [v for k, v in log_text_items]\n log_text = '\\n'.join(log_text_list[:n_generate_samples])\n self.writer.add_text(f'{lang}/text_prediction_target', log_text, global_step=step)\n\n def _save_model(self,\n model: torch.nn.Module,\n optimizer: torch.optim,\n checkpoint: Dict[str, Any],\n path: Path) -> None:\n checkpoint['model'] = model.state_dict()\n if optimizer is not None:\n checkpoint['optimizer'] = optimizer.state_dict()\n else:\n checkpoint['optimizer'] = None\n torch.save(checkpoint, str(path))\n\n def _set_warmup_lr(self,\n optimizer: torch.optim,\n step: int,\n config: Dict[str, Any]) -> None:\n\n warmup_steps = config['training']['warmup_steps']\n if warmup_steps > 0 and step <= warmup_steps:\n warmup_factor = 1.0 - max(warmup_steps - step, 0) / warmup_steps\n for g in optimizer.param_groups:\n g['lr'] = config['training']['learning_rate'] * warmup_factor\n"
] |
[
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.isinf",
"torch.isnan",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
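The `_set_warmup_lr` method at the end of the trainer quoted above ramps the learning rate linearly over the first `warmup_steps` steps. A standalone sketch of that schedule (the `warmup_lr` helper name and the hyperparameter values are assumptions for illustration):

```python
def warmup_lr(step: int, base_lr: float, warmup_steps: int) -> float:
    # Linear warmup: 0 -> base_lr over warmup_steps, then constant base_lr.
    if warmup_steps > 0 and step <= warmup_steps:
        factor = 1.0 - max(warmup_steps - step, 0) / warmup_steps
        return base_lr * factor
    return base_lr

# Assumed values: base_lr=1e-4, warmup_steps=10.
for step in (1, 5, 10, 20):
    print(step, warmup_lr(step, base_lr=1e-4, warmup_steps=10))
# roughly 1e-05, 5e-05, then 1e-04 from step 10 onwards
```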
rvegaml/DA_Linear
|
[
"e22694b743b5bfb322ec1ee34090df636e526973"
] |
[
"MLib/Models/KerasModels.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nfrom MLib.Core.layers import LinearLayer, ConvLayer\nfrom tensorflow.keras.layers import MaxPooling2D, Flatten\nfrom tensorflow.keras import Model\n\nclass SimpleCNNModel(Model):\n\tdef __init__(self, num_units):\n\n\t\tsuper(SimpleCNNModel, self).__init__()\n\n\t\t# Define the architecture of the network\n\t\tself.conv1 = ConvLayer(size=[3,3], num_filters=32, gate=tf.nn.relu)\n\t\tself.pool1 = MaxPooling2D(pool_size=[2,2])\n\n\t\tself.conv2 = ConvLayer(size=[3,3], num_filters=128, gate=tf.nn.relu)\n\t\tself.pool2 = MaxPooling2D(pool_size=[2,2])\n\n\t\tself.conv3 = ConvLayer(size=[3,3], num_filters=256, gate=tf.nn.relu)\n\t\tself.pool3 = MaxPooling2D(pool_size=[2,2])\n\n\t\tself.conv4 = ConvLayer(size=[3,3], num_filters=512, gate=tf.nn.relu)\n\t\tself.pool4 = MaxPooling2D(pool_size=[2,2])\n\n\t\tself.flat = Flatten()\n\n\t\tself.hidden = LinearLayer(units=num_units)\n\n\t\tself.final = LinearLayer(units=10)\n\n\tdef call(self, inputs):\n\n\t\t# First conv-pooling layer\n\t\tx = self.conv1(inputs)\n\t\tx = self.pool1(x)\n\n\t\t# Second conv-pooling layer\n\t\tx = self.conv2(x)\n\t\tx = self.pool2(x)\n\n\t\t# Third conv-pooling layer\n\t\tx = self.conv3(x)\n\t\tx = self.pool3(x)\n\n\t\t# Fourth conv-pooling layer\n\t\tx = self.conv4(x)\n\t\tx = self.pool4(x)\n\n\t\t# Flatten the array \n\t\tx = self.flat(x)\n\n\t\t# First fully connected layer\n\t\tz = self.hidden(x)\n\t\tz = tf.nn.relu(z)\n\n\t\t# Prediction layer\n\t\tx = self.final(z)\n\t\tx = tf.nn.softmax(x)\n\n\t\treturn z, x\n\ndef main():\n\treturn -1\n\nif __name__ == '__main__':\n\tmain()"
] |
[
[
"tensorflow.nn.relu",
"tensorflow.nn.softmax",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
VickyChing/ICASSP19
|
[
"15b756692cca80e7b16cdbdf28d40f43db2c03b6",
"15b756692cca80e7b16cdbdf28d40f43db2c03b6"
] |
[
"run.py",
"models/ofos.py"
] |
[
"import torch\n\nimport argparse\nimport shutil\nimport os\nimport re\n\nimport importlib\nimport numpy as np\n\n\ndef run_config(args, config):\n run_path = 'runs/{}'.format(config['run_id'])\n\n # fail early, if we should train, and dir exists #######################################\n if args.train:\n if args.force:\n if os.path.exists(run_path):\n shutil.rmtree(run_path)\n try:\n directory = 'runs/{}'.format(config['run_id'])\n os.makedirs(directory)\n except Exception as e:\n print('run directory \"{}\" already exists'.format(directory))\n return\n torch.save(config, 'runs/{}/config.pkl'.format(config['run_id']))\n\n run_module = importlib.import_module(config['modules']['run']['name'])\n run = run_module.Run(config, args.cuda)\n\n dataloader_module = importlib.import_module(config['modules']['dataloader']['name'])\n\n # only load config-specified data if necessary\n if args.train or args.test or args.find_learnrate:\n dataloaders = dataloader_module.get_loaders(config)\n run.set_dataloaders(dataloaders)\n\n if args.find_learnrate:\n run.find_learnrate(args.min_lr, args.max_lr)\n elif args.train:\n run.save('runs/{}/initial.pkl'.format(config['run_id']))\n for i_epoch in range(config['n_epochs']):\n abort = run.advance()\n if abort:\n run.save('runs/{}/aborted.pkl'.format(config['run_id']))\n break\n else:\n run.save('runs/{}/current.pkl'.format(config['run_id']))\n elif args.test:\n if args.checkpoint is not None:\n if os.path.exists(args.checkpoint):\n run.load(args.checkpoint)\n run.test()\n else:\n print('checkpoint_filename \"{}\" does not exist'.format(args.checkpoint))\n exit(-1)\n else:\n print('no checkpoint specified, exiting...')\n exit(-1)\n elif args.process:\n if args.checkpoint is not None:\n if os.path.exists(args.checkpoint):\n if not os.path.exists(args.infile):\n print('input file \"{}\" does not exist'.format(args.infile))\n exit(-1)\n if os.path.exists(args.outfile):\n print('output file \"{}\" does already exist'.format(args.outfile))\n exit(-1)\n\n run.load(args.checkpoint)\n run.process(dataloader_module, args.infile, args.outfile)\n else:\n print('checkpoint_filename \"{}\" does not exist'.format(args.checkpoint))\n exit(-1)\n else:\n print('no checkpoint specified, exiting...')\n exit(-1)\n else:\n print('nothing to do specified')\n exit(-1)\n\n\ndef regex_in(key, regexes):\n for regex in regexes:\n match = re.match(regex, key)\n if match is not None:\n return True\n return False\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('configfile')\n parser.add_argument('--run_ids', nargs='+', default=[])\n\n parser.add_argument('--cuda', default=False, action='store_true')\n parser.add_argument('--dry', default=False, action='store_true')\n parser.add_argument('--force', default=False, action='store_true')\n\n parser.add_argument('--find-learnrate', default=False, action='store_true')\n parser.add_argument('--train', default=False, action='store_true')\n\n parser.add_argument('--test', default=False, action='store_true')\n parser.add_argument('--checkpoint', type=str, default=None)\n\n parser.add_argument('--process', default=False, action='store_true')\n parser.add_argument('--infile', type=str, default=None)\n parser.add_argument('--outfile', type=str, default=None)\n\n parser.add_argument('--min_lr', type=float, default=1e-8)\n parser.add_argument('--max_lr', type=float, default=10.0)\n parser.add_argument('--split', nargs=2, default=[0, 1])\n args = parser.parse_args()\n\n # hacky hacky hacky #########################################\n 
configfile_contents = open(args.configfile, 'r').read()\n _globals = dict()\n _locals = dict()\n exec(configfile_contents, _globals, _locals)\n\n all_selected_configs = []\n for config in _locals['get_config']():\n if len(args.run_ids) == 0 or regex_in(config['run_id'], args.run_ids):\n all_selected_configs.append(config)\n\n n_splits = int(args.split[1])\n i_split = int(args.split[0])\n\n splits = np.array_split(all_selected_configs, n_splits)\n print('all selected run_ids')\n\n for i, split in enumerate(splits):\n print('### split {}/{} #####################'.format(i, n_splits))\n print('\\n'.join([config['run_id'] for config in split]))\n\n print('### running split {} ################'.format(i_split))\n for config in splits[i_split]:\n if args.dry:\n print('dry: {}'.format(config['run_id']))\n else:\n print('run: {}'.format(config['run_id']))\n run_config(args, config)\n\n\nmain()\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.distributions.normal import Normal\nfrom sklearn.metrics import precision_recall_fscore_support as prfs\nimport numpy as np\n\nimport mir_eval\nfrom madmom.io import midi\nfrom adsr import ADSRNoteTrackingProcessor\nfrom collections import defaultdict\n\n\ndef get_onsets_and_pitch_labels(midifile):\n pattern = midi.MIDIFile(midifile)\n intervals = []\n labels = []\n for onset, _pitch, duration, velocity, _channel in pattern.sustained_notes:\n label = int(_pitch) # do not subtract 21; mir_eval needs pitches strictly >= 0 anyways\n intervals.append([onset, onset + duration])\n labels.append(label)\n return np.array(intervals), np.array(labels)\n\n\nclass GaussianDropout(nn.Module):\n def __init__(self, rate):\n super().__init__()\n self.dist = Normal(\n torch.cuda.FloatTensor([1.]),\n torch.cuda.FloatTensor([np.sqrt(rate / (1 - rate))])\n )\n\n def forward(self, x):\n if self.training:\n noise = self.dist.sample(x.size()).squeeze(-1)\n return x * noise\n else:\n return x\n\n\nclass GaussianNoise(nn.Module):\n def __init__(self, stddev):\n super().__init__()\n self.dist = Normal(\n torch.cuda.FloatTensor([0.]),\n torch.cuda.FloatTensor([stddev])\n )\n\n def forward(self, x):\n if self.training:\n noise = self.dist.sample(x.size()).squeeze(-1)\n return x + noise\n else:\n return x\n\n\nclass Flatten(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return x.view(x.size()[0], -1)\n\n\nclass Net(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.batch_norm = nn.BatchNorm2d(1)\n self.conv_stem = nn.Sequential(\n nn.Conv2d(1, 30, (3, 3), bias=True),\n nn.ELU(),\n GaussianDropout(0.1),\n GaussianNoise(0.1),\n\n nn.Conv2d(30, 30, (1, 35), bias=True),\n nn.ELU(),\n GaussianDropout(0.1),\n GaussianNoise(0.1),\n\n nn.Conv2d(30, 30, (7, 1), bias=True),\n nn.ELU(),\n GaussianDropout(0.1),\n GaussianNoise(0.1)\n )\n\n self.note_frames = nn.Sequential(\n nn.Conv2d(30, 10, (3, 3), bias=True),\n nn.ELU(),\n GaussianDropout(0.5),\n GaussianNoise(0.1),\n\n Flatten(),\n\n nn.Linear(1060, 88)\n )\n\n self.note_onsets = nn.Sequential(\n nn.Conv2d(30, 10, (3, 3), bias=True),\n nn.ELU(),\n GaussianDropout(0.5),\n GaussianNoise(0.1),\n\n Flatten(),\n\n nn.Linear(1060, 88)\n )\n\n self.note_offsets = nn.Sequential(\n nn.Conv2d(30, 10, (3, 3), bias=True),\n nn.ELU(),\n GaussianDropout(0.5),\n GaussianNoise(0.1),\n\n Flatten(),\n\n nn.Linear(1060, 88)\n )\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.xavier_uniform_(m.weight)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n init.xavier_uniform_(m.weight)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n self.best = dict(\n f=0,\n loss=10.\n )\n\n def forward(self, batch):\n h_bn = self.batch_norm(batch['x'])\n h_stem = self.conv_stem(h_bn)\n\n y_onsets = self.note_onsets(h_stem)\n y_frames = self.note_frames(h_stem)\n y_offsets = self.note_offsets(h_stem)\n\n if self.training:\n return dict(\n y_onsets=y_onsets,\n y_frames=y_frames,\n y_offsets=y_offsets\n )\n else:\n return dict(\n y_onsets=torch.sigmoid(y_onsets),\n y_frames=torch.sigmoid(y_frames),\n y_offsets=torch.sigmoid(y_offsets)\n )\n\n def get_train_loss_function(self):\n lambdas = self.config['lambdas']\n bce_onsets = nn.BCEWithLogitsLoss(reduction='mean')\n bce_frames = 
nn.BCEWithLogitsLoss(reduction='mean')\n bce_offsets = nn.BCEWithLogitsLoss(reduction='mean')\n\n def loss_function(output, target):\n loss = lambdas['y_onsets'] * bce_onsets(output['y_onsets'], target['y_onsets'])\n loss += lambdas['y_frames'] * bce_frames(output['y_frames'], target['y_frames'])\n loss += lambdas['y_offsets'] * bce_offsets(output['y_offsets'], target['y_offsets'])\n return loss\n\n return loss_function\n\n def evaluate_adsr(self, metadata, predictions):\n clip = 1e-2\n # see adsr.py for this!\n activations = np.stack([\n predictions['y_frames'],\n predictions['y_onsets'],\n predictions['y_offsets']\n ], axis=-1)\n\n # import matplotlib.pyplot as plt\n # fig, axes = plt.subplots(nrows=3, sharex=True, sharey=True)\n # axes[0].imshow(activations[:, :, 0].T, origin='lower')\n # axes[1].imshow(activations[:, :, 1].T, origin='lower')\n # axes[2].imshow(activations[:, :, 2].T, origin='lower')\n # plt.show()\n\n midifilename = metadata['midi_filename']\n ref_intervals, ref_pitches = get_onsets_and_pitch_labels(midifilename)\n\n results = dict()\n # this is just to get an approximate feeling for the whole note performance\n # currently. we'll tune this after training with a gridsearch\n oothresholds = [\n [0.8, 0.1, 0.4],\n ]\n\n for onset_note_prob, offset_prob, threshold in oothresholds:\n trial = 'onnp_{}_offp_{}_thrs_{}'.format(\n onset_note_prob,\n offset_prob,\n threshold\n )\n\n adsr = ADSRNoteTrackingProcessor(\n onset_prob=onset_note_prob,\n note_prob=onset_note_prob,\n offset_prob=offset_prob,\n attack_length=0.04,\n decay_length=0.04,\n release_length=0.02,\n complete=True,\n onset_threshold=threshold,\n note_threshold=threshold,\n fps=50,\n pitch_offset=21\n )\n notes, __paths = adsr.process(activations, clip=1e-2)\n\n if notes.shape[1] > 0:\n est_intervals = []\n est_pitches = []\n for onset, pitch, duration in notes:\n est_intervals.append([onset, onset + duration])\n est_pitches.append(pitch)\n est_intervals = np.array(est_intervals)\n est_pitches = np.array(est_pitches)\n\n # evaluate onsets and pitches\n on_p, on_r, on_f, on_o = mir_eval.transcription.precision_recall_f1_overlap(\n ref_intervals,\n ref_pitches,\n est_intervals,\n est_pitches,\n pitch_tolerance=0, # no numerical tolerance for midi note numbers\n onset_tolerance=0.05, # +- 50 ms\n offset_ratio=None, # do not evaluate offsets\n strict=False\n )\n\n # evaluate notes and pitches\n fu_p, fu_r, fu_f, fu_o = mir_eval.transcription.precision_recall_f1_overlap(\n ref_intervals,\n ref_pitches,\n est_intervals,\n est_pitches,\n pitch_tolerance=0, # no numerical tolerance for midi note numbers\n onset_tolerance=0.05, # +- 50 ms\n offset_ratio=0.2, # evaluate complete notes\n strict=False\n )\n results[trial] = dict(\n onsets=dict(p=on_p, r=on_r, f=on_f, o=on_o),\n full=dict(p=fu_p, r=fu_r, f=fu_f, o=fu_o)\n )\n else:\n results[trial] = dict(\n onsets=dict(p=0, r=0, f=0, o=0),\n full=dict(p=0, r=0, f=0, o=0)\n )\n return results\n\n def evalute_one(self, metadata, predictions, batches):\n def log_loss(y, _p):\n eps = 1e-3\n p = np.clip(_p, eps, 1. 
- eps)\n return np.mean(-(y * np.log(p) + (1 - y) * np.log(1 - p)))\n\n outputs = ['y_onsets', 'y_frames', 'y_offsets']\n\n result = dict()\n for output in outputs:\n loss = log_loss(batches[output], predictions[output])\n y_true = (batches[output] > 0.5) * 1\n y_pred = (predictions[output] > 0.5) * 1\n\n p, r, f, _ = prfs(y_true, y_pred, average='micro')\n\n result[output] = dict(\n loss=loss,\n p=p,\n r=r,\n f=f\n )\n\n result['adsr'] = self.evaluate_adsr(metadata, predictions)\n\n return result\n\n def evaluate_aggregate_checkpoint(self,\n name,\n all_predictions,\n all_batches,\n logger,\n epoch,\n scheduler):\n results = []\n for ip, ib in zip(all_predictions, all_batches):\n results.append(self.evalute_one(\n ip['metadata'],\n ip['predictions'],\n ib['batches']\n ))\n\n outputs = ['y_onsets', 'y_frames', 'y_offsets']\n\n mean_loss = 0\n mean_p = 0\n mean_r = 0\n mean_f = 0\n for output in outputs:\n loss, p, r, f = 0, 0, 0, 0\n for result in results:\n loss += result[output]['loss']\n p += result[output]['p']\n r += result[output]['r']\n f += result[output]['f']\n loss /= len(results)\n p /= len(results)\n r /= len(results)\n f /= len(results)\n\n mean_loss += loss\n mean_p += p\n mean_r += r\n mean_f += f\n\n logger.add_scalar('{}_individual_losses/{}_loss'.format(name, output), loss, global_step=epoch)\n logger.add_scalar('{}_individual_p/{}_p'.format(name, output), p, global_step=epoch)\n logger.add_scalar('{}_individual_r/{}_r'.format(name, output), r, global_step=epoch)\n logger.add_scalar('{}_individual_f/{}_f'.format(name, output), f, global_step=epoch)\n\n mean_loss /= len(outputs)\n mean_f /= len(outputs)\n mean_p /= len(outputs)\n mean_r /= len(outputs)\n\n logger.add_scalar('{}_means/loss'.format(name), mean_loss, global_step=epoch)\n logger.add_scalar('{}_means/p'.format(name), mean_p, global_step=epoch)\n logger.add_scalar('{}_means/r'.format(name), mean_r, global_step=epoch)\n logger.add_scalar('{}_means/f'.format(name), mean_f, global_step=epoch)\n\n #####################################################################\n # adsr eval\n trials = defaultdict(list)\n for result in results:\n for trial_key, trial_result in result['adsr'].items():\n for what in ['onsets', 'full']:\n for prfo in ['p', 'r', 'f', 'o']:\n flat_trial_key = '{}_{}_{}/{}'.format(name, what, trial_key, prfo)\n trials[flat_trial_key].append(\n trial_result[what][prfo]\n )\n for flat_trial_key in trials.keys():\n trials[flat_trial_key] = np.mean(trials[flat_trial_key])\n\n for flat_trial_key, flat_trial_result in trials.items():\n logger.add_scalar(flat_trial_key, flat_trial_result, global_step=epoch)\n\n checkpoint_filename = None\n if name == 'valid':\n if mean_f >= self.best['f']:\n self.best['f'] = mean_f\n\n if mean_loss <= self.best['loss']:\n checkpoint_filename = 'runs/{}/best_valid_loss.pkl'.format(\n self.config['run_id']\n )\n self.best['loss'] = mean_loss\n\n logger.add_scalar('{}_best/f'.format(name), self.best['f'], global_step=epoch)\n logger.add_scalar('{}_best/loss'.format(name), self.best['loss'], global_step=epoch)\n return checkpoint_filename\n"
] |
[
[
"numpy.array_split"
],
[
"torch.sigmoid",
"numpy.log",
"numpy.sqrt",
"numpy.clip",
"torch.nn.ELU",
"torch.nn.Conv2d",
"numpy.stack",
"torch.cuda.FloatTensor",
"torch.nn.Linear",
"torch.nn.BCEWithLogitsLoss",
"sklearn.metrics.precision_recall_fscore_support",
"numpy.mean",
"torch.nn.init.xavier_uniform_",
"torch.nn.BatchNorm2d",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Hetti219/Python-Projects
|
[
"cf483836496d329ea056928b719e485cbab303a8"
] |
[
"W3Schools Online Lessons/ML/Scatter Plot.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy\n\nx = [5, 7, 8, 7, 2, 17, 2, 9, 4, 11, 12, 9, 6]\n\ny = [99, 86, 87, 88, 111, 86, 103, 87, 94, 78, 77, 85, 86]\n\nplt.scatter(x, y)\n\nplt.show()\n\n# Random Data Distributions\n\np = numpy.random.normal(9.2, 8.2, 832176)\nq = numpy.random.normal(8.7, 1.2, 982537)\n\nplt.scatter(x, y)\nplt.show()\n"
] |
[
[
"numpy.random.normal",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BeauvoirR/Real-Time-Voice-Cloning
|
[
"2e29e2e09ad4696c836a7872a58f581fda04c649",
"2e29e2e09ad4696c836a7872a58f581fda04c649"
] |
[
"encoder/audio.py",
"encoder/model.py"
] |
[
"from encoder import sampling_rate, mel_window_length, mel_window_step, mel_n_channels\nfrom scipy.ndimage.morphology import binary_dilation\n#from encoder.params_data import *\nfrom pathlib import Path\nfrom typing import Optional, Union\nfrom warnings import warn\nimport numpy as np\nimport librosa\nimport struct\n\ntry:\n import webrtcvad\nexcept:\n warn(\"Unable to import 'webrtcvad'. This package enables noise removal and is recommended.\")\n webrtcvad=None\n\nint16_max = (2 ** 15) - 1\n\n\ndef preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray],\n source_sr: Optional[int] = None):\n \"\"\"\n Applies the preprocessing operations used in training the Speaker Encoder to a waveform\n either on disk or in memory. The waveform will be resampled to match the data hyperparameters.\n\n :param fpath_or_wav: either a filepath to an audio file (many extensions are supported, not\n just .wav), either the waveform as a numpy array of floats.\n :param source_sr: if passing an audio waveform, the sampling rate of the waveform before\n preprocessing. After preprocessing, the waveform's sampling rate will match the data\n hyperparameters. If passing a filepath, the sampling rate will be automatically detected and\n this argument will be ignored.\n \"\"\"\n # Load the wav from disk if needed\n if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path):\n wav, source_sr = librosa.load(str(fpath_or_wav), sr=None)\n else:\n wav = fpath_or_wav\n\n # Resample the wav if needed\n if source_sr is not None and source_sr != sampling_rate:\n wav = librosa.resample(wav, source_sr, sampling_rate)\n\n # Apply the preprocessing: normalize volume and shorten long silences\n wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True)\n if webrtcvad:\n wav = trim_long_silences(wav)\n\n return wav\n\n\ndef wav_to_mel_spectrogram(wav):\n \"\"\"\n Derives a mel spectrogram ready to be used by the encoder from a preprocessed audio waveform.\n Note: this not a log-mel spectrogram.\n \"\"\"\n frames = librosa.feature.melspectrogram(\n wav,\n sampling_rate,\n n_fft=int(sampling_rate * mel_window_length / 1000),\n hop_length=int(sampling_rate * mel_window_step / 1000),\n n_mels=mel_n_channels\n )\n return frames.astype(np.float32).T\n\n\ndef trim_long_silences(wav):\n \"\"\"\n Ensures that segments without voice in the waveform remain no longer than a\n threshold determined by the VAD parameters in params.py.\n\n :param wav: the raw waveform as a numpy array of floats\n :return: the same waveform with silences trimmed away (length <= original wav length)\n \"\"\"\n # Compute the voice detection window size\n samples_per_window = (vad_window_length * sampling_rate) // 1000\n\n # Trim the end of the audio to have a multiple of the window size\n wav = wav[:len(wav) - (len(wav) % samples_per_window)]\n\n # Convert the float waveform to 16-bit mono PCM\n pcm_wave = struct.pack(\"%dh\" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))\n\n # Perform voice activation detection\n voice_flags = []\n vad = webrtcvad.Vad(mode=3)\n for window_start in range(0, len(wav), samples_per_window):\n window_end = window_start + samples_per_window\n voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2],\n sample_rate=sampling_rate))\n voice_flags = np.array(voice_flags)\n\n # Smooth the voice detection with a moving average\n def moving_average(array, width):\n array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))\n ret = np.cumsum(array_padded, 
dtype=float)\n ret[width:] = ret[width:] - ret[:-width]\n return ret[width - 1:] / width\n\n audio_mask = moving_average(voice_flags, vad_moving_average_width)\n audio_mask = np.round(audio_mask).astype(np.bool)\n\n # Dilate the voiced regions\n audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1))\n audio_mask = np.repeat(audio_mask, samples_per_window)\n\n return wav[audio_mask == True]\n\n\ndef normalize_volume(wav, target_dBFS, increase_only=False, decrease_only=False):\n if increase_only and decrease_only:\n raise ValueError(\"Both increase only and decrease only are set\")\n dBFS_change = target_dBFS - 10 * np.log10(np.mean(wav ** 2))\n if (dBFS_change < 0 and increase_only) or (dBFS_change > 0 and decrease_only):\n return wav\n return wav * (10 ** (dBFS_change / 20))\n",
"from encoder import mel_n_channels, model_hidden_size, model_num_layers\nfrom encoder import model_embedding_size, sampling_rate\n#from encoder.params_model import *\n#from encoder.params_data import *\nfrom scipy.interpolate import interp1d\nfrom sklearn.metrics import roc_curve\nfrom torch.nn.utils import clip_grad_norm_\nfrom scipy.optimize import brentq\nfrom torch import nn\nimport numpy as np\nimport torch\n\n\nclass SpeakerEncoder(nn.Module):\n def __init__(self, device, loss_device):\n super().__init__()\n self.loss_device = loss_device\n\n # Network defition\n self.lstm = nn.LSTM(input_size=mel_n_channels,\n hidden_size=model_hidden_size,\n num_layers=model_num_layers,\n batch_first=True).to(device)\n self.linear = nn.Linear(in_features=model_hidden_size,\n out_features=model_embedding_size).to(device)\n self.relu = torch.nn.ReLU().to(device)\n\n # Cosine similarity scaling (with fixed initial parameter values)\n self.similarity_weight = nn.Parameter(torch.tensor([10.])).to(loss_device)\n self.similarity_bias = nn.Parameter(torch.tensor([-5.])).to(loss_device)\n\n # Loss\n self.loss_fn = nn.CrossEntropyLoss().to(loss_device)\n\n def do_gradient_ops(self):\n # Gradient scale\n self.similarity_weight.grad *= 0.01\n self.similarity_bias.grad *= 0.01\n\n # Gradient clipping\n clip_grad_norm_(self.parameters(), 3, norm_type=2)\n\n def forward(self, utterances, hidden_init=None):\n \"\"\"\n Computes the embeddings of a batch of utterance spectrograms.\n\n :param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape\n (batch_size, n_frames, n_channels)\n :param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers,\n batch_size, hidden_size). Will default to a tensor of zeros if None.\n :return: the embeddings as a tensor of shape (batch_size, embedding_size)\n \"\"\"\n # Pass the input through the LSTM layers and retrieve all outputs, the final hidden state\n # and the final cell state.\n out, (hidden, cell) = self.lstm(utterances, hidden_init)\n\n # We take only the hidden state of the last layer\n embeds_raw = self.relu(self.linear(hidden[-1]))\n\n # L2-normalize it\n embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)\n\n return embeds\n\n def similarity_matrix(self, embeds):\n \"\"\"\n Computes the similarity matrix according the section 2.1 of GE2E.\n\n :param embeds: the embeddings as a tensor of shape (speakers_per_batch,\n utterances_per_speaker, embedding_size)\n :return: the similarity matrix as a tensor of shape (speakers_per_batch,\n utterances_per_speaker, speakers_per_batch)\n \"\"\"\n speakers_per_batch, utterances_per_speaker = embeds.shape[:2]\n\n # Inclusive centroids (1 per speaker). Cloning is needed for reverse differentiation\n centroids_incl = torch.mean(embeds, dim=1, keepdim=True)\n centroids_incl = centroids_incl.clone() / torch.norm(centroids_incl, dim=2, keepdim=True)\n\n # Exclusive centroids (1 per utterance)\n centroids_excl = (torch.sum(embeds, dim=1, keepdim=True) - embeds)\n centroids_excl /= (utterances_per_speaker - 1)\n centroids_excl = centroids_excl.clone() / torch.norm(centroids_excl, dim=2, keepdim=True)\n\n # Similarity matrix. 
The cosine similarity of already 2-normed vectors is simply the dot\n # product of these vectors (which is just an element-wise multiplication reduced by a sum).\n # We vectorize the computation for efficiency.\n sim_matrix = torch.zeros(speakers_per_batch, utterances_per_speaker,\n speakers_per_batch).to(self.loss_device)\n mask_matrix = 1 - np.eye(speakers_per_batch, dtype=np.int)\n for j in range(speakers_per_batch):\n mask = np.where(mask_matrix[j])[0]\n sim_matrix[mask, :, j] = (embeds[mask] * centroids_incl[j]).sum(dim=2)\n sim_matrix[j, :, j] = (embeds[j] * centroids_excl[j]).sum(dim=1)\n\n ## Even more vectorized version (slower maybe because of transpose)\n # sim_matrix2 = torch.zeros(speakers_per_batch, speakers_per_batch, utterances_per_speaker\n # ).to(self.loss_device)\n # eye = np.eye(speakers_per_batch, dtype=np.int)\n # mask = np.where(1 - eye)\n # sim_matrix2[mask] = (embeds[mask[0]] * centroids_incl[mask[1]]).sum(dim=2)\n # mask = np.where(eye)\n # sim_matrix2[mask] = (embeds * centroids_excl).sum(dim=2)\n # sim_matrix2 = sim_matrix2.transpose(1, 2)\n\n sim_matrix = sim_matrix * self.similarity_weight + self.similarity_bias\n return sim_matrix\n\n def loss(self, embeds):\n \"\"\"\n Computes the softmax loss according the section 2.1 of GE2E.\n\n :param embeds: the embeddings as a tensor of shape (speakers_per_batch,\n utterances_per_speaker, embedding_size)\n :return: the loss and the EER for this batch of embeddings.\n \"\"\"\n speakers_per_batch, utterances_per_speaker = embeds.shape[:2]\n\n # Loss\n sim_matrix = self.similarity_matrix(embeds)\n sim_matrix = sim_matrix.reshape((speakers_per_batch * utterances_per_speaker,\n speakers_per_batch))\n ground_truth = np.repeat(np.arange(speakers_per_batch), utterances_per_speaker)\n target = torch.from_numpy(ground_truth).long().to(self.loss_device)\n loss = self.loss_fn(sim_matrix, target)\n\n # EER (not backpropagated)\n with torch.no_grad():\n inv_argmax = lambda i: np.eye(1, speakers_per_batch, i, dtype=np.int)[0]\n labels = np.array([inv_argmax(i) for i in ground_truth])\n preds = sim_matrix.detach().cpu().numpy()\n\n # Snippet from https://yangcha.github.io/EER-ROC/\n fpr, tpr, thresholds = roc_curve(labels.flatten(), preds.flatten())\n eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)\n\n return loss, eer\n"
] |
[
[
"numpy.cumsum",
"numpy.ones",
"numpy.round",
"numpy.mean",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
],
[
"torch.mean",
"torch.nn.CrossEntropyLoss",
"torch.norm",
"torch.nn.LSTM",
"torch.zeros",
"numpy.arange",
"numpy.eye",
"torch.sum",
"torch.from_numpy",
"torch.tensor",
"torch.nn.Linear",
"scipy.interpolate.interp1d",
"torch.no_grad",
"torch.nn.ReLU",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
            {
                "matplotlib": [],
                "numpy": [],
                "pandas": [],
                "scipy": [
                    "0.12",
                    "0.13",
                    "0.14",
                    "0.15",
                    "0.16",
                    "0.17",
                    "0.18",
                    "0.19",
                    "1.0",
                    "1.2",
                    "1.3",
                    "1.4",
                    "1.5",
                    "1.6",
                    "1.7",
                    "1.8",
                    "1.9",
                    "1.10"
                ],
                "tensorflow": []
            }
] |
dpsong/test
|
[
"7b33b24e1e743a71b95862e95f545d9de2634672"
] |
[
"src/lib/extractor/vf_extractor.py"
] |
[
"import torch.nn as nn\n\n\nclass VfExtractor(nn.Module):\n\n def __init__(self, in_channels=1):\n super(VfExtractor, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, 120, kernel_size=(3, 3), padding=(1, 1))\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(120, 120, kernel_size=(3, 3), padding=(1, 1))\n self.relu2 = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(120, 80, kernel_size=(3, 3), padding=(1, 1))\n self.relu3 = nn.ReLU(inplace=True)\n self.conv4 = nn.Conv2d(80, 80, kernel_size=(3, 3), padding=(1, 1))\n self.relu4 = nn.ReLU(inplace=True)\n self.gap = nn.AdaptiveAvgPool2d((1, 1))\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.relu2(x)\n x = self.conv3(x)\n x = self.relu3(x)\n x = self.conv4(x)\n x = self.relu4(x)\n x = self.gap(x)\n\n return x\n"
] |
[
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jenssss/xarray
|
[
"43a2a4bdf3a492d89aae9f2c5b0867932ff51cef",
"43a2a4bdf3a492d89aae9f2c5b0867932ff51cef"
] |
[
"xarray/tests/test_conventions.py",
"xarray/tests/test_formatting.py"
] |
[
"import contextlib\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom xarray import (\n Dataset,\n SerializationWarning,\n Variable,\n coding,\n conventions,\n open_dataset,\n)\nfrom xarray.backends.common import WritableCFDataStore\nfrom xarray.backends.memory import InMemoryDataStore\nfrom xarray.conventions import decode_cf\nfrom xarray.testing import assert_identical\n\nfrom . import (\n assert_array_equal,\n raises_regex,\n requires_cftime,\n requires_dask,\n requires_netCDF4,\n)\nfrom .test_backends import CFEncodedBase\n\n\nclass TestBoolTypeArray:\n def test_booltype_array(self):\n x = np.array([1, 0, 1, 1, 0], dtype=\"i1\")\n bx = conventions.BoolTypeArray(x)\n assert bx.dtype == bool\n assert_array_equal(bx, np.array([True, False, True, True, False], dtype=bool))\n\n\nclass TestNativeEndiannessArray:\n def test(self):\n x = np.arange(5, dtype=\">i8\")\n expected = np.arange(5, dtype=\"int64\")\n a = conventions.NativeEndiannessArray(x)\n assert a.dtype == expected.dtype\n assert a.dtype == expected[:].dtype\n assert_array_equal(a, expected)\n\n\ndef test_decode_cf_with_conflicting_fill_missing_value():\n expected = Variable([\"t\"], [np.nan, np.nan, 2], {\"units\": \"foobar\"})\n var = Variable(\n [\"t\"], np.arange(3), {\"units\": \"foobar\", \"missing_value\": 0, \"_FillValue\": 1}\n )\n with warnings.catch_warnings(record=True) as w:\n actual = conventions.decode_cf_variable(\"t\", var)\n assert_identical(actual, expected)\n assert \"has multiple fill\" in str(w[0].message)\n\n expected = Variable([\"t\"], np.arange(10), {\"units\": \"foobar\"})\n\n var = Variable(\n [\"t\"],\n np.arange(10),\n {\"units\": \"foobar\", \"missing_value\": np.nan, \"_FillValue\": np.nan},\n )\n actual = conventions.decode_cf_variable(\"t\", var)\n assert_identical(actual, expected)\n\n var = Variable(\n [\"t\"],\n np.arange(10),\n {\n \"units\": \"foobar\",\n \"missing_value\": np.float32(np.nan),\n \"_FillValue\": np.float32(np.nan),\n },\n )\n actual = conventions.decode_cf_variable(\"t\", var)\n assert_identical(actual, expected)\n\n\n@requires_cftime\nclass TestEncodeCFVariable:\n def test_incompatible_attributes(self):\n invalid_vars = [\n Variable(\n [\"t\"], pd.date_range(\"2000-01-01\", periods=3), {\"units\": \"foobar\"}\n ),\n Variable([\"t\"], pd.to_timedelta([\"1 day\"]), {\"units\": \"foobar\"}),\n Variable([\"t\"], [0, 1, 2], {\"add_offset\": 0}, {\"add_offset\": 2}),\n Variable([\"t\"], [0, 1, 2], {\"_FillValue\": 0}, {\"_FillValue\": 2}),\n ]\n for var in invalid_vars:\n with pytest.raises(ValueError):\n conventions.encode_cf_variable(var)\n\n def test_missing_fillvalue(self):\n v = Variable([\"x\"], np.array([np.nan, 1, 2, 3]))\n v.encoding = {\"dtype\": \"int16\"}\n with pytest.warns(Warning, match=\"floating point data as an integer\"):\n conventions.encode_cf_variable(v)\n\n def test_multidimensional_coordinates(self):\n # regression test for GH1763\n # Set up test case with coordinates that have overlapping (but not\n # identical) dimensions.\n zeros1 = np.zeros((1, 5, 3))\n zeros2 = np.zeros((1, 6, 3))\n zeros3 = np.zeros((1, 5, 4))\n orig = Dataset(\n {\n \"lon1\": ([\"x1\", \"y1\"], zeros1.squeeze(0), {}),\n \"lon2\": ([\"x2\", \"y1\"], zeros2.squeeze(0), {}),\n \"lon3\": ([\"x1\", \"y2\"], zeros3.squeeze(0), {}),\n \"lat1\": ([\"x1\", \"y1\"], zeros1.squeeze(0), {}),\n \"lat2\": ([\"x2\", \"y1\"], zeros2.squeeze(0), {}),\n \"lat3\": ([\"x1\", \"y2\"], zeros3.squeeze(0), {}),\n \"foo1\": ([\"time\", \"x1\", \"y1\"], zeros1, {\"coordinates\": 
\"lon1 lat1\"}),\n \"foo2\": ([\"time\", \"x2\", \"y1\"], zeros2, {\"coordinates\": \"lon2 lat2\"}),\n \"foo3\": ([\"time\", \"x1\", \"y2\"], zeros3, {\"coordinates\": \"lon3 lat3\"}),\n \"time\": (\"time\", [0.0], {\"units\": \"hours since 2017-01-01\"}),\n }\n )\n orig = conventions.decode_cf(orig)\n # Encode the coordinates, as they would be in a netCDF output file.\n enc, attrs = conventions.encode_dataset_coordinates(orig)\n # Make sure we have the right coordinates for each variable.\n foo1_coords = enc[\"foo1\"].attrs.get(\"coordinates\", \"\")\n foo2_coords = enc[\"foo2\"].attrs.get(\"coordinates\", \"\")\n foo3_coords = enc[\"foo3\"].attrs.get(\"coordinates\", \"\")\n assert set(foo1_coords.split()) == {\"lat1\", \"lon1\"}\n assert set(foo2_coords.split()) == {\"lat2\", \"lon2\"}\n assert set(foo3_coords.split()) == {\"lat3\", \"lon3\"}\n # Should not have any global coordinates.\n assert \"coordinates\" not in attrs\n\n def test_do_not_overwrite_user_coordinates(self):\n orig = Dataset(\n coords={\"x\": [0, 1, 2], \"y\": (\"x\", [5, 6, 7]), \"z\": (\"x\", [8, 9, 10])},\n data_vars={\"a\": (\"x\", [1, 2, 3]), \"b\": (\"x\", [3, 5, 6])},\n )\n orig[\"a\"].encoding[\"coordinates\"] = \"y\"\n orig[\"b\"].encoding[\"coordinates\"] = \"z\"\n enc, _ = conventions.encode_dataset_coordinates(orig)\n assert enc[\"a\"].attrs[\"coordinates\"] == \"y\"\n assert enc[\"b\"].attrs[\"coordinates\"] == \"z\"\n orig[\"a\"].attrs[\"coordinates\"] = \"foo\"\n with raises_regex(ValueError, \"'coordinates' found in both attrs\"):\n conventions.encode_dataset_coordinates(orig)\n\n @requires_dask\n def test_string_object_warning(self):\n original = Variable((\"x\",), np.array([\"foo\", \"bar\"], dtype=object)).chunk()\n with pytest.warns(SerializationWarning, match=\"dask array with dtype=object\"):\n encoded = conventions.encode_cf_variable(original)\n assert_identical(original, encoded)\n\n\n@requires_cftime\nclass TestDecodeCF:\n def test_dataset(self):\n original = Dataset(\n {\n \"t\": (\"t\", [0, 1, 2], {\"units\": \"days since 2000-01-01\"}),\n \"foo\": (\"t\", [0, 0, 0], {\"coordinates\": \"y\", \"units\": \"bar\"}),\n \"y\": (\"t\", [5, 10, -999], {\"_FillValue\": -999}),\n }\n )\n expected = Dataset(\n {\"foo\": (\"t\", [0, 0, 0], {\"units\": \"bar\"})},\n {\n \"t\": pd.date_range(\"2000-01-01\", periods=3),\n \"y\": (\"t\", [5.0, 10.0, np.nan]),\n },\n )\n actual = conventions.decode_cf(original)\n assert_identical(expected, actual)\n\n def test_invalid_coordinates(self):\n # regression test for GH308\n original = Dataset({\"foo\": (\"t\", [1, 2], {\"coordinates\": \"invalid\"})})\n actual = conventions.decode_cf(original)\n assert_identical(original, actual)\n\n def test_decode_coordinates(self):\n # regression test for GH610\n original = Dataset(\n {\"foo\": (\"t\", [1, 2], {\"coordinates\": \"x\"}), \"x\": (\"t\", [4, 5])}\n )\n actual = conventions.decode_cf(original)\n assert actual.foo.encoding[\"coordinates\"] == \"x\"\n\n def test_0d_int32_encoding(self):\n original = Variable((), np.int32(0), encoding={\"dtype\": \"int64\"})\n expected = Variable((), np.int64(0))\n actual = conventions.maybe_encode_nonstring_dtype(original)\n assert_identical(expected, actual)\n\n def test_decode_cf_with_multiple_missing_values(self):\n original = Variable([\"t\"], [0, 1, 2], {\"missing_value\": np.array([0, 1])})\n expected = Variable([\"t\"], [np.nan, np.nan, 2], {})\n with warnings.catch_warnings(record=True) as w:\n actual = conventions.decode_cf_variable(\"t\", original)\n 
assert_identical(expected, actual)\n assert \"has multiple fill\" in str(w[0].message)\n\n def test_decode_cf_with_drop_variables(self):\n original = Dataset(\n {\n \"t\": (\"t\", [0, 1, 2], {\"units\": \"days since 2000-01-01\"}),\n \"x\": (\"x\", [9, 8, 7], {\"units\": \"km\"}),\n \"foo\": (\n (\"t\", \"x\"),\n [[0, 0, 0], [1, 1, 1], [2, 2, 2]],\n {\"units\": \"bar\"},\n ),\n \"y\": (\"t\", [5, 10, -999], {\"_FillValue\": -999}),\n }\n )\n expected = Dataset(\n {\n \"t\": pd.date_range(\"2000-01-01\", periods=3),\n \"foo\": (\n (\"t\", \"x\"),\n [[0, 0, 0], [1, 1, 1], [2, 2, 2]],\n {\"units\": \"bar\"},\n ),\n \"y\": (\"t\", [5, 10, np.nan]),\n }\n )\n actual = conventions.decode_cf(original, drop_variables=(\"x\",))\n actual2 = conventions.decode_cf(original, drop_variables=\"x\")\n assert_identical(expected, actual)\n assert_identical(expected, actual2)\n\n def test_invalid_time_units_raises_eagerly(self):\n ds = Dataset({\"time\": (\"time\", [0, 1], {\"units\": \"foobar since 123\"})})\n with raises_regex(ValueError, \"unable to decode time\"):\n decode_cf(ds)\n\n @requires_cftime\n def test_dataset_repr_with_netcdf4_datetimes(self):\n # regression test for #347\n attrs = {\"units\": \"days since 0001-01-01\", \"calendar\": \"noleap\"}\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"unable to decode time\")\n ds = decode_cf(Dataset({\"time\": (\"time\", [0, 1], attrs)}))\n assert \"(time) object\" in repr(ds)\n\n attrs = {\"units\": \"days since 1900-01-01\"}\n ds = decode_cf(Dataset({\"time\": (\"time\", [0, 1], attrs)}))\n assert \"(time) datetime64[ns]\" in repr(ds)\n\n @requires_cftime\n def test_decode_cf_datetime_transition_to_invalid(self):\n # manually create dataset with not-decoded date\n from datetime import datetime\n\n ds = Dataset(coords={\"time\": [0, 266 * 365]})\n units = \"days since 2000-01-01 00:00:00\"\n ds.time.attrs = dict(units=units)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"unable to decode time\")\n ds_decoded = conventions.decode_cf(ds)\n\n expected = [datetime(2000, 1, 1, 0, 0), datetime(2265, 10, 28, 0, 0)]\n\n assert_array_equal(ds_decoded.time.values, expected)\n\n @requires_dask\n def test_decode_cf_with_dask(self):\n import dask.array as da\n\n original = Dataset(\n {\n \"t\": (\"t\", [0, 1, 2], {\"units\": \"days since 2000-01-01\"}),\n \"foo\": (\"t\", [0, 0, 0], {\"coordinates\": \"y\", \"units\": \"bar\"}),\n \"bar\": (\"string2\", [b\"a\", b\"b\"]),\n \"baz\": ((\"x\"), [b\"abc\"], {\"_Encoding\": \"utf-8\"}),\n \"y\": (\"t\", [5, 10, -999], {\"_FillValue\": -999}),\n }\n ).chunk()\n decoded = conventions.decode_cf(original)\n print(decoded)\n assert all(\n isinstance(var.data, da.Array)\n for name, var in decoded.variables.items()\n if name not in decoded.indexes\n )\n assert_identical(decoded, conventions.decode_cf(original).compute())\n\n @requires_dask\n def test_decode_dask_times(self):\n original = Dataset.from_dict(\n {\n \"coords\": {},\n \"dims\": {\"time\": 5},\n \"data_vars\": {\n \"average_T1\": {\n \"dims\": (\"time\",),\n \"attrs\": {\"units\": \"days since 1958-01-01 00:00:00\"},\n \"data\": [87659.0, 88024.0, 88389.0, 88754.0, 89119.0],\n }\n },\n }\n )\n assert_identical(\n conventions.decode_cf(original.chunk()),\n conventions.decode_cf(original).chunk(),\n )\n\n def test_decode_cf_time_kwargs(self):\n ds = Dataset.from_dict(\n {\n \"coords\": {\n \"timedelta\": {\n \"data\": np.array([1, 2, 3], dtype=\"int64\"),\n \"dims\": \"timedelta\",\n \"attrs\": {\"units\": 
\"days\"},\n },\n \"time\": {\n \"data\": np.array([1, 2, 3], dtype=\"int64\"),\n \"dims\": \"time\",\n \"attrs\": {\"units\": \"days since 2000-01-01\"},\n },\n },\n \"dims\": {\"time\": 3, \"timedelta\": 3},\n \"data_vars\": {\n \"a\": {\"dims\": (\"time\", \"timedelta\"), \"data\": np.ones((3, 3))},\n },\n }\n )\n\n dsc = conventions.decode_cf(ds)\n assert dsc.timedelta.dtype == np.dtype(\"m8[ns]\")\n assert dsc.time.dtype == np.dtype(\"M8[ns]\")\n dsc = conventions.decode_cf(ds, decode_times=False)\n assert dsc.timedelta.dtype == np.dtype(\"int64\")\n assert dsc.time.dtype == np.dtype(\"int64\")\n dsc = conventions.decode_cf(ds, decode_times=True, decode_timedelta=False)\n assert dsc.timedelta.dtype == np.dtype(\"int64\")\n assert dsc.time.dtype == np.dtype(\"M8[ns]\")\n dsc = conventions.decode_cf(ds, decode_times=False, decode_timedelta=True)\n assert dsc.timedelta.dtype == np.dtype(\"m8[ns]\")\n assert dsc.time.dtype == np.dtype(\"int64\")\n\n\nclass CFEncodedInMemoryStore(WritableCFDataStore, InMemoryDataStore):\n def encode_variable(self, var):\n \"\"\"encode one variable\"\"\"\n coder = coding.strings.EncodedStringCoder(allows_unicode=True)\n var = coder.encode(var)\n return var\n\n\n@requires_netCDF4\nclass TestCFEncodedDataStore(CFEncodedBase):\n @contextlib.contextmanager\n def create_store(self):\n yield CFEncodedInMemoryStore()\n\n @contextlib.contextmanager\n def roundtrip(\n self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False\n ):\n if save_kwargs is None:\n save_kwargs = {}\n if open_kwargs is None:\n open_kwargs = {}\n store = CFEncodedInMemoryStore()\n data.dump_to_store(store, **save_kwargs)\n yield open_dataset(store, **open_kwargs)\n\n @pytest.mark.skip(\"cannot roundtrip coordinates yet for \" \"CFEncodedInMemoryStore\")\n def test_roundtrip_coordinates(self):\n pass\n\n def test_invalid_dataarray_names_raise(self):\n # only relevant for on-disk file formats\n pass\n\n def test_encoding_kwarg(self):\n # we haven't bothered to raise errors yet for unexpected encodings in\n # this test dummy\n pass\n\n def test_encoding_kwarg_fixed_width_string(self):\n # CFEncodedInMemoryStore doesn't support explicit string encodings.\n pass\n",
"import sys\nfrom textwrap import dedent\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport xarray as xr\nfrom xarray.core import formatting\nfrom xarray.core.npcompat import IS_NEP18_ACTIVE\n\nfrom . import raises_regex\n\n\nclass TestFormatting:\n def test_get_indexer_at_least_n_items(self):\n cases = [\n ((20,), (slice(10),), (slice(-10, None),)),\n ((3, 20), (0, slice(10)), (-1, slice(-10, None))),\n ((2, 10), (0, slice(10)), (-1, slice(-10, None))),\n ((2, 5), (slice(2), slice(None)), (slice(-2, None), slice(None))),\n ((1, 2, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))),\n ((2, 3, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))),\n (\n (1, 10, 1),\n (0, slice(10), slice(None)),\n (-1, slice(-10, None), slice(None)),\n ),\n (\n (2, 5, 1),\n (slice(2), slice(None), slice(None)),\n (slice(-2, None), slice(None), slice(None)),\n ),\n ((2, 5, 3), (0, slice(4), slice(None)), (-1, slice(-4, None), slice(None))),\n (\n (2, 3, 3),\n (slice(2), slice(None), slice(None)),\n (slice(-2, None), slice(None), slice(None)),\n ),\n ]\n for shape, start_expected, end_expected in cases:\n actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=False)\n assert start_expected == actual\n actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=True)\n assert end_expected == actual\n\n def test_first_n_items(self):\n array = np.arange(100).reshape(10, 5, 2)\n for n in [3, 10, 13, 100, 200]:\n actual = formatting.first_n_items(array, n)\n expected = array.flat[:n]\n assert (expected == actual).all()\n\n with raises_regex(ValueError, \"at least one item\"):\n formatting.first_n_items(array, 0)\n\n def test_last_n_items(self):\n array = np.arange(100).reshape(10, 5, 2)\n for n in [3, 10, 13, 100, 200]:\n actual = formatting.last_n_items(array, n)\n expected = array.flat[-n:]\n assert (expected == actual).all()\n\n with raises_regex(ValueError, \"at least one item\"):\n formatting.first_n_items(array, 0)\n\n def test_last_item(self):\n array = np.arange(100)\n\n reshape = ((10, 10), (1, 100), (2, 2, 5, 5))\n expected = np.array([99])\n\n for r in reshape:\n result = formatting.last_item(array.reshape(r))\n assert result == expected\n\n def test_format_item(self):\n cases = [\n (pd.Timestamp(\"2000-01-01T12\"), \"2000-01-01T12:00:00\"),\n (pd.Timestamp(\"2000-01-01\"), \"2000-01-01\"),\n (pd.Timestamp(\"NaT\"), \"NaT\"),\n (pd.Timedelta(\"10 days 1 hour\"), \"10 days 01:00:00\"),\n (pd.Timedelta(\"-3 days\"), \"-3 days +00:00:00\"),\n (pd.Timedelta(\"3 hours\"), \"0 days 03:00:00\"),\n (pd.Timedelta(\"NaT\"), \"NaT\"),\n (\"foo\", \"'foo'\"),\n (b\"foo\", \"b'foo'\"),\n (1, \"1\"),\n (1.0, \"1.0\"),\n ]\n for item, expected in cases:\n actual = formatting.format_item(item)\n assert expected == actual\n\n def test_format_items(self):\n cases = [\n (np.arange(4) * np.timedelta64(1, \"D\"), \"0 days 1 days 2 days 3 days\"),\n (\n np.arange(4) * np.timedelta64(3, \"h\"),\n \"00:00:00 03:00:00 06:00:00 09:00:00\",\n ),\n (\n np.arange(4) * np.timedelta64(500, \"ms\"),\n \"00:00:00 00:00:00.500000 00:00:01 00:00:01.500000\",\n ),\n (pd.to_timedelta([\"NaT\", \"0s\", \"1s\", \"NaT\"]), \"NaT 00:00:00 00:00:01 NaT\"),\n (\n pd.to_timedelta([\"1 day 1 hour\", \"1 day\", \"0 hours\"]),\n \"1 days 01:00:00 1 days 00:00:00 0 days 00:00:00\",\n ),\n ([1, 2, 3], \"1 2 3\"),\n ]\n for item, expected in cases:\n actual = \" \".join(formatting.format_items(item))\n assert expected == actual\n\n def test_format_array_flat(self):\n actual = 
formatting.format_array_flat(np.arange(100), 2)\n expected = \"...\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(100), 9)\n expected = \"0 ... 99\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(100), 10)\n expected = \"0 1 ... 99\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(100), 13)\n expected = \"0 1 ... 98 99\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(100), 15)\n expected = \"0 1 2 ... 98 99\"\n assert expected == actual\n\n # NB: Probably not ideal; an alternative would be cutting after the\n # first ellipsis\n actual = formatting.format_array_flat(np.arange(100.0), 11)\n expected = \"0.0 ... ...\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(100.0), 12)\n expected = \"0.0 ... 99.0\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(3), 5)\n expected = \"0 1 2\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(4.0), 11)\n expected = \"0.0 ... 3.0\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(0), 0)\n expected = \"\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(1), 1)\n expected = \"0\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(2), 3)\n expected = \"0 1\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(4), 7)\n expected = \"0 1 2 3\"\n assert expected == actual\n\n actual = formatting.format_array_flat(np.arange(5), 7)\n expected = \"0 ... 4\"\n assert expected == actual\n\n long_str = [\" \".join([\"hello world\" for _ in range(100)])]\n actual = formatting.format_array_flat(np.asarray([long_str]), 21)\n expected = \"'hello world hello...\"\n assert expected == actual\n\n def test_pretty_print(self):\n assert formatting.pretty_print(\"abcdefghij\", 8) == \"abcde...\"\n assert formatting.pretty_print(\"ß\", 1) == \"ß\"\n\n def test_maybe_truncate(self):\n assert formatting.maybe_truncate(\"ß\", 10) == \"ß\"\n\n def test_format_timestamp_out_of_bounds(self):\n from datetime import datetime\n\n date = datetime(1300, 12, 1)\n expected = \"1300-12-01\"\n result = formatting.format_timestamp(date)\n assert result == expected\n\n date = datetime(2300, 12, 1)\n expected = \"2300-12-01\"\n result = formatting.format_timestamp(date)\n assert result == expected\n\n def test_attribute_repr(self):\n short = formatting.summarize_attr(\"key\", \"Short string\")\n long = formatting.summarize_attr(\"key\", 100 * \"Very long string \")\n newlines = formatting.summarize_attr(\"key\", \"\\n\\n\\n\")\n tabs = formatting.summarize_attr(\"key\", \"\\t\\t\\t\")\n assert short == \" key: Short string\"\n assert len(long) <= 80\n assert long.endswith(\"...\")\n assert \"\\n\" not in newlines\n assert \"\\t\" not in tabs\n\n def test_diff_array_repr(self):\n da_a = xr.DataArray(\n np.array([[1, 2, 3], [4, 5, 6]], dtype=\"int64\"),\n dims=(\"x\", \"y\"),\n coords={\n \"x\": np.array([\"a\", \"b\"], dtype=\"U1\"),\n \"y\": np.array([1, 2, 3], dtype=\"int64\"),\n },\n attrs={\"units\": \"m\", \"description\": \"desc\"},\n )\n\n da_b = xr.DataArray(\n np.array([1, 2], dtype=\"int64\"),\n dims=\"x\",\n coords={\n \"x\": np.array([\"a\", \"c\"], dtype=\"U1\"),\n \"label\": (\"x\", np.array([1, 2], dtype=\"int64\")),\n },\n attrs={\"units\": \"kg\"},\n )\n\n byteorder = \"<\" if sys.byteorder == \"little\" else \">\"\n expected = dedent(\n 
\"\"\"\\\n Left and right DataArray objects are not identical\n Differing dimensions:\n (x: 2, y: 3) != (x: 2)\n Differing values:\n L\n array([[1, 2, 3],\n [4, 5, 6]], dtype=int64)\n R\n array([1, 2], dtype=int64)\n Differing coordinates:\n L * x (x) %cU1 'a' 'b'\n R * x (x) %cU1 'a' 'c'\n Coordinates only on the left object:\n * y (y) int64 1 2 3\n Coordinates only on the right object:\n label (x) int64 1 2\n Differing attributes:\n L units: m\n R units: kg\n Attributes only on the left object:\n description: desc\"\"\"\n % (byteorder, byteorder)\n )\n\n actual = formatting.diff_array_repr(da_a, da_b, \"identical\")\n try:\n assert actual == expected\n except AssertionError:\n # depending on platform, dtype may not be shown in numpy array repr\n assert actual == expected.replace(\", dtype=int64\", \"\")\n\n va = xr.Variable(\n \"x\", np.array([1, 2, 3], dtype=\"int64\"), {\"title\": \"test Variable\"}\n )\n vb = xr.Variable((\"x\", \"y\"), np.array([[1, 2, 3], [4, 5, 6]], dtype=\"int64\"))\n\n expected = dedent(\n \"\"\"\\\n Left and right Variable objects are not equal\n Differing dimensions:\n (x: 3) != (x: 2, y: 3)\n Differing values:\n L\n array([1, 2, 3], dtype=int64)\n R\n array([[1, 2, 3],\n [4, 5, 6]], dtype=int64)\"\"\"\n )\n\n actual = formatting.diff_array_repr(va, vb, \"equals\")\n try:\n assert actual == expected\n except AssertionError:\n assert actual == expected.replace(\", dtype=int64\", \"\")\n\n @pytest.mark.filterwarnings(\"error\")\n def test_diff_attrs_repr_with_array(self):\n attrs_a = {\"attr\": np.array([0, 1])}\n\n attrs_b = {\"attr\": 1}\n expected = dedent(\n \"\"\"\\\n Differing attributes:\n L attr: [0 1]\n R attr: 1\n \"\"\"\n ).strip()\n actual = formatting.diff_attrs_repr(attrs_a, attrs_b, \"equals\")\n assert expected == actual\n\n attrs_b = {\"attr\": np.array([-3, 5])}\n expected = dedent(\n \"\"\"\\\n Differing attributes:\n L attr: [0 1]\n R attr: [-3 5]\n \"\"\"\n ).strip()\n actual = formatting.diff_attrs_repr(attrs_a, attrs_b, \"equals\")\n assert expected == actual\n\n # should not raise a warning\n attrs_b = {\"attr\": np.array([0, 1, 2])}\n expected = dedent(\n \"\"\"\\\n Differing attributes:\n L attr: [0 1]\n R attr: [0 1 2]\n \"\"\"\n ).strip()\n actual = formatting.diff_attrs_repr(attrs_a, attrs_b, \"equals\")\n assert expected == actual\n\n def test_diff_dataset_repr(self):\n ds_a = xr.Dataset(\n data_vars={\n \"var1\": ((\"x\", \"y\"), np.array([[1, 2, 3], [4, 5, 6]], dtype=\"int64\")),\n \"var2\": (\"x\", np.array([3, 4], dtype=\"int64\")),\n },\n coords={\n \"x\": np.array([\"a\", \"b\"], dtype=\"U1\"),\n \"y\": np.array([1, 2, 3], dtype=\"int64\"),\n },\n attrs={\"units\": \"m\", \"description\": \"desc\"},\n )\n\n ds_b = xr.Dataset(\n data_vars={\"var1\": (\"x\", np.array([1, 2], dtype=\"int64\"))},\n coords={\n \"x\": (\"x\", np.array([\"a\", \"c\"], dtype=\"U1\"), {\"source\": 0}),\n \"label\": (\"x\", np.array([1, 2], dtype=\"int64\")),\n },\n attrs={\"units\": \"kg\"},\n )\n\n byteorder = \"<\" if sys.byteorder == \"little\" else \">\"\n expected = dedent(\n \"\"\"\\\n Left and right Dataset objects are not identical\n Differing dimensions:\n (x: 2, y: 3) != (x: 2)\n Differing coordinates:\n L * x (x) %cU1 'a' 'b'\n R * x (x) %cU1 'a' 'c'\n source: 0\n Coordinates only on the left object:\n * y (y) int64 1 2 3\n Coordinates only on the right object:\n label (x) int64 1 2\n Differing data variables:\n L var1 (x, y) int64 1 2 3 4 5 6\n R var1 (x) int64 1 2\n Data variables only on the left object:\n var2 (x) int64 3 4\n Differing 
attributes:\n L units: m\n R units: kg\n Attributes only on the left object:\n description: desc\"\"\"\n % (byteorder, byteorder)\n )\n\n actual = formatting.diff_dataset_repr(ds_a, ds_b, \"identical\")\n assert actual == expected\n\n def test_array_repr(self):\n ds = xr.Dataset(coords={\"foo\": [1, 2, 3], \"bar\": [1, 2, 3]})\n ds[(1, 2)] = xr.DataArray([0], dims=\"test\")\n actual = formatting.array_repr(ds[(1, 2)])\n expected = dedent(\n \"\"\"\\\n <xarray.DataArray (1, 2) (test: 1)>\n array([0])\n Dimensions without coordinates: test\"\"\"\n )\n\n assert actual == expected\n\n\[email protected](not IS_NEP18_ACTIVE, reason=\"requires __array_function__\")\ndef test_inline_variable_array_repr_custom_repr():\n class CustomArray:\n def __init__(self, value, attr):\n self.value = value\n self.attr = attr\n\n def _repr_inline_(self, width):\n formatted = f\"({self.attr}) {self.value}\"\n if len(formatted) > width:\n formatted = f\"({self.attr}) ...\"\n\n return formatted\n\n def __array_function__(self, *args, **kwargs):\n return NotImplemented\n\n @property\n def shape(self):\n return self.value.shape\n\n @property\n def dtype(self):\n return self.value.dtype\n\n @property\n def ndim(self):\n return self.value.ndim\n\n value = CustomArray(np.array([20, 40]), \"m\")\n variable = xr.Variable(\"x\", value)\n\n max_width = 10\n actual = formatting.inline_variable_array_repr(variable, max_width=10)\n\n assert actual == value._repr_inline_(max_width)\n\n\ndef test_set_numpy_options():\n original_options = np.get_printoptions()\n with formatting.set_numpy_options(threshold=10):\n assert len(repr(np.arange(500))) < 200\n # original options are restored\n assert np.get_printoptions() == original_options\n\n\ndef test_short_numpy_repr():\n cases = [\n np.random.randn(500),\n np.random.randn(20, 20),\n np.random.randn(5, 10, 15),\n np.random.randn(5, 10, 15, 3),\n np.random.randn(100, 5, 1),\n ]\n # number of lines:\n # for default numpy repr: 167, 140, 254, 248, 599\n # for short_numpy_repr: 1, 7, 24, 19, 25\n for array in cases:\n num_lines = formatting.short_numpy_repr(array).count(\"\\n\") + 1\n assert num_lines < 30\n\n\ndef test_large_array_repr_length():\n\n da = xr.DataArray(np.random.randn(100, 5, 1))\n\n result = repr(da).splitlines()\n assert len(result) < 50\n"
] |
[
[
"numpy.arange",
"numpy.int32",
"numpy.dtype",
"numpy.ones",
"numpy.int64",
"numpy.float32",
"pandas.date_range",
"pandas.to_timedelta",
"numpy.array",
"numpy.zeros"
],
[
"numpy.get_printoptions",
"pandas.Timestamp",
"numpy.asarray",
"numpy.arange",
"pandas.Timedelta",
"numpy.timedelta64",
"numpy.random.randn",
"pandas.to_timedelta",
"numpy.array"
]
] |
[
            {
                "matplotlib": [],
                "numpy": [],
                "pandas": [
                    "0.19",
                    "0.20",
                    "0.21",
                    "0.23",
                    "0.24",
                    "0.25",
                    "1.0",
                    "1.1",
                    "1.2",
                    "1.3",
                    "1.4",
                    "1.5",
                    "2.0"
                ],
                "scipy": [],
                "tensorflow": []
            },
            {
                "matplotlib": [],
                "numpy": [],
                "pandas": [
                    "0.19",
                    "0.20",
                    "0.21",
                    "0.23",
                    "0.24",
                    "0.25",
                    "1.0",
                    "1.1",
                    "1.2",
                    "1.3",
                    "1.4",
                    "1.5",
                    "2.0"
                ],
                "scipy": [],
                "tensorflow": []
            }
] |
richung99/digitizePlots
|
[
"74779e9ec640a11bc08d5d1967c85ac4fa44ea5e",
"6b408c820660a415a289726e3223e8f558d3e18b",
"6b408c820660a415a289726e3223e8f558d3e18b",
"6b408c820660a415a289726e3223e8f558d3e18b",
"6b408c820660a415a289726e3223e8f558d3e18b",
"b7879d75a63b6500b2e7d2c3eba5aa7670339274",
"6b408c820660a415a289726e3223e8f558d3e18b",
"6b408c820660a415a289726e3223e8f558d3e18b",
"6b408c820660a415a289726e3223e8f558d3e18b",
"6b408c820660a415a289726e3223e8f558d3e18b"
] |
[
"venv/Lib/site-packages/networkx/algorithms/link_analysis/hits_alg.py",
"venv/Lib/site-packages/nibabel/nifti1.py",
"venv/Lib/site-packages/nibabel/streamlines/tck.py",
"venv/Lib/site-packages/nibabel/tests/test_loadsave.py",
"venv/Lib/site-packages/nibabel/tests/data/check_parrec_reslice.py",
"venv/Lib/site-packages/nipype/interfaces/dipy/setup.py",
"venv/Lib/site-packages/nibabel/externals/tests/test_netcdf.py",
"venv/Lib/site-packages/nibabel/benchmarks/bench_finite_range.py",
"venv/Lib/site-packages/nibabel/cmdline/tests/test_stats.py",
"venv/Scripts/log2design.py"
] |
[
"\"\"\"Hubs and authorities analysis of graph structure.\n\"\"\"\nimport networkx as nx\n\n__all__ = [\"hits\", \"hits_numpy\", \"hits_scipy\", \"authority_matrix\", \"hub_matrix\"]\n\n\ndef hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):\n \"\"\"Returns HITS hubs and authorities values for nodes.\n\n The HITS algorithm computes two numbers for a node.\n Authorities estimates the node value based on the incoming links.\n Hubs estimates the node value based on outgoing links.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n max_iter : integer, optional\n Maximum number of iterations in power method.\n\n tol : float, optional\n Error tolerance used to check convergence in power method iteration.\n\n nstart : dictionary, optional\n Starting value of each node for power method iteration.\n\n normalized : bool (default=True)\n Normalize results by the sum of all of the values.\n\n Returns\n -------\n (hubs,authorities) : two-tuple of dictionaries\n Two dictionaries keyed by node containing the hub and authority\n values.\n\n Raises\n ------\n PowerIterationFailedConvergence\n If the algorithm fails to converge to the specified tolerance\n within the specified number of iterations of the power iteration\n method.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> h, a = nx.hits(G)\n\n Notes\n -----\n The eigenvector calculation is done by the power iteration method\n and has no guarantee of convergence. The iteration will stop\n after max_iter iterations or an error tolerance of\n number_of_nodes(G)*tol has been reached.\n\n The HITS algorithm was designed for directed graphs but this\n algorithm does not check if the input graph is directed and will\n execute on undirected graphs.\n\n References\n ----------\n .. [1] A. Langville and C. Meyer,\n \"A survey of eigenvector methods of web information retrieval.\"\n http://citeseer.ist.psu.edu/713792.html\n .. 
[2] Jon Kleinberg,\n Authoritative sources in a hyperlinked environment\n Journal of the ACM 46 (5): 604-32, 1999.\n doi:10.1145/324133.324140.\n http://www.cs.cornell.edu/home/kleinber/auth.pdf.\n \"\"\"\n if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:\n raise Exception(\"hits() not defined for graphs with multiedges.\")\n if len(G) == 0:\n return {}, {}\n # choose fixed starting vector if not given\n if nstart is None:\n h = dict.fromkeys(G, 1.0 / G.number_of_nodes())\n else:\n h = nstart\n # normalize starting vector\n s = 1.0 / sum(h.values())\n for k in h:\n h[k] *= s\n for _ in range(max_iter): # power iteration: make up to max_iter iterations\n hlast = h\n h = dict.fromkeys(hlast.keys(), 0)\n a = dict.fromkeys(hlast.keys(), 0)\n # this \"matrix multiply\" looks odd because it is\n # doing a left multiply a^T=hlast^T*G\n for n in h:\n for nbr in G[n]:\n a[nbr] += hlast[n] * G[n][nbr].get(\"weight\", 1)\n # now multiply h=Ga\n for n in h:\n for nbr in G[n]:\n h[n] += a[nbr] * G[n][nbr].get(\"weight\", 1)\n # normalize vector\n s = 1.0 / max(h.values())\n for n in h:\n h[n] *= s\n # normalize vector\n s = 1.0 / max(a.values())\n for n in a:\n a[n] *= s\n # check convergence, l1 norm\n err = sum([abs(h[n] - hlast[n]) for n in h])\n if err < tol:\n break\n else:\n raise nx.PowerIterationFailedConvergence(max_iter)\n if normalized:\n s = 1.0 / sum(a.values())\n for n in a:\n a[n] *= s\n s = 1.0 / sum(h.values())\n for n in h:\n h[n] *= s\n return h, a\n\n\ndef authority_matrix(G, nodelist=None):\n \"\"\"Returns the HITS authority matrix.\"\"\"\n M = nx.to_numpy_array(G, nodelist=nodelist)\n return M.T @ M\n\n\ndef hub_matrix(G, nodelist=None):\n \"\"\"Returns the HITS hub matrix.\"\"\"\n M = nx.to_numpy_array(G, nodelist=nodelist)\n return M @ M.T\n\n\ndef hits_numpy(G, normalized=True):\n \"\"\"Returns HITS hubs and authorities values for nodes.\n\n The HITS algorithm computes two numbers for a node.\n Authorities estimates the node value based on the incoming links.\n Hubs estimates the node value based on outgoing links.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n normalized : bool (default=True)\n Normalize results by the sum of all of the values.\n\n Returns\n -------\n (hubs,authorities) : two-tuple of dictionaries\n Two dictionaries keyed by node containing the hub and authority\n values.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> h, a = nx.hits(G)\n\n Notes\n -----\n The eigenvector calculation uses NumPy's interface to LAPACK.\n\n The HITS algorithm was designed for directed graphs but this\n algorithm does not check if the input graph is directed and will\n execute on undirected graphs.\n\n References\n ----------\n .. [1] A. Langville and C. Meyer,\n \"A survey of eigenvector methods of web information retrieval.\"\n http://citeseer.ist.psu.edu/713792.html\n .. 
[2] Jon Kleinberg,\n Authoritative sources in a hyperlinked environment\n Journal of the ACM 46 (5): 604-32, 1999.\n doi:10.1145/324133.324140.\n http://www.cs.cornell.edu/home/kleinber/auth.pdf.\n \"\"\"\n try:\n import numpy as np\n except ImportError as e:\n raise ImportError(\"hits_numpy() requires NumPy: \" \"http://numpy.org/\") from e\n if len(G) == 0:\n return {}, {}\n H = nx.hub_matrix(G, list(G))\n e, ev = np.linalg.eig(H)\n m = e.argsort()[-1] # index of maximum eigenvalue\n h = np.array(ev[:, m]).flatten()\n A = nx.authority_matrix(G, list(G))\n e, ev = np.linalg.eig(A)\n m = e.argsort()[-1] # index of maximum eigenvalue\n a = np.array(ev[:, m]).flatten()\n if normalized:\n h = h / h.sum()\n a = a / a.sum()\n else:\n h = h / h.max()\n a = a / a.max()\n hubs = dict(zip(G, map(float, h)))\n authorities = dict(zip(G, map(float, a)))\n return hubs, authorities\n\n\ndef hits_scipy(G, max_iter=100, tol=1.0e-6, normalized=True):\n \"\"\"Returns HITS hubs and authorities values for nodes.\n\n The HITS algorithm computes two numbers for a node.\n Authorities estimates the node value based on the incoming links.\n Hubs estimates the node value based on outgoing links.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n max_iter : integer, optional\n Maximum number of iterations in power method.\n\n tol : float, optional\n Error tolerance used to check convergence in power method iteration.\n\n nstart : dictionary, optional\n Starting value of each node for power method iteration.\n\n normalized : bool (default=True)\n Normalize results by the sum of all of the values.\n\n Returns\n -------\n (hubs,authorities) : two-tuple of dictionaries\n Two dictionaries keyed by node containing the hub and authority\n values.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> h, a = nx.hits(G)\n\n Notes\n -----\n This implementation uses SciPy sparse matrices.\n\n The eigenvector calculation is done by the power iteration method\n and has no guarantee of convergence. The iteration will stop\n after max_iter iterations or an error tolerance of\n number_of_nodes(G)*tol has been reached.\n\n The HITS algorithm was designed for directed graphs but this\n algorithm does not check if the input graph is directed and will\n execute on undirected graphs.\n\n Raises\n ------\n PowerIterationFailedConvergence\n If the algorithm fails to converge to the specified tolerance\n within the specified number of iterations of the power iteration\n method.\n\n References\n ----------\n .. [1] A. Langville and C. Meyer,\n \"A survey of eigenvector methods of web information retrieval.\"\n http://citeseer.ist.psu.edu/713792.html\n .. 
[2] Jon Kleinberg,\n Authoritative sources in a hyperlinked environment\n Journal of the ACM 46 (5): 604-632, 1999.\n doi:10.1145/324133.324140.\n http://www.cs.cornell.edu/home/kleinber/auth.pdf.\n \"\"\"\n try:\n import numpy as np\n except ImportError as e:\n raise ImportError(\n \"hits_scipy() requires SciPy and NumPy:\"\n \"http://scipy.org/ http://numpy.org/\"\n ) from e\n if len(G) == 0:\n return {}, {}\n M = nx.to_scipy_sparse_matrix(G, nodelist=list(G))\n (n, m) = M.shape # should be square\n A = M.T * M # authority matrix\n x = np.ones((n, 1)) / n # initial guess\n # power iteration on authority matrix\n i = 0\n while True:\n xlast = x\n x = A * x\n x = x / x.max()\n # check convergence, l1 norm\n err = np.absolute(x - xlast).sum()\n if err < tol:\n break\n if i > max_iter:\n raise nx.PowerIterationFailedConvergence(max_iter)\n i += 1\n\n a = np.asarray(x).flatten()\n # h=M*a\n h = np.asarray(M * a).flatten()\n if normalized:\n h = h / h.sum()\n a = a / a.sum()\n hubs = dict(zip(G, map(float, h)))\n authorities = dict(zip(G, map(float, a)))\n return hubs, authorities\n",
"# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\" Read / write access to NIfTI1 image format\n\nNIfTI1 format defined at http://nifti.nimh.nih.gov/nifti-1/\n\"\"\"\nimport warnings\nfrom io import BytesIO\n\nimport numpy as np\nimport numpy.linalg as npl\nfrom numpy.compat.py3k import asstr\n\nfrom .filebasedimages import SerializableImage\nfrom .volumeutils import Recoder, make_dt_codes, endian_codes\nfrom .spatialimages import HeaderDataError, ImageFileError\nfrom .batteryrunners import Report\nfrom .quaternions import fillpositive, quat2mat, mat2quat\nfrom . import analyze # module import\nfrom .spm99analyze import SpmAnalyzeHeader\nfrom .casting import have_binary128\nfrom .pydicom_compat import have_dicom, pydicom as pdcm\n\n# nifti1 flat header definition for Analyze-like first 348 bytes\n# first number in comments indicates offset in file header in bytes\nheader_dtd = [\n ('sizeof_hdr', 'i4'), # 0; must be 348\n ('data_type', 'S10'), # 4; unused\n ('db_name', 'S18'), # 14; unused\n ('extents', 'i4'), # 32; unused\n ('session_error', 'i2'), # 36; unused\n ('regular', 'S1'), # 38; unused\n ('dim_info', 'u1'), # 39; MRI slice ordering code\n ('dim', 'i2', (8,)), # 40; data array dimensions\n ('intent_p1', 'f4'), # 56; first intent parameter\n ('intent_p2', 'f4'), # 60; second intent parameter\n ('intent_p3', 'f4'), # 64; third intent parameter\n ('intent_code', 'i2'), # 68; NIFTI intent code\n ('datatype', 'i2'), # 70; it's the datatype\n ('bitpix', 'i2'), # 72; number of bits per voxel\n ('slice_start', 'i2'), # 74; first slice index\n ('pixdim', 'f4', (8,)), # 76; grid spacings (units below)\n ('vox_offset', 'f4'), # 108; offset to data in image file\n ('scl_slope', 'f4'), # 112; data scaling slope\n ('scl_inter', 'f4'), # 116; data scaling intercept\n ('slice_end', 'i2'), # 120; last slice index\n ('slice_code', 'u1'), # 122; slice timing order\n ('xyzt_units', 'u1'), # 123; units of pixdim[1..4]\n ('cal_max', 'f4'), # 124; max display intensity\n ('cal_min', 'f4'), # 128; min display intensity\n ('slice_duration', 'f4'), # 132; time for 1 slice\n ('toffset', 'f4'), # 136; time axis shift\n ('glmax', 'i4'), # 140; unused\n ('glmin', 'i4'), # 144; unused\n ('descrip', 'S80'), # 148; any text\n ('aux_file', 'S24'), # 228; auxiliary filename\n ('qform_code', 'i2'), # 252; xform code\n ('sform_code', 'i2'), # 254; xform code\n ('quatern_b', 'f4'), # 256; quaternion b param\n ('quatern_c', 'f4'), # 260; quaternion c param\n ('quatern_d', 'f4'), # 264; quaternion d param\n ('qoffset_x', 'f4'), # 268; quaternion x shift\n ('qoffset_y', 'f4'), # 272; quaternion y shift\n ('qoffset_z', 'f4'), # 276; quaternion z shift\n ('srow_x', 'f4', (4,)), # 280; 1st row affine transform\n ('srow_y', 'f4', (4,)), # 296; 2nd row affine transform\n ('srow_z', 'f4', (4,)), # 312; 3rd row affine transform\n ('intent_name', 'S16'), # 328; name or meaning of data\n ('magic', 'S4') # 344; must be 'ni1\\0' or 'n+1\\0'\n]\n\n# Full header numpy dtype\nheader_dtype = np.dtype(header_dtd)\n\n# datatypes not in analyze format, with codes\nif have_binary128():\n # Only enable 128 bit floats if we really have IEEE binary 128 longdoubles\n _float128t = np.longdouble\n 
_complex256t = np.longcomplex\nelse:\n _float128t = np.void\n _complex256t = np.void\n\n_dtdefs = ( # code, label, dtype definition, niistring\n (0, 'none', np.void, \"\"),\n (1, 'binary', np.void, \"\"),\n (2, 'uint8', np.uint8, \"NIFTI_TYPE_UINT8\"),\n (4, 'int16', np.int16, \"NIFTI_TYPE_INT16\"),\n (8, 'int32', np.int32, \"NIFTI_TYPE_INT32\"),\n (16, 'float32', np.float32, \"NIFTI_TYPE_FLOAT32\"),\n (32, 'complex64', np.complex64, \"NIFTI_TYPE_COMPLEX64\"),\n (64, 'float64', np.float64, \"NIFTI_TYPE_FLOAT64\"),\n (128, 'RGB', np.dtype([('R', 'u1'),\n ('G', 'u1'),\n ('B', 'u1')]), \"NIFTI_TYPE_RGB24\"),\n (255, 'all', np.void, ''),\n (256, 'int8', np.int8, \"NIFTI_TYPE_INT8\"),\n (512, 'uint16', np.uint16, \"NIFTI_TYPE_UINT16\"),\n (768, 'uint32', np.uint32, \"NIFTI_TYPE_UINT32\"),\n (1024, 'int64', np.int64, \"NIFTI_TYPE_INT64\"),\n (1280, 'uint64', np.uint64, \"NIFTI_TYPE_UINT64\"),\n (1536, 'float128', _float128t, \"NIFTI_TYPE_FLOAT128\"),\n (1792, 'complex128', np.complex128, \"NIFTI_TYPE_COMPLEX128\"),\n (2048, 'complex256', _complex256t, \"NIFTI_TYPE_COMPLEX256\"),\n (2304, 'RGBA', np.dtype([('R', 'u1'),\n ('G', 'u1'),\n ('B', 'u1'),\n ('A', 'u1')]), \"NIFTI_TYPE_RGBA32\"),\n)\n\n# Make full code alias bank, including dtype column\ndata_type_codes = make_dt_codes(_dtdefs)\n\n# Transform (qform, sform) codes\nxform_codes = Recoder(( # code, label, niistring\n (0, 'unknown', \"NIFTI_XFORM_UNKNOWN\"),\n (1, 'scanner', \"NIFTI_XFORM_SCANNER_ANAT\"),\n (2, 'aligned', \"NIFTI_XFORM_ALIGNED_ANAT\"),\n (3, 'talairach', \"NIFTI_XFORM_TALAIRACH\"),\n (4, 'mni', \"NIFTI_XFORM_MNI_152\"),\n (5, 'template', \"NIFTI_XFORM_TEMPLATE_OTHER\"),\n ), fields=('code', 'label', 'niistring'))\n\n# unit codes\nunit_codes = Recoder(( # code, label\n (0, 'unknown'),\n (1, 'meter'),\n (2, 'mm'),\n (3, 'micron'),\n (8, 'sec'),\n (16, 'msec'),\n (24, 'usec'),\n (32, 'hz'),\n (40, 'ppm'),\n (48, 'rads')), fields=('code', 'label'))\n\nslice_order_codes = Recoder(( # code, label\n (0, 'unknown'),\n (1, 'sequential increasing', 'seq inc'),\n (2, 'sequential decreasing', 'seq dec'),\n (3, 'alternating increasing', 'alt inc'),\n (4, 'alternating decreasing', 'alt dec'),\n (5, 'alternating increasing 2', 'alt inc 2'),\n (6, 'alternating decreasing 2', 'alt dec 2')), fields=('code', 'label'))\n\nintent_codes = Recoder((\n # code, label, parameters description tuple\n (0, 'none', (), \"NIFTI_INTENT_NONE\"),\n (2, 'correlation', ('p1 = DOF',), \"NIFTI_INTENT_CORREL\"),\n (3, 't test', ('p1 = DOF',), \"NIFTI_INTENT_TTEST\"),\n (4, 'f test', ('p1 = numerator DOF', 'p2 = denominator DOF'),\n \"NIFTI_INTENT_FTEST\"),\n (5, 'z score', (), \"NIFTI_INTENT_ZSCORE\"),\n (6, 'chi2', ('p1 = DOF',), \"NIFTI_INTENT_CHISQ\"),\n # two parameter beta distribution\n (7, 'beta',\n ('p1=a', 'p2=b'),\n \"NIFTI_INTENT_BETA\"),\n # Prob(x) = (p1 choose x) * p2^x * (1-p2)^(p1-x), for x=0,1,...,p1\n (8, 'binomial',\n ('p1 = number of trials', 'p2 = probability per trial'),\n \"NIFTI_INTENT_BINOM\"),\n # 2 parameter gamma\n # Density(x) proportional to # x^(p1-1) * exp(-p2*x)\n (9, 'gamma',\n ('p1 = shape, p2 = scale', 2),\n \"NIFTI_INTENT_GAMMA\"),\n (10, 'poisson',\n ('p1 = mean',),\n \"NIFTI_INTENT_POISSON\"),\n (11, 'normal',\n ('p1 = mean', 'p2 = standard deviation',),\n \"NIFTI_INTENT_NORMAL\"),\n (12, 'non central f test',\n ('p1 = numerator DOF',\n 'p2 = denominator DOF',\n 'p3 = numerator noncentrality parameter',),\n \"NIFTI_INTENT_FTEST_NONC\"),\n (13, 'non central chi2',\n ('p1 = DOF', 'p2 = noncentrality parameter',),\n 
\"NIFTI_INTENT_CHISQ_NONC\"),\n (14, 'logistic',\n ('p1 = location', 'p2 = scale',),\n \"NIFTI_INTENT_LOGISTIC\"),\n (15, 'laplace',\n ('p1 = location', 'p2 = scale'),\n \"NIFTI_INTENT_LAPLACE\"),\n (16, 'uniform',\n ('p1 = lower end', 'p2 = upper end'),\n \"NIFTI_INTENT_UNIFORM\"),\n (17, 'non central t test',\n ('p1 = DOF', 'p2 = noncentrality parameter'),\n \"NIFTI_INTENT_TTEST_NONC\"),\n (18, 'weibull',\n ('p1 = location', 'p2 = scale, p3 = power'),\n \"NIFTI_INTENT_WEIBULL\"),\n # p1 = 1 = 'half normal' distribution\n # p1 = 2 = Rayleigh distribution\n # p1 = 3 = Maxwell-Boltzmann distribution.\n (19, 'chi', ('p1 = DOF',), \"NIFTI_INTENT_CHI\"),\n (20, 'inverse gaussian',\n ('pi = mu', 'p2 = lambda'),\n \"NIFTI_INTENT_INVGAUSS\"),\n (21, 'extreme value 1',\n ('p1 = location', 'p2 = scale'),\n \"NIFTI_INTENT_EXTVAL\"),\n (22, 'p value', (), \"NIFTI_INTENT_PVAL\"),\n (23, 'log p value', (), \"NIFTI_INTENT_LOGPVAL\"),\n (24, 'log10 p value', (), \"NIFTI_INTENT_LOG10PVAL\"),\n (1001, 'estimate', (), \"NIFTI_INTENT_ESTIMATE\"),\n (1002, 'label', (), \"NIFTI_INTENT_LABEL\"),\n (1003, 'neuroname', (), \"NIFTI_INTENT_NEURONAME\"),\n (1004, 'general matrix',\n ('p1 = M', 'p2 = N'),\n \"NIFTI_INTENT_GENMATRIX\"),\n (1005, 'symmetric matrix', ('p1 = M',), \"NIFTI_INTENT_SYMMATRIX\"),\n (1006, 'displacement vector', (), \"NIFTI_INTENT_DISPVECT\"),\n (1007, 'vector', (), \"NIFTI_INTENT_VECTOR\"),\n (1008, 'pointset', (), \"NIFTI_INTENT_POINTSET\"),\n (1009, 'triangle', (), \"NIFTI_INTENT_TRIANGLE\"),\n (1010, 'quaternion', (), \"NIFTI_INTENT_QUATERNION\"),\n (1011, 'dimensionless', (), \"NIFTI_INTENT_DIMLESS\"),\n (2001, 'time series',\n (),\n \"NIFTI_INTENT_TIME_SERIES\",\n \"NIFTI_INTENT_TIMESERIES\"), # this mis-spell occurs in the wild\n (2002, 'node index', (), \"NIFTI_INTENT_NODE_INDEX\"),\n (2003, 'rgb vector', (), \"NIFTI_INTENT_RGB_VECTOR\"),\n (2004, 'rgba vector', (), \"NIFTI_INTENT_RGBA_VECTOR\"),\n (2005, 'shape', (), \"NIFTI_INTENT_SHAPE\"),\n # FSL-specific intent codes - codes used by FNIRT\n # ($FSLDIR/warpfns/fnirt_file_reader.h:104)\n (2006, 'fnirt disp field', (), 'FSL_FNIRT_DISPLACEMENT_FIELD'),\n (2007, 'fnirt cubic spline coef', (), 'FSL_CUBIC_SPLINE_COEFFICIENTS'),\n (2008, 'fnirt dct coef', (), 'FSL_DCT_COEFFICIENTS'),\n (2009, 'fnirt quad spline coef', (), 'FSL_QUADRATIC_SPLINE_COEFFICIENTS'),\n # FSL-specific intent codes - codes used by TOPUP\n # ($FSLDIR/topup/topup_file_io.h:104)\n (2016, 'topup cubic spline coef ', (),\n 'FSL_TOPUP_CUBIC_SPLINE_COEFFICIENTS'),\n (2017, 'topup quad spline coef', (),\n 'FSL_TOPUP_QUADRATIC_SPLINE_COEFFICIENTS'),\n (2018, 'topup field', (), 'FSL_TOPUP_FIELD'),\n), fields=('code', 'label', 'parameters', 'niistring'))\n\n\nclass Nifti1Extension(object):\n \"\"\"Baseclass for NIfTI1 header extensions.\n\n This class is sufficient to handle very simple text-based extensions, such\n as `comment`. More sophisticated extensions should/will be supported by\n dedicated subclasses.\n \"\"\"\n\n def __init__(self, code, content):\n \"\"\"\n Parameters\n ----------\n code : int or str\n Canonical extension code as defined in the NIfTI standard, given\n either as integer or corresponding label\n (see :data:`~nibabel.nifti1.extension_codes`)\n content : str\n Extension content as read from the NIfTI file header. 
This content is\n converted into a runtime representation.\n \"\"\"\n try:\n self._code = extension_codes.code[code]\n except KeyError:\n # XXX or fail or at least complain?\n self._code = code\n self._content = self._unmangle(content)\n\n def _unmangle(self, value):\n \"\"\"Convert the extension content into its runtime representation.\n\n The default implementation does nothing at all.\n\n Parameters\n ----------\n value : str\n Extension content as read from file.\n\n Returns\n -------\n The same object that was passed as `value`.\n\n Notes\n -----\n Subclasses should reimplement this method to provide the desired\n unmangling procedure and may return any type of object.\n \"\"\"\n return value\n\n def _mangle(self, value):\n \"\"\"Convert the extension content into NIfTI file header representation.\n\n The default implementation does nothing at all.\n\n Parameters\n ----------\n value : str\n Extension content in runtime form.\n\n Returns\n -------\n str\n\n Notes\n -----\n Subclasses should reimplement this method to provide the desired\n mangling procedure.\n \"\"\"\n return value\n\n def get_code(self):\n \"\"\"Return the canonical extension type code.\"\"\"\n return self._code\n\n def get_content(self):\n \"\"\"Return the extension content in its runtime representation.\"\"\"\n return self._content\n\n def get_sizeondisk(self):\n \"\"\"Return the size of the extension in the NIfTI file.\n \"\"\"\n # need raw value size plus 8 bytes for esize and ecode\n size = len(self._mangle(self._content))\n size += 8\n # extensions size has to be a multiple of 16 bytes\n if size % 16 != 0:\n size += 16 - (size % 16)\n return size\n\n def __repr__(self):\n try:\n code = extension_codes.label[self._code]\n except KeyError:\n # deal with unknown codes\n code = self._code\n\n s = f\"Nifti1Extension('{code}', '{self._content}')\"\n return s\n\n def __eq__(self, other):\n return (self._code, self._content) == (other._code, other._content)\n\n def __ne__(self, other):\n return not self == other\n\n def write_to(self, fileobj, byteswap):\n \"\"\" Write header extensions to fileobj\n\n Write starts at fileobj current file position.\n\n Parameters\n ----------\n fileobj : file-like object\n Should implement ``write`` method\n byteswap : boolean\n Flag if byteswapping the data is required.\n\n Returns\n -------\n None\n \"\"\"\n extstart = fileobj.tell()\n rawsize = self.get_sizeondisk()\n # write esize and ecode first\n extinfo = np.array((rawsize, self._code), dtype=np.int32)\n if byteswap:\n extinfo = extinfo.byteswap()\n fileobj.write(extinfo.tobytes())\n # followed by the actual extension content\n # XXX if mangling upon load is implemented, it should be reverted here\n fileobj.write(self._mangle(self._content))\n # be nice and zero out remaining part of the extension till the\n # next 16 byte border\n fileobj.write(b'\\x00' * (extstart + rawsize - fileobj.tell()))\n\n\nclass Nifti1DicomExtension(Nifti1Extension):\n \"\"\"NIfTI1 DICOM header extension\n\n This class is a thin wrapper around pydicom to read a binary DICOM\n byte string. 
If pydicom is available, content is exposed as a Dicom Dataset.\n Otherwise, this silently falls back to the standard NiftiExtension class\n and content is the raw bytestring loaded directly from the nifti file\n header.\n \"\"\"\n def __init__(self, code, content, parent_hdr=None):\n \"\"\"\n Parameters\n ----------\n code : int or str\n Canonical extension code as defined in the NIfTI standard, given\n either as integer or corresponding label\n (see :data:`~nibabel.nifti1.extension_codes`)\n content : bytes or pydicom Dataset or None\n Extension content - either a bytestring as read from the NIfTI file\n header or an existing pydicom Dataset. If a bystestring, the content\n is converted into a Dataset on initialization. If None, a new empty\n Dataset is created.\n parent_hdr : :class:`~nibabel.nifti1.Nifti1Header`, optional\n If a dicom extension belongs to an existing\n :class:`~nibabel.nifti1.Nifti1Header`, it may be provided here to\n ensure that the DICOM dataset is written with correctly corresponding\n endianness; otherwise it is assumed the dataset is little endian.\n\n Notes\n -----\n\n code should always be 2 for DICOM.\n \"\"\"\n\n self._code = code\n if parent_hdr:\n self._is_little_endian = parent_hdr.endianness == '<'\n else:\n self._is_little_endian = True\n if isinstance(content, pdcm.dataset.Dataset):\n self._is_implicit_VR = False\n self._raw_content = self._mangle(content)\n self._content = content\n elif isinstance(content, bytes): # Got a byte string - unmangle it\n self._raw_content = content\n self._is_implicit_VR = self._guess_implicit_VR()\n ds = self._unmangle(content, self._is_implicit_VR,\n self._is_little_endian)\n self._content = ds\n elif content is None: # initialize a new dicom dataset\n self._is_implicit_VR = False\n self._content = pdcm.dataset.Dataset()\n else:\n raise TypeError(f\"content must be either a bytestring or a pydicom Dataset. 
\"\n f\"Got {content.__class__}\")\n\n def _guess_implicit_VR(self):\n \"\"\"Try to guess DICOM syntax by checking for valid VRs.\n\n Without a DICOM Transfer Syntax, it's difficult to tell if Value\n Representations (VRs) are included in the DICOM encoding or not.\n This reads where the first VR would be and checks it against a list of\n valid VRs\n \"\"\"\n potential_vr = self._raw_content[4:6].decode()\n if potential_vr in pdcm.values.converters.keys():\n implicit_VR = False\n else:\n implicit_VR = True\n return implicit_VR\n\n def _unmangle(self, value, is_implicit_VR=False, is_little_endian=True):\n bio = BytesIO(value)\n ds = pdcm.filereader.read_dataset(bio,\n is_implicit_VR,\n is_little_endian)\n return ds\n\n def _mangle(self, dataset):\n bio = BytesIO()\n dio = pdcm.filebase.DicomFileLike(bio)\n dio.is_implicit_VR = self._is_implicit_VR\n dio.is_little_endian = self._is_little_endian\n ds_len = pdcm.filewriter.write_dataset(dio, dataset)\n dio.seek(0)\n return dio.read(ds_len)\n\n\n# NIfTI header extension type codes (ECODE)\n# see nifti1_io.h for a complete list of all known extensions and\n# references to their description or contacts of the respective\n# initiators\nextension_codes = Recoder((\n (0, \"ignore\", Nifti1Extension),\n (2, \"dicom\", Nifti1DicomExtension if have_dicom else Nifti1Extension),\n (4, \"afni\", Nifti1Extension),\n (6, \"comment\", Nifti1Extension),\n (8, \"xcede\", Nifti1Extension),\n (10, \"jimdiminfo\", Nifti1Extension),\n (12, \"workflow_fwds\", Nifti1Extension),\n (14, \"freesurfer\", Nifti1Extension),\n (16, \"pypickle\", Nifti1Extension),\n), fields=('code', 'label', 'handler'))\n\n\nclass Nifti1Extensions(list):\n \"\"\"Simple extension collection, implemented as a list-subclass.\n \"\"\"\n\n def count(self, ecode):\n \"\"\"Returns the number of extensions matching a given *ecode*.\n\n Parameters\n ----------\n code : int | str\n The ecode can be specified either literal or as numerical value.\n \"\"\"\n count = 0\n code = extension_codes.code[ecode]\n for e in self:\n if e.get_code() == code:\n count += 1\n return count\n\n def get_codes(self):\n \"\"\"Return a list of the extension code of all available extensions\"\"\"\n return [e.get_code() for e in self]\n\n def get_sizeondisk(self):\n \"\"\"Return the size of the complete header extensions in the NIfTI file.\n \"\"\"\n return np.sum([e.get_sizeondisk() for e in self])\n\n def __repr__(self):\n return \"Nifti1Extensions(%s)\" % ', '.join(str(e) for e in self)\n\n def __cmp__(self, other):\n return cmp(list(self), list(other))\n\n def write_to(self, fileobj, byteswap):\n \"\"\" Write header extensions to fileobj\n\n Write starts at fileobj current file position.\n\n Parameters\n ----------\n fileobj : file-like object\n Should implement ``write`` method\n byteswap : boolean\n Flag if byteswapping the data is required.\n\n Returns\n -------\n None\n \"\"\"\n for e in self:\n e.write_to(fileobj, byteswap)\n\n @classmethod\n def from_fileobj(klass, fileobj, size, byteswap):\n \"\"\"Read header extensions from a fileobj\n\n Parameters\n ----------\n fileobj : file-like object\n We begin reading the extensions at the current file position\n size : int\n Number of bytes to read. If negative, fileobj will be read till its\n end.\n byteswap : boolean\n Flag if byteswapping the read data is required.\n\n Returns\n -------\n An extension list. 
This list might be empty in case not extensions\n were present in fileobj.\n \"\"\"\n # make empty extension list\n extensions = klass()\n # assume the file pointer is at the beginning of any extensions.\n # read until the whole header is parsed (each extension is a multiple\n # of 16 bytes) or in case of a separate header file till the end\n # (break inside the body)\n while size >= 16 or size < 0:\n # the next 8 bytes should have esize and ecode\n ext_def = fileobj.read(8)\n # nothing was read and instructed to read till the end\n # -> assume all extensions where parsed and break\n if not len(ext_def) and size < 0:\n break\n # otherwise there should be a full extension header\n if not len(ext_def) == 8:\n raise HeaderDataError('failed to read extension header')\n ext_def = np.frombuffer(ext_def, dtype=np.int32)\n if byteswap:\n ext_def = ext_def.byteswap()\n # be extra verbose\n ecode = ext_def[1]\n esize = ext_def[0]\n if esize % 16:\n warnings.warn(\n 'Extension size is not a multiple of 16 bytes; '\n 'Assuming size is correct and hoping for the best',\n UserWarning)\n # read extension itself; esize includes the 8 bytes already read\n evalue = fileobj.read(int(esize - 8))\n if not len(evalue) == esize - 8:\n raise HeaderDataError('failed to read extension content')\n # note that we read a full extension\n size -= esize\n # store raw extension content, but strip trailing NULL chars\n evalue = evalue.rstrip(b'\\x00')\n # 'extension_codes' also knows the best implementation to handle\n # a particular extension type\n try:\n ext = extension_codes.handler[ecode](ecode, evalue)\n except KeyError:\n # unknown extension type\n # XXX complain or fail or go with a generic extension\n ext = Nifti1Extension(ecode, evalue)\n extensions.append(ext)\n return extensions\n\n\nclass Nifti1Header(SpmAnalyzeHeader):\n \"\"\" Class for NIfTI1 header\n\n The NIfTI1 header has many more coded fields than the simpler Analyze\n variants. NIfTI1 headers also have extensions.\n\n Nifti allows the header to be a separate file, as part of a nifti image /\n header pair, or to precede the data in a single file. 
The object needs to\n know which type it is, in order to manage the voxel offset pointing to the\n data, extension reading, and writing the correct magic string.\n\n This class handles the header-preceding-data case.\n \"\"\"\n # Copies of module level definitions\n template_dtype = header_dtype\n _data_type_codes = data_type_codes\n\n # fields with recoders for their values\n _field_recoders = {'datatype': data_type_codes,\n 'qform_code': xform_codes,\n 'sform_code': xform_codes,\n 'intent_code': intent_codes,\n 'slice_code': slice_order_codes}\n\n # data scaling capabilities\n has_data_slope = True\n has_data_intercept = True\n\n # Extension class; should implement __call__ for construction, and\n # ``from_fileobj`` for reading from file\n exts_klass = Nifti1Extensions\n\n # Signal whether this is single (header + data) file\n is_single = True\n\n # Default voxel data offsets for single and pair\n pair_vox_offset = 0\n single_vox_offset = 352\n\n # Magics for single and pair\n pair_magic = b'ni1'\n single_magic = b'n+1'\n\n # Quaternion threshold near 0, based on float32 precision\n quaternion_threshold = -np.finfo(np.float32).eps * 3\n\n def __init__(self,\n binaryblock=None,\n endianness=None,\n check=True,\n extensions=()):\n \"\"\" Initialize header from binary data block and extensions\n \"\"\"\n super(Nifti1Header, self).__init__(binaryblock,\n endianness,\n check)\n self.extensions = self.exts_klass(extensions)\n\n def copy(self):\n \"\"\" Return copy of header\n\n Take reference to extensions as well as copy of header contents\n \"\"\"\n return self.__class__(\n self.binaryblock,\n self.endianness,\n False,\n self.extensions)\n\n @classmethod\n def from_fileobj(klass, fileobj, endianness=None, check=True):\n raw_str = fileobj.read(klass.template_dtype.itemsize)\n hdr = klass(raw_str, endianness, check)\n # Read next 4 bytes to see if we have extensions. 
The nifti standard\n # has this as a 4 byte string; if the first value is not zero, then we\n # have extensions.\n extension_status = fileobj.read(4)\n # Need to test *slice* of extension_status to preserve byte string type\n # on Python 3\n if len(extension_status) < 4 or extension_status[0:1] == b'\\x00':\n return hdr\n # If this is a detached header file read to end\n if not klass.is_single:\n extsize = -1\n else: # otherwise read until the beginning of the data\n extsize = hdr._structarr['vox_offset'] - fileobj.tell()\n byteswap = endian_codes['native'] != hdr.endianness\n hdr.extensions = klass.exts_klass.from_fileobj(fileobj, extsize,\n byteswap)\n return hdr\n\n def write_to(self, fileobj):\n # First check that vox offset is large enough; set if necessary\n if self.is_single:\n vox_offset = self._structarr['vox_offset']\n min_vox_offset = (self.single_vox_offset +\n self.extensions.get_sizeondisk())\n if vox_offset == 0: # vox offset unset; set as necessary\n self._structarr['vox_offset'] = min_vox_offset\n elif vox_offset < min_vox_offset:\n raise HeaderDataError(\n f'vox offset set to {vox_offset}, but need at least {min_vox_offset}')\n super(Nifti1Header, self).write_to(fileobj)\n # Write extensions\n if len(self.extensions) == 0:\n # If single file, write required 0 stream to signal no extensions\n if self.is_single:\n fileobj.write(b'\\x00' * 4)\n return\n # Signal there are extensions that follow\n fileobj.write(b'\\x01\\x00\\x00\\x00')\n byteswap = endian_codes['native'] != self.endianness\n self.extensions.write_to(fileobj, byteswap)\n\n def get_best_affine(self):\n \"\"\" Select best of available transforms \"\"\"\n hdr = self._structarr\n if hdr['sform_code'] != 0:\n return self.get_sform()\n if hdr['qform_code'] != 0:\n return self.get_qform()\n return self.get_base_affine()\n\n @classmethod\n def default_structarr(klass, endianness=None):\n \"\"\" Create empty header binary block with given endianness \"\"\"\n hdr_data = super(Nifti1Header, klass).default_structarr(endianness)\n if klass.is_single:\n hdr_data['magic'] = klass.single_magic\n else:\n hdr_data['magic'] = klass.pair_magic\n return hdr_data\n\n @classmethod\n def from_header(klass, header=None, check=True):\n \"\"\" Class method to create header from another header\n\n Extend Analyze header copy by copying extensions from other Nifti\n types.\n\n Parameters\n ----------\n header : ``Header`` instance or mapping\n a header of this class, or another class of header for\n conversion to this type\n check : {True, False}\n whether to check header for integrity\n\n Returns\n -------\n hdr : header instance\n fresh header instance of our own class\n \"\"\"\n new_hdr = super(Nifti1Header, klass).from_header(header, check)\n if isinstance(header, Nifti1Header):\n new_hdr.extensions[:] = header.extensions[:]\n return new_hdr\n\n def get_data_shape(self):\n \"\"\" Get shape of data\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> hdr.get_data_shape()\n (0,)\n >>> hdr.set_data_shape((1,2,3))\n >>> hdr.get_data_shape()\n (1, 2, 3)\n\n Expanding number of dimensions gets default zooms\n\n >>> hdr.get_zooms()\n (1.0, 1.0, 1.0)\n\n Notes\n -----\n Applies freesurfer hack for large vectors described in `issue 100`_ and\n `save_nifti.m <save77_>`_.\n\n Allows for freesurfer hack for 7th order icosahedron surface described\n in `issue 309`_, load_nifti.m_, and `save_nifti.m <save50_>`_.\n \"\"\"\n shape = super(Nifti1Header, self).get_data_shape()\n # Apply freesurfer hack for large vectors\n if shape[:3] == (-1, 1, 
1):\n vec_len = int(self._structarr['glmin'])\n if vec_len == 0:\n raise HeaderDataError('-1 in dim[1] but 0 in glmin; '\n 'inconsistent freesurfer type header?')\n return (vec_len, 1, 1) + shape[3:]\n # Apply freesurfer hack for ico7 surface\n elif shape[:3] == (27307, 1, 6):\n return (163842, 1, 1) + shape[3:]\n else: # Normal case\n return shape\n\n def set_data_shape(self, shape):\n \"\"\" Set shape of data # noqa\n\n If ``ndims == len(shape)`` then we set zooms for dimensions higher than\n ``ndims`` to 1.0\n\n Nifti1 images can have up to seven dimensions. For FreeSurfer-variant\n Nifti surface files, the first dimension is assumed to correspond to\n vertices/nodes on a surface, and dimensions two and three are\n constrained to have depth of 1. Dimensions 4-7 are constrained only by\n type bounds.\n\n Parameters\n ----------\n shape : sequence\n sequence of integers specifying data array shape\n\n Notes\n -----\n Applies freesurfer hack for large vectors described in `issue 100`_ and\n `save_nifti.m <save77_>`_.\n\n Allows for freesurfer hack for 7th order icosahedron surface described\n in `issue 309`_, load_nifti.m_, and `save_nifti.m <save50_>`_.\n\n The Nifti1 `standard header`_ allows for the following \"point set\"\n definition of a surface, not currently implemented in nibabel.\n\n ::\n\n To signify that the vector value at each voxel is really a\n spatial coordinate (e.g., the vertices or nodes of a surface mesh):\n - dataset must have a 5th dimension\n - intent_code must be NIFTI_INTENT_POINTSET\n - dim[0] = 5\n - dim[1] = number of points\n - dim[2] = dim[3] = dim[4] = 1\n - dim[5] must be the dimensionality of space (e.g., 3 => 3D space).\n - intent_name may describe the object these points come from\n (e.g., \"pial\", \"gray/white\" , \"EEG\", \"MEG\").\n\n .. _issue 100: https://github.com/nipy/nibabel/issues/100\n .. _issue 309: https://github.com/nipy/nibabel/issues/309\n .. _save77:\n https://github.com/fieldtrip/fieldtrip/blob/428798b/external/freesurfer/save_nifti.m#L77-L82\n .. _save50:\n https://github.com/fieldtrip/fieldtrip/blob/428798b/external/freesurfer/save_nifti.m#L50-L56\n .. _load_nifti.m:\n https://github.com/fieldtrip/fieldtrip/blob/428798b/external/freesurfer/load_nifti.m#L86-L89\n .. 
_standard header: http://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h\n \"\"\"\n hdr = self._structarr\n shape = tuple(shape)\n\n # Apply freesurfer hack for ico7 surface\n if shape[:3] == (163842, 1, 1):\n shape = (27307, 1, 6) + shape[3:]\n # Apply freesurfer hack for large vectors\n elif (len(shape) >= 3 and shape[1:3] == (1, 1) and\n shape[0] > np.iinfo(hdr['dim'].dtype.base).max):\n try:\n hdr['glmin'] = shape[0]\n except OverflowError:\n overflow = True\n else:\n overflow = hdr['glmin'] != shape[0]\n if overflow:\n raise HeaderDataError(f'shape[0] {shape[0]} does not fit in glmax datatype')\n warnings.warn('Using large vector Freesurfer hack; header will '\n 'not be compatible with SPM or FSL', stacklevel=2)\n shape = (-1, 1, 1) + shape[3:]\n super(Nifti1Header, self).set_data_shape(shape)\n\n def get_qform_quaternion(self):\n \"\"\" Compute quaternion from b, c, d of quaternion\n\n Fills a value by assuming this is a unit quaternion\n \"\"\"\n hdr = self._structarr\n bcd = [hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d']]\n # Adjust threshold to precision of stored values in header\n return fillpositive(bcd, self.quaternion_threshold)\n\n def get_qform(self, coded=False):\n \"\"\" Return 4x4 affine matrix from qform parameters in header\n\n Parameters\n ----------\n coded : bool, optional\n If True, return {affine or None}, and qform code. If False, just\n return affine. {affine or None} means, return None if qform code\n == 0, and affine otherwise.\n\n Returns\n -------\n affine : None or (4,4) ndarray\n If `coded` is False, always return affine reconstructed from qform\n quaternion. If `coded` is True, return None if qform code is 0,\n else return the affine.\n code : int\n Qform code. Only returned if `coded` is True.\n \"\"\"\n hdr = self._structarr\n code = int(hdr['qform_code'])\n if code == 0 and coded:\n return None, 0\n quat = self.get_qform_quaternion()\n R = quat2mat(quat)\n vox = hdr['pixdim'][1:4].copy()\n if np.any(vox < 0):\n raise HeaderDataError('pixdims[1,2,3] should be positive')\n qfac = hdr['pixdim'][0]\n if qfac not in (-1, 1):\n raise HeaderDataError('qfac (pixdim[0]) should be 1 or -1')\n vox[-1] *= qfac\n S = np.diag(vox)\n M = np.dot(R, S)\n out = np.eye(4)\n out[0:3, 0:3] = M\n out[0:3, 3] = [hdr['qoffset_x'], hdr['qoffset_y'], hdr['qoffset_z']]\n if coded:\n return out, code\n return out\n\n def set_qform(self, affine, code=None, strip_shears=True):\n \"\"\" Set qform header values from 4x4 affine\n\n Parameters\n ----------\n affine : None or 4x4 array\n affine transform to write into sform. If None, only set code.\n code : None, string or integer, optional\n String or integer giving meaning of transform in *affine*.\n The default is None. If code is None, then:\n\n * If affine is None, `code`-> 0\n * If affine not None and existing qform code in header == 0,\n `code`-> 2 (aligned)\n * If affine not None and existing qform code in header != 0,\n `code`-> existing qform code in header\n\n strip_shears : bool, optional\n Whether to strip shears in `affine`. If True, shears will be\n silently stripped. If False, the presence of shears will raise a\n ``HeaderDataError``\n\n Notes\n -----\n The qform transform only encodes translations, rotations and\n zooms. If there are shear components to the `affine` transform, and\n `strip_shears` is True (the default), the written qform gives the\n closest approximation where the rotation matrix is orthogonal. This is\n to allow quaternion representation. 
The orthogonal representation\n enforces orthogonal axes.\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> int(hdr['qform_code']) # gives 0 - unknown\n 0\n >>> affine = np.diag([1,2,3,1])\n >>> np.all(hdr.get_qform() == affine)\n False\n >>> hdr.set_qform(affine)\n >>> np.all(hdr.get_qform() == affine)\n True\n >>> int(hdr['qform_code']) # gives 2 - aligned\n 2\n >>> hdr.set_qform(affine, code='talairach')\n >>> int(hdr['qform_code'])\n 3\n >>> hdr.set_qform(affine, code=None)\n >>> int(hdr['qform_code'])\n 3\n >>> hdr.set_qform(affine, code='scanner')\n >>> int(hdr['qform_code'])\n 1\n >>> hdr.set_qform(None)\n >>> int(hdr['qform_code'])\n 0\n \"\"\"\n hdr = self._structarr\n old_code = hdr['qform_code']\n if code is None:\n if affine is None:\n code = 0\n elif old_code == 0:\n code = 2 # aligned\n else:\n code = old_code\n else: # code set\n code = self._field_recoders['qform_code'][code]\n hdr['qform_code'] = code\n if affine is None:\n return\n affine = np.asarray(affine)\n if not affine.shape == (4, 4):\n raise TypeError('Need 4x4 affine as input')\n trans = affine[:3, 3]\n RZS = affine[:3, :3]\n zooms = np.sqrt(np.sum(RZS * RZS, axis=0))\n R = RZS / zooms\n # Set qfac to make R determinant positive\n if npl.det(R) > 0:\n qfac = 1\n else:\n qfac = -1\n R[:, -1] *= -1\n # Make R orthogonal (to allow quaternion representation)\n # The orthogonal representation enforces orthogonal axes\n # (a subtle requirement of the NIFTI format qform transform)\n # Transform below is polar decomposition, returning the closest\n # orthogonal matrix PR, to input R\n P, S, Qs = npl.svd(R)\n PR = np.dot(P, Qs)\n if not strip_shears and not np.allclose(PR, R):\n raise HeaderDataError(\"Shears in affine and `strip_shears` is \"\n \"False\")\n # Convert to quaternion\n quat = mat2quat(PR)\n # Set into header\n hdr['qoffset_x'], hdr['qoffset_y'], hdr['qoffset_z'] = trans\n hdr['pixdim'][0] = qfac\n hdr['pixdim'][1:4] = zooms\n hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d'] = quat[1:]\n\n def get_sform(self, coded=False):\n \"\"\" Return 4x4 affine matrix from sform parameters in header\n\n Parameters\n ----------\n coded : bool, optional\n If True, return {affine or None}, and sform code. If False, just\n return affine. {affine or None} means, return None if sform code\n == 0, and affine otherwise.\n\n Returns\n -------\n affine : None or (4,4) ndarray\n If `coded` is False, always return affine from sform fields. If\n `coded` is True, return None if sform code is 0, else return the\n affine.\n code : int\n Sform code. Only returned if `coded` is True.\n \"\"\"\n hdr = self._structarr\n code = int(hdr['sform_code'])\n if code == 0 and coded:\n return None, 0\n out = np.eye(4)\n out[0, :] = hdr['srow_x'][:]\n out[1, :] = hdr['srow_y'][:]\n out[2, :] = hdr['srow_z'][:]\n if coded:\n return out, code\n return out\n\n def set_sform(self, affine, code=None):\n \"\"\" Set sform transform from 4x4 affine\n\n Parameters\n ----------\n affine : None or 4x4 array\n affine transform to write into sform. If None, only set `code`\n code : None, string or integer, optional\n String or integer giving meaning of transform in *affine*.\n The default is None. 
If code is None, then:\n\n * If affine is None, `code`-> 0\n * If affine not None and existing sform code in header == 0,\n `code`-> 2 (aligned)\n * If affine not None and existing sform code in header != 0,\n `code`-> existing sform code in header\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> int(hdr['sform_code']) # gives 0 - unknown\n 0\n >>> affine = np.diag([1,2,3,1])\n >>> np.all(hdr.get_sform() == affine)\n False\n >>> hdr.set_sform(affine)\n >>> np.all(hdr.get_sform() == affine)\n True\n >>> int(hdr['sform_code']) # gives 2 - aligned\n 2\n >>> hdr.set_sform(affine, code='talairach')\n >>> int(hdr['sform_code'])\n 3\n >>> hdr.set_sform(affine, code=None)\n >>> int(hdr['sform_code'])\n 3\n >>> hdr.set_sform(affine, code='scanner')\n >>> int(hdr['sform_code'])\n 1\n >>> hdr.set_sform(None)\n >>> int(hdr['sform_code'])\n 0\n \"\"\"\n hdr = self._structarr\n old_code = hdr['sform_code']\n if code is None:\n if affine is None:\n code = 0\n elif old_code == 0:\n code = 2 # aligned\n else:\n code = old_code\n else: # code set\n code = self._field_recoders['sform_code'][code]\n hdr['sform_code'] = code\n if affine is None:\n return\n affine = np.asarray(affine)\n hdr['srow_x'][:] = affine[0, :]\n hdr['srow_y'][:] = affine[1, :]\n hdr['srow_z'][:] = affine[2, :]\n\n def get_slope_inter(self):\n \"\"\" Get data scaling (slope) and DC offset (intercept) from header data\n\n Returns\n -------\n slope : None or float\n scaling (slope). None if there is no valid scaling from these\n fields\n inter : None or float\n offset (intercept). None if there is no valid scaling or if offset\n is not finite.\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> hdr.get_slope_inter()\n (1.0, 0.0)\n >>> hdr['scl_slope'] = 0\n >>> hdr.get_slope_inter()\n (None, None)\n >>> hdr['scl_slope'] = np.nan\n >>> hdr.get_slope_inter()\n (None, None)\n >>> hdr['scl_slope'] = 1\n >>> hdr['scl_inter'] = 1\n >>> hdr.get_slope_inter()\n (1.0, 1.0)\n >>> hdr['scl_inter'] = np.inf\n >>> hdr.get_slope_inter() #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n HeaderDataError: Valid slope but invalid intercept inf\n \"\"\"\n # Note that we are returning float (float64) scalefactors and\n # intercepts, although they are stored as in nifti1 as float32.\n slope = float(self['scl_slope'])\n inter = float(self['scl_inter'])\n if slope == 0 or not np.isfinite(slope):\n return None, None\n if not np.isfinite(inter):\n raise HeaderDataError(f'Valid slope but invalid intercept {inter}')\n return slope, inter\n\n def set_slope_inter(self, slope, inter=None):\n \"\"\" Set slope and / or intercept into header\n\n Set slope and intercept for image data, such that, if the image\n data is ``arr``, then the scaled image data will be ``(arr *\n slope) + inter``\n\n (`slope`, `inter`) of (NaN, NaN) is a signal to a containing image to\n set `slope`, `inter` automatically on write.\n\n Parameters\n ----------\n slope : None or float\n If None, implies `slope` of NaN. If `slope` is None or NaN then\n `inter` should be None or NaN. Values of 0, Inf or -Inf raise\n HeaderDataError\n inter : None or float, optional\n Intercept. If None, implies `inter` of NaN. If `slope` is None or\n NaN then `inter` should be None or NaN. 
Values of Inf or -Inf raise\n HeaderDataError\n \"\"\"\n if slope is None:\n slope = np.nan\n if inter is None:\n inter = np.nan\n if slope in (0, np.inf, -np.inf):\n raise HeaderDataError('Slope cannot be 0 or infinite')\n if inter in (np.inf, -np.inf):\n raise HeaderDataError('Intercept cannot be infinite')\n if np.isnan(slope) ^ np.isnan(inter):\n raise HeaderDataError('None or both of slope, inter should be nan')\n self._structarr['scl_slope'] = slope\n self._structarr['scl_inter'] = inter\n\n def get_dim_info(self):\n \"\"\" Gets NIfTI MRI slice etc dimension information\n\n Returns\n -------\n freq : {None,0,1,2}\n Which data array axis is frequency encode direction\n phase : {None,0,1,2}\n Which data array axis is phase encode direction\n slice : {None,0,1,2}\n Which data array axis is slice encode direction\n\n where ``data array`` is the array returned by ``get_data``\n\n Because NIfTI1 files are natively Fortran indexed:\n 0 is fastest changing in file\n 1 is medium changing in file\n 2 is slowest changing in file\n\n ``None`` means the axis appears not to be specified.\n\n Examples\n --------\n See set_dim_info function\n\n \"\"\"\n hdr = self._structarr\n info = int(hdr['dim_info'])\n freq = info & 3\n phase = (info >> 2) & 3\n slice = (info >> 4) & 3\n return (freq - 1 if freq else None,\n phase - 1 if phase else None,\n slice - 1 if slice else None)\n\n def set_dim_info(self, freq=None, phase=None, slice=None):\n \"\"\" Sets nifti MRI slice etc dimension information\n\n Parameters\n ----------\n freq : {None, 0, 1, 2}\n axis of data array referring to frequency encoding\n phase : {None, 0, 1, 2}\n axis of data array referring to phase encoding\n slice : {None, 0, 1, 2}\n axis of data array referring to slice encoding\n\n ``None`` means the axis is not specified.\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> hdr.set_dim_info(1, 2, 0)\n >>> hdr.get_dim_info()\n (1, 2, 0)\n >>> hdr.set_dim_info(freq=1, phase=2, slice=0)\n >>> hdr.get_dim_info()\n (1, 2, 0)\n >>> hdr.set_dim_info()\n >>> hdr.get_dim_info()\n (None, None, None)\n >>> hdr.set_dim_info(freq=1, phase=None, slice=0)\n >>> hdr.get_dim_info()\n (1, None, 0)\n\n Notes\n -----\n This is stored in one byte in the header\n \"\"\"\n for inp in (freq, phase, slice):\n # Don't use == on None to avoid a FutureWarning in python3\n if inp is not None and inp not in (0, 1, 2):\n raise HeaderDataError('Inputs must be in [None, 0, 1, 2]')\n info = 0\n if freq is not None:\n info = info | ((freq + 1) & 3)\n if phase is not None:\n info = info | (((phase + 1) & 3) << 2)\n if slice is not None:\n info = info | (((slice + 1) & 3) << 4)\n self._structarr['dim_info'] = info\n\n def get_intent(self, code_repr='label'):\n \"\"\" Get intent code, parameters and name\n\n Parameters\n ----------\n code_repr : string\n string giving output form of intent code representation.\n Default is 'label'; use 'code' for integer representation.\n\n Returns\n -------\n code : string or integer\n intent code, or string describing code\n parameters : tuple\n parameters for the intent\n name : string\n intent name\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> hdr.set_intent('t test', (10,), name='some score')\n >>> hdr.get_intent()\n ('t test', (10.0,), 'some score')\n >>> hdr.get_intent('code')\n (3, (10.0,), 'some score')\n \"\"\"\n hdr = self._structarr\n recoder = self._field_recoders['intent_code']\n code = int(hdr['intent_code'])\n known_intent = code in recoder\n if code_repr == 'code':\n label = code\n elif code_repr == 
'label':\n if known_intent:\n label = recoder.label[code]\n else:\n label = 'unknown code ' + str(code)\n else:\n raise TypeError('repr can be \"label\" or \"code\"')\n n_params = len(recoder.parameters[code]) if known_intent else 0\n params = (float(hdr['intent_p%d' % (i + 1)]) for i in range(n_params))\n name = asstr(hdr['intent_name'].item())\n return label, tuple(params), name\n\n def set_intent(self, code, params=(), name='', allow_unknown=False):\n \"\"\" Set the intent code, parameters and name\n\n If parameters are not specified, assumed to be all zero. Each\n intent code has a set number of parameters associated. If you\n specify any parameters, then it will need to be the correct number\n (e.g the \"f test\" intent requires 2). However, parameters can\n also be set in the file data, so we also allow not setting any\n parameters (empty parameter tuple).\n\n Parameters\n ----------\n code : integer or string\n code specifying nifti intent\n params : list, tuple of scalars\n parameters relating to intent (see intent_codes)\n defaults to (). Unspecified parameters are set to 0.0\n name : string\n intent name (description). Defaults to ''\n allow_unknown : {False, True}, optional\n Allow unknown integer intent codes. If False (the default),\n a KeyError is raised on attempts to set the intent\n to an unknown code.\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> hdr.set_intent(0) # no intent\n >>> hdr.set_intent('z score')\n >>> hdr.get_intent()\n ('z score', (), '')\n >>> hdr.get_intent('code')\n (5, (), '')\n >>> hdr.set_intent('t test', (10,), name='some score')\n >>> hdr.get_intent()\n ('t test', (10.0,), 'some score')\n >>> hdr.set_intent('f test', (2, 10), name='another score')\n >>> hdr.get_intent()\n ('f test', (2.0, 10.0), 'another score')\n >>> hdr.set_intent('f test')\n >>> hdr.get_intent()\n ('f test', (0.0, 0.0), '')\n >>> hdr.set_intent(9999, allow_unknown=True) # unknown code\n >>> hdr.get_intent()\n ('unknown code 9999', (), '')\n \"\"\"\n hdr = self._structarr\n known_intent = code in intent_codes\n if not known_intent:\n # We can set intent via an unknown integer code, but can't via an\n # unknown string label\n if not allow_unknown or isinstance(code, str):\n raise KeyError('Unknown intent code: ' + str(code))\n if known_intent:\n icode = intent_codes.code[code]\n p_descr = intent_codes.parameters[code]\n else:\n icode = code\n p_descr = ('p1', 'p2', 'p3')\n if len(params) and len(params) != len(p_descr):\n raise HeaderDataError(f'Need params of form {p_descr}, or empty')\n hdr['intent_code'] = icode\n hdr['intent_name'] = name\n all_params = [0] * 3\n all_params[:len(params)] = params[:]\n for i, param in enumerate(all_params):\n hdr['intent_p%d' % (i + 1)] = param\n\n def get_slice_duration(self):\n \"\"\" Get slice duration\n\n Returns\n -------\n slice_duration : float\n time to acquire one slice\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> hdr.set_dim_info(slice=2)\n >>> hdr.set_slice_duration(0.3)\n >>> print(\"%0.1f\" % hdr.get_slice_duration())\n 0.3\n\n Notes\n -----\n The NIfTI1 spec appears to require the slice dimension to be\n defined for slice_duration to have meaning.\n \"\"\"\n _, _, slice_dim = self.get_dim_info()\n if slice_dim is None:\n raise HeaderDataError('Slice dimension must be set '\n 'for duration to be valid')\n return float(self._structarr['slice_duration'])\n\n def set_slice_duration(self, duration):\n \"\"\" Set slice duration\n\n Parameters\n ----------\n duration : scalar\n time to 
acquire one slice\n\n Examples\n --------\n See ``get_slice_duration``\n \"\"\"\n _, _, slice_dim = self.get_dim_info()\n if slice_dim is None:\n raise HeaderDataError('Slice dimension must be set '\n 'for duration to be valid')\n self._structarr['slice_duration'] = duration\n\n def get_n_slices(self):\n \"\"\" Return the number of slices\n \"\"\"\n _, _, slice_dim = self.get_dim_info()\n if slice_dim is None:\n raise HeaderDataError('Slice dimension not set in header '\n 'dim_info')\n shape = self.get_data_shape()\n try:\n slice_len = shape[slice_dim]\n except IndexError:\n raise HeaderDataError(f'Slice dimension index ({slice_dim}) '\n f'outside shape tuple ({shape})')\n return slice_len\n\n def get_slice_times(self):\n \"\"\" Get slice times from slice timing information\n\n Returns\n -------\n slice_times : tuple\n Times of acquisition of slices, where 0 is the beginning of\n the acquisition, ordered by position in file. nifti allows\n slices at the top and bottom of the volume to be excluded from\n the standard slice timing specification, and calls these\n \"padding slices\". We give padding slices ``None`` as a time\n of acquisition\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> hdr.set_dim_info(slice=2)\n >>> hdr.set_data_shape((1, 1, 7))\n >>> hdr.set_slice_duration(0.1)\n >>> hdr['slice_code'] = slice_order_codes['sequential increasing']\n >>> slice_times = hdr.get_slice_times()\n >>> np.allclose(slice_times, [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6])\n True\n \"\"\"\n hdr = self._structarr\n slice_len = self.get_n_slices()\n duration = self.get_slice_duration()\n slabel = self.get_value_label('slice_code')\n if slabel == 'unknown':\n raise HeaderDataError('Cannot get slice times when '\n 'Slice code is \"unknown\"')\n slice_start, slice_end = (int(hdr['slice_start']),\n int(hdr['slice_end']))\n if slice_start < 0:\n raise HeaderDataError('slice_start should be >= 0')\n if slice_end == 0:\n slice_end = slice_len - 1\n n_timed = slice_end - slice_start + 1\n if n_timed < 1:\n raise HeaderDataError('slice_end should be > slice_start')\n st_order = self._slice_time_order(slabel, n_timed)\n times = st_order * duration\n return ((None,) * slice_start +\n tuple(times) +\n (None,) * (slice_len - slice_end - 1))\n\n def set_slice_times(self, slice_times):\n \"\"\" Set slice times into *hdr*\n\n Parameters\n ----------\n slice_times : tuple\n tuple of slice times, one value per slice\n tuple can include None to indicate no slice time for that slice\n\n Examples\n --------\n >>> hdr = Nifti1Header()\n >>> hdr.set_dim_info(slice=2)\n >>> hdr.set_data_shape([1, 1, 7])\n >>> hdr.set_slice_duration(0.1)\n >>> times = [None, 0.2, 0.4, 0.1, 0.3, 0.0, None]\n >>> hdr.set_slice_times(times)\n >>> hdr.get_value_label('slice_code')\n 'alternating decreasing'\n >>> int(hdr['slice_start'])\n 1\n >>> int(hdr['slice_end'])\n 5\n \"\"\"\n # Check if number of slices matches header\n hdr = self._structarr\n slice_len = self.get_n_slices()\n if slice_len != len(slice_times):\n raise HeaderDataError('Number of slice times does not '\n 'match number of slices')\n # Extract Nones at beginning and end. 
Check for others\n for ind, time in enumerate(slice_times):\n if time is not None:\n slice_start = ind\n break\n else:\n raise HeaderDataError('Not all slice times can be None')\n for ind, time in enumerate(slice_times[::-1]):\n if time is not None:\n slice_end = slice_len - ind - 1\n break\n timed = slice_times[slice_start:slice_end + 1]\n for time in timed:\n if time is None:\n raise HeaderDataError('Cannot have None in middle '\n 'of slice time vector')\n # Find slice duration, check times are compatible with single\n # duration\n tdiffs = np.diff(np.sort(timed))\n if not np.allclose(np.diff(tdiffs), 0):\n raise HeaderDataError('Slice times not compatible with '\n 'single slice duration')\n duration = np.mean(tdiffs)\n # To slice time order\n st_order = np.round(np.array(timed) / duration)\n # Check if slice times fit known schemes\n n_timed = len(timed)\n so_recoder = self._field_recoders['slice_code']\n labels = so_recoder.value_set('label')\n labels.remove('unknown')\n\n matching_labels = []\n for label in labels:\n if np.all(st_order == self._slice_time_order(\n label,\n n_timed)):\n matching_labels.append(label)\n\n if not matching_labels:\n raise HeaderDataError(f'slice ordering of {st_order} fits with no known scheme')\n if len(matching_labels) > 1:\n warnings.warn(\n f\"Multiple slice orders satisfy: {', '.join(matching_labels)}. \"\n \"Choosing the first one\")\n label = matching_labels[0]\n # Set values into header\n hdr['slice_start'] = slice_start\n hdr['slice_end'] = slice_end\n hdr['slice_duration'] = duration\n hdr['slice_code'] = slice_order_codes.code[label]\n\n def _slice_time_order(self, slabel, n_slices):\n \"\"\" Supporting function to give time order of slices from label \"\"\"\n if slabel == 'sequential increasing':\n sp_ind_time_order = list(range(n_slices))\n elif slabel == 'sequential decreasing':\n sp_ind_time_order = list(range(n_slices)[::-1])\n elif slabel == 'alternating increasing':\n sp_ind_time_order = (list(range(0, n_slices, 2)) +\n list(range(1, n_slices, 2)))\n elif slabel == 'alternating decreasing':\n sp_ind_time_order = (list(range(n_slices - 1, -1, -2)) +\n list(range(n_slices - 2, -1, -2)))\n elif slabel == 'alternating increasing 2':\n sp_ind_time_order = (list(range(1, n_slices, 2)) +\n list(range(0, n_slices, 2)))\n elif slabel == 'alternating decreasing 2':\n sp_ind_time_order = (list(range(n_slices - 2, -1, -2)) +\n list(range(n_slices - 1, -1, -2)))\n else:\n raise HeaderDataError(f'We do not handle slice ordering \"{slabel}\"')\n return np.argsort(sp_ind_time_order)\n\n def get_xyzt_units(self):\n xyz_code = self.structarr['xyzt_units'] % 8\n t_code = self.structarr['xyzt_units'] - xyz_code\n return (unit_codes.label[xyz_code],\n unit_codes.label[t_code])\n\n def set_xyzt_units(self, xyz=None, t=None):\n if xyz is None:\n xyz = 0\n if t is None:\n t = 0\n xyz_code = self.structarr['xyzt_units'] % 8\n t_code = self.structarr['xyzt_units'] - xyz_code\n xyz_code = unit_codes[xyz]\n t_code = unit_codes[t]\n self.structarr['xyzt_units'] = xyz_code + t_code\n\n def _clean_after_mapping(self):\n \"\"\" Set format-specific stuff after converting header from mapping\n\n Clean up header after it has been initialized from an\n ``as_analyze_map`` method of another header type\n\n See :meth:`nibabel.analyze.AnalyzeHeader._clean_after_mapping` for a\n more detailed description.\n \"\"\"\n self._structarr['magic'] = (self.single_magic if self.is_single\n else self.pair_magic)\n\n \"\"\" Checks only below here \"\"\"\n\n @classmethod\n def 
_get_checks(klass):\n # We need to return our own versions of - e.g. chk_datatype, to\n # pick up the Nifti datatypes from our class\n return (klass._chk_sizeof_hdr,\n klass._chk_datatype,\n klass._chk_bitpix,\n klass._chk_pixdims,\n klass._chk_qfac,\n klass._chk_magic,\n klass._chk_offset,\n klass._chk_qform_code,\n klass._chk_sform_code)\n\n @staticmethod\n def _chk_qfac(hdr, fix=False):\n rep = Report(HeaderDataError)\n if hdr['pixdim'][0] in (-1, 1):\n return hdr, rep\n rep.problem_level = 20\n rep.problem_msg = 'pixdim[0] (qfac) should be 1 (default) or -1'\n if fix:\n hdr['pixdim'][0] = 1\n rep.fix_msg = 'setting qfac to 1'\n return hdr, rep\n\n @staticmethod\n def _chk_magic(hdr, fix=False):\n rep = Report(HeaderDataError)\n magic = hdr['magic'].item()\n if magic in (hdr.pair_magic, hdr.single_magic):\n return hdr, rep\n rep.problem_msg = f'magic string \"{asstr(magic)}\" is not valid'\n rep.problem_level = 45\n if fix:\n rep.fix_msg = 'leaving as is, but future errors are likely'\n return hdr, rep\n\n @staticmethod\n def _chk_offset(hdr, fix=False):\n rep = Report(HeaderDataError)\n # for ease of later string formatting, use scalar of byte string\n magic = hdr['magic'].item()\n offset = hdr['vox_offset'].item()\n if offset == 0:\n return hdr, rep\n if magic == hdr.single_magic and offset < hdr.single_vox_offset:\n rep.problem_level = 40\n rep.problem_msg = ('vox offset %d too low for '\n 'single file nifti1' % offset)\n if fix:\n hdr['vox_offset'] = hdr.single_vox_offset\n rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}'\n return hdr, rep\n if not offset % 16:\n return hdr, rep\n # SPM uses memory mapping to read the data, and\n # apparently this has to start on 16 byte boundaries\n rep.problem_msg = f'vox offset (={offset:g}) not divisible by 16, not SPM compatible'\n rep.problem_level = 30\n if fix:\n rep.fix_msg = 'leaving at current value'\n return hdr, rep\n\n @classmethod\n def _chk_qform_code(klass, hdr, fix=False):\n return klass._chk_xform_code('qform_code', hdr, fix)\n\n @classmethod\n def _chk_sform_code(klass, hdr, fix=False):\n return klass._chk_xform_code('sform_code', hdr, fix)\n\n @classmethod\n def _chk_xform_code(klass, code_type, hdr, fix):\n # utility method for sform and qform codes\n rep = Report(HeaderDataError)\n code = int(hdr[code_type])\n recoder = klass._field_recoders[code_type]\n if code in recoder.value_set():\n return hdr, rep\n rep.problem_level = 30\n rep.problem_msg = '%s %d not valid' % (code_type, code)\n if fix:\n hdr[code_type] = 0\n rep.fix_msg = 'setting to 0'\n return hdr, rep\n\n @classmethod\n def may_contain_header(klass, binaryblock):\n if len(binaryblock) < klass.sizeof_hdr:\n return False\n\n hdr_struct = np.ndarray(shape=(), dtype=header_dtype,\n buffer=binaryblock[:klass.sizeof_hdr])\n return hdr_struct['magic'] in (b'ni1', b'n+1')\n\n\nclass Nifti1PairHeader(Nifti1Header):\n \"\"\" Class for NIfTI1 pair header \"\"\"\n # Signal whether this is single (header + data) file\n is_single = False\n\n\nclass Nifti1Pair(analyze.AnalyzeImage):\n \"\"\" Class for NIfTI1 format image, header pair\n \"\"\"\n header_class = Nifti1PairHeader\n _meta_sniff_len = header_class.sizeof_hdr\n rw = True\n\n def __init__(self, dataobj, affine, header=None,\n extra=None, file_map=None):\n super(Nifti1Pair, self).__init__(dataobj,\n affine,\n header,\n extra,\n file_map)\n # Force set of s/q form when header is None unless affine is also None\n if header is None and affine is not None:\n self._affine2header()\n # Copy docstring\n 
__init__.__doc__ = analyze.AnalyzeImage.__init__.__doc__ + \"\"\"\n Notes\n -----\n\n If both a `header` and an `affine` are specified, and the `affine` does\n not match the affine that is in the `header`, the `affine` will be used,\n but the ``sform_code`` and ``qform_code`` fields in the header will be\n re-initialised to their default values. This is performed on the basis\n that, if you are changing the affine, you are likely to be changing the\n space to which the affine is pointing. The :meth:`set_sform` and\n :meth:`set_qform` methods can be used to update the codes after an image\n has been created - see those methods, and the :ref:`manual\n <default-sform-qform-codes>` for more details. \"\"\"\n\n def update_header(self):\n \"\"\" Harmonize header with image data and affine\n\n See AnalyzeImage.update_header for more examples\n\n Examples\n --------\n >>> data = np.zeros((2,3,4))\n >>> affine = np.diag([1.0,2.0,3.0,1.0])\n >>> img = Nifti1Image(data, affine)\n >>> hdr = img.header\n >>> np.all(hdr.get_qform() == affine)\n True\n >>> np.all(hdr.get_sform() == affine)\n True\n \"\"\"\n super(Nifti1Pair, self).update_header()\n hdr = self._header\n hdr['magic'] = hdr.pair_magic\n\n def _affine2header(self):\n \"\"\" Unconditionally set affine into the header \"\"\"\n hdr = self._header\n # Set affine into sform with default code\n hdr.set_sform(self._affine, code='aligned')\n # Make qform 'unknown'\n hdr.set_qform(self._affine, code='unknown')\n\n def get_qform(self, coded=False):\n \"\"\" Return 4x4 affine matrix from qform parameters in header\n\n Parameters\n ----------\n coded : bool, optional\n If True, return {affine or None}, and qform code. If False, just\n return affine. {affine or None} means, return None if qform code\n == 0, and affine otherwise.\n\n Returns\n -------\n affine : None or (4,4) ndarray\n If `coded` is False, always return affine reconstructed from qform\n quaternion. If `coded` is True, return None if qform code is 0,\n else return the affine.\n code : int\n Qform code. Only returned if `coded` is True.\n\n See also\n --------\n set_qform\n get_sform\n \"\"\"\n return self._header.get_qform(coded)\n\n def set_qform(self, affine, code=None, strip_shears=True, **kwargs):\n \"\"\" Set qform header values from 4x4 affine\n\n Parameters\n ----------\n affine : None or 4x4 array\n affine transform to write into sform. If None, only set code.\n code : None, string or integer\n String or integer giving meaning of transform in *affine*.\n The default is None. If code is None, then:\n\n * If affine is None, `code`-> 0\n * If affine not None and existing qform code in header == 0,\n `code`-> 2 (aligned)\n * If affine not None and existing qform code in header != 0,\n `code`-> existing qform code in header\n\n strip_shears : bool, optional\n Whether to strip shears in `affine`. If True, shears will be\n silently stripped. If False, the presence of shears will raise a\n ``HeaderDataError``\n update_affine : bool, optional\n Whether to update the image affine from the header best affine\n after setting the qform. Must be keyword argument (because of\n different position in `set_qform`). 
Default is True\n\n See also\n --------\n get_qform\n set_sform\n\n Examples\n --------\n >>> data = np.arange(24).reshape((2,3,4))\n >>> aff = np.diag([2, 3, 4, 1])\n >>> img = Nifti1Pair(data, aff)\n >>> img.get_qform()\n array([[2., 0., 0., 0.],\n [0., 3., 0., 0.],\n [0., 0., 4., 0.],\n [0., 0., 0., 1.]])\n >>> img.get_qform(coded=True)\n (None, 0)\n >>> aff2 = np.diag([3, 4, 5, 1])\n >>> img.set_qform(aff2, 'talairach')\n >>> qaff, code = img.get_qform(coded=True)\n >>> np.all(qaff == aff2)\n True\n >>> int(code)\n 3\n \"\"\"\n update_affine = kwargs.pop('update_affine', True)\n if kwargs:\n raise TypeError(f'Unexpected keyword argument(s) {kwargs}')\n self._header.set_qform(affine, code, strip_shears)\n if update_affine:\n if self._affine is None:\n self._affine = self._header.get_best_affine()\n else:\n self._affine[:] = self._header.get_best_affine()\n\n def get_sform(self, coded=False):\n \"\"\" Return 4x4 affine matrix from sform parameters in header\n\n Parameters\n ----------\n coded : bool, optional\n If True, return {affine or None}, and sform code. If False, just\n return affine. {affine or None} means, return None if sform code\n == 0, and affine otherwise.\n\n Returns\n -------\n affine : None or (4,4) ndarray\n If `coded` is False, always return affine from sform fields. If\n `coded` is True, return None if sform code is 0, else return the\n affine.\n code : int\n Sform code. Only returned if `coded` is True.\n\n See also\n --------\n set_sform\n get_qform\n \"\"\"\n return self._header.get_sform(coded)\n\n def set_sform(self, affine, code=None, **kwargs):\n \"\"\" Set sform transform from 4x4 affine\n\n Parameters\n ----------\n affine : None or 4x4 array\n affine transform to write into sform. If None, only set `code`\n code : None, string or integer\n String or integer giving meaning of transform in *affine*.\n The default is None. If code is None, then:\n\n * If affine is None, `code`-> 0\n * If affine not None and existing sform code in header == 0,\n `code`-> 2 (aligned)\n * If affine not None and existing sform code in header != 0,\n `code`-> existing sform code in header\n\n update_affine : bool, optional\n Whether to update the image affine from the header best affine\n after setting the qform. Must be keyword argument (because of\n different position in `set_qform`). 
Default is True\n\n See also\n --------\n get_sform\n set_qform\n\n Examples\n --------\n >>> data = np.arange(24).reshape((2,3,4))\n >>> aff = np.diag([2, 3, 4, 1])\n >>> img = Nifti1Pair(data, aff)\n >>> img.get_sform()\n array([[2., 0., 0., 0.],\n [0., 3., 0., 0.],\n [0., 0., 4., 0.],\n [0., 0., 0., 1.]])\n >>> saff, code = img.get_sform(coded=True)\n >>> saff\n array([[2., 0., 0., 0.],\n [0., 3., 0., 0.],\n [0., 0., 4., 0.],\n [0., 0., 0., 1.]])\n >>> int(code)\n 2\n >>> aff2 = np.diag([3, 4, 5, 1])\n >>> img.set_sform(aff2, 'talairach')\n >>> saff, code = img.get_sform(coded=True)\n >>> np.all(saff == aff2)\n True\n >>> int(code)\n 3\n \"\"\"\n update_affine = kwargs.pop('update_affine', True)\n if kwargs:\n raise TypeError(f'Unexpected keyword argument(s) {kwargs}')\n self._header.set_sform(affine, code)\n if update_affine:\n if self._affine is None:\n self._affine = self._header.get_best_affine()\n else:\n self._affine[:] = self._header.get_best_affine()\n\n def as_reoriented(self, ornt):\n \"\"\"Apply an orientation change and return a new image\n\n If ornt is identity transform, return the original image, unchanged\n\n Parameters\n ----------\n ornt : (n,2) orientation array\n orientation transform. ``ornt[N,1]` is flip of axis N of the\n array implied by `shape`, where 1 means no flip and -1 means\n flip. For example, if ``N==0`` and ``ornt[0,1] == -1``, and\n there's an array ``arr`` of shape `shape`, the flip would\n correspond to the effect of ``np.flipud(arr)``. ``ornt[:,0]`` is\n the transpose that needs to be done to the implied array, as in\n ``arr.transpose(ornt[:,0])``\n \"\"\"\n img = super(Nifti1Pair, self).as_reoriented(ornt)\n\n if img is self:\n return img\n\n # Also apply the transform to the dim_info fields\n new_dim = [\n None if orig_dim is None else int(ornt[orig_dim, 0])\n for orig_dim in img.header.get_dim_info()]\n\n img.header.set_dim_info(*new_dim)\n\n return img\n\n\nclass Nifti1Image(Nifti1Pair, SerializableImage):\n \"\"\" Class for single file NIfTI1 format image\n \"\"\"\n header_class = Nifti1Header\n valid_exts = ('.nii',)\n files_types = (('image', '.nii'),)\n\n @staticmethod\n def _get_fileholders(file_map):\n \"\"\" Return fileholder for header and image\n\n For single-file niftis, the fileholder for the header and the image\n will be the same\n \"\"\"\n return file_map['image'], file_map['image']\n\n def update_header(self):\n \"\"\" Harmonize header with image data and affine \"\"\"\n super(Nifti1Image, self).update_header()\n hdr = self._header\n hdr['magic'] = hdr.single_magic\n\n\ndef load(filename):\n \"\"\" Load NIfTI1 single or pair from `filename`\n\n Parameters\n ----------\n filename : str\n filename of image to be loaded\n\n Returns\n -------\n img : Nifti1Image or Nifti1Pair\n NIfTI1 single or pair image instance\n\n Raises\n ------\n ImageFileError\n if `filename` doesn't look like NIfTI1;\n IOError\n if `filename` does not exist.\n \"\"\"\n try:\n img = Nifti1Image.load(filename)\n except ImageFileError:\n return Nifti1Pair.load(filename)\n return img\n\n\ndef save(img, filename):\n \"\"\" Save NIfTI1 single or pair to `filename`\n\n Parameters\n ----------\n filename : str\n filename to which to save image\n \"\"\"\n try:\n Nifti1Image.instance_to_filename(img, filename)\n except ImageFileError:\n Nifti1Pair.instance_to_filename(img, filename)\n",
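The string above holds the complete nifti1.py source. As a quick orientation aid, here is a minimal, hedged usage sketch of the API that file defines (Nifti1Image, Nifti1Header.set_qform, and the module-level save/load helpers). It is illustrative only, assumes nibabel is installed, and the filename 'example.nii' is an arbitrary placeholder.

# Illustrative sketch, not part of the dataset cell above or below.
# Uses only names defined in the nifti1.py source shown above.
import numpy as np
from nibabel.nifti1 import Nifti1Image, save, load

data = np.zeros((2, 3, 4), dtype=np.float32)   # small dummy volume
affine = np.diag([1.0, 2.0, 3.0, 1.0])         # 1 x 2 x 3 mm voxel grid

img = Nifti1Image(data, affine)                # single-file (.nii) image
# With header=None, the constructor pushes `affine` into the header:
# sform gets code 2 ('aligned'), qform gets code 0 ('unknown').
sform, code = img.header.get_sform(coded=True)
assert code == 2 and np.allclose(sform, affine)

img.header.set_qform(affine, code='scanner')   # also record a scanner-space qform

save(img, 'example.nii')                       # write, then round-trip
loaded = load('example.nii')
assert np.allclose(loaded.affine, affine)      # best affine comes from the sform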
"\"\"\" Read / write access to TCK streamlines format.\n\nTCK format is defined at\nhttp://mrtrix.readthedocs.io/en/latest/getting_started/image_data.html?highlight=format#tracks-file-format-tck\n\"\"\"\n\nimport os\nimport warnings\n\nimport numpy as np\nfrom numpy.compat.py3k import asbytes, asstr\n\nfrom nibabel.openers import Opener\n\nfrom .array_sequence import ArraySequence\nfrom .tractogram_file import TractogramFile\nfrom .tractogram_file import HeaderWarning, DataWarning\nfrom .tractogram_file import HeaderError, DataError\nfrom .tractogram import TractogramItem, Tractogram, LazyTractogram\nfrom .header import Field\nfrom .utils import peek_next\n\nMEGABYTE = 1024 * 1024\n\n\nclass TckFile(TractogramFile):\n \"\"\" Convenience class to encapsulate TCK file format.\n\n Notes\n -----\n MRtrix (so its file format: TCK) considers streamlines coordinates\n to be in world space (RAS+ and mm space). MRtrix refers to that space\n as the \"real\" or \"scanner\" space [#]_.\n\n Moreover, when streamlines are mapped back to voxel space [#]_, a\n streamline point located at an integer coordinate (i,j,k) is considered\n to be at the center of the corresponding voxel. This is in contrast with\n TRK's internal convention where it would have referred to a corner.\n\n NiBabel's streamlines internal representation follows the same\n convention as MRtrix.\n\n .. [#] http://www.nitrc.org/pipermail/mrtrix-discussion/2014-January/000859.html\n .. [#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space\n \"\"\"\n # Constants\n MAGIC_NUMBER = \"mrtrix tracks\"\n SUPPORTS_DATA_PER_POINT = False # Not yet\n SUPPORTS_DATA_PER_STREAMLINE = False # Not yet\n\n FIBER_DELIMITER = np.array([[np.nan, np.nan, np.nan]], '<f4')\n EOF_DELIMITER = np.array([[np.inf, np.inf, np.inf]], '<f4')\n\n def __init__(self, tractogram, header=None):\n \"\"\"\n Parameters\n ----------\n tractogram : :class:`Tractogram` object\n Tractogram that will be contained in this :class:`TckFile`.\n header : None or dict, optional\n Metadata associated to this tractogram file. If None, make\n default empty header.\n\n Notes\n -----\n Streamlines of the tractogram are assumed to be in *RAS+* and *mm*\n space. It is also assumed that when streamlines are mapped back to\n voxel space, a streamline point located at an integer coordinate\n (i,j,k) is considered to be at the center of the corresponding voxel.\n This is in contrast with TRK's internal convention where it would\n have referred to a corner.\n \"\"\"\n super(TckFile, self).__init__(tractogram, header)\n\n @classmethod\n def is_correct_format(cls, fileobj):\n \"\"\" Check if the file is in TCK format.\n\n Parameters\n ----------\n fileobj : string or file-like object\n If string, a filename; otherwise an open file-like object in\n binary mode pointing to TCK file (and ready to read from the\n beginning of the TCK header). 
Note that calling this function\n does not change the file position.\n\n Returns\n -------\n is_correct_format : {True, False}\n Returns True if `fileobj` is compatible with TCK format,\n otherwise returns False.\n \"\"\"\n with Opener(fileobj) as f:\n magic_number = asstr(f.fobj.readline())\n f.seek(-len(magic_number), os.SEEK_CUR)\n\n return magic_number.strip() == cls.MAGIC_NUMBER\n\n @classmethod\n def create_empty_header(cls):\n \"\"\" Return an empty compliant TCK header as dict \"\"\"\n header = {}\n\n # Default values\n header[Field.MAGIC_NUMBER] = cls.MAGIC_NUMBER\n header[Field.NB_STREAMLINES] = 0\n header['datatype'] = \"Float32LE\"\n return header\n\n @classmethod\n def load(cls, fileobj, lazy_load=False):\n \"\"\" Loads streamlines from a filename or file-like object.\n\n Parameters\n ----------\n fileobj : string or file-like object\n If string, a filename; otherwise an open file-like object in\n binary mode pointing to TCK file (and ready to read from the\n beginning of the TCK header). Note that calling this function\n does not change the file position.\n lazy_load : {False, True}, optional\n If True, load streamlines in a lazy manner i.e. they will not be\n kept in memory. Otherwise, load all streamlines in memory.\n\n Returns\n -------\n tck_file : :class:`TckFile` object\n Returns an object containing tractogram data and header\n information.\n\n Notes\n -----\n Streamlines of the tractogram are assumed to be in *RAS+* and *mm*\n space. It is also assumed that when streamlines are mapped back to\n voxel space, a streamline point located at an integer coordinate\n (i,j,k) is considered to be at the center of the corresponding voxel.\n This is in contrast with TRK's internal convention where it would\n have referred to a corner.\n \"\"\"\n hdr = cls._read_header(fileobj)\n\n if lazy_load:\n def _read():\n for pts in cls._read(fileobj, hdr):\n yield TractogramItem(pts, {}, {})\n\n tractogram = LazyTractogram.from_data_func(_read)\n\n else:\n tck_reader = cls._read(fileobj, hdr)\n streamlines = ArraySequence(tck_reader)\n tractogram = Tractogram(streamlines)\n\n # By definition.\n tractogram.affine_to_rasmm = np.eye(4)\n hdr[Field.VOXEL_TO_RASMM] = np.eye(4)\n\n return cls(tractogram, header=hdr)\n\n def _finalize_header(self, f, header, offset=0):\n # Overwrite header with updated one.\n f.seek(offset, os.SEEK_SET)\n self._write_header(f, header)\n\n def save(self, fileobj):\n \"\"\" Save tractogram to a filename or file-like object using TCK format.\n\n Parameters\n ----------\n fileobj : string or file-like object\n If string, a filename; otherwise an open file-like object in\n binary mode pointing to TCK file (and ready to write from the\n beginning of the TCK header data).\n \"\"\"\n # Enforce float32 in little-endian byte order for data.\n dtype = np.dtype('<f4')\n header = self.create_empty_header()\n\n # Override hdr's fields by those contained in `header`.\n header.update(self.header)\n\n # Keep counts for correcting incoherent fields or warn.\n nb_streamlines = 0\n\n with Opener(fileobj, mode=\"wb\") as f:\n # Keep track of the beginning of the header.\n beginning = f.tell()\n\n # Write temporary header that we will update at the end\n self._write_header(f, header)\n\n # Make sure streamlines are in rasmm.\n tractogram = self.tractogram.to_world(lazy=True)\n # Assume looping over the streamlines can be done only once.\n tractogram = iter(tractogram)\n\n try:\n # Use the first element to check\n # 1) the tractogram is not empty;\n # 2) quantity of information saved 
along each streamline.\n first_item, tractogram = peek_next(tractogram)\n except StopIteration:\n # Empty tractogram\n header[Field.NB_STREAMLINES] = 0\n self._finalize_header(f, header, offset=beginning)\n\n # Add the EOF_DELIMITER.\n f.write(self.EOF_DELIMITER.tobytes())\n return\n\n data_for_streamline = first_item.data_for_streamline\n if len(data_for_streamline) > 0:\n keys = \", \".join(data_for_streamline.keys())\n msg = (\"TCK format does not support saving additional \"\n f\"data alongside streamlines. Dropping: {keys}\")\n warnings.warn(msg, DataWarning)\n\n data_for_points = first_item.data_for_points\n if len(data_for_points) > 0:\n keys = \", \".join(data_for_points.keys())\n msg = (\"TCK format does not support saving additional \"\n f\"data alongside points. Dropping: {keys}\")\n warnings.warn(msg, DataWarning)\n\n for t in tractogram:\n data = np.r_[t.streamline, self.FIBER_DELIMITER]\n f.write(data.astype(dtype).tobytes())\n nb_streamlines += 1\n\n header[Field.NB_STREAMLINES] = nb_streamlines\n\n # Add the EOF_DELIMITER.\n f.write(asbytes(self.EOF_DELIMITER.tobytes()))\n self._finalize_header(f, header, offset=beginning)\n\n @staticmethod\n def _write_header(fileobj, header):\n \"\"\" Write TCK header to file-like object.\n\n Parameters\n ----------\n fileobj : file-like object\n An open file-like object in binary mode pointing to TCK file (and\n ready to read from the beginning of the TCK header).\n \"\"\"\n # Fields to exclude\n exclude = [Field.MAGIC_NUMBER, # Handled separately.\n Field.NB_STREAMLINES, # Handled separately.\n Field.ENDIANNESS, # Handled separately.\n Field.VOXEL_TO_RASMM, # Streamlines are always in RAS+ mm.\n \"count\", \"datatype\", \"file\"] # Fields being replaced.\n\n lines = []\n lines.append(asstr(header[Field.MAGIC_NUMBER]))\n lines.append(f\"count: {header[Field.NB_STREAMLINES]:010}\")\n lines.append(\"datatype: Float32LE\") # Always Float32LE.\n lines.extend([f\"{k}: {v}\"\n for k, v in header.items()\n if k not in exclude and not k.startswith(\"_\")])\n lines.append(\"file: . \") # Manually add this last field.\n out = \"\\n\".join(lines)\n\n # Check the header is well formatted.\n if out.count(\"\\n\") > len(lines) - 1: # \\n only allowed between lines.\n msg = f\"Key-value pairs cannot contain '\\\\n':\\n{out}\"\n raise HeaderError(msg)\n\n if out.count(\":\") > len(lines) - 1:\n # : only one per line (except the last one which contains END).\n msg = f\"Key-value pairs cannot contain ':':\\n{out}\"\n raise HeaderError(msg)\n\n # Write header to file.\n fileobj.write(asbytes(out))\n\n hdr_len_no_offset = len(out) + 5\n # Need to add number of bytes to store offset as decimal string. We\n # start with estimate without string, then update if the\n # offset-as-decimal-string got longer after adding length of the\n # offset string.\n new_offset = -1\n old_offset = hdr_len_no_offset\n while new_offset != old_offset:\n old_offset = new_offset\n new_offset = hdr_len_no_offset + len(str(old_offset))\n\n fileobj.write(asbytes(str(new_offset) + \"\\n\"))\n fileobj.write(asbytes(\"END\\n\"))\n\n @staticmethod\n def _read_header(fileobj):\n \"\"\" Reads a TCK header from a file.\n\n Parameters\n ----------\n fileobj : string or file-like object\n If string, a filename; otherwise an open file-like object in\n binary mode pointing to TCK file (and ready to read from the\n beginning of the TCK header). 
Note that calling this function\n does not change the file position.\n\n Returns\n -------\n header : dict\n Metadata associated with this tractogram file.\n \"\"\"\n # Record start position if this is a file-like object\n start_position = fileobj.tell() if hasattr(fileobj, 'tell') else None\n\n with Opener(fileobj) as f:\n # Read magic number\n magic_number = f.fobj.readline().strip()\n\n # Read all key-value pairs contained in the header.\n buf = asstr(f.fobj.readline())\n while not buf.rstrip().endswith(\"END\"):\n buf += asstr(f.fobj.readline())\n\n offset_data = f.tell()\n\n # Build header dictionary from the buffer.\n hdr = dict(item.split(': ') for item in buf.rstrip().split('\\n')[:-1])\n hdr[Field.MAGIC_NUMBER] = magic_number\n\n # Check integrity of TCK header.\n if 'datatype' not in hdr:\n msg = (\"Missing 'datatype' attribute in TCK header.\"\n \" Assuming it is Float32LE.\")\n warnings.warn(msg, HeaderWarning)\n hdr['datatype'] = \"Float32LE\"\n\n if not hdr['datatype'].startswith('Float32'):\n msg = (\"TCK only supports float32 dtype but 'datatype: \"\n f\"{hdr['datatype']}' was specified in the header.\")\n raise HeaderError(msg)\n\n if 'file' not in hdr:\n msg = \"Missing 'file' attribute in TCK header. Will try to guess it.\"\n warnings.warn(msg, HeaderWarning)\n hdr['file'] = f'. {offset_data}'\n\n if hdr['file'].split()[0] != '.':\n msg = (\"TCK only supports single-file - in other words the filename part must be \"\n f\"specified as '.' but '{hdr['file'].split()[0]}' was specified.\")\n raise HeaderError(\"Missing 'file' attribute in TCK header.\")\n\n # Set endianness and _dtype attributes in the header.\n hdr[Field.ENDIANNESS] = '>' if hdr['datatype'].endswith('BE') else '<'\n\n hdr['_dtype'] = np.dtype(hdr[Field.ENDIANNESS] + 'f4')\n\n # Keep the file position where the data begin.\n hdr['_offset_data'] = int(hdr['file'].split()[1])\n\n # Set the file position where it was, if it was previously open.\n if start_position is not None:\n fileobj.seek(start_position, os.SEEK_SET)\n\n return hdr\n\n @classmethod\n def _read(cls, fileobj, header, buffer_size=4):\n \"\"\" Return generator that reads TCK data from `fileobj` given `header`\n\n Parameters\n ----------\n fileobj : string or file-like object\n If string, a filename; otherwise an open file-like object in\n binary mode pointing to TCK file (and ready to read from the\n beginning of the TCK header). 
Note that calling this function\n does not change the file position.\n header : dict\n Metadata associated with this tractogram file.\n buffer_size : float, optional\n Size (in Mb) for buffering.\n\n Yields\n ------\n points : ndarray of shape (n_pts, 3)\n Streamline points\n \"\"\"\n dtype = header[\"_dtype\"]\n coordinate_size = 3 * dtype.itemsize\n # Make buffer_size an integer and a multiple of coordinate_size.\n buffer_size = int(buffer_size * MEGABYTE)\n buffer_size += coordinate_size - (buffer_size % coordinate_size)\n\n with Opener(fileobj) as f:\n start_position = f.tell()\n\n # Set the file position at the beginning of the data.\n f.seek(header[\"_offset_data\"], os.SEEK_SET)\n\n eof = False\n leftover = np.empty((0, 3), dtype='<f4')\n n_streams = 0\n\n while not eof:\n buff = bytearray(buffer_size)\n n_read = f.readinto(buff)\n eof = n_read != buffer_size\n if eof:\n buff = buff[:n_read]\n\n raw_values = np.frombuffer(buff, dtype=dtype)\n\n # Convert raw_values into a list of little-endian triples (for x,y,z coord)\n coords = raw_values.astype('<f4', copy=False).reshape((-1, 3))\n\n # Find stream delimiter locations (all NaNs)\n delims = np.where(np.isnan(coords).all(axis=1))[0]\n\n # Recover leftovers, which can't have delimiters in them\n if leftover.size:\n delims += leftover.shape[0]\n coords = np.vstack((leftover, coords))\n\n begin = 0\n for delim in delims:\n pts = coords[begin:delim]\n if pts.size:\n yield pts\n n_streams += 1\n begin = delim + 1\n\n # The rest becomes the new leftover.\n leftover = coords[begin:]\n\n if not (leftover.shape == (1, 3) and np.isinf(leftover).all()):\n if n_streams == 0:\n msg = \"Cannot find a streamline delimiter. This file might be corrupted.\"\n else:\n msg = \"Expecting end-of-file marker 'inf inf inf'\"\n raise DataError(msg)\n\n # In case the 'count' field was not provided.\n header[Field.NB_STREAMLINES] = n_streams\n\n # Set the file position where it was (in case it was already open).\n f.seek(start_position, os.SEEK_CUR)\n\n def __str__(self):\n \"\"\" Gets a formatted string of the header of a TCK file.\n\n Returns\n -------\n info : string\n Header information relevant to the TCK format.\n \"\"\"\n hdr = self.header\n\n info = \"\"\n info += f\"\\nMAGIC NUMBER: {hdr[Field.MAGIC_NUMBER]}\"\n info += \"\\n\"\n info += \"\\n\".join(f\"{k}: {v}\" for k, v in hdr.items() if not k.startswith('_'))\n return info\n",
"\"\"\" Testing loadsave module\n\"\"\"\n\nfrom os.path import dirname, join as pjoin\nimport shutil\nimport pathlib\n\nimport numpy as np\n\nfrom .. import (Spm99AnalyzeImage, Spm2AnalyzeImage,\n Nifti1Pair, Nifti1Image,\n Nifti2Pair, Nifti2Image)\nfrom ..loadsave import load, read_img_data\nfrom ..filebasedimages import ImageFileError\nfrom ..tmpdirs import InTemporaryDirectory, TemporaryDirectory\n\nfrom ..optpkg import optional_package\n_, have_scipy, _ = optional_package('scipy')\n\nfrom numpy.testing import (assert_almost_equal,\n assert_array_equal)\n\nimport pytest\n\ndata_path = pjoin(dirname(__file__), 'data')\n\n\ndef test_read_img_data():\n fnames_test = [\n 'example4d.nii.gz',\n 'example_nifti2.nii.gz',\n 'minc1_1_scale.mnc',\n 'minc1_4d.mnc',\n 'test.mgz',\n 'tiny.mnc'\n ]\n fnames_test += [pathlib.Path(p) for p in fnames_test]\n for fname in fnames_test:\n # os.path.join doesnt work between str / os.PathLike in py3.5\n fpath = pjoin(data_path, str(fname))\n if isinstance(fname, pathlib.Path):\n fpath = pathlib.Path(fpath)\n img = load(fpath)\n data = img.get_fdata()\n with pytest.deprecated_call():\n data2 = read_img_data(img)\n assert_array_equal(data, data2)\n # These examples have null scaling - assert prefer=unscaled is the same\n dao = img.dataobj\n if hasattr(dao, 'slope') and hasattr(img.header, 'raw_data_from_fileobj'):\n assert (dao.slope, dao.inter) == (1, 0)\n with pytest.deprecated_call():\n assert_array_equal(read_img_data(img, prefer='unscaled'), data)\n # Assert all caps filename works as well\n with TemporaryDirectory() as tmpdir:\n up_fpath = pjoin(tmpdir, str(fname).upper())\n if isinstance(fname, pathlib.Path):\n up_fpath = pathlib.Path(up_fpath)\n # shutil doesnt work with os.PathLike in py3.5\n shutil.copyfile(str(fpath), str(up_fpath))\n img = load(up_fpath)\n assert_array_equal(img.dataobj, data)\n del img\n\n\ndef test_file_not_found():\n with pytest.raises(FileNotFoundError):\n load('does_not_exist.nii.gz')\n\n\ndef test_load_empty_image():\n with InTemporaryDirectory():\n open('empty.nii', 'w').close()\n with pytest.raises(ImageFileError) as err:\n load('empty.nii')\n assert str(err.value).startswith('Empty file: ')\n\n\ndef test_read_img_data_nifti():\n shape = (2, 3, 4)\n data = np.random.normal(size=shape)\n out_dtype = np.dtype(np.int16)\n classes = (Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image)\n if have_scipy:\n classes += (Spm99AnalyzeImage, Spm2AnalyzeImage)\n with InTemporaryDirectory():\n for i, img_class in enumerate(classes):\n img = img_class(data, np.eye(4))\n img.set_data_dtype(out_dtype)\n # No filemap => error\n with pytest.deprecated_call(), pytest.raises(ImageFileError):\n read_img_data(img)\n # Make a filemap\n froot = f'an_image_{i}'\n img.file_map = img.filespec_to_file_map(froot)\n # Trying to read from this filemap will generate an error because\n # we are going to read from files that do not exist\n with pytest.deprecated_call(), pytest.raises(IOError):\n read_img_data(img)\n img.to_file_map()\n # Load - now the scaling and offset correctly applied\n img_fname = img.file_map['image'].filename\n img_back = load(img_fname)\n data_back = img_back.get_fdata()\n with pytest.deprecated_call():\n assert_array_equal(data_back, read_img_data(img_back))\n # This is the same as if we loaded the image and header separately\n hdr_fname = (img.file_map['header'].filename\n if 'header' in img.file_map else img_fname)\n with open(hdr_fname, 'rb') as fobj:\n hdr_back = img_back.header_class.from_fileobj(fobj)\n with open(img_fname, 'rb') 
as fobj:\n scaled_back = hdr_back.data_from_fileobj(fobj)\n assert_array_equal(data_back, scaled_back)\n # Unscaled is the same as returned from raw_data_from_fileobj\n with open(img_fname, 'rb') as fobj:\n unscaled_back = hdr_back.raw_data_from_fileobj(fobj)\n with pytest.deprecated_call():\n assert_array_equal(unscaled_back, read_img_data(img_back, prefer='unscaled'))\n # If we futz with the scaling in the header, the result changes\n with pytest.deprecated_call():\n assert_array_equal(data_back, read_img_data(img_back))\n has_inter = hdr_back.has_data_intercept\n old_slope = hdr_back['scl_slope']\n old_inter = hdr_back['scl_inter'] if has_inter else 0\n est_unscaled = (data_back - old_inter) / old_slope\n with pytest.deprecated_call():\n actual_unscaled = read_img_data(img_back, prefer='unscaled')\n assert_almost_equal(est_unscaled, actual_unscaled)\n img_back.header['scl_slope'] = 2.1\n if has_inter:\n new_inter = 3.14\n img_back.header['scl_inter'] = 3.14\n else:\n new_inter = 0\n # scaled scaling comes from new parameters in header\n with pytest.deprecated_call():\n assert np.allclose(actual_unscaled * 2.1 + new_inter,\n read_img_data(img_back))\n # Unscaled array didn't change\n with pytest.deprecated_call():\n assert_array_equal(actual_unscaled,\n read_img_data(img_back, prefer='unscaled'))\n # Check the offset too\n img.header.set_data_offset(1024)\n # Delete arrays still pointing to file, so Windows can re-use\n del actual_unscaled, unscaled_back\n img.to_file_map()\n # Write an integer of zeros after\n with open(img_fname, 'ab') as fobj:\n fobj.write(b'\\x00\\x00')\n img_back = load(img_fname)\n data_back = img_back.get_fdata()\n with pytest.deprecated_call():\n assert_array_equal(data_back, read_img_data(img_back))\n img_back.header.set_data_offset(1026)\n # Check we pick up new offset\n exp_offset = np.zeros((data.size,), data.dtype) + old_inter\n exp_offset[:-1] = np.ravel(data_back, order='F')[1:]\n exp_offset = np.reshape(exp_offset, shape, order='F')\n with pytest.deprecated_call():\n assert_array_equal(exp_offset, read_img_data(img_back))\n # Delete stuff that might hold onto file references\n del img, img_back, data_back\n",
"\"\"\" Test differences in affines by reslicing\n\nShould be run from directory containing .PAR _and_ matching .REC files from\nMichael's PAR / REC dataset at:\n\n http://psydata.ovgu.de/philips_achieva_testfiles/conversion2\n\nGives output something like:\n\nRMS of standard image Phantom_EPI_3mm_tra_SENSE_6_1.PAR : 148.619965177\nRMS resliced Phantom_EPI_3mm_cor_20APtrans_15RLrot_SENSE_15_1.PAR : 22.0706570007\nRMS resliced Phantom_EPI_3mm_cor_SENSE_8_1.PAR : 47.5762615987\nRMS resliced Phantom_EPI_3mm_sag_15AP_SENSE_13_1.PAR : 25.0972161667\nRMS resliced Phantom_EPI_3mm_sag_15FH_SENSE_12_1.PAR : 28.7508166372\nRMS resliced Phantom_EPI_3mm_sag_15RL_SENSE_11_1.PAR : 29.0544513507\nRMS resliced Phantom_EPI_3mm_sag_SENSE_7_1.PAR : 25.7621452929\nRMS resliced Phantom_EPI_3mm_tra_-30AP_10RL_20FH_SENSE_14_1.PAR : 32.0602533689\nRMS resliced Phantom_EPI_3mm_tra_15FH_SENSE_9_1.PAR : 28.8953071672\nRMS resliced Phantom_EPI_3mm_tra_15RL_SENSE_10_1.PAR : 29.0793602478\n\nThe *_cor_SENSE* image has a higher RMS because the back of the phantom is out\nof the field of view.\n\"\"\"\nimport glob\nimport numpy as np\nimport numpy.linalg as npl\n\nimport nibabel as nib\nfrom nibabel import parrec\nfrom nibabel.affines import to_matvec\nfrom nibabel.optpkg import optional_package\n\n_, have_scipy, _ = optional_package('scipy')\n\n\ndef resample_img2img(img_to, img_from, order=1, out_class=nib.Nifti1Image):\n if not have_scipy:\n raise Exception('Scipy must be installed to run resample_img2img.')\n\n from scipy import ndimage as spnd\n vox2vox = npl.inv(img_from.affine).dot(img_to.affine)\n rzs, trans = to_matvec(vox2vox)\n data = spnd.affine_transform(img_from.get_fdata(),\n rzs,\n trans,\n img_to.shape,\n order=order)\n return out_class(data, img_to.affine)\n\n\ndef gmean_norm(data):\n in_data = data > np.mean(data) * 0.8\n gmean = np.mean(data[in_data])\n return data / gmean\n\n\nif __name__ == '__main__':\n np.set_printoptions(suppress=True, precision=4)\n normal_fname = \"Phantom_EPI_3mm_tra_SENSE_6_1.PAR\"\n normal_img = parrec.load(normal_fname)\n normal_data = normal_img.get_fdata()\n normal_normed = gmean_norm(normal_data)\n\n print(f\"RMS of standard image {normal_fname:<44}: {np.sqrt(np.sum(normal_normed ** 2))}\")\n\n for parfile in glob.glob(\"*.PAR\"):\n if parfile == normal_fname:\n continue\n funny_img = parrec.load(parfile)\n fixed_img = resample_img2img(normal_img, funny_img)\n fixed_data = fixed_img.get_fdata()\n difference_data = normal_normed - gmean_norm(fixed_data)\n print(f'RMS resliced {parfile:<52} : {np.sqrt(np.sum(difference_data ** 2))}')\n",
"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\n\ndef configuration(parent_package=\"\", top_path=None):\n from numpy.distutils.misc_util import Configuration\n\n config = Configuration(\"dipy\", parent_package, top_path)\n\n # config.add_data_dir('tests')\n return config\n\n\nif __name__ == \"__main__\":\n from numpy.distutils.core import setup\n\n setup(**configuration(top_path=\"\").todict())\n",
"\"\"\" Tests for netcdf \"\"\"\n\nimport os\nfrom os.path import join as pjoin, dirname\nfrom io import BytesIO\nfrom glob import glob\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nimport pytest\n\nfrom ..netcdf import netcdf_file\n\nTEST_DATA_PATH = pjoin(dirname(__file__), 'data')\n\nN_EG_ELS = 11 # number of elements for example variable\nVARTYPE_EG = 'b' # var type for example variable\n\n\n@contextmanager\ndef make_simple(*args, **kwargs):\n f = netcdf_file(*args, **kwargs)\n f.history = 'Created for a test'\n f.createDimension('time', N_EG_ELS)\n time = f.createVariable('time', VARTYPE_EG, ('time',))\n time[:] = np.arange(N_EG_ELS)\n time.units = 'days since 2008-01-01'\n f.flush()\n yield f\n f.close()\n\n\ndef assert_simple_truths(ncfileobj):\n assert ncfileobj.history == b'Created for a test'\n time = ncfileobj.variables['time']\n assert time.units == b'days since 2008-01-01'\n assert time.shape == (N_EG_ELS,)\n assert time[-1] == N_EG_ELS - 1\n\n\ndef test_read_write_files(tmp_path):\n fname = str(tmp_path / 'simple.nc')\n\n with make_simple(fname, 'w') as f:\n pass\n # To read the NetCDF file we just created::\n with netcdf_file(fname) as f:\n # Using mmap is the default\n assert f.use_mmap\n assert_simple_truths(f)\n\n # Now without mmap\n with netcdf_file(fname, mmap=False) as f:\n # Using mmap is the default\n assert not f.use_mmap\n assert_simple_truths(f)\n\n # To read the NetCDF file we just created, as file object, no\n # mmap. When n * n_bytes(var_type) is not divisible by 4, this\n # raised an error in pupynere 1.0.12 and scipy rev 5893, because\n # calculated vsize was rounding up in units of 4 - see\n # https://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html\n fobj = open(fname, 'rb')\n with netcdf_file(fobj) as f:\n # by default, don't use mmap for file-like\n assert not f.use_mmap\n assert_simple_truths(f)\n\n\ndef test_read_write_sio():\n eg_sio1 = BytesIO()\n with make_simple(eg_sio1, 'w') as f1:\n str_val = eg_sio1.getvalue()\n\n eg_sio2 = BytesIO(str_val)\n with netcdf_file(eg_sio2) as f2:\n assert_simple_truths(f2)\n\n # Test that error is raised if attempting mmap for sio\n eg_sio3 = BytesIO(str_val)\n with pytest.raises(ValueError):\n netcdf_file(eg_sio3, 'r', True)\n # Test 64-bit offset write / read\n eg_sio_64 = BytesIO()\n with make_simple(eg_sio_64, 'w', version=2) as f_64:\n str_val = eg_sio_64.getvalue()\n\n eg_sio_64 = BytesIO(str_val)\n with netcdf_file(eg_sio_64) as f_64:\n assert_simple_truths(f_64)\n assert f_64.version_byte == 2\n # also when version 2 explicitly specified\n eg_sio_64 = BytesIO(str_val)\n with netcdf_file(eg_sio_64, version=2) as f_64:\n assert_simple_truths(f_64)\n assert f_64.version_byte == 2\n\n\ndef test_read_example_data():\n # read any example data files\n for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):\n with netcdf_file(fname, 'r') as f:\n pass\n with netcdf_file(fname, 'r', mmap=False) as f:\n pass\n\n\ndef test_itemset_no_segfault_on_readonly():\n # Regression test for ticket #1202.\n # Open the test file in read-only mode.\n filename = pjoin(TEST_DATA_PATH, 'example_1.nc')\n with netcdf_file(filename, 'r') as f:\n time_var = f.variables['time']\n\n # time_var.assignValue(42) should raise a RuntimeError--not seg. 
fault!\n with pytest.raises(RuntimeError):\n time_var.assignValue(42)\n\n\ndef test_write_invalid_dtype():\n dtypes = ['int64', 'uint64']\n if np.dtype('int').itemsize == 8: # 64-bit machines\n dtypes.append('int')\n if np.dtype('uint').itemsize == 8: # 64-bit machines\n dtypes.append('uint')\n\n with netcdf_file(BytesIO(), 'w') as f:\n f.createDimension('time', N_EG_ELS)\n for dt in dtypes:\n with pytest.raises(ValueError):\n f.createVariable('time', dt, ('time',))\n\n\ndef test_flush_rewind():\n stream = BytesIO()\n with make_simple(stream, mode='w') as f:\n x = f.createDimension('x', 4)\n v = f.createVariable('v', 'i2', ['x'])\n v[:] = 1\n f.flush()\n len_single = len(stream.getvalue())\n f.flush()\n len_double = len(stream.getvalue())\n\n assert len_single == len_double\n\n\ndef test_dtype_specifiers():\n # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.\n # Specifying np.int16 or similar only works from the same commit as this\n # comment was made.\n with make_simple(BytesIO(), mode='w') as f:\n f.createDimension('x',4)\n f.createVariable('v1', 'i2', ['x'])\n f.createVariable('v2', np.int16, ['x'])\n f.createVariable('v3', np.dtype(np.int16), ['x'])\n\n\ndef test_ticket_1720():\n io = BytesIO()\n\n items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]\n\n with netcdf_file(io, 'w') as f:\n f.history = 'Created for a test'\n f.createDimension('float_var', 10)\n float_var = f.createVariable('float_var', 'f', ('float_var',))\n float_var[:] = items\n float_var.units = 'metres'\n f.flush()\n contents = io.getvalue()\n\n io = BytesIO(contents)\n with netcdf_file(io, 'r') as f:\n assert f.history == b'Created for a test'\n float_var = f.variables['float_var']\n assert float_var.units == b'metres'\n assert float_var.shape == (10,)\n assert np.allclose(float_var[:], items)\n",
"\"\"\" Benchmarks for finite_range routine\n\nRun benchmarks with::\n\n import nibabel as nib\n nib.bench()\n\nRun this benchmark with::\n\n pytest -c <path>/benchmarks/pytest.benchmark.ini <path>/benchmarks/bench_finite_range.py\n\"\"\"\n\nimport sys\n\nimport numpy as np\n\n\nfrom .butils import print_git_title\n\nfrom numpy.testing import measure\n\nfrom nibabel.volumeutils import finite_range # NOQA\n\n\ndef bench_finite_range():\n rng = np.random.RandomState(20111001)\n repeat = 10\n img_shape = (128, 128, 64, 10)\n arr = rng.normal(size=img_shape)\n sys.stdout.flush()\n print_git_title(\"\\nFinite range\")\n mtime = measure('finite_range(arr)', repeat)\n print('%30s %6.2f' % ('float64 all finite', mtime))\n arr[:, :, :, 1] = np.nan\n mtime = measure('finite_range(arr)', repeat)\n print('%30s %6.2f' % ('float64 many NaNs', mtime))\n arr[:, :, :, 1] = np.inf\n mtime = measure('finite_range(arr)', repeat)\n print('%30s %6.2f' % ('float64 many infs', mtime))\n # Int16 input, float output\n arr = np.random.random_integers(low=-1000, high=1000, size=img_shape)\n arr = arr.astype(np.int16)\n mtime = measure('finite_range(arr)', repeat)\n print('%30s %6.2f' % ('int16', mtime))\n sys.stdout.flush()\n",
"#!python\n# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nfrom io import StringIO\nimport sys\nimport numpy as np\n\nfrom nibabel.loadsave import save\nfrom nibabel.cmdline.stats import main\nfrom nibabel import Nifti1Image\n\n\ndef test_volume(tmpdir, capsys):\n mask_data = np.zeros((20, 20, 20), dtype='u1')\n mask_data[5:15, 5:15, 5:15] = 1\n img = Nifti1Image(mask_data, np.eye(4))\n\n infile = tmpdir / \"input.nii\"\n save(img, infile)\n\n args = (f\"{infile} --Volume\")\n main(args.split())\n vol_mm3 = capsys.readouterr()\n args = (f\"{infile} --Volume --units vox\")\n main(args.split())\n vol_vox = capsys.readouterr()\n\n assert float(vol_mm3[0]) == 1000.0\n assert int(vol_vox[0]) == 1000",
"#!C:\\Users\\RIchardC\\Documents\\digitizePlots\\venv\\Scripts\\python.exe\n# Create Lyman/Fitz style long flat Design Files from plain-text onset files\n# EKK / June 2015\n# Python 2/3 compatibile, depends on Pandas and Numpy/Scipy\n\nfrom __future__ import print_function\nfrom pandas import concat, read_csv\nfrom argparse import ArgumentParser, FileType\nfrom numpy import empty\n\n\ndef main(args):\n runs_df = load_onsets(args.onsets_files, args)\n print(\"Saving designfile (%d rows) to %s\" % (runs_df.shape[0], args.out))\n runs_df.to_csv(args.out, index=False)\n\n\ndef load_onsets(onsets_files, args):\n \"\"\"Read onsets file and add metadata from their filenames.\n Return one concatenated pandas dataframe with all trials as rows.\"\"\"\n runs = []\n for i, fid in enumerate(onsets_files):\n run = read_csv(fid)\n\n # If any column arguments were given, convert to a lyman-like design\n # with explicitly named columns. Else, just concatenate and add 'run'.\n if (args.onset_col or args.duration_col or args.condition_col or\n args.pmods_col):\n run = rename_columns(args, run)\n condition_col = 'condition'\n # Remove blanks\n run = run[run[condition_col].notnull()]\n\n # Add fn and run to designfile\n run['filename'] = fid.name\n if 'run' not in run.columns:\n run['run'] = i + 1\n\n # Drop any columns thar are entirely empty (for vanity)\n for col_name, col in run.iteritems():\n if col.isnull().all():\n del(run[col_name])\n\n runs.append(run)\n\n return concat(runs, ignore_index=True)\n\n\ndef rename_columns(args, run):\n cols = ['run', 'onset', 'duration', 'condition']\n\n # Cleanup any columns that might exist if we don't want them\n if args.drop_cols:\n for col in cols:\n if col in run.columns:\n run.drop(col, axis=1, inplace=True)\n\n columns = {}\n\n columns[args.onset_col] = 'onset'\n columns[args.condition_col] = 'condition'\n\n if args.run_col:\n columns[args.run_col] = 'run'\n\n if args.duration_col:\n columns[args.duration_col] = 'duration'\n else:\n run['duration'] = 0\n\n if len(args.pmods_col):\n for pmod in args.pmods_col:\n columns[pmod] = 'pmod-' + pmod\n cols.append('pmod-' + pmod)\n\n run.rename(columns=columns, inplace=True)\n\n return run[cols]\n\n\ndef onsets_for(cond, run_df):\n \"\"\"\n Inputs:\n * Condition Label to grab onsets, durations & amplitudes for.\n * Pandas Dataframe for current run containing onsets values as columns.\n\n Outputs:\n * Returns a dictionary of extracted values for onsets, durations, etc.\n * Returns None if there are no onsets.\n \"\"\"\n condinfo = {}\n cond_df = run_df[run_df['condition'] == cond]\n\n if cond_df['onset'].notnull().any(): # Onsets Present\n if cond_df['duration'].notnull().any():\n durations = cond_df['duration'].tolist()\n else:\n durations = [0]\n\n condinfo = dict(\n name=cond,\n durations=durations,\n onsets=cond_df['onset'].tolist(),\n )\n\n if ('amplitude' in cond_df.columns and\n cond_df['amplitude'].notnull().any()):\n pmods = [dict(\n name=args.pmod_name,\n poly=1,\n param=cond_df['amplitude'].tolist(),\n )]\n condinfo['pmod'] = pmods\n else:\n condinfo = None\n return condinfo\n\n\ndef _lists_to_scipy(onsets_list):\n \"\"\"\n Inputs:\n * List of dicts (one dict for each condition)\n\n [{'name':'Low','durations':0,'onsets':[1,3,5]},\n {'name':'Hi', 'durations':0, 'onsets':[2,4,6]}]\n\n - Or with Parametric Modulators -\n [{'name':'Low','durations':0,'onsets':[1,3,5], 'pmod':[\n {'name': 'RT', 'poly':1, 'param':[42,13,666]}]},\n {'name':'High',, 'durations':0, 'onsets':[2,4,6]}]\n\n Outputs:\n * Dict of scipy arrays 
for keys names, durations and onsets\n that can be written using scipy.io.savemat\n \"\"\"\n\n conditions_n = len(onsets_list)\n names = empty((conditions_n,), dtype='object')\n durations = empty((conditions_n,), dtype='object')\n onsets = empty((conditions_n,), dtype='object')\n\n pmoddt = [('name', 'O'), ('poly', 'O'), ('param', 'O')]\n pmods = empty((conditions_n), dtype=pmoddt)\n has_pmods = False\n\n for i, ons in enumerate(onsets_list):\n names[i] = ons['name']\n durations[i] = ons['durations']\n onsets[i] = ons['onsets']\n if 'pmod' not in ons.keys():\n pmods[i]['name'], pmods[i]['poly'], pmods[i]['param'] = [], [], []\n else:\n # 'pmod': [{'name':'rt','poly':1,'param':[1,2,3]}]\n # Multiple pmods per condition are allowed, so pmod\n # is a list of dicts.\n has_pmods = True\n cond_pmod_list = ons['pmod']\n current_condition_n_pmods = len(cond_pmod_list)\n pmod_names = empty((current_condition_n_pmods,), dtype='object')\n pmod_param = empty((current_condition_n_pmods,), dtype='object')\n pmod_poly = empty((current_condition_n_pmods,), dtype='object')\n\n for pmod_i, val in enumerate(cond_pmod_list):\n pmod_names[pmod_i] = val['name']\n pmod_param[pmod_i] = val['param']\n pmod_poly[pmod_i] = float(val['poly'])\n\n pmods[i]['name'] = pmod_names\n pmods[i]['poly'] = pmod_poly\n pmods[i]['param'] = pmod_param\n\n scipy_onsets = dict(\n names=names,\n durations=durations,\n onsets=onsets\n )\n\n if has_pmods:\n scipy_onsets['pmod'] = pmods\n\n return scipy_onsets\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument('onsets_files', type=FileType('r'),\n help='List of FSL EV onsets to convert', nargs='+')\n parser.add_argument('--out', '-o', default='onsets_',\n help='Output filename.')\n parser.add_argument('--verbose', '-v', action=\"count\",\n help=\"increase output verbosity\")\n parser.add_argument('--pmod-name', default='pmod',\n help='Name to use when writing FSL Amplitude as SPM '\n 'parametric modulator')\n parser.add_argument('--conditions', '-c', default=[], nargs='+')\n parser.add_argument('--condition-col')\n parser.add_argument('--duration-col')\n parser.add_argument('--onset-col', default='')\n parser.add_argument('--pmods-col', default=[], nargs=\"*\")\n parser.add_argument('--run-col')\n parser.add_argument('--drop-cols', help='Drop pre-named columns in'\n 'longform',\n default=True)\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = parse_args()\n if args.verbose >= 2:\n print(args)\n main(args)\n"
] |
[
[
"numpy.absolute",
"numpy.asarray",
"numpy.linalg.eig",
"numpy.ones",
"numpy.array"
],
[
"numpy.diag",
"numpy.dot",
"numpy.asarray",
"numpy.ndarray",
"numpy.dtype",
"numpy.mean",
"numpy.any",
"numpy.iinfo",
"numpy.linalg.svd",
"numpy.allclose",
"numpy.eye",
"numpy.finfo",
"numpy.linalg.det",
"numpy.frombuffer",
"numpy.diff",
"numpy.isnan",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.isfinite",
"numpy.compat.py3k.asstr",
"numpy.sort"
],
[
"numpy.isinf",
"numpy.isnan",
"numpy.compat.py3k.asstr",
"numpy.eye",
"numpy.vstack",
"numpy.dtype",
"numpy.frombuffer",
"numpy.array",
"numpy.compat.py3k.asbytes",
"numpy.empty"
],
[
"numpy.reshape",
"numpy.eye",
"numpy.dtype",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_almost_equal",
"numpy.random.normal",
"numpy.ravel",
"numpy.zeros"
],
[
"numpy.linalg.inv",
"numpy.set_printoptions",
"numpy.mean",
"numpy.sum"
],
[
"numpy.distutils.misc_util.Configuration"
],
[
"numpy.arange",
"numpy.allclose",
"numpy.dtype"
],
[
"numpy.testing.measure",
"numpy.random.RandomState",
"numpy.random.random_integers"
],
[
"numpy.eye",
"numpy.zeros"
],
[
"pandas.concat",
"pandas.read_csv",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
fbr1/textmining-eac
|
[
"d35965fad15a54765c29afe76c98d2a634b36ef0"
] |
[
"main.py"
] |
[
"import numpy as np\r\nimport scipy.cluster.hierarchy as hr\r\nimport scipy.spatial as spa\r\nimport clustering\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nimport filter\r\n\r\nclass textMiningEac:\r\n def __init__(self,k,N,low,high=0):\r\n self.k = k\r\n\r\n # Leer datos desde archivo [Temporal]\r\n #data = np.genfromtxt('iris.data',delimiter=',')\r\n #temp= spa.distance.pdist(data,'euclidean')\r\n #self.D = spa.distance.squareform(temp)\r\n \r\n self.D,self.tweets,self.words,self.freq = filter.filtertweets()\r\n\r\n # Calcula la matriz de coasociacion\r\n self.loadEAC(N,low,high) \r\n\r\n def loadEAC(self,N,low,high=0):\r\n \"\"\"\r\n Genera de vuelta la matriz de coasociacion\r\n \"\"\"\r\n m,n = self.D.shape\r\n coasocMatrix = clustering.EAC(self.D,N,low,high)\r\n print(coasocMatrix)\r\n self.EAC_D = np.ones(n) - coasocMatrix\r\n \r\n def startPAM(self):\r\n \"\"\"\r\n Hace sobre PAM sobre la matriz de distancia del EAC\r\n \"\"\"\r\n (a,b,self.labels) = clustering.PAM(self.EAC_D, self.k,True)\t\r\n return self.labels\r\n\r\n def startHierarchical(self):\r\n \"\"\"\r\n Hace clustering Jerarquico sobre la matriz de distancia del EAC\r\n \"\"\"\r\n z = AgglomerativeClustering(n_clusters=self.k, linkage='ward').fit(self.EAC_D)\r\n self.labels = z.labels_\t\r\n return self.labels\r\n \r\n def getClustersTweets(self):\r\n \"\"\"\r\n Obtiene clusters en relacion a la frecuencia de aparicion de las palabras\r\n \"\"\"\r\n labelsTweets = np.zeros(len(self.tweets),dtype=np.int)\r\n for i in range(len(self.tweets)): \r\n \r\n acum = np.zeros(2)\r\n for j in range(len(self.labels)):\r\n \r\n # Si la palabra se encuentra en el tweet\r\n if(self.words[j] in self.tweets[i]): \r\n #Acumula el valor en el acumulador del indice del cluster\r\n acum[self.labels[j]] += self.freq[j] \r\n \r\n # Asigna el cluster con mayor valor acumulado\r\n labelsTweets[i] = np.argmax(acum)\r\n \r\n lista = labelsTweets.tolist()\r\n \r\n try: \r\n saveFile = open('clustered.csv','w') \r\n for i in range(len(self.tweets)):\r\n saveFile.write(str(lista[i])+': '+' '.join(self.tweets[i])+'\\n')\r\n saveFile.close()\r\n except Exception as e:\r\n print(\"error: {0}\".format(e))\r\n \r\n return labelsTweets\r\n\r\n def getPrecisionIris(self):\t\t\r\n \"\"\"\r\n Metodo de prueba\r\n Calcula una precision de acierto. No es fiable. \r\n \"\"\"\r\n \r\n #Lee los cluster originales\r\n originalClusters = np.genfromtxt('orCL.data',delimiter=',',dtype=None)\r\n \r\n results ={}\r\n \r\n j=0\r\n for i in range(50,151,50):\r\n # Encuentra el cluster con mayor frecuencia\r\n unique, counts = np.unique(self.labels[i-50:i], return_count=True)\r\n print(unique)\r\n print(counts)\r\n maxvalue = np.amax(counts)\r\n results[j]=maxvalue/50\r\n j=j+1\r\n \r\n print(\"Setosa= \" + '%.2f' % results[0] + \"\\nVersicolor= \" + '%.2f' % results[1] + \"\\nVirginica= \" + '%.2f' % results[2])\r\n \r\n def getSilhouette(self):\r\n \"\"\"\r\n Grafica silhouette\r\n \"\"\"\r\n clustering.Silhouette(self.D,self.labels,self.k)"
] |
[
[
"numpy.amax",
"numpy.unique",
"numpy.ones",
"numpy.genfromtxt",
"numpy.argmax",
"sklearn.cluster.AgglomerativeClustering",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nheinsdorf/pymatgen
|
[
"7437cc64fc8a850d4820ce5043c896a89bd0019a",
"7437cc64fc8a850d4820ce5043c896a89bd0019a",
"7437cc64fc8a850d4820ce5043c896a89bd0019a",
"7437cc64fc8a850d4820ce5043c896a89bd0019a",
"7437cc64fc8a850d4820ce5043c896a89bd0019a",
"7437cc64fc8a850d4820ce5043c896a89bd0019a"
] |
[
"pymatgen/analysis/molecule_matcher.py",
"pymatgen/util/testing.py",
"pymatgen/analysis/elasticity/elastic.py",
"pymatgen/analysis/elasticity/stress.py",
"pymatgen/analysis/transition_state.py",
"pymatgen/io/abinit/abitimer.py"
] |
[
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nThis module provides classes to perform fitting of molecule with arbitrary\natom orders.\nThis module is supposed to perform exact comparisons without the atom order\ncorrespondence prerequisite, while molecule_structure_comparator is supposed\nto do rough comparisons with the atom order correspondence prerequisite.\n\nThe implementation is based on an excellent python package called `rmsd` that\nyou can find at https://github.com/charnley/rmsd.\n\"\"\"\n\n__author__ = \"Xiaohui Qu, Adam Fekete\"\n__version__ = \"1.0\"\n__email__ = \"[email protected]\"\n\n\nimport abc\nimport copy\nimport itertools\nimport logging\nimport math\nimport re\n\nimport numpy as np\nfrom monty.dev import requires\nfrom monty.json import MSONable\n\ntry:\n from openbabel import openbabel as ob\n\n from pymatgen.io.babel import BabelMolAdaptor\nexcept ImportError:\n ob = None\n\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy.spatial.distance import cdist\n\nfrom pymatgen.core.structure import Molecule # pylint: disable=ungrouped-imports\n\nlogger = logging.getLogger(__name__)\n\n\nclass AbstractMolAtomMapper(MSONable, metaclass=abc.ABCMeta):\n \"\"\"\n Abstract molecular atom order mapping class. A mapping will be able to\n find the uniform atom order of two molecules that can pair the\n geometrically equivalent atoms.\n \"\"\"\n\n @abc.abstractmethod\n def uniform_labels(self, mol1, mol2):\n \"\"\"\n Pair the geometrically equivalent atoms of the molecules.\n\n Args:\n mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object.\n mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object.\n\n Returns:\n (list1, list2) if uniform atom order is found. list1 and list2\n are for mol1 and mol2, respectively. Their length equal\n to the number of atoms. They represents the uniform atom order\n of the two molecules. The value of each element is the original\n atom index in mol1 or mol2 of the current atom in uniform atom\n order.\n (None, None) if unform atom is not available.\n \"\"\"\n\n @abc.abstractmethod\n def get_molecule_hash(self, mol):\n \"\"\"\n Defines a hash for molecules. This allows molecules to be grouped\n efficiently for comparison.\n\n Args:\n mol: The molecule. OpenBabel OBMol or pymatgen Molecule object\n\n Returns:\n A hashable object. Examples can be string formulas, etc.\n \"\"\"\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Args:\n d (): Dict\n\n Returns:\n AbstractMolAtomMapper\n \"\"\"\n for trans_modules in [\"molecule_matcher\"]:\n\n level = 0 # Python 3.x\n mod = __import__(\n \"pymatgen.analysis.\" + trans_modules,\n globals(),\n locals(),\n [d[\"@class\"]],\n level,\n )\n if hasattr(mod, d[\"@class\"]):\n class_proxy = getattr(mod, d[\"@class\"])\n from_dict_proxy = getattr(class_proxy, \"from_dict\")\n return from_dict_proxy(d)\n raise ValueError(\"Invalid Comparator dict\")\n\n\nclass IsomorphismMolAtomMapper(AbstractMolAtomMapper):\n \"\"\"\n Pair atoms by isomorphism permutations in the OpenBabel::OBAlign class\n \"\"\"\n\n def uniform_labels(self, mol1, mol2):\n \"\"\"\n Pair the geometrically equivalent atoms of the molecules.\n Calculate RMSD on all possible isomorphism mappings and return mapping\n with the least RMSD\n\n Args:\n mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object.\n mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object.\n\n Returns:\n (list1, list2) if uniform atom order is found. 
list1 and list2\n are for mol1 and mol2, respectively. Their length equal\n to the number of atoms. They represents the uniform atom order\n of the two molecules. The value of each element is the original\n atom index in mol1 or mol2 of the current atom in uniform atom\n order.\n (None, None) if unform atom is not available.\n \"\"\"\n obmol1 = BabelMolAdaptor(mol1).openbabel_mol\n obmol2 = BabelMolAdaptor(mol2).openbabel_mol\n\n h1 = self.get_molecule_hash(obmol1)\n h2 = self.get_molecule_hash(obmol2)\n if h1 != h2:\n return None, None\n\n query = ob.CompileMoleculeQuery(obmol1)\n isomapper = ob.OBIsomorphismMapper.GetInstance(query)\n isomorph = ob.vvpairUIntUInt()\n isomapper.MapAll(obmol2, isomorph)\n\n sorted_isomorph = [sorted(x, key=lambda morp: morp[0]) for x in isomorph]\n label2_list = tuple(tuple(p[1] + 1 for p in x) for x in sorted_isomorph)\n\n vmol1 = obmol1\n aligner = ob.OBAlign(True, False)\n aligner.SetRefMol(vmol1)\n least_rmsd = float(\"Inf\")\n best_label2 = None\n label1 = list(range(1, obmol1.NumAtoms() + 1))\n # noinspection PyProtectedMember\n elements1 = InchiMolAtomMapper._get_elements(vmol1, label1)\n for label2 in label2_list:\n # noinspection PyProtectedMember\n elements2 = InchiMolAtomMapper._get_elements(obmol2, label2)\n if elements1 != elements2:\n continue\n vmol2 = ob.OBMol()\n for i in label2:\n vmol2.AddAtom(obmol2.GetAtom(i))\n aligner.SetTargetMol(vmol2)\n aligner.Align()\n rmsd = aligner.GetRMSD()\n if rmsd < least_rmsd:\n least_rmsd = rmsd\n best_label2 = copy.copy(label2)\n return label1, best_label2\n\n def get_molecule_hash(self, mol):\n \"\"\"\n Return inchi as molecular hash\n \"\"\"\n obconv = ob.OBConversion()\n obconv.SetOutFormat(\"inchi\")\n obconv.AddOption(\"X\", ob.OBConversion.OUTOPTIONS, \"DoNotAddH\")\n inchi_text = obconv.WriteString(mol)\n match = re.search(r\"InChI=(?P<inchi>.+)\\n\", inchi_text)\n return match.group(\"inchi\")\n\n def as_dict(self):\n \"\"\"\n Returns:\n Jsonable dict.\n \"\"\"\n return {\n \"version\": __version__,\n \"@module\": type(self).__module__,\n \"@class\": type(self).__name__,\n }\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Args:\n d (dict): Dict representation\n\n Returns:\n IsomorphismMolAtomMapper\n \"\"\"\n return IsomorphismMolAtomMapper()\n\n\nclass InchiMolAtomMapper(AbstractMolAtomMapper):\n \"\"\"\n Pair atoms by inchi labels.\n \"\"\"\n\n def __init__(self, angle_tolerance=10.0):\n \"\"\"\n Args:\n angle_tolerance (float): Angle threshold to assume linear molecule. In degrees.\n \"\"\"\n self._angle_tolerance = angle_tolerance\n self._assistant_mapper = IsomorphismMolAtomMapper()\n\n def as_dict(self):\n \"\"\"\n Returns:\n MSONAble dict.\n \"\"\"\n return {\n \"version\": __version__,\n \"@module\": type(self).__module__,\n \"@class\": type(self).__name__,\n \"angle_tolerance\": self._angle_tolerance,\n }\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Args:\n d (dict): Dict Representation\n\n Returns:\n InchiMolAtomMapper\n \"\"\"\n return InchiMolAtomMapper(angle_tolerance=d[\"angle_tolerance\"])\n\n @staticmethod\n def _inchi_labels(mol):\n \"\"\"\n Get the inchi canonical labels of the heavy atoms in the molecule\n\n Args:\n mol: The molecule. OpenBabel OBMol object\n\n Returns:\n The label mappings. 
List of tuple of canonical label,\n original label\n List of equivalent atoms.\n \"\"\"\n obconv = ob.OBConversion()\n obconv.SetOutFormat(\"inchi\")\n obconv.AddOption(\"a\", ob.OBConversion.OUTOPTIONS)\n obconv.AddOption(\"X\", ob.OBConversion.OUTOPTIONS, \"DoNotAddH\")\n inchi_text = obconv.WriteString(mol)\n match = re.search(\n r\"InChI=(?P<inchi>.+)\\nAuxInfo=.+\" r\"/N:(?P<labels>[0-9,;]+)/(E:(?P<eq_atoms>[0-9,\" r\";\\(\\)]*)/)?\",\n inchi_text,\n )\n inchi = match.group(\"inchi\")\n label_text = match.group(\"labels\")\n eq_atom_text = match.group(\"eq_atoms\")\n heavy_atom_labels = tuple(int(i) for i in label_text.replace(\";\", \",\").split(\",\"))\n eq_atoms = []\n if eq_atom_text is not None:\n eq_tokens = re.findall(r\"\\(((?:[0-9]+,)+[0-9]+)\\)\", eq_atom_text.replace(\";\", \",\"))\n eq_atoms = tuple(tuple(int(i) for i in t.split(\",\")) for t in eq_tokens)\n return heavy_atom_labels, eq_atoms, inchi\n\n @staticmethod\n def _group_centroid(mol, ilabels, group_atoms):\n \"\"\"\n Calculate the centroids of a group atoms indexed by the labels of inchi\n\n Args:\n mol: The molecule. OpenBabel OBMol object\n ilabel: inchi label map\n\n Returns:\n Centroid. Tuple (x, y, z)\n \"\"\"\n c1x, c1y, c1z = 0.0, 0.0, 0.0\n for i in group_atoms:\n orig_idx = ilabels[i - 1]\n oa1 = mol.GetAtom(orig_idx)\n c1x += float(oa1.x())\n c1y += float(oa1.y())\n c1z += float(oa1.z())\n num_atoms = len(group_atoms)\n c1x /= num_atoms\n c1y /= num_atoms\n c1z /= num_atoms\n return c1x, c1y, c1z\n\n def _virtual_molecule(self, mol, ilabels, eq_atoms):\n \"\"\"\n Create a virtual molecule by unique atoms, the centriods of the\n equivalent atoms\n\n Args:\n mol: The molecule. OpenBabel OBMol object\n ilables: inchi label map\n eq_atoms: equivalent atom labels\n farthest_group_idx: The equivalent atom group index in which\n there is the farthest atom to the centroid\n\n Return:\n The virtual molecule\n \"\"\"\n vmol = ob.OBMol()\n\n non_unique_atoms = {a for g in eq_atoms for a in g}\n all_atoms = set(range(1, len(ilabels) + 1))\n unique_atom_labels = sorted(all_atoms - non_unique_atoms)\n\n # try to align molecules using unique atoms\n for i in unique_atom_labels:\n orig_idx = ilabels[i - 1]\n oa1 = mol.GetAtom(orig_idx)\n a1 = vmol.NewAtom()\n a1.SetAtomicNum(oa1.GetAtomicNum())\n a1.SetVector(oa1.GetVector())\n\n # try to align using centroids of the equivalent atoms\n if vmol.NumAtoms() < 3:\n for symm in eq_atoms:\n c1x, c1y, c1z = self._group_centroid(mol, ilabels, symm)\n min_distance = float(\"inf\")\n for i in range(1, vmol.NumAtoms() + 1):\n va = vmol.GetAtom(i)\n distance = math.sqrt((c1x - va.x()) ** 2 + (c1y - va.y()) ** 2 + (c1z - va.z()) ** 2)\n if distance < min_distance:\n min_distance = distance\n if min_distance > 0.2:\n a1 = vmol.NewAtom()\n a1.SetAtomicNum(9)\n a1.SetVector(c1x, c1y, c1z)\n\n return vmol\n\n @staticmethod\n def _align_heavy_atoms(mol1, mol2, vmol1, vmol2, ilabel1, ilabel2, eq_atoms):\n \"\"\"\n Align the label of topologically identical atoms of second molecule\n towards first molecule\n\n Args:\n mol1: First molecule. OpenBabel OBMol object\n mol2: Second molecule. OpenBabel OBMol object\n vmol1: First virtual molecule constructed by centroids. OpenBabel\n OBMol object\n vmol2: First virtual molecule constructed by centroids. 
OpenBabel\n OBMol object\n ilabel1: inchi label map of the first molecule\n ilabel2: inchi label map of the second molecule\n eq_atoms: equivalent atom labels\n\n Return:\n corrected inchi labels of heavy atoms of the second molecule\n \"\"\"\n\n nvirtual = vmol1.NumAtoms()\n nheavy = len(ilabel1)\n\n for i in ilabel2: # add all heavy atoms\n a1 = vmol1.NewAtom()\n a1.SetAtomicNum(1)\n a1.SetVector(0.0, 0.0, 0.0) # useless, just to pair with vmol2\n oa2 = mol2.GetAtom(i)\n a2 = vmol2.NewAtom()\n a2.SetAtomicNum(1)\n # align using the virtual atoms, these atoms are not\n # used to align, but match by positions\n a2.SetVector(oa2.GetVector())\n\n aligner = ob.OBAlign(False, False)\n aligner.SetRefMol(vmol1)\n aligner.SetTargetMol(vmol2)\n aligner.Align()\n aligner.UpdateCoords(vmol2)\n\n canon_mol1 = ob.OBMol()\n for i in ilabel1:\n oa1 = mol1.GetAtom(i)\n a1 = canon_mol1.NewAtom()\n a1.SetAtomicNum(oa1.GetAtomicNum())\n a1.SetVector(oa1.GetVector())\n\n aligned_mol2 = ob.OBMol()\n for i in range(nvirtual + 1, nvirtual + nheavy + 1):\n oa2 = vmol2.GetAtom(i)\n a2 = aligned_mol2.NewAtom()\n a2.SetAtomicNum(oa2.GetAtomicNum())\n a2.SetVector(oa2.GetVector())\n\n canon_label2 = list(range(1, nheavy + 1))\n for symm in eq_atoms:\n for i in symm:\n canon_label2[i - 1] = -1\n for symm in eq_atoms:\n candidates1 = list(symm)\n candidates2 = list(symm)\n for c2 in candidates2:\n distance = 99999.0\n canon_idx = candidates1[0]\n a2 = aligned_mol2.GetAtom(c2)\n for c1 in candidates1:\n a1 = canon_mol1.GetAtom(c1)\n d = a1.GetDistance(a2)\n if d < distance:\n distance = d\n canon_idx = c1\n canon_label2[c2 - 1] = canon_idx\n candidates1.remove(canon_idx)\n\n canon_inchi_orig_map2 = list(zip(canon_label2, list(range(1, nheavy + 1)), ilabel2))\n canon_inchi_orig_map2.sort(key=lambda m: m[0])\n heavy_atom_indices2 = tuple(x[2] for x in canon_inchi_orig_map2)\n return heavy_atom_indices2\n\n @staticmethod\n def _align_hydrogen_atoms(mol1, mol2, heavy_indices1, heavy_indices2):\n \"\"\"\n Align the label of topologically identical atoms of second molecule\n towards first molecule\n\n Args:\n mol1: First molecule. OpenBabel OBMol object\n mol2: Second molecule. 
OpenBabel OBMol object\n heavy_indices1: inchi label map of the first molecule\n heavy_indices2: label map of the second molecule\n\n Return:\n corrected label map of all atoms of the second molecule\n \"\"\"\n num_atoms = mol2.NumAtoms()\n all_atom = set(range(1, num_atoms + 1))\n hydrogen_atoms1 = all_atom - set(heavy_indices1)\n hydrogen_atoms2 = all_atom - set(heavy_indices2)\n label1 = heavy_indices1 + tuple(hydrogen_atoms1)\n label2 = heavy_indices2 + tuple(hydrogen_atoms2)\n\n cmol1 = ob.OBMol()\n for i in label1:\n oa1 = mol1.GetAtom(i)\n a1 = cmol1.NewAtom()\n a1.SetAtomicNum(oa1.GetAtomicNum())\n a1.SetVector(oa1.GetVector())\n cmol2 = ob.OBMol()\n for i in label2:\n oa2 = mol2.GetAtom(i)\n a2 = cmol2.NewAtom()\n a2.SetAtomicNum(oa2.GetAtomicNum())\n a2.SetVector(oa2.GetVector())\n\n aligner = ob.OBAlign(False, False)\n aligner.SetRefMol(cmol1)\n aligner.SetTargetMol(cmol2)\n aligner.Align()\n aligner.UpdateCoords(cmol2)\n\n hydrogen_label2 = []\n hydrogen_label1 = list(range(len(heavy_indices1) + 1, num_atoms + 1))\n for h2 in range(len(heavy_indices2) + 1, num_atoms + 1):\n distance = 99999.0\n idx = hydrogen_label1[0]\n a2 = cmol2.GetAtom(h2)\n for h1 in hydrogen_label1:\n a1 = cmol1.GetAtom(h1)\n d = a1.GetDistance(a2)\n if d < distance:\n distance = d\n idx = h1\n hydrogen_label2.append(idx)\n hydrogen_label1.remove(idx)\n\n hydrogen_orig_idx2 = label2[len(heavy_indices2) :]\n hydrogen_canon_orig_map2 = list(zip(hydrogen_label2, hydrogen_orig_idx2))\n hydrogen_canon_orig_map2.sort(key=lambda m: m[0])\n hydrogen_canon_indices2 = [x[1] for x in hydrogen_canon_orig_map2]\n\n canon_label1 = label1\n canon_label2 = heavy_indices2 + tuple(hydrogen_canon_indices2)\n\n return canon_label1, canon_label2\n\n @staticmethod\n def _get_elements(mol, label):\n \"\"\"\n The the elements of the atoms in the specified order\n\n Args:\n mol: The molecule. OpenBabel OBMol object.\n label: The atom indices. List of integers.\n\n Returns:\n Elements. List of integers.\n \"\"\"\n elements = [int(mol.GetAtom(i).GetAtomicNum()) for i in label]\n return elements\n\n def _is_molecule_linear(self, mol):\n \"\"\"\n Is the molecule a linear one\n\n Args:\n mol: The molecule. OpenBabel OBMol object.\n\n Returns:\n Boolean value.\n \"\"\"\n if mol.NumAtoms() < 3:\n return True\n a1 = mol.GetAtom(1)\n a2 = mol.GetAtom(2)\n for i in range(3, mol.NumAtoms() + 1):\n angle = float(mol.GetAtom(i).GetAngle(a2, a1))\n if angle < 0.0:\n angle = -angle\n if angle > 90.0:\n angle = 180.0 - angle\n if angle > self._angle_tolerance:\n return False\n return True\n\n def uniform_labels(self, mol1, mol2):\n \"\"\"\n Args:\n mol1 (Molecule): Molecule 1\n mol2 (Molecule): Molecule 2\n\n Returns:\n Labels\n \"\"\"\n obmol1 = BabelMolAdaptor(mol1).openbabel_mol\n obmol2 = BabelMolAdaptor(mol2).openbabel_mol\n\n ilabel1, iequal_atom1, inchi1 = self._inchi_labels(obmol1)\n ilabel2, iequal_atom2, inchi2 = self._inchi_labels(obmol2)\n\n if inchi1 != inchi2:\n return None, None # Topoligically different\n\n if iequal_atom1 != iequal_atom2:\n raise Exception(\"Design Error! 
Equavilent atoms are inconsistent\")\n\n vmol1 = self._virtual_molecule(obmol1, ilabel1, iequal_atom1)\n vmol2 = self._virtual_molecule(obmol2, ilabel2, iequal_atom2)\n\n if vmol1.NumAtoms() != vmol2.NumAtoms():\n return None, None\n\n if vmol1.NumAtoms() < 3 or self._is_molecule_linear(vmol1) or self._is_molecule_linear(vmol2):\n # using isomorphism for difficult (actually simple) molecules\n clabel1, clabel2 = self._assistant_mapper.uniform_labels(mol1, mol2)\n else:\n heavy_atom_indices2 = self._align_heavy_atoms(obmol1, obmol2, vmol1, vmol2, ilabel1, ilabel2, iequal_atom1)\n clabel1, clabel2 = self._align_hydrogen_atoms(obmol1, obmol2, ilabel1, heavy_atom_indices2)\n if clabel1 and clabel2:\n elements1 = self._get_elements(obmol1, clabel1)\n elements2 = self._get_elements(obmol2, clabel2)\n\n if elements1 != elements2:\n return None, None\n\n return clabel1, clabel2\n\n def get_molecule_hash(self, mol):\n \"\"\"\n Return inchi as molecular hash\n \"\"\"\n obmol = BabelMolAdaptor(mol).openbabel_mol\n inchi = self._inchi_labels(obmol)[2]\n return inchi\n\n\nclass MoleculeMatcher(MSONable):\n \"\"\"\n Class to match molecules and identify whether molecules are the same.\n \"\"\"\n\n @requires(\n ob,\n \"BabelMolAdaptor requires openbabel to be installed with \"\n \"Python bindings. Please get it at http://openbabel.org \"\n \"(version >=3.0.0).\",\n )\n def __init__(self, tolerance=0.01, mapper=InchiMolAtomMapper()):\n \"\"\"\n Args:\n tolerance (float): RMSD difference threshold whether two molecules are\n different\n mapper (AbstractMolAtomMapper): MolAtomMapper object that is able to map the atoms of two\n molecule to uniform order\n \"\"\"\n self._tolerance = tolerance\n self._mapper = mapper\n\n def fit(self, mol1, mol2):\n \"\"\"\n Fit two molecules.\n\n Args:\n mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object\n mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object\n\n Returns:\n A boolean value indicates whether two molecules are the same.\n \"\"\"\n return self.get_rmsd(mol1, mol2) < self._tolerance\n\n def get_rmsd(self, mol1, mol2):\n \"\"\"\n Get RMSD between two molecule with arbitrary atom order.\n\n Returns:\n RMSD if topology of the two molecules are the same\n Infinite if the topology is different\n \"\"\"\n label1, label2 = self._mapper.uniform_labels(mol1, mol2)\n if label1 is None or label2 is None:\n return float(\"Inf\")\n return self._calc_rms(mol1, mol2, label1, label2)\n\n @staticmethod\n def _calc_rms(mol1, mol2, clabel1, clabel2):\n \"\"\"\n Calculate the RMSD.\n\n Args:\n mol1: The first molecule. OpenBabel OBMol or pymatgen Molecule\n object\n mol2: The second molecule. 
OpenBabel OBMol or pymatgen Molecule\n object\n clabel1: The atom indices that can reorder the first molecule to\n uniform atom order\n clabel1: The atom indices that can reorder the second molecule to\n uniform atom order\n\n Returns:\n The RMSD.\n \"\"\"\n obmol1 = BabelMolAdaptor(mol1).openbabel_mol\n obmol2 = BabelMolAdaptor(mol2).openbabel_mol\n\n cmol1 = ob.OBMol()\n for i in clabel1:\n oa1 = obmol1.GetAtom(i)\n a1 = cmol1.NewAtom()\n a1.SetAtomicNum(oa1.GetAtomicNum())\n a1.SetVector(oa1.GetVector())\n cmol2 = ob.OBMol()\n for i in clabel2:\n oa2 = obmol2.GetAtom(i)\n a2 = cmol2.NewAtom()\n a2.SetAtomicNum(oa2.GetAtomicNum())\n a2.SetVector(oa2.GetVector())\n\n aligner = ob.OBAlign(True, False)\n aligner.SetRefMol(cmol1)\n aligner.SetTargetMol(cmol2)\n aligner.Align()\n return aligner.GetRMSD()\n\n def group_molecules(self, mol_list):\n \"\"\"\n Group molecules by structural equality.\n\n Args:\n mol_list: List of OpenBabel OBMol or pymatgen objects\n\n Returns:\n A list of lists of matched molecules\n Assumption: if s1=s2 and s2=s3, then s1=s3\n This may not be true for small tolerances.\n \"\"\"\n mol_hash = [(i, self._mapper.get_molecule_hash(m)) for i, m in enumerate(mol_list)]\n mol_hash.sort(key=lambda x: x[1])\n\n # Use molecular hash to pre-group molecules.\n raw_groups = tuple(tuple(m[0] for m in g) for k, g in itertools.groupby(mol_hash, key=lambda x: x[1]))\n\n group_indices = []\n for rg in raw_groups:\n mol_eq_test = [\n (p[0], p[1], self.fit(mol_list[p[0]], mol_list[p[1]])) for p in itertools.combinations(sorted(rg), 2)\n ]\n mol_eq = {(p[0], p[1]) for p in mol_eq_test if p[2]}\n not_alone_mols = set(itertools.chain.from_iterable(mol_eq))\n alone_mols = set(rg) - not_alone_mols\n group_indices.extend([[m] for m in alone_mols])\n while len(not_alone_mols) > 0:\n current_group = {not_alone_mols.pop()}\n while len(not_alone_mols) > 0:\n candidate_pairs = {tuple(sorted(p)) for p in itertools.product(current_group, not_alone_mols)}\n mutual_pairs = candidate_pairs & mol_eq\n if len(mutual_pairs) == 0:\n break\n mutual_mols = set(itertools.chain.from_iterable(mutual_pairs))\n current_group |= mutual_mols\n not_alone_mols -= mutual_mols\n group_indices.append(sorted(current_group))\n\n group_indices.sort(key=lambda x: (len(x), -x[0]), reverse=True)\n all_groups = [[mol_list[i] for i in g] for g in group_indices]\n return all_groups\n\n def as_dict(self):\n \"\"\"\n Returns:\n MSONAble dict.\n \"\"\"\n return {\n \"version\": __version__,\n \"@module\": type(self).__module__,\n \"@class\": type(self).__name__,\n \"tolerance\": self._tolerance,\n \"mapper\": self._mapper.as_dict(),\n }\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Args:\n d (dict): Dict representation\n\n Returns:\n MoleculeMatcher\n \"\"\"\n return MoleculeMatcher(\n tolerance=d[\"tolerance\"],\n mapper=AbstractMolAtomMapper.from_dict(d[\"mapper\"]),\n )\n\n\nclass KabschMatcher(MSONable):\n \"\"\"Molecule matcher using Kabsch algorithm\n\n The Kabsch algorithm capable aligning two molecules by finding the parameters\n (translation, rotation) which minimize the root-mean-square-deviation (RMSD) of\n two molecules which are topologically (atom types, geometry) similar two each other.\n\n Notes:\n When aligning molecules, the atoms of the two molecules **must** be in the same\n order for the results to be sensible.\n \"\"\"\n\n def __init__(self, target: Molecule):\n \"\"\"Constructor of the matcher object.\n\n Args:\n target: a `Molecule` object used as a target during the alignment\n \"\"\"\n 
self.target = target\n\n def match(self, p: Molecule):\n \"\"\"Using the Kabsch algorithm the alignment of two molecules (P, Q)\n happens in three steps:\n - translate the P and Q into their centroid\n - compute of the optimal rotation matrix (U) using Kabsch algorithm\n - compute the translation (V) and rmsd\n\n The function returns the rotation matrix (U), translation vector (V),\n and RMSD between Q and P', where P' is:\n\n P' = P * U + V\n\n Args:\n p: a `Molecule` object what will be matched with the target one.\n\n Returns:\n U: Rotation matrix (D,D)\n V: Translation vector (D)\n RMSD : Root mean squared deviation between P and Q\n \"\"\"\n if self.target.atomic_numbers != p.atomic_numbers:\n raise ValueError(\"The order of the species aren't matching! Please try using `PermInvMatcher`.\")\n\n p_coord, q_coord = p.cart_coords, self.target.cart_coords\n\n # Both sets of coordinates must be translated first, so that their\n # centroid coincides with the origin of the coordinate system.\n p_trans, q_trans = p_coord.mean(axis=0), q_coord.mean(axis=0)\n p_centroid, q_centroid = p_coord - p_trans, q_coord - q_trans\n\n # The optimal rotation matrix U using Kabsch algorithm\n U = self.kabsch(p_centroid, q_centroid)\n\n p_prime_centroid = np.dot(p_centroid, U)\n rmsd = np.sqrt(np.mean(np.square(p_prime_centroid - q_centroid)))\n\n V = q_trans - np.dot(p_trans, U)\n\n return U, V, rmsd\n\n def fit(self, p: Molecule):\n \"\"\"Rotate and transform `p` molecule according to the best match.\n\n Args:\n p: a `Molecule` object what will be matched with the target one.\n\n Returns:\n p_prime: Rotated and translated of the `p` `Molecule` object\n rmsd: Root-mean-square-deviation between `p_prime` and the `target`\n \"\"\"\n U, V, rmsd = self.match(p)\n\n # Rotate and translate matrix `p` onto the target molecule.\n # P' = P * U + V\n p_prime = p.copy()\n for site in p_prime:\n site.coords = np.dot(site.coords, U) + V\n\n return p_prime, rmsd\n\n @staticmethod\n def kabsch(P: np.ndarray, Q: np.ndarray):\n \"\"\"The Kabsch algorithm is a method for calculating the optimal rotation matrix\n that minimizes the root mean squared deviation (RMSD) between two paired sets of points\n P and Q, centered around the their centroid.\n\n For more info see:\n - http://en.wikipedia.org/wiki/Kabsch_algorithm and\n - https://cnx.org/contents/HV-RsdwL@23/Molecular-Distance-Measures\n\n Args:\n P: Nx3 matrix, where N is the number of points.\n Q: Nx3 matrix, where N is the number of points.\n\n Returns:\n U: 3x3 rotation matrix\n \"\"\"\n\n # Computation of the cross-covariance matrix\n C = np.dot(P.T, Q)\n\n # Computation of the optimal rotation matrix\n # using singular value decomposition (SVD).\n V, S, WT = np.linalg.svd(C)\n\n # Getting the sign of the det(V*Wt) to decide whether\n d = np.linalg.det(np.dot(V, WT))\n\n # And finally calculating the optimal rotation matrix R\n # we need to correct our rotation matrix to ensure a right-handed coordinate system.\n U = np.dot(np.dot(V, np.diag([1, 1, d])), WT)\n\n return U\n\n\nclass BruteForceOrderMatcher(KabschMatcher):\n \"\"\"Finding the best match between molecules by selecting molecule order\n with the smallest RMSD from all the possible order combinations.\n\n Notes:\n When aligning molecules, the atoms of the two molecules **must** have same number\n of atoms from the same species.\n \"\"\"\n\n def match(self, p: Molecule, ignore_warning=False):\n \"\"\"Similar as `KabschMatcher.match` but this method also finds the order of\n atoms which belongs to the best 
match.\n\n A `ValueError` will be raised when the total number of possible combinations\n become unfeasible (more than a million combination).\n\n Args:\n p: a `Molecule` object what will be matched with the target one.\n ignore_warning: ignoring error when the number of combination is too large\n\n Returns:\n inds: The indices of atoms\n U: 3x3 rotation matrix\n V: Translation vector\n rmsd: Root mean squared deviation between P and Q\n \"\"\"\n\n q = self.target\n\n if sorted(p.atomic_numbers) != sorted(q.atomic_numbers):\n raise ValueError(\"The number of the same species aren't matching!\")\n\n _, count = np.unique(p.atomic_numbers, return_counts=True)\n total_permutations = 1\n for c in count:\n total_permutations *= np.math.factorial(c) # type: ignore\n\n if not ignore_warning and total_permutations > 1_000_000:\n raise ValueError(\n \"The number of all possible permutations \"\n \"({}) is not feasible to run this method!\".format(total_permutations)\n )\n\n p_coord, q_coord = p.cart_coords, q.cart_coords\n p_atoms, q_atoms = np.array(p.atomic_numbers), np.array(q.atomic_numbers)\n\n # Both sets of coordinates must be translated first, so that\n # their centroid coincides with the origin of the coordinate system.\n p_trans, q_trans = p_coord.mean(axis=0), q_coord.mean(axis=0)\n p_centroid, q_centroid = p_coord - p_trans, q_coord - q_trans\n\n # Sort the order of the target molecule by the elements\n q_inds = np.argsort(q_atoms)\n q_centroid = q_centroid[q_inds]\n\n # Initializing return values\n rmsd = np.inf\n\n # Generate all permutation grouped/sorted by the elements\n for p_inds_test in self.permutations(p_atoms):\n\n p_centroid_test = p_centroid[p_inds_test]\n U_test = self.kabsch(p_centroid_test, q_centroid)\n\n p_centroid_prime_test = np.dot(p_centroid_test, U_test)\n rmsd_test = np.sqrt(np.mean(np.square(p_centroid_prime_test - q_centroid)))\n\n if rmsd_test < rmsd:\n p_inds, U, rmsd = p_inds_test, U_test, rmsd_test\n\n # Rotate and translate matrix P unto matrix Q using Kabsch algorithm.\n # P' = P * U + V\n V = q_trans - np.dot(p_trans, U)\n\n # Using the original order of the indices\n inds = p_inds[np.argsort(q_inds)]\n\n return inds, U, V, rmsd\n\n def fit(self, p: Molecule, ignore_warning=False):\n \"\"\"Order, rotate and transform `p` molecule according to the best match.\n\n A `ValueError` will be raised when the total number of possible combinations\n become unfeasible (more than a million combinations).\n\n Args:\n p: a `Molecule` object what will be matched with the target one.\n ignore_warning: ignoring error when the number of combination is too large\n\n Returns:\n p_prime: Rotated and translated of the `p` `Molecule` object\n rmsd: Root-mean-square-deviation between `p_prime` and the `target`\n \"\"\"\n\n inds, U, V, rmsd = self.match(p, ignore_warning=ignore_warning)\n\n p_prime = Molecule.from_sites([p[i] for i in inds])\n for site in p_prime:\n site.coords = np.dot(site.coords, U) + V\n\n return p_prime, rmsd\n\n @staticmethod\n def permutations(atoms):\n \"\"\"Generates all the possible permutations of atom order. 
To achieve better\n performance all the cases where the atoms are different has been ignored.\n \"\"\"\n element_iterators = [itertools.permutations(np.where(atoms == element)[0]) for element in np.unique(atoms)]\n\n for inds in itertools.product(*element_iterators):\n yield np.array(list(itertools.chain(*inds)))\n\n\nclass HungarianOrderMatcher(KabschMatcher):\n \"\"\"This method pre-aligns the molecules based on their principal inertia\n axis and then re-orders the input atom list using the Hungarian method.\n\n Notes:\n This method cannot guarantee the best match but is very fast.\n\n When aligning molecules, the atoms of the two molecules **must** have same number\n of atoms from the same species.\n \"\"\"\n\n def match(self, p: Molecule):\n \"\"\"Similar as `KabschMatcher.match` but this method also finds the order of\n atoms which belongs to the best match.\n\n Args:\n p: a `Molecule` object what will be matched with the target one.\n\n Returns:\n inds: The indices of atoms\n U: 3x3 rotation matrix\n V: Translation vector\n rmsd: Root mean squared deviation between P and Q\n \"\"\"\n\n if sorted(p.atomic_numbers) != sorted(self.target.atomic_numbers):\n raise ValueError(\"The number of the same species aren't matching!\")\n\n p_coord, q_coord = p.cart_coords, self.target.cart_coords\n p_atoms, q_atoms = (\n np.array(p.atomic_numbers),\n np.array(self.target.atomic_numbers),\n )\n\n p_weights = np.array([site.species.weight for site in p])\n q_weights = np.array([site.species.weight for site in self.target])\n\n # Both sets of coordinates must be translated first, so that\n # their center of mass with the origin of the coordinate system.\n p_trans, q_trans = p.center_of_mass, self.target.center_of_mass\n p_centroid, q_centroid = p_coord - p_trans, q_coord - q_trans\n\n # Initializing return values\n rmsd = np.inf\n\n # Generate all permutation grouped/sorted by the elements\n for p_inds_test in self.permutations(p_atoms, p_centroid, p_weights, q_atoms, q_centroid, q_weights):\n\n p_centroid_test = p_centroid[p_inds_test]\n U_test = self.kabsch(p_centroid_test, q_centroid)\n\n p_centroid_prime_test = np.dot(p_centroid_test, U_test)\n rmsd_test = np.sqrt(np.mean(np.square(p_centroid_prime_test - q_centroid)))\n\n if rmsd_test < rmsd:\n inds, U, rmsd = p_inds_test, U_test, rmsd_test\n\n # Rotate and translate matrix P unto matrix Q using Kabsch algorithm.\n # P' = P * U + V\n V = q_trans - np.dot(p_trans, U)\n\n return inds, U, V, rmsd\n\n def fit(self, p: Molecule):\n \"\"\"Order, rotate and transform `p` molecule according to the best match.\n\n Args:\n p: a `Molecule` object what will be matched with the target one.\n\n Returns:\n p_prime: Rotated and translated of the `p` `Molecule` object\n rmsd: Root-mean-square-deviation between `p_prime` and the `target`\n \"\"\"\n\n inds, U, V, rmsd = self.match(p)\n\n # Translate and rotate `mol1` unto `mol2` using Kabsch algorithm.\n p_prime = Molecule.from_sites([p[i] for i in inds])\n for site in p_prime:\n site.coords = np.dot(site.coords, U) + V\n\n return p_prime, rmsd\n\n @staticmethod\n def permutations(p_atoms, p_centroid, p_weights, q_atoms, q_centroid, q_weights):\n \"\"\"Generates two possible permutations of atom order. 
This method uses the principle component\n of the inertia tensor to prealign the molecules and hungarian method to determine the order.\n There are always two possible permutation depending on the way to pre-aligning the molecules.\n\n Args:\n p_atoms: atom numbers\n p_centroid: array of atom positions\n p_weights: array of atom weights\n q_atoms: atom numbers\n q_centroid: array of atom positions\n q_weights: array of atom weights\n\n Yield:\n perm_inds: array of atoms' order\n \"\"\"\n # get the principal axis of P and Q\n p_axis = HungarianOrderMatcher.get_principal_axis(p_centroid, p_weights)\n q_axis = HungarianOrderMatcher.get_principal_axis(q_centroid, q_weights)\n\n # rotate Q onto P considering that the axis are parallel and antiparallel\n U = HungarianOrderMatcher.rotation_matrix_vectors(q_axis, p_axis)\n p_centroid_test = np.dot(p_centroid, U)\n\n # generate full view from q shape to fill in atom view on the fly\n perm_inds = np.zeros(len(p_atoms), dtype=int)\n\n # Find unique atoms\n species = np.unique(p_atoms)\n\n for specie in species:\n p_atom_inds = np.where(p_atoms == specie)[0]\n q_atom_inds = np.where(q_atoms == specie)[0]\n A = q_centroid[q_atom_inds]\n B = p_centroid_test[p_atom_inds]\n\n # Perform Hungarian analysis on distance matrix between atoms of 1st\n # structure and trial structure\n distances = cdist(A, B, \"euclidean\")\n a_inds, b_inds = linear_sum_assignment(distances)\n\n perm_inds[q_atom_inds] = p_atom_inds[b_inds]\n\n yield perm_inds\n\n # rotate Q onto P considering that the axis are parallel and antiparallel\n U = HungarianOrderMatcher.rotation_matrix_vectors(q_axis, -p_axis)\n p_centroid_test = np.dot(p_centroid, U)\n\n # generate full view from q shape to fill in atom view on the fly\n perm_inds = np.zeros(len(p_atoms), dtype=int)\n\n # Find unique atoms\n species = np.unique(p_atoms)\n\n for specie in species:\n p_atom_inds = np.where(p_atoms == specie)[0]\n q_atom_inds = np.where(q_atoms == specie)[0]\n A = q_centroid[q_atom_inds]\n B = p_centroid_test[p_atom_inds]\n\n # Perform Hungarian analysis on distance matrix between atoms of 1st\n # structure and trial structure\n distances = cdist(A, B, \"euclidean\")\n a_inds, b_inds = linear_sum_assignment(distances)\n\n perm_inds[q_atom_inds] = p_atom_inds[b_inds]\n\n yield perm_inds\n\n @staticmethod\n def get_principal_axis(coords, weights):\n \"\"\"Get the molecule's principal axis.\n\n Args:\n coords: coordinates of atoms\n weights: the weight use for calculating the inertia tensor\n\n Returns:\n Array of dim 3 containing the principal axis\n \"\"\"\n\n Ixx = Iyy = Izz = Ixy = Ixz = Iyz = 0.0\n\n for (x, y, z), wt in zip(coords, weights):\n\n Ixx += wt * (y * y + z * z)\n Iyy += wt * (x * x + z * z)\n Izz += wt * (x * x + y * y)\n\n Ixy += -wt * x * y\n Ixz += -wt * x * z\n Iyz += -wt * y * z\n\n inertia_tensor = np.array([[Ixx, Ixy, Ixz], [Ixy, Iyy, Iyz], [Ixz, Iyz, Izz]])\n\n eigvals, eigvecs = np.linalg.eigh(inertia_tensor)\n\n principal_axis = eigvecs[:, 0]\n return principal_axis\n\n @staticmethod\n def rotation_matrix_vectors(v1, v2):\n \"\"\"Returns the rotation matrix that rotates v1 onto v2 using\n Rodrigues' rotation formula.\n\n See more: https://math.stackexchange.com/a/476311\n\n Args:\n v1: initial vector\n v2: target vector\n\n Returns:\n 3x3 rotation matrix\n \"\"\"\n\n if np.allclose(v1, v2):\n # same direction\n return np.eye(3)\n\n if np.allclose(v1, -v2):\n # opposite direction: return a rotation of pi around the y-axis\n return np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 
0.0, -1.0]])\n\n v = np.cross(v1, v2)\n s = np.linalg.norm(v)\n c = np.vdot(v1, v2)\n\n vx = np.array([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]])\n\n return np.eye(3) + vx + np.dot(vx, vx) * ((1.0 - c) / (s * s))\n\n\nclass GeneticOrderMatcher(KabschMatcher):\n \"\"\"This method was inspired by genetic algorithms and tries to match molecules\n based on their already matched fragments.\n\n It uses the fact that when two molecule is matching their sub-structures have to match as well.\n The main idea here is that in each iteration (generation) we can check the match of all possible\n fragments and ignore those which are not feasible.\n\n Although in the worst case this method has N! complexity (same as the brute force one),\n in practice it performs much faster because many of the combination can be eliminated\n during the fragment matching.\n\n Notes:\n This method very robust and returns with all the possible orders.\n\n There is a well known weakness/corner case: The case when there is\n a outlier with large deviation with a small index might be ignored.\n This happens due to the nature of the average function\n used to calculate the RMSD for the fragments.\n\n When aligning molecules, the atoms of the two molecules **must** have the\n same number of atoms from the same species.\n \"\"\"\n\n def __init__(self, target: Molecule, threshold: float):\n \"\"\"Constructor of the matcher object.\n\n Args:\n target: a `Molecule` object used as a target during the alignment\n threshold: value used to match fragments and prune configuration\n \"\"\"\n super().__init__(target)\n self.threshold = threshold\n self.N = len(target)\n\n def match(self, p: Molecule):\n \"\"\"Similar as `KabschMatcher.match` but this method also finds all of the\n possible atomic orders according to the `threshold`.\n\n Args:\n p: a `Molecule` object what will be matched with the target one.\n\n Returns:\n Array of the possible matches where the elements are:\n inds: The indices of atoms\n U: 3x3 rotation matrix\n V: Translation vector\n rmsd: Root mean squared deviation between P and Q\n \"\"\"\n out = []\n for inds in self.permutations(p):\n p_prime = p.copy()\n p_prime._sites = [p_prime[i] for i in inds]\n\n U, V, rmsd = super().match(p_prime)\n\n out.append((inds, U, V, rmsd))\n\n return out\n\n def fit(self, p: Molecule):\n \"\"\"Order, rotate and transform all of the matched `p` molecule\n according to the given `threshold`.\n\n Args:\n p: a `Molecule` object what will be matched with the target one.\n\n Returns:\n Array of the possible matches where the elements are:\n p_prime: Rotated and translated of the `p` `Molecule` object\n rmsd: Root-mean-square-deviation between `p_prime` and the `target`\n \"\"\"\n out = []\n for inds in self.permutations(p):\n p_prime = p.copy()\n p_prime._sites = [p_prime[i] for i in inds]\n\n U, V, rmsd = super().match(p_prime)\n\n # Rotate and translate matrix `p` onto the target molecule.\n # P' = P * U + V\n for site in p_prime:\n site.coords = np.dot(site.coords, U) + V\n\n out.append((p_prime, rmsd))\n\n return out\n\n def permutations(self, p: Molecule):\n \"\"\"Generates all of possible permutations of atom order according the threshold.\n\n Args:\n p: a `Molecule` object what will be matched with the target one.\n\n Returns:\n Array of index arrays\n \"\"\"\n\n # caching atomic numbers and coordinates\n p_atoms, q_atoms = p.atomic_numbers, self.target.atomic_numbers\n p_coords, q_coords = p.cart_coords, self.target.cart_coords\n\n if sorted(p_atoms) != 
sorted(q_atoms):\n raise ValueError(\"The number of the same species aren't matching!\")\n\n # starting matches (only based on element)\n partial_matches = [[j] for j in range(self.N) if p_atoms[j] == q_atoms[0]]\n\n for i in range(1, self.N):\n # extending the target fragment with then next atom\n f_coords = q_coords[: i + 1]\n f_atom = q_atoms[i]\n\n f_trans = f_coords.mean(axis=0)\n f_centroid = f_coords - f_trans\n\n matches = []\n for indices in partial_matches:\n\n for j in range(self.N):\n\n # skipping if the this index is already matched\n if j in indices:\n continue\n\n # skipping if they are different species\n if p_atoms[j] != f_atom:\n continue\n\n inds = indices + [j]\n P = p_coords[inds]\n\n # Both sets of coordinates must be translated first, so that\n # their centroid coincides with the origin of the coordinate system.\n p_trans = P.mean(axis=0)\n p_centroid = P - p_trans\n\n # The optimal rotation matrix U using Kabsch algorithm\n U = self.kabsch(p_centroid, f_centroid)\n\n p_prime_centroid = np.dot(p_centroid, U)\n rmsd = np.sqrt(np.mean(np.square(p_prime_centroid - f_centroid)))\n\n # rejecting if the deviation is too large\n if rmsd > self.threshold:\n continue\n\n logger.debug(f\"match - rmsd: {rmsd}, inds: {inds}\")\n matches.append(inds)\n\n partial_matches = matches\n\n logger.info(f\"number of atom in the fragment: {i + 1}, number of possible matches: {len(matches)}\")\n\n return matches\n",
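The matcher classes in the file above (KabschMatcher and its order-aware subclasses) all reduce to the same core step: center both coordinate sets on their centroids, build the cross-covariance matrix, take its SVD with a determinant sign correction, and measure the remaining RMSD. The following is a minimal standalone sketch of that step using only NumPy; the function name `kabsch_rmsd` and the sample coordinates are made up for illustration and are not part of pymatgen.

# Minimal sketch of the Kabsch alignment described in the docstrings above:
# translate to centroids, SVD of the cross-covariance matrix, sign
# correction via the determinant, then RMSD of the aligned points.
import numpy as np

def kabsch_rmsd(P, Q):
    """Return (U, V, rmsd) such that P @ U + V best matches Q."""
    P = np.asarray(P, dtype=float)
    Q = np.asarray(Q, dtype=float)
    p_trans, q_trans = P.mean(axis=0), Q.mean(axis=0)
    P_c, Q_c = P - p_trans, Q - q_trans
    # Cross-covariance matrix and its singular value decomposition
    C = P_c.T @ Q_c
    V_svd, S, WT = np.linalg.svd(C)
    # Keep the rotation right-handed (avoid an improper reflection)
    d = np.sign(np.linalg.det(V_svd @ WT))
    U = V_svd @ np.diag([1.0, 1.0, d]) @ WT
    rmsd = np.sqrt(np.mean(np.square(P_c @ U - Q_c)))
    V = q_trans - p_trans @ U
    return U, V, rmsd

if __name__ == "__main__":
    # A rigidly rotated and translated copy of four random points
    # should align back with an RMSD of (numerically) zero.
    rng = np.random.default_rng(0)
    P = rng.random((4, 3))
    theta = 0.3
    R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0,            0.0,           1.0]])
    Q = P @ R + np.array([1.0, -2.0, 0.5])
    U, V, rmsd = kabsch_rmsd(P, Q)
    print(round(float(rmsd), 8))  # expected: ~0.0, up to floating point noise

The order-matching variants above differ only in how they choose the atom permutation fed into this alignment (brute force over permutations, Hungarian assignment on a distance matrix, or fragment-growing), so a sketch like this covers the shared geometric core.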
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nCommon test support for pymatgen test scripts.\n\nThis single module should provide all the common functionality for pymatgen\ntests in a single location, so that test scripts can just import it and work\nright away.\n\"\"\"\n\nimport json\nimport tempfile\nimport unittest\nfrom pathlib import Path\n\nimport numpy.testing as nptu\nfrom monty.dev import requires\nfrom monty.json import MontyDecoder, MSONable\nfrom monty.serialization import loadfn\n\nfrom pymatgen.core import SETTINGS\nfrom pymatgen.ext.matproj import MPRester\n\n\nclass PymatgenTest(unittest.TestCase):\n \"\"\"\n Extends unittest.TestCase with functions (taken from numpy.testing.utils)\n that support the comparison of arrays.\n \"\"\"\n\n _multiprocess_shared_ = True\n MODULE_DIR = Path(__file__).absolute().parent\n STRUCTURES_DIR = MODULE_DIR / \"structures\"\n try:\n TEST_FILES_DIR = Path(SETTINGS[\"PMG_TEST_FILES_DIR\"])\n except KeyError:\n import warnings\n\n warnings.warn(\n \"It is recommended that you set the PMG_TEST_FILES_DIR environment variable explicitly. \"\n \"Now using a fallback location based on relative path from this module.\"\n )\n TEST_FILES_DIR = MODULE_DIR / \"..\" / \"..\" / \"test_files\"\n \"\"\"\n Dict for test structures to aid testing.\n \"\"\"\n TEST_STRUCTURES = {}\n for fn in STRUCTURES_DIR.iterdir():\n TEST_STRUCTURES[fn.name.rsplit(\".\", 1)[0]] = loadfn(str(fn))\n\n @classmethod\n def get_structure(cls, name):\n \"\"\"\n Get a structure from the template directories.\n\n :param name: Name of a structure.\n :return: Structure\n \"\"\"\n return cls.TEST_STRUCTURES[name].copy()\n\n @classmethod\n @requires(SETTINGS.get(\"PMG_MAPI_KEY\"), \"PMG_MAPI_KEY needs to be set.\")\n def get_mp_structure(cls, mpid):\n \"\"\"\n Get a structure from MP.\n\n :param mpid: Materials Project id.\n :return: Structure\n \"\"\"\n m = MPRester()\n return m.get_structure_by_material_id(mpid)\n\n @staticmethod\n def assertArrayAlmostEqual(actual, desired, decimal=7, err_msg=\"\", verbose=True):\n \"\"\"\n Tests if two arrays are almost equal to a tolerance. The CamelCase\n naming is so that it is consistent with standard unittest methods.\n \"\"\"\n return nptu.assert_almost_equal(actual, desired, decimal, err_msg, verbose)\n\n @staticmethod\n def assertDictsAlmostEqual(actual, desired, decimal=7, err_msg=\"\", verbose=True):\n \"\"\"\n Tests if two arrays are almost equal to a tolerance. The CamelCase\n naming is so that it is consistent with standard unittest methods.\n \"\"\"\n\n for k, v in actual.items():\n if k not in desired:\n return False\n v2 = desired[k]\n if isinstance(v, dict):\n pass_test = PymatgenTest.assertDictsAlmostEqual(\n v, v2, decimal=decimal, err_msg=err_msg, verbose=verbose\n )\n if not pass_test:\n return False\n elif isinstance(v, (list, tuple)):\n pass_test = nptu.assert_almost_equal(v, v2, decimal, err_msg, verbose)\n if not pass_test:\n return False\n elif isinstance(v, (int, float)):\n PymatgenTest().assertAlmostEqual(v, v2) # pylint: disable=E1120\n else:\n assert v == v2\n return True\n\n @staticmethod\n def assertArrayEqual(actual, desired, err_msg=\"\", verbose=True):\n \"\"\"\n Tests if two arrays are equal. 
The CamelCase naming is so that it is\n consistent with standard unittest methods.\n \"\"\"\n return nptu.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)\n\n @staticmethod\n def assertStrContentEqual(actual, desired, err_msg=\"\", verbose=True):\n \"\"\"\n Tests if two strings are equal, ignoring things like trailing spaces,\n etc.\n \"\"\"\n lines1 = actual.split(\"\\n\")\n lines2 = desired.split(\"\\n\")\n if len(lines1) != len(lines2):\n return False\n failed = []\n for l1, l2 in zip(lines1, lines2):\n if l1.strip() != l2.strip():\n failed.append(f\"{l1} != {l2}\")\n return len(failed) == 0\n\n def serialize_with_pickle(self, objects, protocols=None, test_eq=True):\n \"\"\"\n Test whether the object(s) can be serialized and deserialized with\n pickle. This method tries to serialize the objects with pickle and the\n protocols specified in input. Then it deserializes the pickle format\n and compares the two objects with the __eq__ operator if\n test_eq == True.\n\n Args:\n objects: Object or list of objects.\n protocols: List of pickle protocols to test. If protocols is None,\n HIGHEST_PROTOCOL is tested.\n\n Returns:\n Nested list with the objects deserialized with the specified\n protocols.\n \"\"\"\n # Use the python version so that we get the traceback in case of errors\n import pickle\n\n from pymatgen.util.serialization import pmg_pickle_dump, pmg_pickle_load\n\n # Build a list even when we receive a single object.\n got_single_object = False\n if not isinstance(objects, (list, tuple)):\n got_single_object = True\n objects = [objects]\n\n if protocols is None:\n protocols = [pickle.HIGHEST_PROTOCOL]\n\n # This list will contains the object deserialized with the different\n # protocols.\n objects_by_protocol, errors = [], []\n\n for protocol in protocols:\n # Serialize and deserialize the object.\n mode = \"wb\"\n fd, tmpfile = tempfile.mkstemp(text=\"b\" not in mode)\n\n try:\n with open(tmpfile, mode) as fh:\n pmg_pickle_dump(objects, fh, protocol=protocol)\n except Exception as exc:\n errors.append(f\"pickle.dump with protocol {protocol} raised:\\n{exc}\")\n continue\n\n try:\n with open(tmpfile, \"rb\") as fh:\n new_objects = pmg_pickle_load(fh)\n except Exception as exc:\n errors.append(f\"pickle.load with protocol {protocol} raised:\\n{exc}\")\n continue\n\n # Test for equality\n if test_eq:\n for old_obj, new_obj in zip(objects, new_objects):\n self.assertEqual(old_obj, new_obj)\n\n # Save the deserialized objects and test for equality.\n objects_by_protocol.append(new_objects)\n\n if errors:\n raise ValueError(\"\\n\".join(errors))\n\n # Return nested list so that client code can perform additional tests.\n if got_single_object:\n return [o[0] for o in objects_by_protocol]\n return objects_by_protocol\n\n def assertMSONable(self, obj, test_if_subclass=True):\n \"\"\"\n Tests if obj is MSONable and tries to verify whether the contract is\n fulfilled.\n\n By default, the method tests whether obj is an instance of MSONable.\n This check can be deactivated by setting test_if_subclass to False.\n \"\"\"\n if test_if_subclass:\n self.assertIsInstance(obj, MSONable)\n self.assertDictEqual(obj.as_dict(), obj.__class__.from_dict(obj.as_dict()).as_dict())\n json.loads(obj.to_json(), cls=MontyDecoder)\n",
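The testing helpers above wrap two recurring patterns: array comparison via numpy.testing and object round-tripping through pickle (and MSON/JSON). As a minimal illustration of the round-trip idea that `serialize_with_pickle` automates, here is a self-contained sketch using only the standard library; the `Point` class and the test name are invented for the example and are not pymatgen API.

# Dump an object with a given pickle protocol, load it back, and compare
# with == — the same check serialize_with_pickle performs per protocol.
import pickle
import tempfile
import unittest
from dataclasses import dataclass


@dataclass
class Point:
    x: float
    y: float


class PickleRoundTripTest(unittest.TestCase):
    def test_round_trip(self):
        original = [Point(1.0, 2.0), Point(-3.5, 0.0)]
        with tempfile.TemporaryFile() as fh:
            pickle.dump(original, fh, protocol=pickle.HIGHEST_PROTOCOL)
            fh.seek(0)
            restored = pickle.load(fh)
        # dataclasses compare field-by-field, so equality is meaningful here
        self.assertEqual(original, restored)


if __name__ == "__main__":
    unittest.main()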
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nThis module provides a class used to describe the elastic tensor,\nincluding methods used to fit the elastic tensor from linear response\nstress-strain data\n\"\"\"\n\nimport itertools\nimport warnings\n\nimport numpy as np\nimport sympy as sp\nfrom scipy.integrate import quad\nfrom scipy.optimize import root\nfrom scipy.special import factorial\n\nfrom pymatgen.analysis.elasticity.strain import Strain\nfrom pymatgen.analysis.elasticity.stress import Stress\nfrom pymatgen.core.tensors import (\n DEFAULT_QUAD,\n SquareTensor,\n Tensor,\n TensorCollection,\n get_uvec,\n)\nfrom pymatgen.core.units import Unit\n\n__author__ = \"Joseph Montoya\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__credits__ = \"Maarten de Jong, Ian Winter, Shyam Dwaraknath, Mark Asta, Anubhav Jain\"\n__version__ = \"1.0\"\n__maintainer__ = \"Joseph Montoya\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n__date__ = \"July 24, 2018\"\n\n\nclass NthOrderElasticTensor(Tensor):\n \"\"\"\n An object representing an nth-order tensor expansion\n of the stress-strain constitutive equations\n \"\"\"\n\n GPa_to_eV_A3 = Unit(\"GPa\").get_conversion_factor(Unit(\"eV ang^-3\"))\n symbol = \"C\"\n\n def __new__(cls, input_array, check_rank=None, tol=1e-4):\n \"\"\"\n Args:\n input_array ():\n check_rank ():\n tol ():\n \"\"\"\n obj = super().__new__(cls, input_array, check_rank=check_rank)\n if obj.rank % 2 != 0:\n raise ValueError(\"ElasticTensor must have even rank\")\n if not obj.is_voigt_symmetric(tol):\n warnings.warn(\"Input elastic tensor does not satisfy standard voigt symmetries\")\n return obj.view(cls)\n\n @property\n def order(self):\n \"\"\"\n Order of the elastic tensor\n \"\"\"\n return self.rank // 2\n\n def calculate_stress(self, strain):\n \"\"\"\n Calculate's a given elastic tensor's contribution to the\n stress using Einstein summation\n\n Args:\n strain (3x3 array-like): matrix corresponding to strain\n \"\"\"\n strain = np.array(strain)\n if strain.shape == (6,):\n strain = Strain.from_voigt(strain)\n assert strain.shape == (3, 3), \"Strain must be 3x3 or voigt-notation\"\n stress_matrix = self.einsum_sequence([strain] * (self.order - 1)) / factorial(self.order - 1)\n return Stress(stress_matrix)\n\n def energy_density(self, strain, convert_GPa_to_eV=True):\n \"\"\"\n Calculates the elastic energy density due to a strain\n \"\"\"\n e_density = np.sum(self.calculate_stress(strain) * strain) / self.order\n if convert_GPa_to_eV:\n e_density *= self.GPa_to_eV_A3 # Conversion factor for GPa to eV/A^3\n return e_density\n\n @classmethod\n def from_diff_fit(cls, strains, stresses, eq_stress=None, order=2, tol=1e-10):\n \"\"\"\n\n Args:\n strains ():\n stresses ():\n eq_stress ():\n order ():\n tol ():\n\n Returns:\n\n \"\"\"\n return cls(diff_fit(strains, stresses, eq_stress, order, tol)[order - 2])\n\n\ndef raise_error_if_unphysical(f):\n \"\"\"\n Wrapper for functions or properties that should raise an error\n if tensor is unphysical.\n \"\"\"\n\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Args:\n self ():\n *args ():\n **kwargs ():\n\n Returns:\n\n \"\"\"\n if self.k_vrh < 0 or self.g_vrh < 0:\n raise ValueError(\"Bulk or shear modulus is negative, property cannot be determined\")\n return f(self, *args, **kwargs)\n\n return wrapper\n\n\nclass ElasticTensor(NthOrderElasticTensor):\n \"\"\"\n This class extends Tensor to describe the 3x3x3x3\n second-order elastic tensor, 
C_{ijkl}, with various\n methods for estimating other properties derived from\n the second order elastic tensor\n \"\"\"\n\n def __new__(cls, input_array, tol=1e-4):\n \"\"\"\n Create an ElasticTensor object. The constructor throws an error if\n the shape of the input_matrix argument is not 3x3x3x3, i. e. in true\n tensor notation. Issues a warning if the input_matrix argument does\n not satisfy standard symmetries. Note that the constructor uses\n __new__ rather than __init__ according to the standard method of\n subclassing numpy ndarrays.\n\n Args:\n input_array (3x3x3x3 array-like): the 3x3x3x3 array-like\n representing the elastic tensor\n\n tol (float): tolerance for initial symmetry test of tensor\n \"\"\"\n\n obj = super().__new__(cls, input_array, check_rank=4, tol=tol)\n return obj.view(cls)\n\n @property\n def compliance_tensor(self):\n \"\"\"\n returns the Voigt-notation compliance tensor,\n which is the matrix inverse of the\n Voigt-notation elastic tensor\n \"\"\"\n s_voigt = np.linalg.inv(self.voigt)\n return ComplianceTensor.from_voigt(s_voigt)\n\n @property\n def k_voigt(self):\n \"\"\"\n returns the K_v bulk modulus\n \"\"\"\n return self.voigt[:3, :3].mean()\n\n @property\n def g_voigt(self):\n \"\"\"\n returns the G_v shear modulus\n \"\"\"\n return (\n 2.0 * self.voigt[:3, :3].trace() - np.triu(self.voigt[:3, :3]).sum() + 3 * self.voigt[3:, 3:].trace()\n ) / 15.0\n\n @property\n def k_reuss(self):\n \"\"\"\n returns the K_r bulk modulus\n \"\"\"\n return 1.0 / self.compliance_tensor.voigt[:3, :3].sum()\n\n @property\n def g_reuss(self):\n \"\"\"\n returns the G_r shear modulus\n \"\"\"\n return 15.0 / (\n 8.0 * self.compliance_tensor.voigt[:3, :3].trace()\n - 4.0 * np.triu(self.compliance_tensor.voigt[:3, :3]).sum()\n + 3.0 * self.compliance_tensor.voigt[3:, 3:].trace()\n )\n\n @property\n def k_vrh(self):\n \"\"\"\n returns the K_vrh (Voigt-Reuss-Hill) average bulk modulus\n \"\"\"\n return 0.5 * (self.k_voigt + self.k_reuss)\n\n @property\n def g_vrh(self):\n \"\"\"\n returns the G_vrh (Voigt-Reuss-Hill) average shear modulus\n \"\"\"\n return 0.5 * (self.g_voigt + self.g_reuss)\n\n @property\n def y_mod(self):\n \"\"\"\n Calculates Young's modulus (in SI units) using the\n Voigt-Reuss-Hill averages of bulk and shear moduli\n \"\"\"\n return 9.0e9 * self.k_vrh * self.g_vrh / (3.0 * self.k_vrh + self.g_vrh)\n\n def directional_poisson_ratio(self, n, m, tol=1e-8):\n \"\"\"\n Calculates the poisson ratio for a specific direction\n relative to a second, orthogonal direction\n\n Args:\n n (3-d vector): principal direction\n m (3-d vector): secondary direction orthogonal to n\n tol (float): tolerance for testing of orthogonality\n \"\"\"\n n, m = get_uvec(n), get_uvec(m)\n if not np.abs(np.dot(n, m)) < tol:\n raise ValueError(\"n and m must be orthogonal\")\n v = self.compliance_tensor.einsum_sequence([n] * 2 + [m] * 2)\n v *= -1 / self.compliance_tensor.einsum_sequence([n] * 4)\n return v\n\n def directional_elastic_mod(self, n):\n \"\"\"\n Calculates directional elastic modulus for a specific vector\n \"\"\"\n n = get_uvec(n)\n return self.einsum_sequence([n] * 4)\n\n @raise_error_if_unphysical\n def trans_v(self, structure):\n \"\"\"\n Calculates transverse sound velocity (in SI units) using the\n Voigt-Reuss-Hill average bulk modulus\n\n Args:\n structure: pymatgen structure object\n\n Returns: transverse sound velocity (in SI units)\n\n \"\"\"\n nsites = structure.num_sites\n volume = structure.volume\n natoms = structure.composition.num_atoms\n weight = 
float(structure.composition.weight)\n mass_density = 1.6605e3 * nsites * weight / (natoms * volume)\n if self.g_vrh < 0:\n raise ValueError(\"k_vrh or g_vrh is negative, sound velocity is undefined\")\n return (1e9 * self.g_vrh / mass_density) ** 0.5\n\n @raise_error_if_unphysical\n def long_v(self, structure):\n \"\"\"\n Calculates longitudinal sound velocity (in SI units)\n using the Voigt-Reuss-Hill average bulk modulus\n\n Args:\n structure: pymatgen structure object\n\n Returns: longitudinal sound velocity (in SI units)\n\n \"\"\"\n nsites = structure.num_sites\n volume = structure.volume\n natoms = structure.composition.num_atoms\n weight = float(structure.composition.weight)\n mass_density = 1.6605e3 * nsites * weight / (natoms * volume)\n if self.g_vrh < 0:\n raise ValueError(\"k_vrh or g_vrh is negative, sound velocity is undefined\")\n return (1e9 * (self.k_vrh + 4.0 / 3.0 * self.g_vrh) / mass_density) ** 0.5\n\n @raise_error_if_unphysical\n def snyder_ac(self, structure):\n \"\"\"\n Calculates Snyder's acoustic sound velocity (in SI units)\n\n Args:\n structure: pymatgen structure object\n\n Returns: Snyder's acoustic sound velocity (in SI units)\n\n \"\"\"\n nsites = structure.num_sites\n volume = structure.volume\n natoms = structure.composition.num_atoms\n num_density = 1e30 * nsites / volume\n tot_mass = sum(e.atomic_mass for e in structure.species)\n avg_mass = 1.6605e-27 * tot_mass / natoms\n return (\n 0.38483\n * avg_mass\n * ((self.long_v(structure) + 2.0 * self.trans_v(structure)) / 3.0) ** 3.0\n / (300.0 * num_density ** (-2.0 / 3.0) * nsites ** (1.0 / 3.0))\n )\n\n @raise_error_if_unphysical\n def snyder_opt(self, structure):\n \"\"\"\n Calculates Snyder's optical sound velocity (in SI units)\n\n Args:\n structure: pymatgen structure object\n\n Returns: Snyder's optical sound velocity (in SI units)\n\n \"\"\"\n nsites = structure.num_sites\n volume = structure.volume\n num_density = 1e30 * nsites / volume\n return (\n 1.66914e-23\n * (self.long_v(structure) + 2.0 * self.trans_v(structure))\n / 3.0\n / num_density ** (-2.0 / 3.0)\n * (1 - nsites ** (-1.0 / 3.0))\n )\n\n @raise_error_if_unphysical\n def snyder_total(self, structure):\n \"\"\"\n Calculates Snyder's total sound velocity (in SI units)\n\n Args:\n structure: pymatgen structure object\n\n Returns: Snyder's total sound velocity (in SI units)\n\n \"\"\"\n return self.snyder_ac(structure) + self.snyder_opt(structure)\n\n @raise_error_if_unphysical\n def clarke_thermalcond(self, structure):\n \"\"\"\n Calculates Clarke's thermal conductivity (in SI units)\n\n Args:\n structure: pymatgen structure object\n\n Returns: Clarke's thermal conductivity (in SI units)\n\n \"\"\"\n nsites = structure.num_sites\n volume = structure.volume\n tot_mass = sum(e.atomic_mass for e in structure.species)\n natoms = structure.composition.num_atoms\n weight = float(structure.composition.weight)\n avg_mass = 1.6605e-27 * tot_mass / natoms\n mass_density = 1.6605e3 * nsites * weight / (natoms * volume)\n return 0.87 * 1.3806e-23 * avg_mass ** (-2.0 / 3.0) * mass_density ** (1.0 / 6.0) * self.y_mod**0.5\n\n @raise_error_if_unphysical\n def cahill_thermalcond(self, structure):\n \"\"\"\n Calculates Cahill's thermal conductivity (in SI units)\n\n Args:\n structure: pymatgen structure object\n\n Returns: Cahill's thermal conductivity (in SI units)\n\n \"\"\"\n nsites = structure.num_sites\n volume = structure.volume\n num_density = 1e30 * nsites / volume\n return 1.3806e-23 / 2.48 * num_density ** (2.0 / 3.0) * (self.long_v(structure) + 
2 * self.trans_v(structure))\n\n @raise_error_if_unphysical\n def debye_temperature(self, structure):\n \"\"\"\n Estimates the debye temperature from longitudinal and\n transverse sound velocities\n\n Args:\n structure: pymatgen structure object\n\n Returns: debye temperature (in SI units)\n\n \"\"\"\n v0 = structure.volume * 1e-30 / structure.num_sites\n vl, vt = self.long_v(structure), self.trans_v(structure)\n vm = 3 ** (1.0 / 3.0) * (1 / vl**3 + 2 / vt**3) ** (-1.0 / 3.0)\n td = 1.05457e-34 / 1.38065e-23 * vm * (6 * np.pi**2 / v0) ** (1.0 / 3.0)\n return td\n\n @property\n def universal_anisotropy(self):\n \"\"\"\n returns the universal anisotropy value\n \"\"\"\n return 5.0 * self.g_voigt / self.g_reuss + self.k_voigt / self.k_reuss - 6.0\n\n @property\n def homogeneous_poisson(self):\n \"\"\"\n returns the homogeneous poisson ratio\n \"\"\"\n return (1.0 - 2.0 / 3.0 * self.g_vrh / self.k_vrh) / (2.0 + 2.0 / 3.0 * self.g_vrh / self.k_vrh)\n\n def green_kristoffel(self, u):\n \"\"\"\n Returns the Green-Kristoffel tensor for a second-order tensor\n \"\"\"\n return self.einsum_sequence([u, u], \"ijkl,i,l\")\n\n @property\n def property_dict(self):\n \"\"\"\n returns a dictionary of properties derived from the elastic tensor\n \"\"\"\n props = [\n \"k_voigt\",\n \"k_reuss\",\n \"k_vrh\",\n \"g_voigt\",\n \"g_reuss\",\n \"g_vrh\",\n \"universal_anisotropy\",\n \"homogeneous_poisson\",\n \"y_mod\",\n ]\n return {prop: getattr(self, prop) for prop in props}\n\n def get_structure_property_dict(self, structure, include_base_props=True, ignore_errors=False):\n \"\"\"\n returns a dictionary of properties derived from the elastic tensor\n and an associated structure\n\n Args:\n structure (Structure): structure object for which to calculate\n associated properties\n include_base_props (bool): whether to include base properties,\n like k_vrh, etc.\n ignore_errors (bool): if set to true, will set problem properties\n that depend on a physical tensor to None, defaults to False\n \"\"\"\n s_props = [\n \"trans_v\",\n \"long_v\",\n \"snyder_ac\",\n \"snyder_opt\",\n \"snyder_total\",\n \"clarke_thermalcond\",\n \"cahill_thermalcond\",\n \"debye_temperature\",\n ]\n if ignore_errors and (self.k_vrh < 0 or self.g_vrh < 0):\n sp_dict = {prop: None for prop in s_props}\n else:\n sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}\n sp_dict[\"structure\"] = structure\n if include_base_props:\n sp_dict.update(self.property_dict)\n return sp_dict\n\n @classmethod\n def from_pseudoinverse(cls, strains, stresses):\n \"\"\"\n Class method to fit an elastic tensor from stress/strain\n data. 
Method uses Moore-Penrose pseudoinverse to invert\n the s = C*e equation with elastic tensor, stress, and\n strain in voigt notation\n\n Args:\n stresses (Nx3x3 array-like): list or array of stresses\n strains (Nx3x3 array-like): list or array of strains\n \"\"\"\n # convert the stress/strain to Nx6 arrays of voigt-notation\n warnings.warn(\n \"Pseudoinverse fitting of Strain/Stress lists may yield \"\n \"questionable results from vasp data, use with caution.\"\n )\n stresses = np.array([Stress(stress).voigt for stress in stresses])\n with warnings.catch_warnings(record=True):\n strains = np.array([Strain(strain).voigt for strain in strains])\n\n voigt_fit = np.transpose(np.dot(np.linalg.pinv(strains), stresses))\n return cls.from_voigt(voigt_fit)\n\n @classmethod\n def from_independent_strains(cls, strains, stresses, eq_stress=None, vasp=False, tol=1e-10):\n \"\"\"\n Constructs the elastic tensor least-squares fit of independent strains\n Args:\n strains (list of Strains): list of strain objects to fit\n stresses (list of Stresses): list of stress objects to use in fit\n corresponding to the list of strains\n eq_stress (Stress): equilibrium stress to use in fitting\n vasp (boolean): flag for whether the stress tensor should be\n converted based on vasp units/convention for stress\n tol (float): tolerance for removing near-zero elements of the\n resulting tensor\n \"\"\"\n strain_states = [tuple(ss) for ss in np.eye(6)]\n ss_dict = get_strain_state_dict(strains, stresses, eq_stress=eq_stress)\n if not set(strain_states) <= set(ss_dict.keys()):\n raise ValueError(f\"Missing independent strain states: {set(strain_states) - set(ss_dict)}\")\n if len(set(ss_dict.keys()) - set(strain_states)) > 0:\n warnings.warn(\"Extra strain states in strain-stress pairs are neglected in independent strain fitting\")\n c_ij = np.zeros((6, 6))\n for i in range(6):\n istrains = ss_dict[strain_states[i]][\"strains\"]\n istresses = ss_dict[strain_states[i]][\"stresses\"]\n for j in range(6):\n c_ij[i, j] = np.polyfit(istrains[:, i], istresses[:, j], 1)[0]\n if vasp:\n c_ij *= -0.1 # Convert units/sign convention of vasp stress tensor\n c = cls.from_voigt(c_ij)\n c = c.zeroed(tol)\n return c\n\n\nclass ComplianceTensor(Tensor):\n \"\"\"\n This class represents the compliance tensor, and exists\n primarily to keep the voigt-conversion scheme consistent\n since the compliance tensor has a unique vscale\n \"\"\"\n\n def __new__(cls, s_array):\n \"\"\"\n Args:\n s_array ():\n \"\"\"\n vscale = np.ones((6, 6))\n vscale[3:] *= 2\n vscale[:, 3:] *= 2\n obj = super().__new__(cls, s_array, vscale=vscale)\n return obj.view(cls)\n\n\nclass ElasticTensorExpansion(TensorCollection):\n \"\"\"\n This class is a sequence of elastic tensors corresponding\n to an elastic tensor expansion, which can be used to\n calculate stress and energy density and inherits all\n of the list-based properties of TensorCollection\n (e. g. 
symmetrization, voigt conversion, etc.)\n \"\"\"\n\n def __init__(self, c_list):\n \"\"\"\n Initialization method for ElasticTensorExpansion\n\n Args:\n c_list (list or tuple): sequence of Tensor inputs\n or tensors from which the elastic tensor\n expansion is constructed.\n \"\"\"\n c_list = [NthOrderElasticTensor(c, check_rank=4 + i * 2) for i, c in enumerate(c_list)]\n super().__init__(c_list)\n\n @classmethod\n def from_diff_fit(cls, strains, stresses, eq_stress=None, tol=1e-10, order=3):\n \"\"\"\n Generates an elastic tensor expansion via the fitting function\n defined below in diff_fit\n \"\"\"\n c_list = diff_fit(strains, stresses, eq_stress, order, tol)\n return cls(c_list)\n\n @property\n def order(self):\n \"\"\"\n Order of the elastic tensor expansion, i. e. the order of the\n highest included set of elastic constants\n \"\"\"\n return self[-1].order\n\n def calculate_stress(self, strain):\n \"\"\"\n Calculate's a given elastic tensor's contribution to the\n stress using Einstein summation\n \"\"\"\n return sum(c.calculate_stress(strain) for c in self)\n\n def energy_density(self, strain, convert_GPa_to_eV=True):\n \"\"\"\n Calculates the elastic energy density due to a strain\n \"\"\"\n return sum(c.energy_density(strain, convert_GPa_to_eV) for c in self)\n\n def get_ggt(self, n, u):\n \"\"\"\n Gets the Generalized Gruneisen tensor for a given\n third-order elastic tensor expansion.\n\n Args:\n n (3x1 array-like): normal mode direction\n u (3x1 array-like): polarization direction\n \"\"\"\n gk = self[0].einsum_sequence([n, u, n, u])\n result = -(\n 2 * gk * np.outer(u, u) + self[0].einsum_sequence([n, n]) + self[1].einsum_sequence([n, u, n, u])\n ) / (2 * gk)\n return result\n\n def get_tgt(self, temperature=None, structure=None, quad=None):\n \"\"\"\n Gets the thermodynamic Gruneisen tensor (TGT) by via an\n integration of the GGT weighted by the directional heat\n capacity.\n\n See refs:\n R. N. Thurston and K. Brugger, Phys. Rev. 113, A1604 (1964).\n K. Brugger Phys. Rev. 
137, A1826 (1965).\n\n Args:\n temperature (float): Temperature in kelvin, if not specified\n will return non-cv-normalized value\n structure (float): Structure to be used in directional heat\n capacity determination, only necessary if temperature\n is specified\n quad (dict): quadrature for integration, should be\n dictionary with \"points\" and \"weights\" keys defaults\n to quadpy.sphere.Lebedev(19) as read from file\n \"\"\"\n if temperature and not structure:\n raise ValueError(\"If using temperature input, you must also include structure\")\n\n quad = quad if quad else DEFAULT_QUAD\n points = quad[\"points\"]\n weights = quad[\"weights\"]\n num, denom, c = np.zeros((3, 3)), 0, 1\n for p, w in zip(points, weights):\n gk = ElasticTensor(self[0]).green_kristoffel(p)\n rho_wsquareds, us = np.linalg.eigh(gk)\n us = [u / np.linalg.norm(u) for u in np.transpose(us)]\n for u in us:\n # TODO: this should be benchmarked\n if temperature:\n c = self.get_heat_capacity(temperature, structure, p, u)\n num += c * self.get_ggt(p, u) * w\n denom += c * w\n return SquareTensor(num / denom)\n\n def get_gruneisen_parameter(self, temperature=None, structure=None, quad=None):\n \"\"\"\n Gets the single average gruneisen parameter from the TGT.\n\n Args:\n temperature (float): Temperature in kelvin, if not specified\n will return non-cv-normalized value\n structure (float): Structure to be used in directional heat\n capacity determination, only necessary if temperature\n is specified\n quad (dict): quadrature for integration, should be\n dictionary with \"points\" and \"weights\" keys defaults\n to quadpy.sphere.Lebedev(19) as read from file\n \"\"\"\n return np.trace(self.get_tgt(temperature, structure, quad)) / 3.0\n\n def get_heat_capacity(self, temperature, structure, n, u, cutoff=1e2):\n \"\"\"\n Gets the directional heat capacity for a higher order tensor\n expansion as a function of direction and polarization.\n\n Args:\n temperature (float): Temperature in kelvin\n structure (float): Structure to be used in directional heat\n capacity determination\n n (3x1 array-like): direction for Cv determination\n u (3x1 array-like): polarization direction, note that\n no attempt for verification of eigenvectors is made\n cutoff (float): cutoff for scale of kt / (hbar * omega)\n if lower than this value, returns 0\n \"\"\"\n k = 1.38065e-23\n kt = k * temperature\n hbar_w = 1.05457e-34 * self.omega(structure, n, u)\n if hbar_w > kt * cutoff:\n return 0.0\n c = k * (hbar_w / kt) ** 2\n c *= np.exp(hbar_w / kt) / (np.exp(hbar_w / kt) - 1) ** 2\n return c * 6.022e23\n\n def omega(self, structure, n, u):\n \"\"\"\n Finds directional frequency contribution to the heat\n capacity from direction and polarization\n\n Args:\n structure (Structure): Structure to be used in directional heat\n capacity determination\n n (3x1 array-like): direction for Cv determination\n u (3x1 array-like): polarization direction, note that\n no attempt for verification of eigenvectors is made\n \"\"\"\n l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)\n l0 *= 1e-10 # in A\n weight = float(structure.composition.weight) * 1.66054e-27 # in kg\n vol = structure.volume * 1e-30 # in m^3\n vel = (1e9 * self[0].einsum_sequence([n, u, n, u]) / (weight / vol)) ** 0.5\n return vel / l0\n\n def thermal_expansion_coeff(self, structure, temperature, mode=\"debye\"):\n \"\"\"\n Gets thermal expansion coefficient from third-order constants.\n\n Args:\n temperature (float): Temperature in kelvin, if not specified\n will return non-cv-normalized 
value\n structure (Structure): Structure to be used in directional heat\n capacity determination, only necessary if temperature\n is specified\n mode (string): mode for finding average heat-capacity,\n current supported modes are 'debye' and 'dulong-petit'\n \"\"\"\n soec = ElasticTensor(self[0])\n v0 = structure.volume * 1e-30 / structure.num_sites\n if mode == \"debye\":\n td = soec.debye_temperature(structure)\n t_ratio = temperature / td\n\n def integrand(x):\n return (x**4 * np.exp(x)) / (np.exp(x) - 1) ** 2\n\n cv = 9 * 8.314 * t_ratio**3 * quad(integrand, 0, t_ratio**-1)[0]\n elif mode == \"dulong-petit\":\n cv = 3 * 8.314\n else:\n raise ValueError(\"Mode must be debye or dulong-petit\")\n tgt = self.get_tgt(temperature, structure)\n alpha = np.einsum(\"ijkl,ij\", soec.compliance_tensor, tgt)\n alpha *= cv / (1e9 * v0 * 6.022e23)\n return SquareTensor(alpha)\n\n def get_compliance_expansion(self):\n \"\"\"\n Gets a compliance tensor expansion from the elastic\n tensor expansion.\n \"\"\"\n # TODO: this might have a general form\n if not self.order <= 4:\n raise ValueError(\"Compliance tensor expansion only supported for fourth-order and lower\")\n ce_exp = [ElasticTensor(self[0]).compliance_tensor]\n einstring = \"ijpq,pqrsuv,rskl,uvmn->ijklmn\"\n ce_exp.append(np.einsum(einstring, -ce_exp[-1], self[1], ce_exp[-1], ce_exp[-1]))\n if self.order == 4:\n # Four terms in the Fourth-Order compliance tensor\n # pylint: disable=E1130\n einstring_1 = \"pqab,cdij,efkl,ghmn,abcdefgh\"\n tensors_1 = [ce_exp[0]] * 4 + [self[-1]]\n temp = -np.einsum(einstring_1, *tensors_1)\n einstring_2 = \"pqab,abcdef,cdijmn,efkl\"\n einstring_3 = \"pqab,abcdef,efklmn,cdij\"\n einstring_4 = \"pqab,abcdef,cdijkl,efmn\"\n for es in [einstring_2, einstring_3, einstring_4]:\n temp -= np.einsum(es, ce_exp[0], self[-2], ce_exp[1], ce_exp[0])\n ce_exp.append(temp)\n return TensorCollection(ce_exp)\n\n def get_strain_from_stress(self, stress):\n \"\"\"\n Gets the strain from a stress state according\n to the compliance expansion corresponding to the\n tensor expansion.\n \"\"\"\n compl_exp = self.get_compliance_expansion()\n strain = 0\n for n, compl in enumerate(compl_exp):\n strain += compl.einsum_sequence([stress] * (n + 1)) / factorial(n + 1)\n return strain\n\n def get_effective_ecs(self, strain, order=2):\n \"\"\"\n Returns the effective elastic constants\n from the elastic tensor expansion.\n\n Args:\n strain (Strain or 3x3 array-like): strain condition\n under which to calculate the effective constants\n order (int): order of the ecs to be returned\n \"\"\"\n ec_sum = 0\n for n, ecs in enumerate(self[order - 2 :]):\n ec_sum += ecs.einsum_sequence([strain] * n) / factorial(n)\n return ec_sum\n\n def get_wallace_tensor(self, tau):\n \"\"\"\n Gets the Wallace Tensor for determining yield strength\n criteria.\n\n Args:\n tau (3x3 array-like): stress at which to evaluate\n the wallace tensor\n \"\"\"\n b = 0.5 * (\n np.einsum(\"ml,kn->klmn\", tau, np.eye(3))\n + np.einsum(\"km,ln->klmn\", tau, np.eye(3))\n + np.einsum(\"nl,km->klmn\", tau, np.eye(3))\n + np.einsum(\"kn,lm->klmn\", tau, np.eye(3))\n + -2 * np.einsum(\"kl,mn->klmn\", tau, np.eye(3))\n )\n strain = self.get_strain_from_stress(tau)\n b += self.get_effective_ecs(strain)\n return b\n\n def get_symmetric_wallace_tensor(self, tau):\n \"\"\"\n Gets the symmetrized wallace tensor for determining\n yield strength criteria.\n\n Args:\n tau (3x3 array-like): stress at which to evaluate\n the wallace tensor.\n \"\"\"\n wallace = self.get_wallace_tensor(tau)\n 
return Tensor(0.5 * (wallace + np.transpose(wallace, [2, 3, 0, 1])))\n\n def get_stability_criteria(self, s, n):\n \"\"\"\n Gets the stability criteria from the symmetric\n Wallace tensor from an input vector and stress\n value.\n\n Args:\n s (float): Stress value at which to evaluate\n the stability criteria\n n (3x1 array-like): direction of the applied\n stress\n \"\"\"\n n = get_uvec(n)\n stress = s * np.outer(n, n)\n sym_wallace = self.get_symmetric_wallace_tensor(stress)\n return np.linalg.det(sym_wallace.voigt)\n\n def get_yield_stress(self, n):\n \"\"\"\n Gets the yield stress for a given direction\n\n Args:\n n (3x1 array-like): direction for which to find the\n yield stress\n \"\"\"\n # TODO: root finding could be more robust\n comp = root(self.get_stability_criteria, -1, args=n)\n tens = root(self.get_stability_criteria, 1, args=n)\n return (comp.x, tens.x)\n\n\n# TODO: abstract this for other tensor fitting procedures\ndef diff_fit(strains, stresses, eq_stress=None, order=2, tol=1e-10):\n \"\"\"\n nth order elastic constant fitting function based on\n central-difference derivatives with respect to distinct\n strain states. The algorithm is summarized as follows:\n\n 1. Identify distinct strain states as sets of indices\n for which nonzero strain values exist, typically\n [(0), (1), (2), (3), (4), (5), (0, 1) etc.]\n 2. For each strain state, find and sort strains and\n stresses by strain value.\n 3. Find first, second .. nth derivatives of each stress\n with respect to scalar variable corresponding to\n the smallest perturbation in the strain.\n 4. Use the pseudoinverse of a matrix-vector expression\n corresponding to the parameterized stress-strain\n relationship and multiply that matrix by the respective\n calculated first or second derivatives from the\n previous step.\n 5. Place the calculated nth-order elastic\n constants appropriately.\n\n Args:\n order (int): order of the elastic tensor set to return\n strains (nx3x3 array-like): Array of 3x3 strains\n to use in fitting of ECs\n stresses (nx3x3 array-like): Array of 3x3 stresses\n to use in fitting ECs. These should be PK2 stresses.\n eq_stress (3x3 array-like): stress corresponding to\n equilibrium strain (i. e. \"0\" strain state).\n If not specified, function will try to find\n the state in the list of provided stresses\n and strains. 
If not found, defaults to 0.\n tol (float): value for which strains below\n are ignored in identifying strain states.\n\n Returns:\n Set of tensors corresponding to nth order expansion of\n the stress/strain relation\n \"\"\"\n strain_state_dict = get_strain_state_dict(strains, stresses, eq_stress=eq_stress, tol=tol, add_eq=True, sort=True)\n\n # Collect derivative data\n c_list = []\n dei_dsi = np.zeros((order - 1, 6, len(strain_state_dict)))\n for n, (strain_state, data) in enumerate(strain_state_dict.items()):\n hvec = data[\"strains\"][:, strain_state.index(1)]\n for i in range(1, order):\n coef = get_diff_coeff(hvec, i)\n dei_dsi[i - 1, :, n] = np.dot(coef, data[\"stresses\"])\n\n m, absent = generate_pseudo(list(strain_state_dict.keys()), order)\n for i in range(1, order):\n cvec, carr = get_symbol_list(i + 1)\n svec = np.ravel(dei_dsi[i - 1].T)\n cmap = dict(zip(cvec, np.dot(m[i - 1], svec)))\n c_list.append(v_subs(carr, cmap))\n return [Tensor.from_voigt(c) for c in c_list]\n\n\ndef find_eq_stress(strains, stresses, tol=1e-10):\n \"\"\"\n Finds stress corresponding to zero strain state in stress-strain list\n\n Args:\n strains (Nx3x3 array-like): array corresponding to strains\n stresses (Nx3x3 array-like): array corresponding to stresses\n tol (float): tolerance to find zero strain state\n \"\"\"\n stress_array = np.array(stresses)\n strain_array = np.array(strains)\n eq_stress = stress_array[np.all(abs(strain_array) < tol, axis=(1, 2))]\n\n if eq_stress.size != 0:\n all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all()\n if len(eq_stress) > 1 and not all_same:\n raise ValueError(\n \"Multiple stresses found for equilibrium strain\"\n \" state, please specify equilibrium stress or \"\n \" remove extraneous stresses.\"\n )\n eq_stress = eq_stress[0]\n else:\n warnings.warn(\"No eq state found, returning zero voigt stress\")\n eq_stress = Stress(np.zeros((3, 3)))\n return eq_stress\n\n\ndef get_strain_state_dict(strains, stresses, eq_stress=None, tol=1e-10, add_eq=True, sort=True):\n \"\"\"\n Creates a dictionary of voigt-notation stress-strain sets\n keyed by \"strain state\", i. e. a tuple corresponding to\n the non-zero entries in ratios to the lowest nonzero value,\n e.g. 
[0, 0.1, 0, 0.2, 0, 0] -> (0,1,0,2,0,0)\n This allows strains to be collected in stencils as to\n evaluate parameterized finite difference derivatives\n\n Args:\n strains (Nx3x3 array-like): strain matrices\n stresses (Nx3x3 array-like): stress matrices\n eq_stress (Nx3x3 array-like): equilibrium stress\n tol (float): tolerance for sorting strain states\n add_eq (bool): flag for whether to add eq_strain\n to stress-strain sets for each strain state\n sort (bool): flag for whether to sort strain states\n\n Returns:\n dict: strain state keys and dictionaries with stress-strain data corresponding to strain state\n \"\"\"\n # Recast stress/strains\n vstrains = np.array([Strain(s).zeroed(tol).voigt for s in strains]) # pylint: disable=E1101\n vstresses = np.array([Stress(s).zeroed(tol).voigt for s in stresses]) # pylint: disable=E1101\n # Collect independent strain states:\n independent = {tuple(np.nonzero(vstrain)[0].tolist()) for vstrain in vstrains}\n strain_state_dict = {}\n if add_eq:\n if eq_stress is not None:\n veq_stress = Stress(eq_stress).voigt\n else:\n veq_stress = find_eq_stress(strains, stresses).voigt\n\n for n, ind in enumerate(independent):\n # match strains with templates\n template = np.zeros(6, dtype=bool)\n np.put(template, ind, True)\n template = np.tile(template, [vstresses.shape[0], 1])\n mode = (template == (np.abs(vstrains) > 1e-10)).all(axis=1)\n mstresses = vstresses[mode]\n mstrains = vstrains[mode]\n # Get \"strain state\", i.e. ratio of each value to minimum strain\n min_nonzero_ind = np.argmin(np.abs(np.take(mstrains[-1], ind)))\n min_nonzero_val = np.take(mstrains[-1], ind)[min_nonzero_ind]\n strain_state = mstrains[-1] / min_nonzero_val\n strain_state = tuple(strain_state)\n\n if add_eq:\n # add zero strain state\n mstrains = np.vstack([mstrains, np.zeros(6)])\n mstresses = np.vstack([mstresses, veq_stress])\n # sort strains/stresses by strain values\n if sort:\n mstresses = mstresses[mstrains[:, ind[0]].argsort()]\n mstrains = mstrains[mstrains[:, ind[0]].argsort()]\n strain_state_dict[strain_state] = {\"strains\": mstrains, \"stresses\": mstresses}\n return strain_state_dict\n\n\ndef generate_pseudo(strain_states, order=3):\n \"\"\"\n Generates the pseudoinverse for a given set of strains.\n\n Args:\n strain_states (6xN array like): a list of voigt-notation\n \"strain-states\", i. e. perturbed indices of the strain\n as a function of the smallest strain e. g. 
(0, 1, 0, 0, 1, 0)\n order (int): order of pseudoinverse to calculate\n\n Returns:\n mis: pseudo inverses for each order tensor, these can\n be multiplied by the central difference derivative\n of the stress with respect to the strain state\n absent_syms: symbols of the tensor absent from the PI\n expression\n \"\"\"\n s = sp.Symbol(\"s\")\n nstates = len(strain_states)\n ni = np.array(strain_states) * s\n mis, absent_syms = [], []\n for degree in range(2, order + 1):\n cvec, carr = get_symbol_list(degree)\n sarr = np.zeros((nstates, 6), dtype=object)\n for n, strain_v in enumerate(ni):\n # Get expressions\n exps = carr.copy()\n for i in range(degree - 1):\n exps = np.dot(exps, strain_v)\n exps /= np.math.factorial(degree - 1)\n sarr[n] = [sp.diff(exp, s, degree - 1) for exp in exps]\n svec = sarr.ravel()\n present_syms = set.union(*(exp.atoms(sp.Symbol) for exp in svec))\n absent_syms += [set(cvec) - present_syms]\n m = np.zeros((6 * nstates, len(cvec)))\n for n, c in enumerate(cvec):\n m[:, n] = v_diff(svec, c)\n mis.append(np.linalg.pinv(m))\n return mis, absent_syms\n\n\ndef get_symbol_list(rank, dim=6):\n \"\"\"\n Returns a symbolic representation of the voigt-notation\n tensor that places identical symbols for entries related\n by index transposition, i. e. C_1121 = C_1211 etc.\n\n Args:\n dim (int): dimension of matrix/tensor, e. g. 6 for\n voigt notation and 3 for standard\n rank (int): rank of tensor, e. g. 3 for third-order ECs\n\n Returns:\n c_vec (array): array representing distinct indices\n c_arr (array): array representing tensor with equivalent\n indices assigned as above\n \"\"\"\n indices = list(itertools.combinations_with_replacement(range(dim), r=rank))\n c_vec = np.zeros(len(indices), dtype=object)\n c_arr = np.zeros([dim] * rank, dtype=object)\n for n, idx in enumerate(indices):\n c_vec[n] = sp.Symbol(\"c_\" + \"\".join([str(i) for i in idx]))\n for perm in itertools.permutations(idx):\n c_arr[perm] = c_vec[n]\n return c_vec, c_arr\n\n\ndef subs(entry, cmap):\n \"\"\"\n Sympy substitution function, primarily for the purposes\n of numpy vectorization\n\n Args:\n entry (symbol or exp): sympy expr to undergo subs\n cmap (dict): map for symbols to values to use in subs\n\n Returns:\n Evaluated expression with substitution\n \"\"\"\n return entry.subs(cmap)\n\n\n# Vectorized functions\nv_subs = np.vectorize(subs)\nv_diff = np.vectorize(sp.diff)\n\n\ndef get_diff_coeff(hvec, n=1):\n \"\"\"\n Helper function to find difference coefficients of an\n derivative on an arbitrary mesh.\n\n Args:\n hvec (1D array-like): sampling stencil\n n (int): degree of derivative to find\n \"\"\"\n hvec = np.array(hvec, dtype=np.float_)\n acc = len(hvec)\n exp = np.column_stack([np.arange(acc)] * acc)\n a = np.vstack([hvec] * acc) ** exp\n b = np.zeros(acc)\n b[n] = factorial(n)\n return np.linalg.solve(a, b)\n",
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nThis module provides the Stress class used to create, manipulate, and\ncalculate relevant properties of the stress tensor.\n\"\"\"\n\nimport math\n\nimport numpy as np\n\nfrom pymatgen.core.tensors import SquareTensor\n\n__author__ = \"Joseph Montoya\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__credits__ = \"Maarten de Jong, Mark Asta, Anubhav Jain\"\n__version__ = \"1.0\"\n__maintainer__ = \"Joseph Montoya\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n__date__ = \"July 24, 2018\"\n\n\nclass Stress(SquareTensor):\n \"\"\"\n This class extends SquareTensor as a representation of the\n stress\n \"\"\"\n\n symbol = \"s\"\n\n def __new__(cls, stress_matrix):\n \"\"\"\n Create a Stress object. Note that the constructor uses __new__\n rather than __init__ according to the standard method of\n subclassing numpy ndarrays.\n\n Args:\n stress_matrix (3x3 array-like): the 3x3 array-like\n representing the stress\n \"\"\"\n obj = super().__new__(cls, stress_matrix)\n return obj.view(cls)\n\n @property\n def dev_principal_invariants(self):\n \"\"\"\n returns the principal invariants of the deviatoric stress tensor,\n which is calculated by finding the coefficients of the characteristic\n polynomial of the stress tensor minus the identity times the mean\n stress\n \"\"\"\n return self.deviator_stress.principal_invariants * np.array([1, -1, 1])\n\n @property\n def von_mises(self):\n \"\"\"\n returns the von mises stress\n \"\"\"\n if not self.is_symmetric():\n raise ValueError(\n \"The stress tensor is not symmetric, Von Mises stress is based on a symmetric stress tensor.\"\n )\n return math.sqrt(3 * self.dev_principal_invariants[1])\n\n @property\n def mean_stress(self):\n \"\"\"\n returns the mean stress\n \"\"\"\n return 1.0 / 3.0 * self.trace()\n\n @property\n def deviator_stress(self):\n \"\"\"\n returns the deviatoric component of the stress\n \"\"\"\n if not self.is_symmetric:\n raise ValueError(\"The stress tensor is not symmetric, so deviator stress will not be either\")\n return self - self.mean_stress * np.eye(3)\n\n def piola_kirchoff_1(self, def_grad):\n \"\"\"\n calculates the first Piola-Kirchoff stress\n\n Args:\n def_grad (3x3 array-like): deformation gradient tensor\n \"\"\"\n if not self.is_symmetric:\n raise ValueError(\n \"The stress tensor is not symmetric, \\\n PK stress is based on a symmetric stress tensor.\"\n )\n def_grad = SquareTensor(def_grad)\n return def_grad.det * np.dot(self, def_grad.inv.trans)\n\n def piola_kirchoff_2(self, def_grad):\n \"\"\"\n calculates the second Piola-Kirchoff stress\n\n Args:\n def_grad (3x3 array-like): rate of deformation tensor\n \"\"\"\n\n def_grad = SquareTensor(def_grad)\n if not self.is_symmetric:\n raise ValueError(\n \"The stress tensor is not symmetric, \\\n PK stress is based on a symmetric stress tensor.\"\n )\n return def_grad.det * np.dot(np.dot(def_grad.inv, self), def_grad.inv.trans)\n",
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nSome reimplementation of Henkelman's Transition State Analysis utilities,\nwhich are originally in Perl. Additional features beyond those offered by\nHenkelman's utilities will be added.\n\nThis allows the usage and customization in Python.\n\"\"\"\n\nimport glob\nimport os\n\nimport numpy as np\nfrom monty.json import MSONable, jsanitize\nfrom scipy.interpolate import CubicSpline\n\nfrom pymatgen.analysis.structure_matcher import StructureMatcher\nfrom pymatgen.io.vasp import Outcar, Poscar\nfrom pymatgen.util.plotting import pretty_plot\n\n\nclass NEBAnalysis(MSONable):\n \"\"\"\n An NEBAnalysis class.\n \"\"\"\n\n def __init__(self, r, energies, forces, structures, spline_options=None):\n \"\"\"\n Initializes an NEBAnalysis from the cumulative root mean squared distances\n between structures, the energies, the forces, the structures and the\n interpolation_order for the analysis.\n\n Args:\n r: Root mean square distances between structures\n energies: Energies of each structure along reaction coordinate\n forces: Tangent forces along the reaction coordinate.\n structures ([Structure]): List of Structures along reaction\n coordinate.\n spline_options (dict): Options for cubic spline. For example,\n {\"saddle_point\": \"zero_slope\"} forces the slope at the saddle to\n be zero.\n \"\"\"\n self.r = np.array(r)\n self.energies = np.array(energies)\n self.forces = np.array(forces)\n self.structures = structures\n self.spline_options = spline_options if spline_options is not None else {}\n\n # We do a piecewise interpolation between the points. Each spline (\n # cubic by default) is constrained by the boundary conditions of the\n # energies and the tangent force, i.e., the derivative of\n # the energy at each pair of points.\n\n self.setup_spline(spline_options=self.spline_options)\n\n def setup_spline(self, spline_options=None):\n \"\"\"\n Setup of the options for the spline interpolation\n\n Args:\n spline_options (dict): Options for cubic spline. For example,\n {\"saddle_point\": \"zero_slope\"} forces the slope at the saddle to\n be zero.\n \"\"\"\n self.spline_options = spline_options\n relative_energies = self.energies - self.energies[0]\n if self.spline_options.get(\"saddle_point\", \"\") == \"zero_slope\":\n imax = np.argmax(relative_energies)\n self.spline = CubicSpline(\n x=self.r[: imax + 1],\n y=relative_energies[: imax + 1],\n bc_type=((1, 0.0), (1, 0.0)),\n )\n cspline2 = CubicSpline(\n x=self.r[imax:],\n y=relative_energies[imax:],\n bc_type=((1, 0.0), (1, 0.0)),\n )\n self.spline.extend(c=cspline2.c, x=cspline2.x[1:])\n else:\n self.spline = CubicSpline(x=self.r, y=relative_energies, bc_type=((1, 0.0), (1, 0.0)))\n\n @classmethod\n def from_outcars(cls, outcars, structures, **kwargs):\n \"\"\"\n Initializes an NEBAnalysis from Outcar and Structure objects. Use\n the static constructors, e.g., :class:`from_dir` instead if you\n prefer to have these automatically generated from a directory of NEB\n calculations.\n\n Args:\n outcars ([Outcar]): List of Outcar objects. Note that these have\n to be ordered from start to end along reaction coordinates.\n structures ([Structure]): List of Structures along reaction\n coordinate. Must be same length as outcar.\n interpolation_order (int): Order of polynomial to use to\n interpolate between images. 
Same format as order parameter in\n scipy.interplotate.PiecewisePolynomial.\n \"\"\"\n if len(outcars) != len(structures):\n raise ValueError(\"# of Outcars must be same as # of Structures\")\n\n # Calculate cumulative root mean square distance between structures,\n # which serves as the reaction coordinate. Note that these are\n # calculated from the final relaxed structures as the coordinates may\n # have changed from the initial interpolation.\n r = [0]\n prev = structures[0]\n for st in structures[1:]:\n dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])\n r.append(np.sqrt(np.sum(dists**2)))\n prev = st\n r = np.cumsum(r)\n\n energies = []\n forces = []\n for i, o in enumerate(outcars):\n o.read_neb()\n energies.append(o.data[\"energy\"])\n if i in [0, len(outcars) - 1]:\n forces.append(0)\n else:\n forces.append(o.data[\"tangent_force\"])\n forces = np.array(forces)\n r = np.array(r)\n return cls(r=r, energies=energies, forces=forces, structures=structures, **kwargs)\n\n def get_extrema(self, normalize_rxn_coordinate=True):\n \"\"\"\n Returns the positions of the extrema along the MEP. Both local\n minimums and maximums are returned.\n\n Args:\n normalize_rxn_coordinate (bool): Whether to normalize the\n reaction coordinate to between 0 and 1. Defaults to True.\n\n Returns:\n (min_extrema, max_extrema), where the extrema are given as\n [(x1, y1), (x2, y2), ...].\n \"\"\"\n x = np.arange(0, np.max(self.r), 0.01)\n y = self.spline(x) * 1000\n\n scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]\n min_extrema = []\n max_extrema = []\n for i in range(1, len(x) - 1):\n if y[i] < y[i - 1] and y[i] < y[i + 1]:\n min_extrema.append((x[i] * scale, y[i]))\n elif y[i] > y[i - 1] and y[i] > y[i + 1]:\n max_extrema.append((x[i] * scale, y[i]))\n return min_extrema, max_extrema\n\n def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):\n \"\"\"\n Returns the NEB plot. Uses Henkelman's approach of spline fitting\n each section of the reaction path based on tangent force and energies.\n\n Args:\n normalize_rxn_coordinate (bool): Whether to normalize the\n reaction coordinate to between 0 and 1. Defaults to True.\n label_barrier (bool): Whether to label the maximum barrier.\n\n Returns:\n matplotlib.pyplot object.\n \"\"\"\n plt = pretty_plot(12, 8)\n scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]\n x = np.arange(0, np.max(self.r), 0.01)\n y = self.spline(x) * 1000\n relative_energies = self.energies - self.energies[0]\n plt.plot(\n self.r * scale,\n relative_energies * 1000,\n \"ro\",\n x * scale,\n y,\n \"k-\",\n linewidth=2,\n markersize=10,\n )\n plt.xlabel(\"Reaction coordinate\")\n plt.ylabel(\"Energy (meV)\")\n plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))\n if label_barrier:\n data = zip(x * scale, y)\n barrier = max(data, key=lambda d: d[1])\n plt.plot([0, barrier[0]], [barrier[1], barrier[1]], \"k--\")\n plt.annotate(\n f\"{np.max(y) - np.min(y):.0f} meV\",\n xy=(barrier[0] / 2, barrier[1] * 1.02),\n xytext=(barrier[0] / 2, barrier[1] * 1.02),\n horizontalalignment=\"center\",\n )\n plt.tight_layout()\n return plt\n\n @classmethod\n def from_dir(cls, root_dir, relaxation_dirs=None, **kwargs):\n \"\"\"\n Initializes a NEBAnalysis object from a directory of a NEB run.\n Note that OUTCARs must be present in all image directories. For the\n terminal OUTCARs from relaxation calculations, you can specify the\n locations using relaxation_dir. 
If these are not specified, the code\n will attempt to look for the OUTCARs in 00 and 0n directories,\n followed by subdirs \"start\", \"end\" or \"initial\", \"final\" in the\n root_dir. These are just some typical conventions used\n preferentially in Shyue Ping's MAVRL research group. For the\n non-terminal points, the CONTCAR is read to obtain structures. For\n terminal points, the POSCAR is used. The image directories are\n assumed to be the only directories that can be resolved to integers.\n E.g., \"00\", \"01\", \"02\", \"03\", \"04\", \"05\", \"06\". The minimum\n sub-directory structure that can be parsed is of the following form (\n a 5-image example is shown):\n\n 00:\n - POSCAR\n - OUTCAR\n 01, 02, 03, 04, 05:\n - CONTCAR\n - OUTCAR\n 06:\n - POSCAR\n - OUTCAR\n\n Args:\n root_dir (str): Path to the root directory of the NEB calculation.\n relaxation_dirs (tuple): This specifies the starting and ending\n relaxation directories from which the OUTCARs are read for the\n terminal points for the energies.\n\n Returns:\n NEBAnalysis object.\n \"\"\"\n neb_dirs = []\n\n for d in os.listdir(root_dir):\n pth = os.path.join(root_dir, d)\n if os.path.isdir(pth) and d.isdigit():\n i = int(d)\n neb_dirs.append((i, pth))\n neb_dirs = sorted(neb_dirs, key=lambda d: d[0])\n outcars = []\n structures = []\n\n # Setup the search sequence for the OUTCARs for the terminal\n # directories.\n terminal_dirs = []\n if relaxation_dirs is not None:\n terminal_dirs.append(relaxation_dirs)\n terminal_dirs.append((neb_dirs[0][1], neb_dirs[-1][1]))\n terminal_dirs.append([os.path.join(root_dir, d) for d in [\"start\", \"end\"]])\n terminal_dirs.append([os.path.join(root_dir, d) for d in [\"initial\", \"final\"]])\n\n for i, d in neb_dirs:\n outcar = glob.glob(os.path.join(d, \"OUTCAR*\"))\n contcar = glob.glob(os.path.join(d, \"CONTCAR*\"))\n poscar = glob.glob(os.path.join(d, \"POSCAR*\"))\n terminal = i in [0, neb_dirs[-1][0]]\n if terminal:\n for ds in terminal_dirs:\n od = ds[0] if i == 0 else ds[1]\n outcar = glob.glob(os.path.join(od, \"OUTCAR*\"))\n if outcar:\n outcar = sorted(outcar)\n outcars.append(Outcar(outcar[-1]))\n break\n else:\n raise ValueError(f\"OUTCAR cannot be found for terminal point {d}\")\n structures.append(Poscar.from_file(poscar[0]).structure)\n else:\n outcars.append(Outcar(outcar[0]))\n structures.append(Poscar.from_file(contcar[0]).structure)\n return NEBAnalysis.from_outcars(outcars, structures, **kwargs)\n\n def as_dict(self):\n \"\"\"\n Dict representation of NEBAnalysis.\n\n Returns:\n JSON-serializable dict representation.\n \"\"\"\n return {\n \"@module\": type(self).__module__,\n \"@class\": type(self).__name__,\n \"r\": jsanitize(self.r),\n \"energies\": jsanitize(self.energies),\n \"forces\": jsanitize(self.forces),\n \"structures\": [s.as_dict() for s in self.structures],\n }\n\n\ndef combine_neb_plots(neb_analyses, arranged_neb_analyses=False, reverse_plot=False):\n \"\"\"\n neb_analyses: a list of NEBAnalysis objects\n\n arranged_neb_analyses: The code connects two end points with the\n smallest-energy difference. If all end points have very close energies, it's\n likely to result in an inaccurate connection. 
Manually arrange neb_analyses\n if the combined plot is not as expected compared with all individual plots.\n E.g., if there are two NEBAnalysis objects to combine, arrange in such a\n way that the end-point energy of the first NEBAnalysis object is the\n start-point energy of the second NEBAnalysis object.\n Note that the barrier labeled in y-axis in the combined plot might be\n different from that in the individual plot due to the reference energy used.\n reverse_plot: reverse the plot or percolation direction.\n return: a NEBAnalysis object\n \"\"\"\n x = StructureMatcher()\n for neb_index, neb in enumerate(neb_analyses):\n if neb_index == 0:\n neb1 = neb\n neb1_energies = list(neb1.energies)\n neb1_structures = neb1.structures\n neb1_forces = neb1.forces\n neb1_r = neb1.r\n continue\n\n neb2 = neb\n neb2_energies = list(neb2.energies)\n\n matching = 0\n for neb1_s in [neb1_structures[0], neb1_structures[-1]]:\n if x.fit(neb1_s, neb2.structures[0]) or x.fit(neb1_s, neb2.structures[-1]):\n matching += 1\n break\n if matching == 0:\n raise ValueError(\"no matched structures for connection!\")\n\n neb1_start_e, neb1_end_e = neb1_energies[0], neb1_energies[-1]\n neb2_start_e, neb2_end_e = neb2_energies[0], neb2_energies[-1]\n min_e_diff = min(\n [\n abs(neb1_start_e - neb2_start_e),\n abs(neb1_start_e - neb2_end_e),\n abs(neb1_end_e - neb2_start_e),\n abs(neb1_end_e - neb2_end_e),\n ]\n )\n\n if arranged_neb_analyses:\n neb1_energies = (\n neb1_energies[0 : len(neb1_energies) - 1]\n + [(neb1_energies[-1] + neb2_energies[0]) / 2]\n + neb2_energies[1:]\n )\n neb1_structures = neb1_structures + neb2.structures[1:]\n neb1_forces = list(neb1_forces) + list(neb2.forces)[1:]\n neb1_r = list(neb1_r) + [i + neb1_r[-1] for i in list(neb2.r)[1:]]\n\n elif abs(neb1_start_e - neb2_start_e) == min_e_diff:\n neb1_energies = list(reversed(neb1_energies[1:])) + neb2_energies\n neb1_structures = list(reversed(neb1_structures[1:])) + neb2.structures\n neb1_forces = list(reversed(list(neb1_forces)[1:])) + list(neb2.forces)\n neb1_r = list(reversed([i * -1 - neb1_r[-1] * -1 for i in list(neb1_r)[1:]])) + [\n i + neb1_r[-1] for i in list(neb2.r)\n ]\n\n elif abs(neb1_start_e - neb2_end_e) == min_e_diff:\n neb1_energies = neb2_energies + neb1_energies[1:]\n neb1_structures = neb2.structures + neb1_structures[1:]\n neb1_forces = list(neb2.forces) + list(neb1_forces)[1:]\n neb1_r = list(neb2.r) + [i + list(neb2.r)[-1] for i in list(neb1_r)[1:]]\n\n elif abs(neb1_end_e - neb2_start_e) == min_e_diff:\n neb1_energies = neb1_energies + neb2_energies[1:]\n neb1_structures = neb1_structures + neb2.structures[1:]\n neb1_forces = list(neb1_forces) + list(neb2.forces)[1:]\n neb1_r = list(neb1_r) + [i + neb1_r[-1] for i in list(neb2.r)[1:]]\n\n else:\n neb1_energies = neb1_energies + list(reversed(neb2_energies))[1:]\n neb1_structures = neb1_structures + list(reversed(neb2.structures))[1:]\n neb1_forces = list(neb1_forces) + list(reversed(list(neb2.forces)))[1:]\n neb1_r = list(neb1_r) + list(\n reversed([i * -1 - list(neb2.r)[-1] * -1 + list(neb1_r)[-1] for i in list(neb2.r)[:-1]])\n )\n\n if reverse_plot:\n na = NEBAnalysis(\n list(reversed([i * -1 - neb1_r[-1] * -1 for i in list(neb1_r)])),\n list(reversed(neb1_energies)),\n list(reversed(neb1_forces)),\n list(reversed(neb1_structures)),\n )\n else:\n na = NEBAnalysis(neb1_r, neb1_energies, neb1_forces, neb1_structures)\n return na\n",
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\"\"\"\nThis module provides objects for extracting timing data from the ABINIT output files\nIt also provides tools to analyze and to visualize the parallel efficiency.\n\"\"\"\n\nimport collections\nimport logging\nimport os\nimport sys\n\nimport numpy as np\nfrom monty.string import is_string, list_strings\n\nfrom pymatgen.util.num import minloc\nfrom pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt\n\nlogger = logging.getLogger(__name__)\n\n\ndef alternate(*iterables):\n \"\"\"\n [a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]\n >>> alternate([1,4], [2,5], [3,6])\n [1, 2, 3, 4, 5, 6]\n \"\"\"\n items = []\n for tup in zip(*iterables):\n items.extend(tup)\n return items\n\n\nclass AbinitTimerParserError(Exception):\n \"\"\"Errors raised by AbinitTimerParser\"\"\"\n\n\nclass AbinitTimerParser(collections.abc.Iterable):\n \"\"\"\n Responsible for parsing a list of output files, extracting the timing results\n and analyzing the results.\n Assume the Abinit output files have been produced with `timopt -1`.\n\n Example:\n\n parser = AbinitTimerParser()\n parser.parse(list_of_files)\n\n To analyze all *.abo files within top, use:\n\n parser, paths, okfiles = AbinitTimerParser.walk(top=\".\", ext=\".abo\")\n \"\"\"\n\n # The markers enclosing the data.\n BEGIN_TAG = \"-<BEGIN_TIMER\"\n END_TAG = \"-<END_TIMER>\"\n\n Error = AbinitTimerParserError\n\n # DEFAULT_MPI_RANK = \"0\"\n\n @classmethod\n def walk(cls, top=\".\", ext=\".abo\"):\n \"\"\"\n Scan directory tree starting from top, look for files with extension `ext` and\n parse timing data.\n\n Return: (parser, paths, okfiles)\n where `parser` is the new object, `paths` is the list of files found and `okfiles`\n is the list of files that have been parsed successfully.\n (okfiles == paths) if all files have been parsed.\n \"\"\"\n paths = []\n for root, dirs, files in os.walk(top):\n for f in files:\n if f.endswith(ext):\n paths.append(os.path.join(root, f))\n\n parser = cls()\n okfiles = parser.parse(paths)\n return parser, paths, okfiles\n\n def __init__(self):\n \"\"\"Initialize object.\"\"\"\n # List of files that have been parsed.\n self._filenames = []\n\n # timers[filename][mpi_rank]\n # contains the timer extracted from the file filename associated to the MPI rank mpi_rank.\n self._timers = {}\n\n def __iter__(self):\n return self._timers.__iter__()\n\n def __len__(self):\n return len(self._timers)\n\n @property\n def filenames(self):\n \"\"\"List of files that have been parsed successfully.\"\"\"\n return self._filenames\n\n def parse(self, filenames):\n \"\"\"\n Read and parse a filename or a list of filenames.\n Files that cannot be opened are ignored. 
A single filename may also be given.\n\n Return: list of successfully read files.\n \"\"\"\n filenames = list_strings(filenames)\n\n read_ok = []\n for fname in filenames:\n try:\n fh = open(fname) # pylint: disable=R1732\n except OSError:\n logger.warning(f\"Cannot open file {fname}\")\n continue\n\n try:\n self._read(fh, fname)\n read_ok.append(fname)\n\n except self.Error as e:\n logger.warning(f\"exception while parsing file {fname}:\\n{e}\")\n continue\n\n finally:\n fh.close()\n\n # Add read_ok to the list of files that have been parsed.\n self._filenames.extend(read_ok)\n return read_ok\n\n def _read(self, fh, fname):\n \"\"\"Parse the TIMER section\"\"\"\n if fname in self._timers:\n raise self.Error(f\"Cannot overwrite timer associated to: {fname} \")\n\n def parse_line(line):\n \"\"\"Parse single line.\"\"\"\n name, vals = line[:25], line[25:].split()\n try:\n ctime, cfract, wtime, wfract, ncalls, gflops = vals\n except ValueError:\n # v8.3 Added two columns at the end [Speedup, Efficacity]\n ctime, cfract, wtime, wfract, ncalls, gflops, speedup, eff = vals\n\n return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)\n\n sections, info, cpu_time, wall_time = None, None, None, None\n data = {}\n parser_failed = False\n inside, has_timer = 0, False\n for line in fh:\n # print(line.strip())\n if line.startswith(self.BEGIN_TAG):\n has_timer = True\n sections = []\n info = {}\n inside = 1\n line = line[len(self.BEGIN_TAG) :].strip()[:-1]\n\n info[\"fname\"] = fname\n for tok in line.split(\",\"):\n key, val = (s.strip() for s in tok.split(\"=\"))\n info[key] = val\n\n elif line.startswith(self.END_TAG):\n inside = 0\n timer = AbinitTimer(sections, info, cpu_time, wall_time)\n mpi_rank = info[\"mpi_rank\"]\n data[mpi_rank] = timer\n\n elif inside:\n inside += 1\n line = line[1:].strip()\n\n if inside == 2:\n d = {}\n for tok in line.split(\",\"):\n key, val = (s.strip() for s in tok.split(\"=\"))\n d[key] = float(val)\n cpu_time, wall_time = d[\"cpu_time\"], d[\"wall_time\"]\n\n elif inside > 5:\n sections.append(parse_line(line))\n\n else:\n try:\n parse_line(line)\n except Exception:\n parser_failed = True\n\n if not parser_failed:\n raise self.Error(\"line should be empty: \" + str(inside) + line)\n\n if not has_timer:\n raise self.Error(f\"{fname}: No timer section found\")\n\n # Add it to the dict\n self._timers[fname] = data\n\n def timers(self, filename=None, mpi_rank=\"0\"):\n \"\"\"\n Return the list of timers associated to the given `filename` and MPI rank mpi_rank.\n \"\"\"\n if filename is not None:\n return [self._timers[filename][mpi_rank]]\n return [self._timers[filename][mpi_rank] for filename in self._filenames]\n\n def section_names(self, ordkey=\"wall_time\"):\n \"\"\"\n Return the names of sections ordered by ordkey.\n For the time being, the values are taken from the first timer.\n \"\"\"\n section_names = []\n\n # FIXME this is not trivial\n for idx, timer in enumerate(self.timers()):\n if idx == 0:\n section_names = [s.name for s in timer.order_sections(ordkey)]\n # check = section_names\n # else:\n # new_set = set( [s.name for s in timer.order_sections(ordkey)])\n # section_names.intersection_update(new_set)\n # check = check.union(new_set)\n\n # if check != section_names:\n # print(\"sections\", section_names)\n # print(\"check\",check)\n\n return section_names\n\n def get_sections(self, section_name):\n \"\"\"\n Return the list of sections stored in self.timers() given `section_name`\n A fake section is returned if the timer does not have 
section_name.\n \"\"\"\n sections = []\n for timer in self.timers():\n for sect in timer.sections:\n if sect.name == section_name:\n sections.append(sect)\n break\n else:\n sections.append(AbinitTimerSection.fake())\n\n return sections\n\n def pefficiency(self):\n \"\"\"\n Analyze the parallel efficiency.\n\n Return: :class:`ParallelEfficiency` object.\n \"\"\"\n timers = self.timers()\n\n # Number of CPUs employed in each calculation.\n ncpus = [timer.ncpus for timer in timers]\n\n # Find the minimum number of cpus used and its index in timers.\n min_idx = minloc(ncpus)\n min_ncpus = ncpus[min_idx]\n\n # Reference timer\n ref_t = timers[min_idx]\n\n # Compute the parallel efficiency (total and section efficiency)\n peff = {}\n ctime_peff = [(min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus)]\n wtime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus)]\n n = len(timers)\n\n peff[\"total\"] = {}\n peff[\"total\"][\"cpu_time\"] = ctime_peff\n peff[\"total\"][\"wall_time\"] = wtime_peff\n peff[\"total\"][\"cpu_fract\"] = n * [100]\n peff[\"total\"][\"wall_fract\"] = n * [100]\n\n for sect_name in self.section_names():\n # print(sect_name)\n ref_sect = ref_t.get_section(sect_name)\n sects = [t.get_section(sect_name) for t in timers]\n try:\n ctime_peff = [(min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus)]\n wtime_peff = [(min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) for (s, ncp) in zip(sects, ncpus)]\n except ZeroDivisionError:\n ctime_peff = n * [-1]\n wtime_peff = n * [-1]\n\n assert sect_name not in peff\n peff[sect_name] = {}\n peff[sect_name][\"cpu_time\"] = ctime_peff\n peff[sect_name][\"wall_time\"] = wtime_peff\n\n peff[sect_name][\"cpu_fract\"] = [s.cpu_fract for s in sects]\n peff[sect_name][\"wall_fract\"] = [s.wall_fract for s in sects]\n\n return ParallelEfficiency(self._filenames, min_idx, peff)\n\n def summarize(self, **kwargs):\n \"\"\"\n Return pandas DataFrame with the most important results stored in the timers.\n \"\"\"\n import pandas as pd\n\n colnames = [\n \"fname\",\n \"wall_time\",\n \"cpu_time\",\n \"mpi_nprocs\",\n \"omp_nthreads\",\n \"mpi_rank\",\n ]\n\n frame = pd.DataFrame(columns=colnames)\n for i, timer in enumerate(self.timers()):\n frame = frame.append({k: getattr(timer, k) for k in colnames}, ignore_index=True)\n frame[\"tot_ncpus\"] = frame[\"mpi_nprocs\"] * frame[\"omp_nthreads\"]\n\n # Compute parallel efficiency (use the run with min number of cpus to normalize).\n i = frame[\"tot_ncpus\"].values.argmin()\n ref_wtime = frame.iloc[i][\"wall_time\"]\n ref_ncpus = frame.iloc[i][\"tot_ncpus\"]\n frame[\"peff\"] = (ref_ncpus * ref_wtime) / (frame[\"wall_time\"] * frame[\"tot_ncpus\"])\n\n return frame\n\n @add_fig_kwargs\n def plot_efficiency(self, key=\"wall_time\", what=\"good+bad\", nmax=5, ax=None, **kwargs):\n \"\"\"\n Plot the parallel efficiency\n\n Args:\n key: Parallel efficiency is computed using the wall_time.\n what: Specifies what to plot: `good` for sections with good parallel efficiency.\n `bad` for sections with bad efficiency. Options can be concatenated with `+`.\n nmax: Maximum number of entries in plot\n ax: matplotlib :class:`Axes` or None if a new figure should be created.\n\n ================ ====================================================\n kwargs Meaning\n ================ ====================================================\n linewidth matplotlib linewidth. Default: 2.0\n markersize matplotlib markersize. 
Default: 10\n ================ ====================================================\n\n Returns:\n `matplotlib` figure\n \"\"\"\n ax, fig, plt = get_ax_fig_plt(ax=ax)\n lw = kwargs.pop(\"linewidth\", 2.0)\n msize = kwargs.pop(\"markersize\", 10)\n what = what.split(\"+\")\n\n timers = self.timers()\n peff = self.pefficiency()\n n = len(timers)\n xx = np.arange(n)\n\n # ax.set_color_cycle(['g', 'b', 'c', 'm', 'y', 'k'])\n ax.set_prop_cycle(color=[\"g\", \"b\", \"c\", \"m\", \"y\", \"k\"])\n\n lines, legend_entries = [], []\n # Plot sections with good efficiency.\n if \"good\" in what:\n good = peff.good_sections(key=key, nmax=nmax)\n for g in good:\n # print(g, peff[g])\n yy = peff[g][key]\n (line,) = ax.plot(xx, yy, \"-->\", linewidth=lw, markersize=msize)\n lines.append(line)\n legend_entries.append(g)\n\n # Plot sections with bad efficiency.\n if \"bad\" in what:\n bad = peff.bad_sections(key=key, nmax=nmax)\n for b in bad:\n # print(b, peff[b])\n yy = peff[b][key]\n (line,) = ax.plot(xx, yy, \"-.<\", linewidth=lw, markersize=msize)\n lines.append(line)\n legend_entries.append(b)\n\n # Add total if not already done\n if \"total\" not in legend_entries:\n yy = peff[\"total\"][key]\n (total_line,) = ax.plot(xx, yy, \"r\", linewidth=lw, markersize=msize)\n lines.append(total_line)\n legend_entries.append(\"total\")\n\n ax.legend(lines, legend_entries, loc=\"best\", shadow=True)\n\n # ax.set_title(title)\n ax.set_xlabel(\"Total_NCPUs\")\n ax.set_ylabel(\"Efficiency\")\n ax.grid(True)\n\n # Set xticks and labels.\n labels = [f\"MPI={t.mpi_nprocs}, OMP={t.omp_nthreads}\" for t in timers]\n ax.set_xticks(xx)\n ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)\n\n return fig\n\n @add_fig_kwargs\n def plot_pie(self, key=\"wall_time\", minfract=0.05, **kwargs):\n \"\"\"\n Plot pie charts of the different timers.\n\n Args:\n key: Keyword used to extract data from timers.\n minfract: Don't show sections whose relative weight is less that minfract.\n\n Returns:\n `matplotlib` figure\n \"\"\"\n timers = self.timers()\n n = len(timers)\n\n # Make square figures and axes\n import matplotlib.pyplot as plt\n from matplotlib.gridspec import GridSpec\n\n fig = plt.gcf()\n gspec = GridSpec(n, 1)\n for idx, timer in enumerate(timers):\n ax = plt.subplot(gspec[idx, 0])\n ax.set_title(str(timer))\n timer.pie(ax=ax, key=key, minfract=minfract, show=False)\n\n return fig\n\n @add_fig_kwargs\n def plot_stacked_hist(self, key=\"wall_time\", nmax=5, ax=None, **kwargs):\n \"\"\"\n Plot stacked histogram of the different timers.\n\n Args:\n key: Keyword used to extract data from the timers. Only the first `nmax`\n sections with largest value are show.\n mmax: Maximum number of sections to show. Other entries are grouped together\n in the `others` section.\n ax: matplotlib :class:`Axes` or None if a new figure should be created.\n\n Returns:\n `matplotlib` figure\n \"\"\"\n ax, fig, plt = get_ax_fig_plt(ax=ax)\n\n mpi_rank = \"0\"\n timers = self.timers(mpi_rank=mpi_rank)\n n = len(timers)\n\n names, values = [], []\n rest = np.zeros(n)\n\n for idx, sname in enumerate(self.section_names(ordkey=key)):\n sections = self.get_sections(sname)\n svals = np.asarray([s.__dict__[key] for s in sections])\n if idx < nmax:\n names.append(sname)\n values.append(svals)\n else:\n rest += svals\n\n names.append(f\"others (nmax={nmax})\")\n values.append(rest)\n\n # The dataset is stored in values. 
Now create the stacked histogram.\n ind = np.arange(n) # the locations for the groups\n width = 0.35 # the width of the bars\n colors = nmax * [\"r\", \"g\", \"b\", \"c\", \"k\", \"y\", \"m\"]\n\n bars = []\n bottom = np.zeros(n)\n for idx, vals in enumerate(values):\n color = colors[idx]\n bar_ = ax.bar(ind, vals, width, color=color, bottom=bottom)\n bars.append(bar_)\n bottom += vals\n\n ax.set_ylabel(key)\n ax.set_title(f\"Stacked histogram with the {nmax} most important sections\")\n\n ticks = ind + width / 2.0\n labels = [f\"MPI={t.mpi_nprocs}, OMP={t.omp_nthreads}\" for t in timers]\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels, rotation=15)\n\n # Add legend.\n ax.legend([bar_[0] for bar_ in bars], names, loc=\"best\")\n\n return fig\n\n def plot_all(self, show=True, **kwargs):\n \"\"\"\n Call all plot methods provided by the parser.\n \"\"\"\n figs = []\n app = figs.append\n app(self.plot_stacked_hist(show=show))\n app(self.plot_efficiency(show=show))\n app(self.plot_pie(show=show))\n return figs\n\n\nclass ParallelEfficiency(dict):\n \"\"\"\n Store results concerning the parallel efficiency of the job.\n \"\"\"\n\n def __init__(self, filenames, ref_idx, *args, **kwargs):\n \"\"\"\n Args:\n filennames: List of filenames\n ref_idx: Index of the Reference time (calculation done with the smallest number of cpus)\n \"\"\"\n self.update(*args, **kwargs)\n self.filenames = filenames\n self._ref_idx = ref_idx\n\n def _order_by_peff(self, key, criterion, reverse=True):\n\n self.estimator = {\n \"min\": min,\n \"max\": max,\n \"mean\": lambda items: sum(items) / len(items),\n }[criterion]\n\n data = []\n for (sect_name, peff) in self.items():\n # Ignore values where we had a division by zero.\n if all(v != -1 for v in peff[key]):\n values = peff[key][:]\n # print(sect_name, values)\n if len(values) > 1:\n ref_value = values.pop(self._ref_idx)\n assert ref_value == 1.0\n\n data.append((sect_name, self.estimator(values)))\n\n data.sort(key=lambda t: t[1], reverse=reverse)\n return tuple(sect_name for (sect_name, e) in data)\n\n def totable(self, stop=None, reverse=True):\n \"\"\"\n Return table (list of lists) with timing results.\n\n Args:\n stop: Include results up to stop. 
None for all\n reverse: Put items with highest wall_time in first positions if True.\n \"\"\"\n osects = self._order_by_peff(\"wall_time\", criterion=\"mean\", reverse=reverse)\n if stop is not None:\n osects = osects[:stop]\n\n n = len(self.filenames)\n table = [[\"AbinitTimerSection\"] + alternate(self.filenames, n * [\"%\"])]\n for sect_name in osects:\n peff = self[sect_name][\"wall_time\"]\n fract = self[sect_name][\"wall_fract\"]\n vals = alternate(peff, fract)\n\n table.append([sect_name] + [f\"{val:.2f}\" for val in vals])\n\n return table\n\n def good_sections(self, key=\"wall_time\", criterion=\"mean\", nmax=5):\n \"\"\"\n Return first `nmax` sections with best value of key `key` using criterion `criterion`.\n \"\"\"\n good_sections = self._order_by_peff(key, criterion=criterion)\n return good_sections[:nmax]\n\n def bad_sections(self, key=\"wall_time\", criterion=\"mean\", nmax=5):\n \"\"\"\n Return first `nmax` sections with worst value of key `key` using criterion `criterion`.\n \"\"\"\n bad_sections = self._order_by_peff(key, criterion=criterion, reverse=False)\n return bad_sections[:nmax]\n\n\nclass AbinitTimerSection:\n \"\"\"Record with the timing results associated to a section of code.\"\"\"\n\n STR_FIELDS = [\"name\"]\n\n NUMERIC_FIELDS = [\n \"wall_time\",\n \"wall_fract\",\n \"cpu_time\",\n \"cpu_fract\",\n \"ncalls\",\n \"gflops\",\n ]\n\n FIELDS = tuple(STR_FIELDS + NUMERIC_FIELDS)\n\n @classmethod\n def fake(cls):\n \"\"\"Return a fake section. Mainly used to fill missing entries if needed.\"\"\"\n return AbinitTimerSection(\"fake\", 0.0, 0.0, 0.0, 0.0, -1, 0.0)\n\n def __init__(self, name, cpu_time, cpu_fract, wall_time, wall_fract, ncalls, gflops):\n \"\"\"\n Args:\n name: Name of the sections.\n cpu_time: CPU time in seconds.\n cpu_fract: Percentage of CPU time.\n wall_time: Wall-time in seconds.\n wall_fract: Percentage of wall-time.\n ncalls: Number of calls\n gflops: Gigaflops.\n \"\"\"\n self.name = name.strip()\n self.cpu_time = float(cpu_time)\n self.cpu_fract = float(cpu_fract)\n self.wall_time = float(wall_time)\n self.wall_fract = float(wall_fract)\n self.ncalls = int(ncalls)\n self.gflops = float(gflops)\n\n def to_tuple(self):\n \"\"\"Convert object to tuple.\"\"\"\n return tuple(self.__dict__[at] for at in AbinitTimerSection.FIELDS)\n\n def to_dict(self):\n \"\"\"Convert object to dictionary.\"\"\"\n return {at: self.__dict__[at] for at in AbinitTimerSection.FIELDS}\n\n def to_csvline(self, with_header=False):\n \"\"\"Return a string with data in CSV format. 
Add header if `with_header`\"\"\"\n string = \"\"\n\n if with_header:\n string += \"# \" + \" \".join(at for at in AbinitTimerSection.FIELDS) + \"\\n\"\n\n string += \", \".join(str(v) for v in self.to_tuple()) + \"\\n\"\n return string\n\n def __str__(self):\n \"\"\"String representation.\"\"\"\n string = \"\"\n for a in AbinitTimerSection.FIELDS:\n string += a + \" = \" + self.__dict__[a] + \",\"\n return string[:-1]\n\n\nclass AbinitTimer:\n \"\"\"Container class storing the timing results.\"\"\"\n\n def __init__(self, sections, info, cpu_time, wall_time):\n \"\"\"\n Args:\n sections: List of sections\n info: Dictionary with extra info.\n cpu_time: Cpu-time in seconds.\n wall_time: Wall-time in seconds.\n \"\"\"\n # Store sections and names\n self.sections = tuple(sections)\n self.section_names = tuple(s.name for s in self.sections)\n\n self.info = info\n self.cpu_time = float(cpu_time)\n self.wall_time = float(wall_time)\n self.mpi_nprocs = int(info[\"mpi_nprocs\"])\n self.omp_nthreads = int(info[\"omp_nthreads\"])\n self.mpi_rank = info[\"mpi_rank\"].strip()\n self.fname = info[\"fname\"].strip()\n\n def __str__(self):\n return (\n f\"file={self.fname}, wall_time={self.wall_time:.1f}, \"\n f\"mpi_nprocs={self.mpi_nprocs}, omp_nthreads={self.omp_nthreads}\"\n )\n\n @property\n def ncpus(self):\n \"\"\"Total number of CPUs employed.\"\"\"\n return self.mpi_nprocs * self.omp_nthreads\n\n def get_section(self, section_name):\n \"\"\"Return section associated to `section_name`.\"\"\"\n try:\n idx = self.section_names.index(section_name)\n except Exception:\n raise\n sect = self.sections[idx]\n assert sect.name == section_name\n return sect\n\n def to_csv(self, fileobj=sys.stdout):\n \"\"\"Write data on file fileobj using CSV format.\"\"\"\n openclose = is_string(fileobj)\n\n if openclose:\n fileobj = open(fileobj, \"w\") # pylint: disable=R1732\n\n for idx, section in enumerate(self.sections):\n fileobj.write(section.to_csvline(with_header=(idx == 0)))\n fileobj.flush()\n\n if openclose:\n fileobj.close()\n\n def to_table(self, sort_key=\"wall_time\", stop=None):\n \"\"\"Return a table (list of lists) with timer data\"\"\"\n table = [\n list(AbinitTimerSection.FIELDS),\n ]\n ord_sections = self.order_sections(sort_key)\n\n if stop is not None:\n ord_sections = ord_sections[:stop]\n\n for osect in ord_sections:\n row = [str(item) for item in osect.to_tuple()]\n table.append(row)\n\n return table\n\n # Maintain old API\n totable = to_table\n\n def get_dataframe(self, sort_key=\"wall_time\", **kwargs):\n \"\"\"\n Return a pandas DataFrame with entries sorted according to `sort_key`.\n \"\"\"\n import pandas as pd\n\n frame = pd.DataFrame(columns=AbinitTimerSection.FIELDS)\n\n for osect in self.order_sections(sort_key):\n frame = frame.append(osect.to_dict(), ignore_index=True)\n\n # Monkey patch\n frame.info = self.info\n frame.cpu_time = self.cpu_time\n frame.wall_time = self.wall_time\n frame.mpi_nprocs = self.mpi_nprocs\n frame.omp_nthreads = self.omp_nthreads\n frame.mpi_rank = self.mpi_rank\n frame.fname = self.fname\n\n return frame\n\n def get_values(self, keys):\n \"\"\"\n Return a list of values associated to a particular list of keys.\n \"\"\"\n if is_string(keys):\n return [s.__dict__[keys] for s in self.sections]\n values = []\n for k in keys:\n values.append([s.__dict__[k] for s in self.sections])\n return values\n\n def names_and_values(self, key, minval=None, minfract=None, sorted=True):\n \"\"\"\n Select the entries whose value[key] is >= minval or whose fraction[key] is >= 
minfract\n Return the names of the sections and the corresponding values.\n \"\"\"\n values = self.get_values(key)\n names = self.get_values(\"name\")\n\n new_names, new_values = [], []\n other_val = 0.0\n\n if minval is not None:\n assert minfract is None\n\n for n, v in zip(names, values):\n if v >= minval:\n new_names.append(n)\n new_values.append(v)\n else:\n other_val += v\n\n new_names.append(\"below minval \" + str(minval))\n new_values.append(other_val)\n\n elif minfract is not None:\n assert minval is None\n\n total = self.sum_sections(key)\n\n for n, v in zip(names, values):\n if v / total >= minfract:\n new_names.append(n)\n new_values.append(v)\n else:\n other_val += v\n\n new_names.append(\"below minfract \" + str(minfract))\n new_values.append(other_val)\n\n else:\n # all values\n new_names, new_values = names, values\n\n if sorted:\n # Sort new_values and rearrange new_names.\n nandv = list(zip(new_names, new_values))\n nandv.sort(key=lambda t: t[1])\n new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv]\n\n return new_names, new_values\n\n def _reduce_sections(self, keys, operator):\n return operator(self.get_values(keys))\n\n def sum_sections(self, keys):\n \"\"\"Sum value of keys.\"\"\"\n return self._reduce_sections(keys, sum)\n\n def order_sections(self, key, reverse=True):\n \"\"\"Sort sections according to the value of key.\"\"\"\n return sorted(self.sections, key=lambda s: s.__dict__[key], reverse=reverse)\n\n @add_fig_kwargs\n def cpuwall_histogram(self, ax=None, **kwargs):\n \"\"\"\n Plot histogram with cpu- and wall-time on axis `ax`.\n\n Args:\n ax: matplotlib :class:`Axes` or None if a new figure should be created.\n\n Returns: `matplotlib` figure\n \"\"\"\n ax, fig, plt = get_ax_fig_plt(ax=ax)\n\n nk = len(self.sections)\n ind = np.arange(nk) # the x locations for the groups\n width = 0.35 # the width of the bars\n\n cpu_times = self.get_values(\"cpu_time\")\n rects1 = plt.bar(ind, cpu_times, width, color=\"r\")\n\n wall_times = self.get_values(\"wall_time\")\n rects2 = plt.bar(ind + width, wall_times, width, color=\"y\")\n\n # Add ylable and title\n ax.set_ylabel(\"Time (s)\")\n\n # plt.title('CPU-time and Wall-time for the different sections of the code')\n\n ticks = self.get_values(\"name\")\n ax.set_xticks(ind + width, ticks)\n\n ax.legend((rects1[0], rects2[0]), (\"CPU\", \"Wall\"), loc=\"best\")\n\n return fig\n\n @add_fig_kwargs\n def pie(self, key=\"wall_time\", minfract=0.05, ax=None, **kwargs):\n \"\"\"\n Plot pie chart for this timer.\n\n Args:\n key: Keyword used to extract data from the timer.\n minfract: Don't show sections whose relative weight is less that minfract.\n ax: matplotlib :class:`Axes` or None if a new figure should be created.\n\n Returns: `matplotlib` figure\n \"\"\"\n ax, fig, plt = get_ax_fig_plt(ax=ax)\n # Set aspect ratio to be equal so that pie is drawn as a circle.\n ax.axis(\"equal\")\n # Don't show section whose value is less that minfract\n labels, vals = self.names_and_values(key, minfract=minfract)\n ax.pie(vals, explode=None, labels=labels, autopct=\"%1.1f%%\", shadow=True)\n return fig\n\n @add_fig_kwargs\n def scatter_hist(self, ax=None, **kwargs):\n \"\"\"\n Scatter plot + histogram.\n\n Args:\n ax: matplotlib :class:`Axes` or None if a new figure should be created.\n\n Returns: `matplotlib` figure\n \"\"\"\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n\n ax, fig, plt = get_ax_fig_plt(ax=ax)\n\n x = np.asarray(self.get_values(\"cpu_time\"))\n y = 
np.asarray(self.get_values(\"wall_time\"))\n\n # the scatter plot:\n axScatter = plt.subplot(1, 1, 1)\n axScatter.scatter(x, y)\n axScatter.set_aspect(\"auto\")\n\n # create new axes on the right and on the top of the current axes\n # The first argument of the new_vertical(new_horizontal) method is\n # the height (width) of the axes to be created in inches.\n divider = make_axes_locatable(axScatter)\n axHistx = divider.append_axes(\"top\", 1.2, pad=0.1, sharex=axScatter)\n axHisty = divider.append_axes(\"right\", 1.2, pad=0.1, sharey=axScatter)\n\n # make some labels invisible\n plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)\n\n # now determine nice limits by hand:\n binwidth = 0.25\n xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])\n lim = (int(xymax / binwidth) + 1) * binwidth\n\n bins = np.arange(-lim, lim + binwidth, binwidth)\n axHistx.hist(x, bins=bins)\n axHisty.hist(y, bins=bins, orientation=\"horizontal\")\n\n # the xaxis of axHistx and yaxis of axHisty are shared with axScatter,\n # thus there is no need to manually adjust the xlim and ylim of these axis.\n\n # axHistx.axis[\"bottom\"].major_ticklabels.set_visible(False)\n for tl in axHistx.get_xticklabels():\n tl.set_visible(False)\n axHistx.set_yticks([0, 50, 100])\n\n # axHisty.axis[\"left\"].major_ticklabels.set_visible(False)\n for tl in axHisty.get_yticklabels():\n tl.set_visible(False)\n axHisty.set_xticks([0, 50, 100])\n\n # plt.draw()\n return fig\n"
] |
[
[
"numpy.square",
"numpy.dot",
"numpy.linalg.svd",
"numpy.diag",
"numpy.allclose",
"numpy.unique",
"numpy.eye",
"numpy.linalg.norm",
"numpy.math.factorial",
"scipy.spatial.distance.cdist",
"numpy.linalg.eigh",
"numpy.cross",
"numpy.argsort",
"scipy.optimize.linear_sum_assignment",
"numpy.array",
"numpy.where",
"numpy.vdot"
],
[
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_equal"
],
[
"numpy.dot",
"numpy.polyfit",
"numpy.take",
"numpy.einsum",
"scipy.optimize.root",
"numpy.math.factorial",
"numpy.exp",
"numpy.arange",
"numpy.eye",
"numpy.linalg.det",
"scipy.special.factorial",
"numpy.ravel",
"numpy.outer",
"numpy.triu",
"numpy.zeros",
"numpy.nonzero",
"numpy.linalg.inv",
"numpy.linalg.eigh",
"numpy.transpose",
"scipy.integrate.quad",
"numpy.array",
"numpy.sum",
"numpy.linalg.solve",
"numpy.abs",
"numpy.put",
"numpy.tile",
"numpy.linalg.norm",
"numpy.ones",
"numpy.linalg.pinv",
"numpy.vectorize",
"numpy.vstack"
],
[
"numpy.dot",
"numpy.eye",
"numpy.array"
],
[
"numpy.min",
"numpy.cumsum",
"numpy.max",
"numpy.argmax",
"scipy.interpolate.CubicSpline",
"numpy.array",
"numpy.sum"
],
[
"numpy.asarray",
"numpy.arange",
"pandas.DataFrame",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.bar",
"numpy.zeros",
"numpy.fabs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"1.3",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"0.17",
"1.2",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
dudals3844/zipline-reloaded
|
[
"3fe18bcf4ed3668b5543a41071c145d5e7b3ab29"
] |
[
"setup.py"
] |
[
"#!/usr/bin/env python\n#\n# Copyright 2014 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport os\nfrom pathlib import Path\n\n# ensure the current directory is on sys.path\n# so versioneer can be imported when pip uses\n# PEP 517/518 build rules.\n# https://github.com/python-versioneer/python-versioneer/issues/193\nsys.path.append(Path(__file__).resolve(strict=True).parent.as_posix())\nimport versioneer # noqa: E402\nfrom setuptools import Extension, find_packages, setup # noqa: E402\n\n\nclass LazyBuildExtCommandClass(dict):\n \"\"\"\n Lazy command class that defers operations requiring Cython and numpy until\n they've actually been downloaded and installed by setup_requires.\n \"\"\"\n\n def __contains__(self, key):\n return key == \"build_ext\" or super(LazyBuildExtCommandClass, self).__contains__(\n key\n )\n\n def __setitem__(self, key, value):\n if key == \"build_ext\":\n raise AssertionError(\"build_ext overridden!\")\n super(LazyBuildExtCommandClass, self).__setitem__(key, value)\n\n def __getitem__(self, key):\n if key != \"build_ext\":\n return super(LazyBuildExtCommandClass, self).__getitem__(key)\n\n from Cython.Distutils import build_ext as cython_build_ext\n import numpy\n\n # Cython_build_ext isn't a new-style class in Py2.\n class build_ext(cython_build_ext, object):\n \"\"\"\n Custom build_ext command that lazily adds numpy's include_dir to\n extensions.\n \"\"\"\n\n def build_extensions(self):\n \"\"\"\n Lazily append numpy's include directory to Extension includes.\n\n This is done here rather than at module scope because setup.py\n may be run before numpy has been installed, in which case\n importing numpy and calling `numpy.get_include()` will fail.\n \"\"\"\n numpy_incl = numpy.get_include()\n for ext in self.extensions:\n ext.include_dirs.append(numpy_incl)\n\n super(build_ext, self).build_extensions()\n\n return build_ext\n\n\ndef window_specialization(typename):\n \"\"\"Make an extension for an AdjustedArrayWindow specialization.\"\"\"\n return Extension(\n name=f\"zipline.lib._{typename}window\",\n sources=[f\"src/zipline/lib/_{typename}window.pyx\"],\n depends=[\"src/zipline/lib/_windowtemplate.pxi\"],\n )\n\n\next_options = dict(\n compiler_directives=dict(profile=True, language_level=\"3\"), annotate=True\n)\next_modules = [\n Extension(\n name=\"zipline.assets._assets\", sources=[\"src/zipline/assets/_assets.pyx\"]\n ),\n Extension(\n name=\"zipline.assets.continuous_futures\",\n sources=[\"src/zipline/assets/continuous_futures.pyx\"],\n ),\n Extension(\n name=\"zipline.lib.adjustment\", sources=[\"src/zipline/lib/adjustment.pyx\"]\n ),\n Extension(\n name=\"zipline.lib._factorize\", sources=[\"src/zipline/lib/_factorize.pyx\"]\n ),\n window_specialization(\"float64\"),\n window_specialization(\"int64\"),\n window_specialization(\"int64\"),\n window_specialization(\"uint8\"),\n window_specialization(\"label\"),\n Extension(name=\"zipline.lib.rank\", sources=[\"src/zipline/lib/rank.pyx\"]),\n Extension(\n 
name=\"zipline.data._equities\", sources=[\"src/zipline/data/_equities.pyx\"]\n ),\n Extension(\n name=\"zipline.data._adjustments\",\n sources=[\"src/zipline/data/_adjustments.pyx\"],\n ),\n Extension(name=\"zipline._protocol\", sources=[\"src/zipline/_protocol.pyx\"]),\n Extension(\n name=\"zipline.finance._finance_ext\",\n sources=[\"src/zipline/finance/_finance_ext.pyx\"],\n ),\n Extension(\n name=\"zipline.gens.sim_engine\", sources=[\"src/zipline/gens/sim_engine.pyx\"]\n ),\n Extension(\n name=\"zipline.data._minute_bar_internal\",\n sources=[\"src/zipline/data/_minute_bar_internal.pyx\"],\n ),\n Extension(\n name=\"zipline.data._resample\", sources=[\"src/zipline/data/_resample.pyx\"]\n ),\n]\nfor ext_module in ext_modules:\n ext_module.cython_directives = dict(language_level=\"3\")\n\nversion = versioneer.get_version()\n\nsetup(\n name='zipline_reloaded_korea',\n version=version,\n cmdclass=LazyBuildExtCommandClass(versioneer.get_cmdclass()),\n entry_points={\n \"console_scripts\": [\n \"zipline = zipline.__main__:main\",\n ],\n },\n # packages=find_packages(include=[\"src/zipline\"]),\n ext_modules=ext_modules,\n # package_dir={'': 'src'},\n # packages=find_packages(where='src'),\n package_data={\n root.replace(os.sep, \".\"): [\"*.pyi\", \"*.pyx\", \"*.pxi\", \"*.pxd\"]\n for root, dirnames, filenames in os.walk(\"src/zipline\")\n if \"__pycache__\" not in root\n },\n)\n"
] |
[
[
"numpy.get_include"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
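The setup.py in the row above defers its Cython and numpy imports until setuptools actually requests the build_ext command. Below is a minimal sketch of that lazy-cmdclass idea; the class name `LazyCommands` and the commented-out toy `setup()` call are illustrative only, and it assumes setuptools, Cython and numpy are available at build time.

```python
from setuptools import Extension, setup


class LazyCommands(dict):  # illustrative name; mirrors LazyBuildExtCommandClass above
    """Defer heavy imports until 'build_ext' is actually looked up."""

    def __contains__(self, key):
        return key == "build_ext" or super().__contains__(key)

    def __getitem__(self, key):
        if key != "build_ext":
            return super().__getitem__(key)
        # Imported only here, so merely loading setup.py needs neither package.
        import numpy
        from Cython.Distutils import build_ext as cython_build_ext

        class build_ext(cython_build_ext):
            def build_extensions(self):
                # Append numpy's headers lazily, as the file above does.
                for ext in self.extensions:
                    ext.include_dirs.append(numpy.get_include())
                super().build_extensions()

        return build_ext


# Hypothetical usage, mirroring the row above:
# setup(name="demo", cmdclass=LazyCommands(),
#       ext_modules=[Extension("demo._x", ["demo/_x.pyx"])])
```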
astirn/neural-inverse-cdf-sampling
|
[
"80eb2eb7cf396a4e53df62bc126e9a1828f55ca9"
] |
[
"neural_inverse_cdf.py"
] |
[
"import os\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nfrom scipy.stats import gamma\nfrom matplotlib import pyplot as plt\n\nfrom neural_inverse_cdf_utils import InvertibleNeuralNetworkLayer, train, result_plot\n\n\nclass GammaCDF(object):\n\n def __init__(self, theta_max=15, base_dir=os.getcwd()):\n\n # define theta range\n self.theta_min = 0\n self.theta_max = theta_max\n\n # log directory\n self.log_dir = os.path.join(base_dir, 'InverseCDF', 'Gamma', 'logdir')\n if os.path.exists(self.log_dir):\n shutil.rmtree(self.log_dir)\n\n # checkpoint directory\n self.mdl_dir = os.path.join(base_dir, 'InverseCDF', 'Gamma', 'checkpoint', 'gamma')\n if os.path.exists(self.mdl_dir):\n shutil.rmtree(self.mdl_dir)\n\n def sample_training_points(self, thetas_per_batch, samples_per_theta):\n\n # sample thetas\n thetas = np.random.random(thetas_per_batch) * 2 * self.theta_max - self.theta_max\n thetas[thetas < 0] = np.exp(thetas[thetas < 0])\n\n # loop over theta samples\n z = []\n u = []\n theta = []\n for i in range(len(thetas)):\n\n # sample z\n z.append(np.random.gamma(shape=thetas[i], size=samples_per_theta))\n\n # compute target u\n u.append(gamma.cdf(x=z[-1], a=thetas[i]))\n\n # up-sample theta\n theta.append(thetas[i] * np.ones(samples_per_theta))\n\n # convert to arrays\n z = np.concatenate(z)\n u = np.concatenate(u)\n theta = np.concatenate(theta)\n\n return z, u, theta\n\n def sample_test_points(self, theta, num_points=100):\n\n # compute target theta quantile\n theta = theta * np.ones(num_points)\n\n # compute evaluation points\n # z = np.linspace(0, self.z_max, num_points)\n z = np.random.gamma(shape=theta, size=num_points)\n z = np.sort(z)\n\n # compute target\n u = gamma.cdf(z, theta)\n\n return z, u, theta\n\n @staticmethod\n def u_clamp(u):\n # return clamped value--THIS CLAMP MUST BE INVERTIBLE!\n return tf.nn.sigmoid(u)\n\n @staticmethod\n def z_clamp(z):\n # return clamped value--THIS CLAMP MUST BE INVERTIBLE!\n return tf.nn.elu(z) + 1\n\n\nclass NeuralInverseCDF(object):\n \"\"\"\n Neural CDF Forward: F(z; theta) --> u\n Neural CDF Reverse: F_inv(u; theta) --> z\n \"\"\"\n def __init__(self, target, fwd_direction='cdf', trainable=True):\n\n # save target object\n self.target = target\n\n # check and save learning direction\n assert fwd_direction == 'cdf' or fwd_direction == 'inv_cdf'\n self.fwd_direction = fwd_direction\n\n # configure dimensions\n self.inn_dim = 2\n self.inn_layers = 8\n\n # declare the Invertible Neural Network Blocks\n self.inn = []\n for i in range(self.inn_layers):\n self.inn.append(InvertibleNeuralNetworkLayer(self.inn_dim, 'inn{:d}'.format(i+1), trainable=trainable))\n\n # training placeholders\n self.z_ph = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='z')\n self.u_ph = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='u')\n self.theta_ph = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='theta')\n\n # training outputs (None by default--will be overwritten by self.loss(*))\n self.u_hat = None\n self.z_hat = None\n\n # configure training\n self.thetas_per_batch = 100\n self.samples_per_theta = 100\n self.learning_rate = 5e-4\n self.num_epochs = 1000\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n\n def _forward_eval(self, x, theta):\n\n # run the forward direction\n for i in range(self.inn_layers):\n x = self.inn[i].forward_evaluation(x, theta)\n\n return x\n\n def _inverse_eval(self, x, theta):\n\n # run the inverse direction\n for i in range(self.inn_layers):\n x = self.inn[self.inn_layers - 1 - 
i].inverse_evaluation(x, theta)\n\n return x\n\n def _load_feed_dict(self, z, u, theta):\n\n # expand dimensions if needed\n if len(np.shape(z)) < 2:\n z = np.expand_dims(z, axis=-1)\n if len(np.shape(u)) < 2:\n u = np.expand_dims(u, axis=-1)\n if len(np.shape(theta)) < 2:\n theta = np.expand_dims(theta, axis=-1)\n\n # initialize feed dictionary\n feed_dict = dict()\n\n # load dictionary\n feed_dict.update({self.z_ph: z, self.u_ph: u, self.theta_ph: theta})\n\n return feed_dict\n\n def loss(self):\n\n # build the forward model (double input to achieve even dimensions)\n if self.fwd_direction == 'cdf':\n x_fwd = self._forward_eval(tf.concat((self.z_ph, self.z_ph), axis=1), self.theta_ph)\n else:\n x_fwd = self._forward_eval(tf.concat((self.u_ph, self.u_ph), axis=1), self.theta_ph)\n\n # apply forward model clamps and build backward model\n if self.fwd_direction == 'cdf':\n self.u_hat = self.target.u_clamp(x_fwd)\n self.z_hat = self._inverse_eval(x_fwd, self.theta_ph)\n else:\n self.z_hat = self.target.z_clamp(x_fwd)\n self.u_hat = self._inverse_eval(x_fwd, self.theta_ph)\n\n # compute both losses\n u_loss = tf.reduce_mean(tf.abs(self.u_ph - self.u_hat))\n z_loss = tf.reduce_mean(tf.abs(self.z_ph - self.z_hat))\n\n # if cdf direction, loss is w.r.t. u\n if self.fwd_direction == 'cdf':\n loss = u_loss\n\n # otherwise it is w.r.t. z\n else:\n loss = z_loss\n\n return loss, u_loss, z_loss\n\n def load_feed_dict_train(self):\n\n # sample training points\n z, u, theta = self.target.sample_training_points(self.thetas_per_batch, self.samples_per_theta)\n\n return self._load_feed_dict(z, u, theta)\n\n def sample_operation(self, u, theta):\n\n # expand dimensions if needed\n if len(u.get_shape().as_list()) < 2:\n u = tf.expand_dims(u, axis=-1)\n if len(theta.get_shape().as_list()) < 2:\n theta = tf.expand_dims(theta, axis=-1)\n\n # cdf is the forward direction\n if self.fwd_direction == 'cdf':\n\n # run the inverse direction (double input to achieve even dimensions)\n z = self._inverse_eval(tf.concat((u, u), axis=1), theta)\n\n # inverse cdf is the forward direction\n else:\n\n # run the forward direction (double input to achieve even dimensions)\n z = self._forward_eval(tf.concat((u, u), axis=1), theta)\n\n # apply the z clamp and take the mean of the two dimensions\n return tf.reduce_mean(self.target.z_clamp(z), axis=-1)\n\n def restore(self, sess, var_list=None):\n\n # variable list not provide\n if var_list is None:\n\n # restore all checkpoint variables\n tf_saver = tf.train.Saver()\n tf_saver.restore(sess, self.target.mdl_dir)\n\n # variable list provided\n else:\n\n # restore all checkpoint variables\n tf_saver = tf.train.Saver(var_list=var_list)\n tf_saver.restore(sess, self.target.mdl_dir)\n\n\nif __name__ == '__main__':\n\n # set random seeds\n np.random.seed(123)\n tf.set_random_seed(123)\n\n # set training parameters\n theta_max = 15\n\n # begin test session\n tf.reset_default_graph()\n with tf.Session() as sess:\n\n # declare model\n mdl = NeuralInverseCDF(target=GammaCDF(theta_max=theta_max))\n\n # train the model\n train(mdl, sess, show_plots=True, save_results=True)\n\n # test loading\n tf.reset_default_graph()\n with tf.Session() as sess:\n\n # declare model\n mdl = NeuralInverseCDF(target=GammaCDF(theta_max=theta_max), trainable=False)\n _ = mdl.loss()\n\n # restore variables (order of operations matter)\n sess.run(tf.global_variables_initializer())\n mdl.restore(sess)\n\n # test it\n fig_results, _ = result_plot(mdl, sess)\n\n # keep plots open\n plt.ioff()\n plt.show()\n"
] |
[
[
"numpy.expand_dims",
"tensorflow.concat",
"numpy.concatenate",
"tensorflow.train.AdamOptimizer",
"scipy.stats.gamma.cdf",
"numpy.exp",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.nn.elu",
"tensorflow.nn.sigmoid",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"matplotlib.pyplot.show",
"numpy.random.random",
"numpy.random.seed",
"tensorflow.expand_dims",
"numpy.sort",
"numpy.ones",
"matplotlib.pyplot.ioff",
"numpy.shape",
"numpy.random.gamma",
"tensorflow.abs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
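For reference, the mapping that `NeuralInverseCDF` in the row above is trained to approximate — inverse-CDF sampling of a Gamma distribution — can be checked directly with scipy. A minimal sketch follows; the shape value and sample count are arbitrary and not taken from the training code.

```python
import numpy as np
from scipy.stats import gamma

rng = np.random.default_rng(0)
theta = 3.5                        # shape parameter, same role as `theta` above
u = rng.uniform(size=100_000)      # u ~ Uniform(0, 1)

# Exact inverse CDF: z = F^{-1}(u; theta), i.e. the 'inv_cdf' direction above.
z = gamma.ppf(u, a=theta)

# Round trip F(F^{-1}(u)) = u, the consistency the u_loss/z_loss pair enforces.
print(np.abs(gamma.cdf(z, a=theta) - u).max())   # tiny round-trip error

# Inverse-CDF samples follow Gamma(theta): mean and variance are both ~theta.
print(z.mean(), z.var())
```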
wuyifan2233/Tencent_Animal_Surveillance
|
[
"4872852c4868ef5b9dccb7964aa80d397fa7e534"
] |
[
"utils/augmentations.py"
] |
[
"# YOLOv5 image augmentation functions\n\nimport random\n\nimport cv2\nimport math\nimport numpy as np\n\nfrom utils.general import segment2box, resample_segments\nfrom utils.metrics import bbox_ioa\n\n\ndef augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):\n # HSV color-space augmentation\n if hgain or sgain or vgain:\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n dtype = im.dtype # uint8\n\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed\n\n\ndef hist_equalize(im, clahe=True, bgr=False):\n # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255\n yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)\n if clahe:\n c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n yuv[:, :, 0] = c.apply(yuv[:, :, 0])\n else:\n yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram\n return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB\n\n\ndef replicate(im, labels):\n # Replicate labels\n h, w = im.shape[:2]\n boxes = labels[:, 1:].astype(int)\n x1, y1, x2, y2 = boxes.T\n s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)\n for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices\n x1b, y1b, x2b, y2b = boxes[i]\n bh, bw = y2b - y1b, x2b - x1b\n yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y\n x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]\n im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]\n labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)\n\n return im, labels\n\n\ndef letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n # Resize and pad image while meeting stride-multiple constraints\n shape = im.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return im, ratio, (dw, dh)\n\n\ndef random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,\n border=(0, 0)):\n # torchvision.transforms.RandomAffine(degrees=(-10, 
10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))\n # targets = [cls, xyxy]\n\n height = im.shape[0] + border[0] * 2 # shape(h,w,c)\n width = im.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -im.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -im.shape[0] / 2 # y translation (pixels)\n\n # Perspective\n P = np.eye(3)\n P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)\n P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(1 - scale, 1 + scale)\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)\n T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n if perspective:\n im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n else: # affine\n im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n # Visualize\n # import matplotlib.pyplot as plt\n # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n # ax[0].imshow(img[:, :, ::-1]) # base\n # ax[1].imshow(img2[:, :, ::-1]) # warped\n\n # Transform label coordinates\n n = len(targets)\n if n:\n use_segments = any(x.any() for x in segments)\n new = np.zeros((n, 4))\n if use_segments: # warp segments\n segments = resample_segments(segments) # upsample\n for i, segment in enumerate(segments):\n xy = np.ones((len(segment), 3))\n xy[:, :2] = segment\n xy = xy @ M.T # transform\n xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine\n\n # clip\n new[i] = segment2box(xy, width, height)\n\n else: # warp boxes\n xy = np.ones((n * 4, 3))\n xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = xy @ M.T # transform\n xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # clip\n new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)\n new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)\n\n # filter candidates\n i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)\n targets = targets[i]\n targets[:, 1:5] = new[i]\n\n return im, targets\n\n\ndef copy_paste(im, labels, segments, probability=0.5):\n # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)\n n = len(segments)\n if probability and n:\n h, w, c = im.shape # height, width, channels\n im_new = np.zeros(im.shape, np.uint8)\n for j in random.sample(range(n), k=round(probability * n)):\n l, s = labels[j], segments[j]\n box = w - l[3], l[2], w - l[1], l[4]\n ioa = bbox_ioa(box, labels[:, 1:5]) # 
intersection over area\n if (ioa < 0.30).all(): # allow 30% obscuration of existing labels\n labels = np.concatenate((labels, [[l[0], *box]]), 0)\n segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))\n cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)\n\n result = cv2.bitwise_and(src1=im, src2=im_new)\n result = cv2.flip(result, 1) # augment segments (flip left-right)\n i = result > 0 # pixels to replace\n # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch\n im[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug\n\n return im, labels, segments\n\n\ndef cutout(im, labels):\n # Applies image cutout augmentation https://arxiv.org/abs/1708.04552\n h, w = im.shape[:2]\n\n # create random masks\n scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction\n for s in scales:\n mask_h = random.randint(1, int(h * s))\n mask_w = random.randint(1, int(w * s))\n\n # box\n xmin = max(0, random.randint(0, w) - mask_w // 2)\n ymin = max(0, random.randint(0, h) - mask_h // 2)\n xmax = min(w, xmin + mask_w)\n ymax = min(h, ymin + mask_h)\n\n # apply random color mask\n im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]\n\n # return unobscured labels\n if len(labels) and s > 0.03:\n box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n labels = labels[ioa < 0.60] # remove >60% obscured labels\n\n return labels\n\n\ndef mixup(im, labels, im2, labels2):\n # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf\n r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0\n im = (im * r + im2 * (1 - r)).astype(np.uint8)\n labels = np.concatenate((labels, labels2), 0)\n return im, labels\n\n\ndef box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)\n # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio\n w1, h1 = box1[2] - box1[0], box1[3] - box1[1]\n w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio\n return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates\n"
] |
[
[
"numpy.random.beta",
"numpy.maximum",
"numpy.clip",
"numpy.arange",
"numpy.eye",
"numpy.ones",
"numpy.concatenate",
"numpy.append",
"numpy.mod",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
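As a standalone illustration of the augmentation utilities above, the MixUp step is simple enough to reproduce with synthetic data. This sketch uses made-up image sizes and boxes, keeping the same (cls, x1, y1, x2, y2) label layout as the file.

```python
import numpy as np

rng = np.random.default_rng(0)

# Two fake images plus their (cls, x1, y1, x2, y2) label arrays.
im1 = rng.integers(0, 256, size=(640, 640, 3), dtype=np.uint8)
im2 = rng.integers(0, 256, size=(640, 640, 3), dtype=np.uint8)
labels1 = np.array([[0, 10, 10, 100, 100]], dtype=np.float32)
labels2 = np.array([[1, 200, 200, 300, 300]], dtype=np.float32)

# Same blend as mixup() above: ratio from Beta(32, 32), which concentrates near 0.5;
# pixels are linearly mixed and the two label sets are simply concatenated.
r = np.random.beta(32.0, 32.0)
im = (im1 * r + im2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels1, labels2), 0)

print(im.shape, labels.shape)   # (640, 640, 3) (2, 5)
```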
CoolPhilChen/cil-road-segmentation-2019
|
[
"0becfe97d77012b3abbaa181a5c52e6edd1a39f1"
] |
[
"model/crfasrnn/network.py"
] |
[
"# encoding: utf-8\nfrom functools import partial\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom config import config\nfrom base_model import resnet50\nfrom seg_opr.seg_oprs import ConvBnRelu\n\nfrom crfrnn import CrfRnn\n\nHAS_BN = True #crfrnn only support batch_size=1 currently while BatchNorm fordbids batch_size=1!\n#todo: group_norm\n\nclass CrfRnnNet(nn.Module):\n def __init__(self, out_planes, criterion=None, pretrained_model=None,\n norm_layer=nn.BatchNorm2d, n_iter=None):\n super(CrfRnnNet, self).__init__()\n self.psp = PSPNet(out_planes, criterion, pretrained_model,\n norm_layer)\n self.crfrnn = CrfRnn(num_labels=out_planes, num_iterations=n_iter)\n self.criterion = criterion\n\n def forward(self, data, label=None):\n psp_fm, aux_fm = self.psp.forward(data, label)\n # print(\"before crfrnn:\", psp_fm.shape) #debug\n out = self.crfrnn(data, psp_fm) #Plug the CRF-RNN module at the end\n # print(\"after crfrnn:\", out.shape) #debug\n \n # if label is not None:\n # psp_loss = self.criterion(psp_fm, label)\n # aux_loss = self.criterion(aux_fm, label)\n # psp_loss = psp_loss + 0.4 * aux_loss\n # loss = self.criterion(out, label)\n # loss = loss + 0.5 * psp_loss # todo\n # return loss\n\n if label is not None:\n loss = self.criterion(out, label)\n return loss\n \n return out\n\n\nclass PSPNet(nn.Module):\n def __init__(self, out_planes, criterion=None, pretrained_model=None,\n norm_layer=nn.BatchNorm2d):\n super(PSPNet, self).__init__()\n self.backbone = resnet50(pretrained_model, norm_layer=norm_layer,\n bn_eps=config.bn_eps,\n bn_momentum=config.bn_momentum,\n deep_stem=True, stem_width=64)\n self.backbone.layer3.apply(partial(self._nostride_dilate, dilate=2))\n self.backbone.layer4.apply(partial(self._nostride_dilate, dilate=4))\n\n self.business_layer = []\n self.psp_layer = PyramidPooling('psp', out_planes, 2048,\n norm_layer=norm_layer)\n self.aux_layer = nn.Sequential(\n ConvBnRelu(1024, 1024, 3, 1, 1,\n has_bn=HAS_BN,\n has_relu=True, has_bias=False, norm_layer=norm_layer),\n nn.Dropout2d(0.1, inplace=False),\n nn.Conv2d(1024, out_planes, kernel_size=1)\n )\n self.business_layer.append(self.psp_layer)\n self.business_layer.append(self.aux_layer)\n\n self.criterion = criterion\n\n def forward(self, data, label=None):\n blocks = self.backbone(data)\n\n psp_fm = self.psp_layer(blocks[-1])\n aux_fm = self.aux_layer(blocks[-2])\n\n psp_fm = F.interpolate(psp_fm, scale_factor=8, mode='bilinear',\n align_corners=True)\n aux_fm = F.interpolate(aux_fm, scale_factor=8, mode='bilinear',\n align_corners=True)\n psp_fm = F.log_softmax(psp_fm, dim=1)\n aux_fm = F.log_softmax(aux_fm, dim=1)\n\n return psp_fm, aux_fm\n\n # @staticmethod\n def _nostride_dilate(self, m, dilate):\n if isinstance(m, nn.Conv2d):\n if m.stride == (2, 2):\n m.stride = (1, 1)\n if m.kernel_size == (3, 3):\n m.dilation = (dilate // 2, dilate // 2)\n m.padding = (dilate // 2, dilate // 2)\n else:\n if m.kernel_size == (3, 3):\n m.dilation = (dilate, dilate)\n m.padding = (dilate, dilate)\n\n\nclass PyramidPooling(nn.Module):\n def __init__(self, name, out_planes, fc_dim=4096, pool_scales=[1, 2, 3, 6],\n norm_layer=nn.BatchNorm2d):\n super(PyramidPooling, self).__init__()\n\n self.ppm = []\n for scale in pool_scales:\n self.ppm.append(nn.Sequential(OrderedDict([\n ('{}/pool_1'.format(name), nn.AdaptiveAvgPool2d(scale)),\n ('{}/cbr'.format(name),\n ConvBnRelu(fc_dim, 512, 1, 1, 0, has_bn=HAS_BN,\n has_relu=True, has_bias=False,\n 
norm_layer=norm_layer))\n ])))\n self.ppm = nn.ModuleList(self.ppm)\n\n self.conv6 = nn.Sequential(\n ConvBnRelu(fc_dim + len(pool_scales) * 512, 512, 3, 1, 1,\n has_bn=HAS_BN,\n has_relu=True, has_bias=False, norm_layer=norm_layer),\n nn.Dropout2d(0.1, inplace=False),\n nn.Conv2d(512, out_planes, kernel_size=1)\n )\n\n def forward(self, x):\n input_size = x.size()\n ppm_out = [x]\n for pooling in self.ppm:\n ppm_out.append(\n F.interpolate(pooling(x), size=(input_size[2], input_size[3]),\n mode='bilinear', align_corners=True))\n ppm_out = torch.cat(ppm_out, 1)\n\n ppm_out = self.conv6(ppm_out)\n return ppm_out\n\n\nif __name__ == \"__main__\":\n model = CrfRnnNet(2)\n print(model)"
] |
[
[
"torch.nn.Dropout2d",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
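To make the shapes in the PyramidPooling module above concrete, here is a functional sketch of its pool, upsample, concatenate core. It assumes PyTorch is installed and omits the ConvBnRelu blocks, so the final channel count differs from the real module as noted in the comments.

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 2048, 32, 32)            # stand-in for the backbone feature map
pool_scales = [1, 2, 3, 6]                  # same scales as the module above

branches = [x]
for s in pool_scales:
    p = F.adaptive_avg_pool2d(x, s)         # (1, 2048, s, s): context at scale s
    p = F.interpolate(p, size=x.shape[2:], mode='bilinear', align_corners=True)
    branches.append(p)                      # upsampled back to (1, 2048, 32, 32)

out = torch.cat(branches, dim=1)
print(out.shape)   # (1, 10240, 32, 32) here; the real module first reduces each pooled
                   # branch to 512 channels, giving 2048 + 4*512 = 4096 before conv6
```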
dendisuhubdy/attention-lvcsr
|
[
"598d487c118e66875fdd625baa84ed29d283b800",
"598d487c118e66875fdd625baa84ed29d283b800",
"598d487c118e66875fdd625baa84ed29d283b800",
"598d487c118e66875fdd625baa84ed29d283b800",
"598d487c118e66875fdd625baa84ed29d283b800",
"598d487c118e66875fdd625baa84ed29d283b800"
] |
[
"libs/Theano/theano/tests/test_printing.py",
"libs/blocks/tests/bricks/test_recurrent.py",
"libs/Theano/theano/tensor/tests/test_opt.py",
"libs/blocks/tests/test_theano_expressions.py",
"libs/Theano/theano/sandbox/gpuarray/tests/test_dnn.py",
"libs/fuel/fuel/converters/cifar100.py"
] |
[
"\"\"\"\nTests of printing functionality\n\"\"\"\nfrom __future__ import print_function\nimport logging\n\nfrom nose.plugins.skip import SkipTest\nimport numpy\n\nfrom six.moves import StringIO\n\nimport theano\nimport theano.tensor as tensor\n\nfrom theano.printing import min_informative_str, debugprint\n\n\ndef test_pydotprint_cond_highlight():\n \"\"\"\n This is a REALLY PARTIAL TEST.\n\n I did them to help debug stuff.\n \"\"\"\n\n # Skip test if pydot is not available.\n if not theano.printing.pydot_imported:\n raise SkipTest('pydot not available')\n\n x = tensor.dvector()\n f = theano.function([x], x * 2)\n f([1, 2, 3, 4])\n\n s = StringIO()\n new_handler = logging.StreamHandler(s)\n new_handler.setLevel(logging.DEBUG)\n orig_handler = theano.logging_default_handler\n\n theano.theano_logger.removeHandler(orig_handler)\n theano.theano_logger.addHandler(new_handler)\n try:\n theano.printing.pydotprint(f, cond_highlight=True,\n print_output_file=False)\n finally:\n theano.theano_logger.addHandler(orig_handler)\n theano.theano_logger.removeHandler(new_handler)\n\n assert (s.getvalue() == 'pydotprint: cond_highlight is set but there'\n ' is no IfElse node in the graph\\n')\n\n\ndef test_pydotprint_return_image():\n # Skip test if pydot is not available.\n if not theano.printing.pydot_imported:\n raise SkipTest('pydot not available')\n\n x = tensor.dvector()\n ret = theano.printing.pydotprint(x * 2, return_image=True)\n assert isinstance(ret, (str, bytes))\n\n\ndef test_pydotprint_variables():\n \"\"\"\n This is a REALLY PARTIAL TEST.\n\n I did them to help debug stuff.\n\n It make sure the code run.\n \"\"\"\n\n # Skip test if pydot is not available.\n if not theano.printing.pydot_imported:\n raise SkipTest('pydot not available')\n\n x = tensor.dvector()\n\n s = StringIO()\n new_handler = logging.StreamHandler(s)\n new_handler.setLevel(logging.DEBUG)\n orig_handler = theano.logging_default_handler\n\n theano.theano_logger.removeHandler(orig_handler)\n theano.theano_logger.addHandler(new_handler)\n try:\n theano.printing.pydotprint(x * 2)\n if not theano.printing.pd.__name__ == \"pydot_ng\":\n theano.printing.pydotprint_variables(x * 2)\n finally:\n theano.theano_logger.addHandler(orig_handler)\n theano.theano_logger.removeHandler(new_handler)\n\n\ndef test_pydotprint_long_name():\n \"\"\"This is a REALLY PARTIAL TEST.\n\n It prints a graph where there are variable and apply nodes whose long\n names are different, but not the shortened names.\n We should not merge those nodes in the dot graph.\n\n \"\"\"\n\n # Skip test if pydot is not available.\n if not theano.printing.pydot_imported:\n raise SkipTest('pydot not available')\n\n x = tensor.dvector()\n mode = theano.compile.mode.get_default_mode().excluding(\"fusion\")\n f = theano.function([x], [x * 2, x + x], mode=mode)\n f([1, 2, 3, 4])\n\n theano.printing.pydotprint(f, max_label_size=5,\n print_output_file=False)\n theano.printing.pydotprint([x * 2, x + x],\n max_label_size=5,\n print_output_file=False)\n\n\ndef test_pydotprint_profile():\n \"\"\"Just check that pydotprint does not crash with ProfileMode.\"\"\"\n\n # Skip test if pydot is not available.\n if not theano.printing.pydot_imported:\n raise SkipTest('pydot not available')\n\n A = tensor.matrix()\n f = theano.function([A], A + 1, mode='ProfileMode')\n theano.printing.pydotprint(f, print_output_file=False)\n\n\ndef test_min_informative_str():\n \"\"\" evaluates a reference output to make sure the\n min_informative_str function works as intended \"\"\"\n\n A = 
tensor.matrix(name='A')\n B = tensor.matrix(name='B')\n C = A + B\n C.name = 'C'\n D = tensor.matrix(name='D')\n E = tensor.matrix(name='E')\n\n F = D + E\n G = C + F\n\n mis = min_informative_str(G).replace(\"\\t\", \" \")\n\n reference = \"\"\"A. Elemwise{add,no_inplace}\n B. C\n C. Elemwise{add,no_inplace}\n D. D\n E. E\"\"\"\n\n if mis != reference:\n print('--' + mis + '--')\n print('--' + reference + '--')\n\n assert mis == reference\n\n\ndef test_debugprint():\n A = tensor.matrix(name='A')\n B = tensor.matrix(name='B')\n C = A + B\n C.name = 'C'\n D = tensor.matrix(name='D')\n E = tensor.matrix(name='E')\n\n F = D + E\n G = C + F\n mode = theano.compile.get_default_mode().including('fusion')\n g = theano.function([A, B, D, E], G, mode=mode)\n\n # just test that it work\n debugprint(G)\n\n # test ids=int\n s = StringIO()\n debugprint(G, file=s, ids='int')\n s = s.getvalue()\n # The additional white space are needed!\n reference = '\\n'.join([\n \"Elemwise{add,no_inplace} [id 0] '' \",\n \" |Elemwise{add,no_inplace} [id 1] 'C' \",\n \" | |A [id 2]\",\n \" | |B [id 3]\",\n \" |Elemwise{add,no_inplace} [id 4] '' \",\n \" |D [id 5]\",\n \" |E [id 6]\",\n ]) + '\\n'\n\n if s != reference:\n print('--' + s + '--')\n print('--' + reference + '--')\n\n assert s == reference\n\n # test ids=CHAR\n s = StringIO()\n debugprint(G, file=s, ids='CHAR')\n s = s.getvalue()\n # The additional white space are needed!\n reference = \"\\n\".join([\n \"Elemwise{add,no_inplace} [id A] '' \",\n \" |Elemwise{add,no_inplace} [id B] 'C' \",\n \" | |A [id C]\",\n \" | |B [id D]\",\n \" |Elemwise{add,no_inplace} [id E] '' \",\n \" |D [id F]\",\n \" |E [id G]\",\n ]) + '\\n'\n\n if s != reference:\n print('--' + s + '--')\n print('--' + reference + '--')\n\n assert s == reference\n\n # test ids=CHAR, stop_on_name=True\n s = StringIO()\n debugprint(G, file=s, ids='CHAR', stop_on_name=True)\n s = s.getvalue()\n # The additional white space are needed!\n reference = '\\n'.join([\n \"Elemwise{add,no_inplace} [id A] '' \",\n \" |Elemwise{add,no_inplace} [id B] 'C' \",\n \" |Elemwise{add,no_inplace} [id C] '' \",\n \" |D [id D]\",\n \" |E [id E]\",\n ]) + '\\n'\n\n if s != reference:\n print('--' + s + '--')\n print('--' + reference + '--')\n\n assert s == reference\n\n # test ids=\n s = StringIO()\n debugprint(G, file=s, ids='')\n s = s.getvalue()\n # The additional white space are needed!\n reference = '\\n'.join([\n \"Elemwise{add,no_inplace} '' \",\n \" |Elemwise{add,no_inplace} 'C' \",\n \" | |A \",\n \" | |B \",\n \" |Elemwise{add,no_inplace} '' \",\n \" |D \",\n \" |E \",\n ]) + '\\n'\n if s != reference:\n print('--' + s + '--')\n print('--' + reference + '--')\n\n assert s == reference\n\n # test print_storage=True\n s = StringIO()\n debugprint(g, file=s, ids='', print_storage=True)\n s = s.getvalue()\n # The additional white space are needed!\n reference = '\\n'.join([\n \"Elemwise{add,no_inplace} '' 0 [None]\",\n \" |A [None]\",\n \" |B [None]\",\n \" |D [None]\",\n \" |E [None]\",\n ]) + '\\n'\n if s != reference:\n print('--' + s + '--')\n print('--' + reference + '--')\n\n assert s == reference\n\n\ndef test_scan_debugprint1():\n k = tensor.iscalar(\"k\")\n A = tensor.dvector(\"A\")\n\n # Symbolic description of the result\n result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,\n outputs_info=tensor.ones_like(A),\n non_sequences=A,\n n_steps=k)\n\n final_result = result[-1]\n output_str = theano.printing.debugprint(final_result, file='str')\n lines = []\n for line in 
output_str.split('\\n'):\n lines += [line]\n\n expected_output = \"\"\"Subtensor{int64} [id A] ''\n |Subtensor{int64::} [id B] ''\n | |for{cpu,scan_fn} [id C] ''\n | | |k [id D]\n | | |IncSubtensor{Set;:int64:} [id E] ''\n | | | |AllocEmpty{dtype='float64'} [id F] ''\n | | | | |Elemwise{add,no_inplace} [id G] ''\n | | | | | |k [id D]\n | | | | | |Subtensor{int64} [id H] ''\n | | | | | |Shape [id I] ''\n | | | | | | |Rebroadcast{0} [id J] ''\n | | | | | | |DimShuffle{x,0} [id K] ''\n | | | | | | |Elemwise{second,no_inplace} [id L] ''\n | | | | | | |A [id M]\n | | | | | | |DimShuffle{x} [id N] ''\n | | | | | | |TensorConstant{1.0} [id O]\n | | | | | |Constant{0} [id P]\n | | | | |Subtensor{int64} [id Q] ''\n | | | | |Shape [id R] ''\n | | | | | |Rebroadcast{0} [id J] ''\n | | | | |Constant{1} [id S]\n | | | |Rebroadcast{0} [id J] ''\n | | | |ScalarFromTensor [id T] ''\n | | | |Subtensor{int64} [id H] ''\n | | |A [id M]\n | |Constant{1} [id U]\n |Constant{-1} [id V]\n\n Inner graphs of the scan ops:\n\n for{cpu,scan_fn} [id C] ''\n >Elemwise{mul,no_inplace} [id W] ''\n > |<TensorType(float64, vector)> [id X] -> [id E]\n > |A_copy [id Y] -> [id M]\"\"\"\n\n for truth, out in zip(expected_output.split(\"\\n\"), lines):\n assert truth.strip() == out.strip()\n\n\ndef test_scan_debugprint2():\n coefficients = theano.tensor.vector(\"coefficients\")\n x = tensor.scalar(\"x\")\n\n max_coefficients_supported = 10000\n\n # Generate the components of the polynomial\n components, updates = theano.scan(fn=lambda coefficient, power,\n free_variable:\n coefficient * (free_variable ** power),\n outputs_info=None,\n sequences=[\n coefficients,\n theano.tensor.arange(\n max_coefficients_supported)],\n non_sequences=x)\n # Sum them up\n polynomial = components.sum()\n\n output_str = theano.printing.debugprint(polynomial, file='str')\n lines = []\n for line in output_str.split('\\n'):\n lines += [line]\n\n expected_output = \"\"\"Sum{acc_dtype=float64} [id A] ''\n |for{cpu,scan_fn} [id B] ''\n |Elemwise{minimum,no_inplace} [id C] ''\n | |Subtensor{int64} [id D] ''\n | | |Shape [id E] ''\n | | | |Subtensor{int64::} [id F] 'coefficients[0:]'\n | | | |coefficients [id G]\n | | | |Constant{0} [id H]\n | | |Constant{0} [id I]\n | |Subtensor{int64} [id J] ''\n | |Shape [id K] ''\n | | |Subtensor{int64::} [id L] ''\n | | |ARange{dtype='int64'} [id M] ''\n | | | |TensorConstant{0} [id N]\n | | | |TensorConstant{10000} [id O]\n | | | |TensorConstant{1} [id P]\n | | |Constant{0} [id Q]\n | |Constant{0} [id R]\n |Subtensor{:int64:} [id S] ''\n | |Subtensor{int64::} [id F] 'coefficients[0:]'\n | |ScalarFromTensor [id T] ''\n | |Elemwise{minimum,no_inplace} [id C] ''\n |Subtensor{:int64:} [id U] ''\n | |Subtensor{int64::} [id L] ''\n | |ScalarFromTensor [id V] ''\n | |Elemwise{minimum,no_inplace} [id C] ''\n |Elemwise{minimum,no_inplace} [id C] ''\n |x [id W]\n\n Inner graphs of the scan ops:\n\n for{cpu,scan_fn} [id B] ''\n >Elemwise{mul,no_inplace} [id X] ''\n > |coefficients[t] [id Y] -> [id S]\n > |Elemwise{pow,no_inplace} [id Z] ''\n > |x_copy [id BA] -> [id W]\n > |<TensorType(int64, scalar)> [id BB] -> [id U]\"\"\"\n\n for truth, out in zip(expected_output.split(\"\\n\"), lines):\n assert truth.strip() == out.strip()\n\n\ndef test_scan_debugprint3():\n coefficients = theano.tensor.dvector(\"coefficients\")\n max_coefficients_supported = 10\n\n k = tensor.iscalar(\"k\")\n A = tensor.dvector(\"A\")\n\n # compute A**k\n def compute_A_k(A, k):\n # Symbolic description of the result\n result, updates = 
theano.scan(fn=lambda prior_result,\n A: prior_result * A,\n outputs_info=tensor.ones_like(A),\n non_sequences=A,\n n_steps=k)\n\n A_k = result[-1]\n\n return A_k\n\n # Generate the components of the polynomial\n components, updates = theano.scan(fn=lambda coefficient,\n power, some_A, some_k:\n coefficient *\n (compute_A_k(some_A, some_k) ** power),\n outputs_info=None,\n sequences=[\n coefficients,\n theano.tensor.arange(\n max_coefficients_supported)],\n non_sequences=[A, k])\n # Sum them up\n polynomial = components.sum()\n\n final_result = polynomial\n\n output_str = theano.printing.debugprint(final_result, file='str')\n lines = []\n for line in output_str.split('\\n'):\n lines += [line]\n\n expected_output = \"\"\"Sum{acc_dtype=float64} [id A] ''\n |for{cpu,scan_fn} [id B] ''\n |Elemwise{minimum,no_inplace} [id C] ''\n | |Subtensor{int64} [id D] ''\n | | |Shape [id E] ''\n | | | |Subtensor{int64::} [id F] 'coefficients[0:]'\n | | | |coefficients [id G]\n | | | |Constant{0} [id H]\n | | |Constant{0} [id I]\n | |Subtensor{int64} [id J] ''\n | |Shape [id K] ''\n | | |Subtensor{int64::} [id L] ''\n | | |ARange{dtype='int64'} [id M] ''\n | | | |TensorConstant{0} [id N]\n | | | |TensorConstant{10} [id O]\n | | | |TensorConstant{1} [id P]\n | | |Constant{0} [id Q]\n | |Constant{0} [id R]\n |Subtensor{:int64:} [id S] ''\n | |Subtensor{int64::} [id F] 'coefficients[0:]'\n | |ScalarFromTensor [id T] ''\n | |Elemwise{minimum,no_inplace} [id C] ''\n |Subtensor{:int64:} [id U] ''\n | |Subtensor{int64::} [id L] ''\n | |ScalarFromTensor [id V] ''\n | |Elemwise{minimum,no_inplace} [id C] ''\n |Elemwise{minimum,no_inplace} [id C] ''\n |A [id W]\n |k [id X]\n\n Inner graphs of the scan ops:\n\n for{cpu,scan_fn} [id B] ''\n >Elemwise{mul,no_inplace} [id Y] ''\n > |DimShuffle{x} [id Z] ''\n > | |coefficients[t] [id BA] -> [id S]\n > |Elemwise{pow,no_inplace} [id BB] ''\n > |Subtensor{int64} [id BC] ''\n > | |Subtensor{int64::} [id BD] ''\n > | | |for{cpu,scan_fn} [id BE] ''\n > | | | |k_copy [id BF] -> [id X]\n > | | | |IncSubtensor{Set;:int64:} [id BG] ''\n > | | | | |AllocEmpty{dtype='float64'} [id BH] ''\n > | | | | | |Elemwise{add,no_inplace} [id BI] ''\n > | | | | | | |k_copy [id BF] -> [id X]\n > | | | | | | |Subtensor{int64} [id BJ] ''\n > | | | | | | |Shape [id BK] ''\n > | | | | | | | |Rebroadcast{0} [id BL] ''\n > | | | | | | | |DimShuffle{x,0} [id BM] ''\n > | | | | | | | |Elemwise{second,no_inplace} [id BN] ''\n > | | | | | | | |A_copy [id BO] -> [id W]\n > | | | | | | | |DimShuffle{x} [id BP] ''\n > | | | | | | | |TensorConstant{1.0} [id BQ]\n > | | | | | | |Constant{0} [id BR]\n > | | | | | |Subtensor{int64} [id BS] ''\n > | | | | | |Shape [id BT] ''\n > | | | | | | |Rebroadcast{0} [id BL] ''\n > | | | | | |Constant{1} [id BU]\n > | | | | |Rebroadcast{0} [id BL] ''\n > | | | | |ScalarFromTensor [id BV] ''\n > | | | | |Subtensor{int64} [id BJ] ''\n > | | | |A_copy [id BO] -> [id W]\n > | | |Constant{1} [id BW]\n > | |Constant{-1} [id BX]\n > |DimShuffle{x} [id BY] ''\n > |<TensorType(int64, scalar)> [id BZ] -> [id U]\n\n for{cpu,scan_fn} [id BE] ''\n >Elemwise{mul,no_inplace} [id CA] ''\n > |<TensorType(float64, vector)> [id CB] -> [id BG]\n > |A_copy [id CC] -> [id BO]\"\"\"\n\n for truth, out in zip(expected_output.split(\"\\n\"), lines):\n assert truth.strip() == out.strip()\n\n\ndef test_scan_debugprint4():\n\n def fn(a_m2, a_m1, b_m2, b_m1):\n return a_m1 + a_m2, b_m1 + b_m2\n\n a0 = theano.shared(numpy.arange(2, dtype='int64'))\n b0 = theano.shared(numpy.arange(2, dtype='int64'))\n\n 
(a, b), _ = theano.scan(\n fn, outputs_info=[{'initial': a0, 'taps': [-2, -1]},\n {'initial': b0, 'taps': [-2, -1]}],\n n_steps=5)\n\n final_result = a + b\n output_str = theano.printing.debugprint(final_result, file='str')\n lines = []\n for line in output_str.split('\\n'):\n lines += [line]\n\n expected_output = \"\"\"Elemwise{add,no_inplace} [id A] ''\n |Subtensor{int64::} [id B] ''\n | |for{cpu,scan_fn}.0 [id C] ''\n | | |TensorConstant{5} [id D]\n | | |IncSubtensor{Set;:int64:} [id E] ''\n | | | |AllocEmpty{dtype='int64'} [id F] ''\n | | | | |Elemwise{add,no_inplace} [id G] ''\n | | | | |TensorConstant{5} [id D]\n | | | | |Subtensor{int64} [id H] ''\n | | | | |Shape [id I] ''\n | | | | | |Subtensor{:int64:} [id J] ''\n | | | | | |<TensorType(int64, vector)> [id K]\n | | | | | |Constant{2} [id L]\n | | | | |Constant{0} [id M]\n | | | |Subtensor{:int64:} [id J] ''\n | | | |ScalarFromTensor [id N] ''\n | | | |Subtensor{int64} [id H] ''\n | | |IncSubtensor{Set;:int64:} [id O] ''\n | | |AllocEmpty{dtype='int64'} [id P] ''\n | | | |Elemwise{add,no_inplace} [id Q] ''\n | | | |TensorConstant{5} [id D]\n | | | |Subtensor{int64} [id R] ''\n | | | |Shape [id S] ''\n | | | | |Subtensor{:int64:} [id T] ''\n | | | | |<TensorType(int64, vector)> [id U]\n | | | | |Constant{2} [id V]\n | | | |Constant{0} [id W]\n | | |Subtensor{:int64:} [id T] ''\n | | |ScalarFromTensor [id X] ''\n | | |Subtensor{int64} [id R] ''\n | |Constant{2} [id Y]\n |Subtensor{int64::} [id Z] ''\n |for{cpu,scan_fn}.1 [id C] ''\n |Constant{2} [id BA]\n\n Inner graphs of the scan ops:\n\n for{cpu,scan_fn}.0 [id C] ''\n >Elemwise{add,no_inplace} [id BB] ''\n > |<TensorType(int64, scalar)> [id BC] -> [id E]\n > |<TensorType(int64, scalar)> [id BD] -> [id E]\n >Elemwise{add,no_inplace} [id BE] ''\n > |<TensorType(int64, scalar)> [id BF] -> [id O]\n > |<TensorType(int64, scalar)> [id BG] -> [id O]\n\n for{cpu,scan_fn}.1 [id C] ''\n >Elemwise{add,no_inplace} [id BB] ''\n >Elemwise{add,no_inplace} [id BE] ''\"\"\"\n\n for truth, out in zip(expected_output.split(\"\\n\"), lines):\n assert truth.strip() == out.strip()\n\n\ndef test_scan_debugprint5():\n\n k = tensor.iscalar(\"k\")\n A = tensor.dvector(\"A\")\n\n # Symbolic description of the result\n result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,\n outputs_info=tensor.ones_like(A),\n non_sequences=A,\n n_steps=k)\n\n final_result = tensor.grad(result[-1].sum(), A)\n\n output_str = theano.printing.debugprint(final_result, file='str')\n lines = []\n for line in output_str.split('\\n'):\n lines += [line]\n\n expected_output = \"\"\"Subtensor{int64} [id A] ''\n |for{cpu,grad_of_scan_fn}.1 [id B] ''\n | |Elemwise{sub,no_inplace} [id C] ''\n | | |Subtensor{int64} [id D] ''\n | | | |Shape [id E] ''\n | | | | |for{cpu,scan_fn} [id F] ''\n | | | | |k [id G]\n | | | | |IncSubtensor{Set;:int64:} [id H] ''\n | | | | | |AllocEmpty{dtype='float64'} [id I] ''\n | | | | | | |Elemwise{add,no_inplace} [id J] ''\n | | | | | | | |k [id G]\n | | | | | | | |Subtensor{int64} [id K] ''\n | | | | | | | |Shape [id L] ''\n | | | | | | | | |Rebroadcast{0} [id M] ''\n | | | | | | | | |DimShuffle{x,0} [id N] ''\n | | | | | | | | |Elemwise{second,no_inplace} [id O] ''\n | | | | | | | | |A [id P]\n | | | | | | | | |DimShuffle{x} [id Q] ''\n | | | | | | | | |TensorConstant{1.0} [id R]\n | | | | | | | |Constant{0} [id S]\n | | | | | | |Subtensor{int64} [id T] ''\n | | | | | | |Shape [id U] ''\n | | | | | | | |Rebroadcast{0} [id M] ''\n | | | | | | |Constant{1} [id V]\n | | | | | |Rebroadcast{0} 
[id M] ''\n | | | | | |ScalarFromTensor [id W] ''\n | | | | | |Subtensor{int64} [id K] ''\n | | | | |A [id P]\n | | | |Constant{0} [id X]\n | | |TensorConstant{1} [id Y]\n | |Subtensor{:int64:} [id Z] ''\n | | |Subtensor{::int64} [id BA] ''\n | | | |Subtensor{:int64:} [id BB] ''\n | | | | |for{cpu,scan_fn} [id F] ''\n | | | | |Constant{-1} [id BC]\n | | | |Constant{-1} [id BD]\n | | |ScalarFromTensor [id BE] ''\n | | |Elemwise{sub,no_inplace} [id C] ''\n | |Subtensor{:int64:} [id BF] ''\n | | |Subtensor{:int64:} [id BG] ''\n | | | |Subtensor{::int64} [id BH] ''\n | | | | |for{cpu,scan_fn} [id F] ''\n | | | | |Constant{-1} [id BI]\n | | | |Constant{-1} [id BJ]\n | | |ScalarFromTensor [id BK] ''\n | | |Elemwise{sub,no_inplace} [id C] ''\n | |Subtensor{::int64} [id BL] ''\n | | |IncSubtensor{Inc;int64::} [id BM] ''\n | | | |Elemwise{second,no_inplace} [id BN] ''\n | | | | |for{cpu,scan_fn} [id BO] ''\n | | | | | |k [id G]\n | | | | | |IncSubtensor{Set;:int64:} [id H] ''\n | | | | | |A [id P]\n | | | | |DimShuffle{x,x} [id BP] ''\n | | | | |TensorConstant{0.0} [id BQ]\n | | | |IncSubtensor{Inc;int64} [id BR] ''\n | | | | |Elemwise{second,no_inplace} [id BS] ''\n | | | | | |Subtensor{int64::} [id BT] ''\n | | | | | | |for{cpu,scan_fn} [id BO] ''\n | | | | | | |Constant{1} [id BU]\n | | | | | |DimShuffle{x,x} [id BV] ''\n | | | | | |TensorConstant{0.0} [id BQ]\n | | | | |Elemwise{second} [id BW] ''\n | | | | | |Subtensor{int64} [id BX] ''\n | | | | | | |Subtensor{int64::} [id BT] ''\n | | | | | | |Constant{-1} [id BY]\n | | | | | |DimShuffle{x} [id BZ] ''\n | | | | | |Elemwise{second,no_inplace} [id CA] ''\n | | | | | |Sum{acc_dtype=float64} [id CB] ''\n | | | | | | |Subtensor{int64} [id BX] ''\n | | | | | |TensorConstant{1.0} [id R]\n | | | | |Constant{-1} [id BY]\n | | | |Constant{1} [id BU]\n | | |Constant{-1} [id CC]\n | |Alloc [id CD] ''\n | | |TensorConstant{0.0} [id BQ]\n | | |Elemwise{add,no_inplace} [id CE] ''\n | | | |Elemwise{sub,no_inplace} [id C] ''\n | | | |TensorConstant{1} [id Y]\n | | |Subtensor{int64} [id CF] ''\n | | |Shape [id CG] ''\n | | | |A [id P]\n | | |Constant{0} [id CH]\n | |A [id P]\n |Constant{-1} [id CI]\n\n Inner graphs of the scan ops:\n\n for{cpu,grad_of_scan_fn}.1 [id B] ''\n >Elemwise{add,no_inplace} [id CJ] ''\n > |Elemwise{mul} [id CK] ''\n > | |<TensorType(float64, vector)> [id CL] -> [id BL]\n > | |A_copy [id CM] -> [id P]\n > |<TensorType(float64, vector)> [id CN] -> [id BL]\n >Elemwise{add,no_inplace} [id CO] ''\n > |Elemwise{mul} [id CP] ''\n > | |<TensorType(float64, vector)> [id CL] -> [id BL]\n > | |<TensorType(float64, vector)> [id CQ] -> [id Z]\n > |<TensorType(float64, vector)> [id CR] -> [id CD]\n\n for{cpu,scan_fn} [id F] ''\n >Elemwise{mul,no_inplace} [id CS] ''\n > |<TensorType(float64, vector)> [id CT] -> [id H]\n > |A_copy [id CU] -> [id P]\n\n for{cpu,scan_fn} [id F] ''\n >Elemwise{mul,no_inplace} [id CS] ''\n\n for{cpu,scan_fn} [id F] ''\n >Elemwise{mul,no_inplace} [id CS] ''\n\n for{cpu,scan_fn} [id BO] ''\n >Elemwise{mul,no_inplace} [id CS] ''\n\n for{cpu,scan_fn} [id BO] ''\n >Elemwise{mul,no_inplace} [id CS] ''\"\"\"\n\n for truth, out in zip(expected_output.split(\"\\n\"), lines):\n assert truth.strip() == out.strip()\n\n\ndef test_printing_scan():\n # Skip test if pydot is not available.\n if not theano.printing.pydot_imported:\n raise SkipTest('pydot not available')\n\n def f_pow2(x_tm1):\n return 2 * x_tm1\n\n state = theano.tensor.scalar('state')\n n_steps = theano.tensor.iscalar('nsteps')\n output, updates = theano.scan(f_pow2,\n 
[],\n state,\n [],\n n_steps=n_steps,\n truncate_gradient=-1,\n go_backwards=False)\n f = theano.function([state, n_steps],\n output,\n updates=updates,\n allow_input_downcast=True)\n theano.printing.pydotprint(output, scan_graphs=True)\n theano.printing.pydotprint(f, scan_graphs=True)\n",
"import itertools\nimport unittest\nfrom collections import OrderedDict\nimport numpy\nimport theano\nfrom numpy.testing import assert_allclose, assert_raises\nfrom theano import tensor\nfrom theano.gof.graph import is_same_graph\n\nfrom blocks.utils import is_shared_variable\nfrom blocks.bricks.base import application\nfrom blocks.bricks import Tanh\nfrom blocks.bricks.recurrent import (\n recurrent, BaseRecurrent, GatedRecurrent,\n SimpleRecurrent, Bidirectional, LSTM,\n RecurrentStack, RECURRENTSTACK_SEPARATOR)\nfrom blocks.initialization import (\n Constant, IsotropicGaussian, Orthogonal, Identity)\nfrom blocks.filter import get_application_call, VariableFilter\nfrom blocks.graph import ComputationGraph\nfrom blocks.roles import INITIAL_STATE\n\n\nclass RecurrentWrapperTestClass(BaseRecurrent):\n def __init__(self, dim, ** kwargs):\n super(RecurrentWrapperTestClass, self).__init__(self, ** kwargs)\n self.dim = dim\n\n def get_dim(self, name):\n if name in ['inputs', 'states', 'outputs', 'states_2', 'outputs_2']:\n return self.dim\n if name == 'mask':\n return 0\n return super(RecurrentWrapperTestClass, self).get_dim(name)\n\n @recurrent(sequences=['inputs', 'mask'], states=['states', 'states_2'],\n outputs=['outputs', 'states_2', 'outputs_2', 'states'],\n contexts=[])\n def apply(self, inputs=None, states=None, states_2=None, mask=None):\n next_states = states + inputs\n next_states_2 = states_2 + .5\n if mask:\n next_states = (mask[:, None] * next_states +\n (1 - mask[:, None]) * states)\n outputs = 10 * next_states\n outputs_2 = 10 * next_states_2\n return outputs, next_states_2, outputs_2, next_states\n\n\nclass TestRecurrentWrapper(unittest.TestCase):\n def setUp(self):\n self.recurrent_example = RecurrentWrapperTestClass(dim=1)\n\n def test(self):\n X = tensor.tensor3('X')\n out, H2, out_2, H = self.recurrent_example.apply(\n inputs=X, mask=None)\n\n x_val = numpy.ones((5, 1, 1), dtype=theano.config.floatX)\n\n h = H.eval({X: x_val})\n h2 = H2.eval({X: x_val})\n\n out_eval = out.eval({X: x_val})\n out_2_eval = out_2.eval({X: x_val})\n\n # This also implicitly tests that the initial states are zeros\n assert_allclose(h, x_val.cumsum(axis=0))\n assert_allclose(h2, .5 * (numpy.arange(5).reshape((5, 1, 1)) + 1))\n assert_allclose(h * 10, out_eval)\n assert_allclose(h2 * 10, out_2_eval)\n\n\nclass RecurrentBrickWithBugInInitialStates(BaseRecurrent):\n\n @recurrent(sequences=[], contexts=[],\n states=['states'], outputs=['states'])\n def apply(self, states):\n return states\n\n @recurrent(sequences=[], contexts=[],\n states=['states2'], outputs=['states2'])\n def apply2(self, states):\n return states\n\n def get_dim(self, name):\n return 100\n\n\ndef test_bug_in_initial_states():\n def do():\n brick = RecurrentBrickWithBugInInitialStates()\n brick.apply2(n_steps=3, batch_size=5)\n assert_raises(KeyError, do)\n\n\nclass TestSimpleRecurrent(unittest.TestCase):\n def setUp(self):\n self.simple = SimpleRecurrent(dim=3, weights_init=Constant(2),\n activation=Tanh())\n self.simple.initialize()\n\n def test_one_step(self):\n h0 = tensor.matrix('h0')\n x = tensor.matrix('x')\n mask = tensor.vector('mask')\n h1 = self.simple.apply(x, h0, mask=mask, iterate=False)\n next_h = theano.function(inputs=[h0, x, mask], outputs=[h1])\n\n h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],\n dtype=theano.config.floatX)\n x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]],\n dtype=theano.config.floatX)\n mask_val = numpy.array([1, 0]).astype(theano.config.floatX)\n h1_val = numpy.tanh(h0_val.dot(2 * 
numpy.ones((3, 3))) + x_val)\n h1_val = mask_val[:, None] * h1_val + (1 - mask_val[:, None]) * h0_val\n assert_allclose(h1_val, next_h(h0_val, x_val, mask_val)[0])\n\n def test_many_steps(self):\n x = tensor.tensor3('x')\n mask = tensor.matrix('mask')\n h = self.simple.apply(x, mask=mask, iterate=True)\n calc_h = theano.function(inputs=[x, mask], outputs=[h])\n\n x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),\n dtype=theano.config.floatX)\n x_val = numpy.ones((24, 4, 3),\n dtype=theano.config.floatX) * x_val[..., None]\n mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)\n mask_val[12:24, 3] = 0\n h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)\n for i in range(1, 25):\n h_val[i] = numpy.tanh(h_val[i - 1].dot(\n 2 * numpy.ones((3, 3))) + x_val[i - 1])\n h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +\n (1 - mask_val[i - 1, :, None]) * h_val[i - 1])\n h_val = h_val[1:]\n assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)\n\n # Also test that initial state is a parameter\n initial_state, = VariableFilter(roles=[INITIAL_STATE])(\n ComputationGraph(h))\n assert is_shared_variable(initial_state)\n assert initial_state.name == 'initial_state'\n\n\nclass TestLSTM(unittest.TestCase):\n def setUp(self):\n self.lstm = LSTM(dim=3, weights_init=Constant(2),\n biases_init=Constant(0))\n self.lstm.initialize()\n\n def test_one_step(self):\n h0 = tensor.matrix('h0')\n c0 = tensor.matrix('c0')\n x = tensor.matrix('x')\n h1, c1 = self.lstm.apply(x, h0, c0, iterate=False)\n next_h = theano.function(inputs=[x, h0, c0], outputs=[h1])\n\n h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],\n dtype=theano.config.floatX)\n c0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],\n dtype=theano.config.floatX)\n x_val = 0.1 * numpy.array([range(12), range(12, 24)],\n dtype=theano.config.floatX)\n W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)\n W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n\n # omitting biases because they are zero\n activation = numpy.dot(h0_val, W_state_val) + x_val\n\n def sigmoid(x):\n return 1. / (1. 
+ numpy.exp(-x))\n\n i_t = sigmoid(activation[:, :3] + c0_val * W_cell_to_in)\n f_t = sigmoid(activation[:, 3:6] + c0_val * W_cell_to_forget)\n next_cells = f_t * c0_val + i_t * numpy.tanh(activation[:, 6:9])\n o_t = sigmoid(activation[:, 9:12] +\n next_cells * W_cell_to_out)\n h1_val = o_t * numpy.tanh(next_cells)\n assert_allclose(h1_val, next_h(x_val, h0_val, c0_val)[0],\n rtol=1e-6)\n\n def test_many_steps(self):\n x = tensor.tensor3('x')\n mask = tensor.matrix('mask')\n h, c = self.lstm.apply(x, mask=mask, iterate=True)\n calc_h = theano.function(inputs=[x, mask], outputs=[h])\n\n x_val = (0.1 * numpy.asarray(\n list(itertools.islice(itertools.permutations(range(12)), 0, 24)),\n dtype=theano.config.floatX))\n x_val = numpy.ones((24, 4, 12),\n dtype=theano.config.floatX) * x_val[:, None, :]\n mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)\n mask_val[12:24, 3] = 0\n h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)\n c_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)\n W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)\n W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n\n def sigmoid(x):\n return 1. / (1. + numpy.exp(-x))\n\n for i in range(1, 25):\n activation = numpy.dot(h_val[i-1], W_state_val) + x_val[i-1]\n i_t = sigmoid(activation[:, :3] + c_val[i-1] * W_cell_to_in)\n f_t = sigmoid(activation[:, 3:6] + c_val[i-1] * W_cell_to_forget)\n c_val[i] = f_t * c_val[i-1] + i_t * numpy.tanh(activation[:, 6:9])\n o_t = sigmoid(activation[:, 9:12] +\n c_val[i] * W_cell_to_out)\n h_val[i] = o_t * numpy.tanh(c_val[i])\n h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +\n (1 - mask_val[i - 1, :, None]) * h_val[i - 1])\n c_val[i] = (mask_val[i - 1, :, None] * c_val[i] +\n (1 - mask_val[i - 1, :, None]) * c_val[i - 1])\n\n h_val = h_val[1:]\n assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)\n\n # Also test that initial state is a parameter\n initial1, initial2 = VariableFilter(roles=[INITIAL_STATE])(\n ComputationGraph(h))\n assert is_shared_variable(initial1)\n assert is_shared_variable(initial2)\n assert {initial1.name, initial2.name} == {\n 'initial_state', 'initial_cells'}\n\n\nclass TestRecurrentStack(unittest.TestCase):\n def setUp(self):\n depth = 4\n self.depth = depth\n dim = 3 # don't change, hardwired in the code\n transitions = [LSTM(dim=dim) for _ in range(depth)]\n self.stack0 = RecurrentStack(transitions,\n weights_init=Constant(2),\n biases_init=Constant(0))\n self.stack0.initialize()\n\n self.stack2 = RecurrentStack(transitions,\n weights_init=Constant(2),\n biases_init=Constant(0),\n skip_connections=True)\n self.stack2.initialize()\n\n def do_one_step(self, stack, skip_connections=False, low_memory=False):\n depth = self.depth\n\n # batch=2\n h0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth,\n dtype=theano.config.floatX)\n c0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth,\n dtype=theano.config.floatX)\n x_val = 0.1 * numpy.array([range(12), range(12, 24)],\n dtype=theano.config.floatX)\n # we will use same weights on all layers\n W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)\n W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)\n W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n W_cell_to_forget = 2 * numpy.ones((3,), 
dtype=theano.config.floatX)\n\n kwargs = OrderedDict()\n for d in range(depth):\n if d > 0:\n suffix = RECURRENTSTACK_SEPARATOR + str(d)\n else:\n suffix = ''\n if d == 0 or skip_connections:\n kwargs['inputs' + suffix] = tensor.matrix('inputs' + suffix)\n kwargs['inputs' + suffix].tag.test_value = x_val\n kwargs['states' + suffix] = tensor.matrix('states' + suffix)\n kwargs['states' + suffix].tag.test_value = h0_val[d]\n kwargs['cells' + suffix] = tensor.matrix('cells' + suffix)\n kwargs['cells' + suffix].tag.test_value = c0_val[d]\n results = stack.apply(iterate=False, low_memory=low_memory, **kwargs)\n next_h = theano.function(inputs=list(kwargs.values()),\n outputs=results)\n\n def sigmoid(x):\n return 1. / (1. + numpy.exp(-x))\n\n h1_val = []\n x_v = x_val\n args_val = []\n for d in range(depth):\n if d == 0 or skip_connections:\n args_val.append(x_val)\n h0_v = h0_val[d]\n args_val.append(h0_v)\n c0_v = c0_val[d]\n args_val.append(c0_v)\n\n # omitting biases because they are zero\n activation = numpy.dot(h0_v, W_state_val) + x_v\n if skip_connections and d > 0:\n activation += x_val\n\n i_t = sigmoid(activation[:, :3] + c0_v * W_cell_to_in)\n f_t = sigmoid(activation[:, 3:6] + c0_v * W_cell_to_forget)\n next_cells = f_t * c0_v + i_t * numpy.tanh(activation[:, 6:9])\n o_t = sigmoid(activation[:, 9:12] +\n next_cells * W_cell_to_out)\n h1_v = o_t * numpy.tanh(next_cells)\n # current layer output state transformed to input of next\n x_v = numpy.dot(h1_v, W_state2x_val)\n\n h1_val.append(h1_v)\n\n res = next_h(*args_val)\n for d in range(depth):\n assert_allclose(h1_val[d], res[d * 2], rtol=1e-6)\n\n def test_one_step(self):\n self.do_one_step(self.stack0)\n self.do_one_step(self.stack0, low_memory=True)\n self.do_one_step(self.stack2, skip_connections=True)\n self.do_one_step(self.stack2, skip_connections=True, low_memory=True)\n\n def do_many_steps(self, stack, skip_connections=False, low_memory=False):\n depth = self.depth\n\n # 24 steps\n # 4 batch examples\n # 12 dimensions per step\n x_val = (0.1 * numpy.asarray(\n list(itertools.islice(itertools.permutations(range(12)), 0, 24)),\n dtype=theano.config.floatX))\n x_val = numpy.ones((24, 4, 12),\n dtype=theano.config.floatX) * x_val[:, None, :]\n # mask the last third of steps\n mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)\n mask_val[12:24, 3] = 0\n # unroll all states and cells for all steps and also initial value\n h_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX)\n c_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX)\n # we will use same weights on all layers\n W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)\n W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)\n W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)\n\n kwargs = OrderedDict()\n\n for d in range(depth):\n if d > 0:\n suffix = RECURRENTSTACK_SEPARATOR + str(d)\n else:\n suffix = ''\n if d == 0 or skip_connections:\n kwargs['inputs' + suffix] = tensor.tensor3('inputs' + suffix)\n kwargs['inputs' + suffix].tag.test_value = x_val\n\n kwargs['mask'] = tensor.matrix('mask')\n kwargs['mask'].tag.test_value = mask_val\n results = stack.apply(iterate=True, low_memory=low_memory, **kwargs)\n calc_h = theano.function(inputs=list(kwargs.values()),\n outputs=results)\n\n def sigmoid(x):\n return 1. / (1. 
+ numpy.exp(-x))\n\n for i in range(1, 25):\n x_v = x_val[i - 1]\n h_vs = []\n c_vs = []\n for d in range(depth):\n h_v = h_val[d][i - 1, :, :]\n c_v = c_val[d][i - 1, :, :]\n activation = numpy.dot(h_v, W_state_val) + x_v\n if skip_connections and d > 0:\n activation += x_val[i - 1]\n\n i_t = sigmoid(activation[:, :3] + c_v * W_cell_to_in)\n f_t = sigmoid(activation[:, 3:6] + c_v * W_cell_to_forget)\n c_v1 = f_t * c_v + i_t * numpy.tanh(activation[:, 6:9])\n o_t = sigmoid(activation[:, 9:12] +\n c_v1 * W_cell_to_out)\n h_v1 = o_t * numpy.tanh(c_v1)\n h_v = (mask_val[i - 1, :, None] * h_v1 +\n (1 - mask_val[i - 1, :, None]) * h_v)\n c_v = (mask_val[i - 1, :, None] * c_v1 +\n (1 - mask_val[i - 1, :, None]) * c_v)\n # current layer output state transformed to input of next\n x_v = numpy.dot(h_v, W_state2x_val)\n\n h_vs.append(h_v)\n c_vs.append(c_v)\n\n for d in range(depth):\n h_val[d][i, :, :] = h_vs[d]\n c_val[d][i, :, :] = c_vs[d]\n\n args_val = [x_val]*(depth if skip_connections else 1) + [mask_val]\n res = calc_h(*args_val)\n for d in range(depth):\n assert_allclose(h_val[d][1:], res[d * 2], rtol=1e-4)\n assert_allclose(c_val[d][1:], res[d * 2 + 1], rtol=1e-4)\n\n # Also test that initial state is a parameter\n for h in results:\n initial_states = VariableFilter(roles=[INITIAL_STATE])(\n ComputationGraph(h))\n assert all(is_shared_variable(initial_state)\n for initial_state in initial_states)\n\n def test_many_steps(self):\n self.do_many_steps(self.stack0)\n self.do_many_steps(self.stack0, low_memory=True)\n self.do_many_steps(self.stack2, skip_connections=True)\n self.do_many_steps(self.stack2, skip_connections=True, low_memory=True)\n\n\nclass TestGatedRecurrent(unittest.TestCase):\n def setUp(self):\n self.gated = GatedRecurrent(\n dim=3, activation=Tanh(),\n gate_activation=Tanh(), weights_init=Constant(2))\n self.gated.initialize()\n self.reset_only = GatedRecurrent(\n dim=3, activation=Tanh(),\n gate_activation=Tanh(),\n weights_init=IsotropicGaussian(), seed=1)\n self.reset_only.initialize()\n\n def test_one_step(self):\n h0 = tensor.matrix('h0')\n x = tensor.matrix('x')\n gi = tensor.matrix('gi')\n h1 = self.gated.apply(x, gi, h0, iterate=False)\n next_h = theano.function(inputs=[h0, x, gi], outputs=[h1])\n\n h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],\n dtype=theano.config.floatX)\n x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]],\n dtype=theano.config.floatX)\n zi_val = (h0_val + x_val) / 2\n ri_val = -x_val\n W_val = 2 * numpy.ones((3, 3), dtype=theano.config.floatX)\n\n z_val = numpy.tanh(h0_val.dot(W_val) + zi_val)\n r_val = numpy.tanh(h0_val.dot(W_val) + ri_val)\n h1_val = (z_val * numpy.tanh((r_val * h0_val).dot(W_val) + x_val) +\n (1 - z_val) * h0_val)\n assert_allclose(\n h1_val, next_h(h0_val, x_val, numpy.hstack([zi_val, ri_val]))[0],\n rtol=1e-6)\n\n def test_many_steps(self):\n x = tensor.tensor3('x')\n gi = tensor.tensor3('gi')\n mask = tensor.matrix('mask')\n h = self.reset_only.apply(x, gi, mask=mask)\n calc_h = theano.function(inputs=[x, gi, mask], outputs=[h])\n\n x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),\n dtype=theano.config.floatX)\n x_val = numpy.ones((24, 4, 3),\n dtype=theano.config.floatX) * x_val[..., None]\n ri_val = 0.3 - x_val\n zi_val = 2 * ri_val\n mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)\n mask_val[12:24, 3] = 0\n h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)\n W = self.reset_only.state_to_state.get_value()\n Wz = self.reset_only.state_to_gates.get_value()[:, :3]\n Wr = 
self.reset_only.state_to_gates.get_value()[:, 3:]\n\n for i in range(1, 25):\n z_val = numpy.tanh(h_val[i - 1].dot(Wz) + zi_val[i - 1])\n r_val = numpy.tanh(h_val[i - 1].dot(Wr) + ri_val[i - 1])\n h_val[i] = numpy.tanh((r_val * h_val[i - 1]).dot(W) +\n x_val[i - 1])\n h_val[i] = z_val * h_val[i] + (1 - z_val) * h_val[i - 1]\n h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +\n (1 - mask_val[i - 1, :, None]) * h_val[i - 1])\n h_val = h_val[1:]\n # TODO Figure out why this tolerance needs to be so big\n assert_allclose(\n h_val,\n calc_h(x_val, numpy.concatenate(\n [zi_val, ri_val], axis=2), mask_val)[0],\n 1e-04)\n\n # Also test that initial state is a parameter\n initial_state, = VariableFilter(roles=[INITIAL_STATE])(\n ComputationGraph(h))\n assert is_shared_variable(initial_state)\n assert initial_state.name == 'initial_state'\n\n\nclass TestBidirectional(unittest.TestCase):\n def setUp(self):\n self.bidir = Bidirectional(weights_init=Orthogonal(),\n prototype=SimpleRecurrent(\n dim=3, activation=Tanh()))\n self.simple = SimpleRecurrent(dim=3, weights_init=Orthogonal(),\n activation=Tanh(), seed=1)\n self.bidir.allocate()\n self.simple.initialize()\n self.bidir.children[0].parameters[0].set_value(\n self.simple.parameters[0].get_value())\n self.bidir.children[1].parameters[0].set_value(\n self.simple.parameters[0].get_value())\n self.x_val = 0.1 * numpy.asarray(\n list(itertools.permutations(range(4))),\n dtype=theano.config.floatX)\n self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *\n self.x_val[..., None])\n self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)\n self.mask_val[12:24, 3] = 0\n\n def test(self):\n x = tensor.tensor3('x')\n mask = tensor.matrix('mask')\n calc_bidir = theano.function([x, mask],\n [self.bidir.apply(x, mask=mask)])\n calc_simple = theano.function([x, mask],\n [self.simple.apply(x, mask=mask)])\n h_bidir = calc_bidir(self.x_val, self.mask_val)[0]\n h_simple = calc_simple(self.x_val, self.mask_val)[0]\n h_simple_rev = calc_simple(self.x_val[::-1], self.mask_val[::-1])[0]\n\n output_names = self.bidir.apply.outputs\n\n assert output_names == ['states']\n assert_allclose(h_simple, h_bidir[..., :3], rtol=1e-04)\n assert_allclose(h_simple_rev, h_bidir[::-1, ..., 3:], rtol=1e-04)\n\n\nclass TestBidirectionalStack(unittest.TestCase):\n def setUp(self):\n prototype = SimpleRecurrent(dim=3, activation=Tanh())\n self.layers = [\n Bidirectional(weights_init=Orthogonal(), prototype=prototype)\n for _ in range(3)]\n self.stack = RecurrentStack(self.layers)\n for fork in self.stack.forks:\n fork.weights_init = Identity(1)\n fork.biases_init = Constant(0)\n self.stack.initialize()\n\n self.x_val = 0.1 * numpy.asarray(\n list(itertools.permutations(range(4))),\n dtype=theano.config.floatX)\n self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *\n self.x_val[..., None])\n self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)\n self.mask_val[12:24, 3] = 0\n\n def test_steps(self):\n x = tensor.tensor3('x')\n mask = tensor.matrix('mask')\n\n calc_stack_layers = [\n theano.function([x, mask], self.stack.apply(x, mask=mask)[i])\n for i in range(len(self.layers))]\n stack_layers = [\n f(self.x_val, self.mask_val) for f in calc_stack_layers]\n\n h_val = self.x_val\n for stack_layer_value, bidir_net in zip(stack_layers, self.layers):\n calc = theano.function([x, mask], bidir_net.apply(x, mask=mask))\n simple_layer_value = calc(h_val, self.mask_val)\n assert_allclose(stack_layer_value, simple_layer_value, rtol=1e-04)\n h_val = 
simple_layer_value[..., :3]\n\n def test_dims(self):\n self.assertEqual(self.stack.get_dim(\"inputs\"), 3)\n for i in range(len(self.layers)):\n state_name = self.stack.suffix(\"states\", i)\n self.assertEqual(self.stack.get_dim(state_name), 6)\n\n\ndef test_saved_inner_graph():\n \"\"\"Make sure that the original inner graph is saved.\"\"\"\n x = tensor.tensor3()\n recurrent = SimpleRecurrent(dim=3, activation=Tanh())\n y = recurrent.apply(x)\n\n application_call = get_application_call(y)\n assert application_call.inner_inputs\n assert application_call.inner_outputs\n\n cg = ComputationGraph(application_call.inner_outputs)\n # Check that the inner scan graph is annotated\n # with `recurrent.apply`\n assert len(VariableFilter(applications=[recurrent.apply])(cg)) == 3\n # Check that the inner graph is equivalent to the one\n # produced by a stand-alone of `recurrent.apply`\n assert is_same_graph(application_call.inner_outputs[0],\n recurrent.apply(*application_call.inner_inputs,\n iterate=False))\n\n\ndef test_super_in_recurrent_overrider():\n # A regression test for the issue #475\n class SimpleRecurrentWithContext(SimpleRecurrent):\n @application(contexts=['context'])\n def apply(self, context, *args, **kwargs):\n kwargs['inputs'] += context\n return super(SimpleRecurrentWithContext, self).apply(*args,\n **kwargs)\n\n @apply.delegate\n def apply_delegate(self):\n return super(SimpleRecurrentWithContext, self).apply\n\n brick = SimpleRecurrentWithContext(100, Tanh())\n inputs = tensor.tensor3('inputs')\n context = tensor.matrix('context').dimshuffle('x', 0, 1)\n brick.apply(context, inputs=inputs)\n",
"from __future__ import print_function\n# PENDING REWRITE OF tensor_opt.py\n\nimport copy\nimport logging\nimport pickle\nimport os\nimport sys\nimport time\nimport unittest\n\nimport numpy\nfrom six.moves import xrange\nfrom nose.plugins.skip import SkipTest\nfrom nose.tools import assert_raises\nfrom numpy.testing import dec\nfrom numpy.testing.noseclasses import KnownFailureTest\n\nimport theano\nimport theano.scalar as scal\nfrom six import PY3, StringIO\nfrom theano import compile\nfrom theano.compile import deep_copy_op, DeepCopyOp\nfrom theano import config\nfrom theano import function\nfrom theano import gof\nfrom theano import pprint\nfrom theano import shared\nfrom theano.gof import FunctionGraph\nimport theano.tensor.opt as opt\nfrom theano.tensor.opt import (\n local_add_specialize,\n local_dimshuffle_lift,\n local_greedy_distributor,\n mul_canonizer,\n out2in,\n Shape_i,\n Assert,\n MakeVector,\n make_vector\n )\nfrom theano import tensor\nfrom theano import tensor as T\nfrom theano.tensor import scalar, iscalar, lscalar, fscalar, dscalar\nfrom theano.tensor import vector, ivector, lvector, fvector, dvector\nfrom theano.tensor import matrix, imatrix, lmatrix, fmatrix, dmatrix\nfrom theano.tensor import scalars, vectors, matrices, fmatrices, dmatrices\nfrom theano.tensor import (\n AdvancedSubtensor1,\n as_tensor_variable,\n inplace,\n Join,\n join,\n Subtensor,\n TensorType,\n Tile,\n )\nfrom theano.tensor.elemwise import DimShuffle\nfrom theano.tests import unittest_tools as utt\nfrom theano.compile.mode import optdb\nfrom theano.compile import Mode\nfrom nose.plugins.attrib import attr\n\nmode_opt = theano.config.mode\nif mode_opt == 'FAST_COMPILE':\n mode_opt = 'FAST_RUN'\nmode_opt = theano.compile.mode.get_mode(mode_opt)\n\nds = lambda x, y: DimShuffle(x.type.broadcastable, y)(x)\ndimshuffle_lift = out2in(local_dimshuffle_lift)\n\n_optimizer_stabilize = gof.Query(include=['fast_run'])\n_optimizer_stabilize.position_cutoff = 1.51\n_optimizer_stabilize = compile.optdb.query(_optimizer_stabilize)\n\n_optimizer_specialize = gof.Query(include=['fast_run'])\n_optimizer_specialize.position_cutoff = 2.01\n_optimizer_specialize = compile.optdb.query(_optimizer_specialize)\n\n_optimizer_fast_run = gof.Query(include=['fast_run'])\n_optimizer_fast_run = compile.optdb.query(_optimizer_fast_run)\n\n\ndef optimize(g, level='fast_run'):\n if level == 'fast_run':\n _optimizer_fast_run.optimize(g)\n elif level == 'specialize':\n _optimizer_specialize.optimize(g)\n elif level == 'stabilize':\n _optimizer_stabilize.optimize(g)\n else:\n raise ValueError(level)\n return g\n\n\ndef inputs(xbc=(0, 0), ybc=(0, 0), zbc=(0, 0)):\n x = TensorType(broadcastable=xbc, dtype='float64')('x')\n y = TensorType(broadcastable=ybc, dtype='float64')('y')\n z = TensorType(broadcastable=zbc, dtype='float64')('z')\n return x, y, z\n\n\nclass test_dimshuffle_lift(unittest.TestCase):\n def test_double_transpose(self):\n x, y, z = inputs()\n e = ds(ds(x, (1, 0)), (1, 0))\n g = FunctionGraph([x], [e])\n self.assertTrue(str(g) == \"[DimShuffle{1,0}(DimShuffle{1,0}(x))]\")\n dimshuffle_lift.optimize(g)\n self.assertTrue(str(g) == \"[x]\")\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))\n\n\n def test_merge2(self):\n x, y, z = inputs()\n e = ds(ds(x, (1, 'x', 0)), (2, 0, 'x', 1))\n g = FunctionGraph([x], [e])\n self.assertTrue(\n str(g) == \"[DimShuffle{2,0,x,1}(DimShuffle{1,x,0}(x))]\",\n str(g))\n dimshuffle_lift.optimize(g)\n 
self.assertTrue(str(g) == \"[DimShuffle{0,1,x,x}(x)]\", str(g))\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))\n\n def test_elim3(self):\n x, y, z = inputs()\n e = ds(ds(ds(x, (0, 'x', 1)), (2, 0, 'x', 1)), (1, 0))\n g = FunctionGraph([x], [e])\n self.assertTrue(\n str(g) == \"[DimShuffle{1,0}(DimShuffle{2,0,x,1}\"\n \"(DimShuffle{0,x,1}(x)))]\",\n str(g))\n dimshuffle_lift.optimize(g)\n self.assertTrue(str(g) == \"[x]\", str(g))\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))\n\n def test_lift(self):\n x, y, z = inputs([False] * 1, [False] * 2, [False] * 3)\n e = x + y + z\n g = FunctionGraph([x, y, z], [e])\n\n # It does not really matter if the DimShuffles are inplace\n # or not.\n init_str_g_inplace = (\n \"[Elemwise{add,no_inplace}(InplaceDimShuffle{x,0,1}\"\n \"(Elemwise{add,no_inplace}(InplaceDimShuffle{x,0}(x), y)), z)]\")\n init_str_g_noinplace = (\n \"[Elemwise{add,no_inplace}(DimShuffle{x,0,1}\"\n \"(Elemwise{add,no_inplace}(DimShuffle{x,0}(x), y)), z)]\")\n self.assertTrue(str(g) in (init_str_g_inplace, init_str_g_noinplace),\n str(g))\n\n opt_str_g_inplace = (\n \"[Elemwise{add,no_inplace}(Elemwise{add,no_inplace}\"\n \"(InplaceDimShuffle{x,x,0}(x), InplaceDimShuffle{x,0,1}(y)), z)]\")\n opt_str_g_noinplace = (\n \"[Elemwise{add,no_inplace}(Elemwise{add,no_inplace}\"\n \"(DimShuffle{x,x,0}(x), DimShuffle{x,0,1}(y)), z)]\")\n dimshuffle_lift.optimize(g)\n self.assertTrue(str(g) in (opt_str_g_inplace, opt_str_g_noinplace),\n str(g))\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))\n\n\n def test_recursive_lift(self):\n v = T.vector(dtype=\"float64\")\n m = T.matrix(dtype=\"float64\")\n out = ((v + 42) * (m + 84)).T\n g = FunctionGraph([v, m], [out])\n init_str_g = (\"[DimShuffle{1,0}(Elemwise{mul,no_inplace}\"\n \"(DimShuffle{x,0}(Elemwise{add,no_inplace}\"\n \"(<TensorType(float64, vector)>, \"\n \"DimShuffle{x}(TensorConstant{42}))), \"\n \"Elemwise{add,no_inplace}\"\n \"(<TensorType(float64, matrix)>, \"\n \"DimShuffle{x,x}(TensorConstant{84}))))]\")\n self.assertTrue(str(g) == init_str_g)\n \n new_out = local_dimshuffle_lift.transform(g.outputs[0].owner)[0]\n new_g = FunctionGraph(g.inputs, [new_out])\n opt_str_g = (\"[Elemwise{mul,no_inplace}(Elemwise{add,no_inplace}\"\n \"(DimShuffle{0,x}(<TensorType(float64, vector)>), \"\n \"DimShuffle{x,x}(TensorConstant{42})), \"\n \"Elemwise{add,no_inplace}(DimShuffle{1,0}\"\n \"(<TensorType(float64, matrix)>), \"\n \"DimShuffle{x,x}(TensorConstant{84})))]\")\n self.assertTrue(str(new_g) == opt_str_g)\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(new_g.outputs[0].tag, 'trace'))\n\n\ndef test_add_canonizer_problem0():\n n_segments = 10\n label = lscalar('label')\n segment_labels = label + theano._asarray([0] * n_segments, dtype='int64')\n\n r = segment_labels * 5\n f = function([label], r)\n\n\nclass test_greedy_distribute(unittest.TestCase):\n def test_main(self):\n a, b, c, d, x, y, z = matrices('abcdxyz')\n\n # 1. 
((a/x + b/y) * x * y) --> a*y + b*x\n e = (a / z + b / x) * x * z\n g = FunctionGraph([a, b, c, d, x, y, z], [e])\n # print pprint(g.outputs[0])\n mul_canonizer.optimize(g)\n gof.TopoOptimizer(gof.LocalOptGroup(local_greedy_distributor),\n order='out_to_in').optimize(g)\n # print pprint(g.outputs[0])\n assert str(pprint(g.outputs[0])) == \"((a * x) + (b * z))\"\n\n # 2. ((a/x + b) * x) --> a + b*x\n e = (a / x + b) * x\n g = FunctionGraph([a, b, x], [e])\n # print pprint(g.outputs[0])\n mul_canonizer.optimize(g)\n gof.TopoOptimizer(gof.LocalOptGroup(local_greedy_distributor),\n order='out_to_in').optimize(g)\n # print pprint(g.outputs[0])\n assert str(pprint(g.outputs[0])) == \"(a + (b * x))\"\n\n def test_kording_bug(self):\n x, y = vectors('xy')\n eps = scalar('eps')\n s = scalar('s')\n\n #r = theano.tensor.mul(theano.tensor.fill(x, 2.*a), x/a , (y+z) , a)\n #r = theano.tensor.mul((x/a+y) , a, z)\n r = tensor.mul(s - 1,\n eps + x / s,\n eps + y / s,\n s)\n\n f = function([s, eps, x, y], r ** 2)\n\n s_val = numpy.asarray(4, dtype=config.floatX)\n eps_val = numpy.asarray(1.e-6, dtype=config.floatX)\n x_val = numpy.asarray([1.5, 2], dtype=config.floatX)\n y_val = numpy.asarray([2.3, 3.1], dtype=config.floatX)\n\n r0 = f(s_val, eps_val, x_val, y_val)\n r1 = f(s_val, eps_val, x_val, y_val)\n r2 = f(s_val, eps_val, x_val, y_val)\n\n assert numpy.all(r0 == r1)\n assert numpy.all(r0 == r2)\n\n\nclass test_canonize(unittest.TestCase):\n def test_muldiv(self):\n x, y, z = matrices('xyz')\n a, b, c, d = matrices('abcd')\n# e = (2.0 * x) / (2.0 * y)\n# e = (2.0 * x) / (4.0 * y)\n# e = x / (y / z)\n# e = (x * y) / x\n# e = (x / y) * (y / z) * (z / x)\n# e = (a / b) * (b / c) * (c / d)\n# e = (a * b) / (b * c) / (c * d)\n# e = 2 * x / 2\n# e = x / y / x\n# e = (x / x) * (y / y)\n e = (-1 * x) / y / (-2 * z)\n g = FunctionGraph([x, y, z, a, b, c, d], [e])\n print(pprint(g.outputs[0]))\n mul_canonizer.optimize(g)\n print(pprint(g.outputs[0]))\n\n def test_elemwise_multiple_inputs_optimisation(self):\n \"\"\"verify that the Canonizer merge sequential Elemwise({mul,add}) part 1\n\n This part are that case that is done, but don't include case\n that are not implemented but are suposed to be.\n\n Test with and without DimShuffle\n\n \"\"\"\n\n shp = (5, 5)\n fx, fy, fz = fmatrices('xyz')\n dx, dy, dz = dmatrices('xyz')\n fv = fvector('r').dimshuffle('x', 0)\n dv = dvector('s').dimshuffle('x', 0)\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\n cases = [\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\n# (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\n# (dx+dy+dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\n# (fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\n# (dx*dy*dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\n# (fx*fy*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\n# (dx*dy*(dx+dy+dz),(dx,dy,dz),(dxv,dyv,dzv),2,'float64'),\n# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add\n# 
(dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul\n # check with dimshuffle of constant\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\n# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\n# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\n (fx * fy * 2 * (fx + fy + fz+2), (fx, fy, fz), (fxv, fyv, fzv), 2, {\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n\n # check with broadcast of row\n# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\n# (fx*fy*fz*fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\n# (fv+fx+fy+fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\n# (fv*fx*fy*fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\n# (fx*fy*fv*(fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\n# (fx*fy*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\n# (fx*fy*fv*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\n# (dx+dy+dz+dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\n# (dx*dy*dz*dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\n# (dv+dx+dy+dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\n# (dv*dx*dy*dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\n# (dx*dy*dv*(dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\n# (dx*dy*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\n# (dx*dy*dv*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\n ] # [10:11]\n# print cases\n\n # We must be sure that the Canonizer is working, but that we don't have other\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\n mode = compile.mode.get_default_mode()\n opt = gof.Query([\"canonicalize\"])\n opt = opt.excluding('local_elemwise_fusion')\n mode = mode.__class__(linker=mode.linker, optimizer=opt)\n for id, [g, sym_inputs, val_inputs,\n nb_elemwise, out_dtype] in enumerate(cases):\n if isinstance(out_dtype, dict):\n out_dtype = out_dtype[config.cast_policy]\n f = compile.function(list(sym_inputs), g,\n # we need the optimisation enabled, debug do this.\n mode=mode)\n\n out = f(*val_inputs)\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\n assert(out_dtype == out.dtype)\n\n def test_elemwise_multiple_inputs_optimisation2(self):\n \"\"\"\n verify that the Canonizer merge sequential Elemwise({mul,add}) part 2.\n This part are that case that should have been done, but that are not implemented.\n Test with and without DimShuffle\n \"\"\"\n raise SkipTest(\"Current implementation of Canonizer does not \"\n \"implement all cases. 
Skip the corresponding test.\")\n\n shp = (5, 5)\n fx, fy, fz = fmatrices('xyz')\n dx, dy, dz = dmatrices('xyz')\n fv = fvector('r').dimshuffle('x', 0)\n dv = dvector('s').dimshuffle('x', 0)\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\n cases = [\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\n fzv), 2, 'float32'),\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\n dzv), 2, 'float64'),\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\n 'float64'), # check mixed type add\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\n 'float64'), # check mixed type mul\n # check with dimshuffle of constant\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\n fzv), 1, 'float32'),\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\n fzv), 1, 'float32'),\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\n fzv), 2, 'float32'),\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\n fzv), 2, 'float32'),\n\n # check with broadcast of row\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\n fvv), 1, 'float32'),\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\n fvv), 1, 'float32'),\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\n fvv), 1, 'float32'),\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\n fvv), 1, 'float32'),\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\n fzv, fvv), 2, 'float32'),\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\n fzv, fvv), 2, 'float32'),\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\n fvv), 2, 'float32'),\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\n dvv), 1, 'float64'),\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\n dvv), 1, 'float64'),\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\n dvv), 1, 'float64'),\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\n dvv), 1, 'float64'),\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\n dzv, dvv), 2, 'float64'),\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\n dzv, dvv), 2, 'float64'),\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\n dvv), 2, 'float64'),\n\n ] # [10:11]\n# print cases\n\n # We must be sure that the Canonizer is working, but that we don't have other\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\n mode = compile.mode.get_default_mode()\n mode._optimizer = gof.Query([\"canonicalize\"])\n mode._optimizer 
= mode._optimizer.excluding('local_elemwise_fusion')\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\n f = compile.function(list(sym_inputs), g,\n # we need the optimisation enabled, debug do this.\n mode=mode)\n\n out = f(*val_inputs)\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\n assert(out_dtype == out.dtype)\n\n @attr('slow')\n def test_multiple_case(self):\n \"\"\" test those case take from the comment in Canonizer\n x / x -> 1\n (x * y) / x -> y\n x / y / x -> 1 / y\n x / y / z -> x / (y * z)\n x / (y / z) -> (x * z) / y\n (a / b) * (b / c) * (c / d) -> a / d\n (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\n 2 * x / 2 -> x\n with and without DimShuffle\n TODO: with DimShuffle\n \"\"\"\n\n shp = (3, 3)\n fx, fy, fz, fw = fmatrices('xyzw')\n dx, dy, dz, dw = dmatrices('xyzw')\n fv = fvector('r').dimshuffle('x', 0)\n dv = dvector('s').dimshuffle('x', 0)\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\n\n # We must be sure that the Canonizer is working, but that we don't have other\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\n mode = compile.mode.get_default_mode()\n\n opt = gof.Query([\"canonicalize\"])\n opt = opt.including('ShapeOpt', 'local_fill_to_alloc')\n opt = opt.excluding(\n 'local_elemwise_fusion')\n mode = mode.__class__(linker=mode.linker, optimizer=opt)\n # test x / x -> 1\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx, [fx], [fxv], 'float32'),\n (dx/dx, [dx], [dxv], 'float64'),\n (fv/fv, [fv], [fvv], 'float32'),\n (dv/dv, [dv], [dvv], 'float64'),\n ]):\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n out = f(*val_inputs)\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\n topo = f.maker.fgraph.toposort()\n if sym_inputs[0].broadcastable[0]:\n assert len(topo) == 2\n assert isinstance(topo[0].op, Shape_i)\n assert isinstance(topo[1].op, tensor.Alloc)\n else:\n assert len(topo) == 3\n assert isinstance(topo[0].op, Shape_i)\n assert isinstance(topo[1].op, Shape_i)\n assert isinstance(topo[2].op, tensor.Alloc)\n assert(out_dtype == out.dtype)\n\n # test (x * y) / x -> y\n for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\n ((dx*dy)/dx, [dx, dy], [dxv, dyv], 0, 'float64'),\n ((fx*fy)/fx, [fx, fy], [fxv, fyv], 0, 'float32'),\n ((dv*dy)/dv, [dv, dy], [dvv, dyv], 0, 'float64'),\n ((fv*fy)/fv, [fv, fy], [fvv, fyv], 0, 'float32'),\n # must broadcast as their is a dimshuffle in the computation\n ((dx*dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),\n # topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\n ((fx*fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32')\n # topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\n ]):\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n out = f(*val_inputs)\n assert(out_dtype == out.dtype)\n assert numpy.allclose(out, val_inputs[1])\n topo = 
f.maker.fgraph.toposort()\n if topo and not(len(topo) == 1 and topo[0].op == deep_copy_op):\n for node in topo[:-1]:\n assert isinstance(node.op, Shape_i)\n assert isinstance(topo[-1].op, tensor.Alloc)\n\n # test x / y / x -> 1 / y\n for id, (g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\n ((dx/dy)/dx, [dx, dy], [dxv, dyv], 1, 'float64'),\n ((fx/fy)/fx, [fx, fy], [fxv, fyv], 1, 'float32'),\n ((dv/dy)/dv, [dv, dy], [dvv, dyv], 1, 'float64'),\n ((fv/fy)/fv, [fv, fy], [fvv, fyv], 1, 'float32'),\n # must broadcast as their is a dimshuffle in the computation\n\n ((dx/dv)/dx, [dx, dv], [dxv, dvv], 1, 'float64'),\n# topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\n ((fx/fv)/fx, [fx, fv], [fxv, fvv], 1, 'float32'),\n # topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\n ]):\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n out = f(*val_inputs)\n assert numpy.allclose(out, (1 / val_inputs[1]))\n topo = f.maker.fgraph.toposort()\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\n assert len(elem) == nb_elemwise\n assert isinstance(elem[0].op, (T.Elemwise, ))\n assert isinstance(elem[0].op.scalar_op, (\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\n assert(out_dtype == out.dtype)\n\n # test (a / b) * (b / c) * (c / d) -> a / d\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\n ((dx / dy) * (dy / dz) * (dz / dw), [dx, dy, dz, dw], [dxv, dyv, dzv, dwv], 'float64'),\n ((fx / fy) * (fy / fz) * (fz / fw), [fx, fy, fz, fw], [fxv, fyv, fzv, fwv], 'float32'),\n ((dv / dy) * (dy / dz) * (dz / dw), [dv, dy, dz, dw], [dvv, dyv, dzv, dwv], 'float64'),\n ((fv / fy) * (fy / fz) * (fz / fw), [fv, fy, fz, fw], [fvv, fyv, fzv, fwv], 'float32'),\n ((dx / dv) * (dv / dz) * (dz / dw), [dx, dv, dz, dw], [dxv, dvv, dzv, dwv], 'float64'),\n ((fx / fv) * (fv / fz) * (fz / fw), [fx, fv, fz, fw], [fxv, fvv, fzv, fwv], 'float32'),\n ((dx / dy) * (dy / dv) * (dv / dw), [dx, dy, dv, dw], [dxv, dyv, dvv, dwv], 'float64'),\n ((fx / fy) * (fy / fv) * (fv / fw), [fx, fy, fv, fw], [fxv, fyv, fvv, fwv], 'float32'),\n ((dx / dy) * (dy / dz) * (dz / dv), [dx, dy, dz, dv], [dxv, dyv, dzv, dvv], 'float64'),\n ((fx / fy) * (fy / fz) * (fz / fv), [fx, fy, fz, fv], [fxv, fyv, fzv, fvv], 'float32'),\n ]):\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n out = f(*val_inputs)\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, (T.Elemwise, ))\n assert isinstance(topo[0].op.scalar_op,\n theano.scalar.basic.TrueDiv)\n assert len(topo[0].inputs) == 2\n assert(out_dtype == out.dtype)\n\n # test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\n (((2.0*dx)/(4.0*dy)), [dx, dy], [dxv, dyv], 'float64'),\n (((2.0*fx)/(4.0*fy)), [fx, fy], [fxv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n (((2.0*dv)/(4.0*dy)), [dv, dy], [dvv, dyv], 'float64'),\n (((2.0*fv)/(4.0*fy)), [fv, fy], [fvv, fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n (((2.0*dx)/(4.0*dv)), [dx, dv], [dxv, dvv], 'float64'),\n (((2.0*fx)/(4.0*fv)), [fx, fv], [fxv, fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n ]):\n\n if isinstance(out_dtype, dict):\n out_dtype = out_dtype[config.cast_policy]\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n out = f(*val_inputs)\n assert 
numpy.allclose(out, (0.5 *\n val_inputs[0] / val_inputs[1]))\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n assert isinstance(topo[0].op, (T.Elemwise, ))\n assert isinstance(topo[0].op.scalar_op,\n theano.scalar.basic.Mul)\n assert len(topo[0].inputs) == 2\n assert isinstance(topo[1].op, (T.Elemwise, ))\n assert isinstance(topo[1].op.scalar_op,\n theano.scalar.basic.TrueDiv)\n assert len(topo[1].inputs) == 2\n assert(out_dtype == out.dtype)\n\n # test 2 * x / 2 -> x\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\n ((2*dx)/2, [dx], [dxv], 'float64'),\n ((2*fx)/2, [fx], [fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n ((2*dv)/2, [dv], [dvv], 'float64'),\n ((2*fv)/2, [fv], [fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n ]):\n if isinstance(out_dtype, dict):\n out_dtype = out_dtype[config.cast_policy]\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n out = f(*val_inputs)\n assert numpy.allclose(out, val_inputs[0])\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n topo[0].op == deep_copy_op\n assert(out_dtype == out.dtype)\n\n # test x / abs(x) -> sign(x)\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\n (dx/abs(dx), [dx], [0.5-dxv], 'float64'),\n (fx/abs(fx), [fx], [0.5-fxv], 'float32'),\n (dx/abs(dx), [dx], [0.1*dxv], 'float64'),\n (fx/abs(fx), [fx], [0.1*fxv], 'float32'),\n (dv/abs(dv), [dv], [0.5-dvv], 'float64'),\n (fv/abs(fv), [fv], [0.5-fvv], 'float32'),\n ]):\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n out = f(*val_inputs)\n assert numpy.all(numpy.isfinite(out))\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\n assert(out_dtype == out.dtype)\n assert len(f.maker.fgraph.toposort()) == 1\n\n # test (2*x) / (3*abs(x)) -> sign(x)\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n ]):\n\n if isinstance(out_dtype, dict):\n out_dtype = out_dtype[config.cast_policy]\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n topo = f.maker.fgraph.toposort()\n out = f(*val_inputs)\n assert numpy.all(numpy.isfinite(out))\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\n assert(out_dtype == out.dtype)\n\n def test_abs_mul_div(self):\n \"\"\"\n test that if we have\n 4 * x / abs(2*x) it get simplifier during canonicalisation.\n \"\"\"\n\n x = T.dscalar()\n a = T.abs_(x)\n\n if theano.config.mode == 'FAST_COMPILE':\n mode = theano.compile.mode.get_mode('FAST_RUN').excluding(\n \"local_elemwise_fusion\")\n else:\n mode = theano.compile.mode.get_default_mode().excluding(\n \"local_elemwise_fusion\")\n\n f = theano.function([x], [(4 * x) / abs(2 * x)], mode=mode)\n print(f.maker.fgraph.toposort())\n print()\n f(.1)\n f(-1)\n # some stabilization optimization make the output be finite instead of nan\n # debug_mode will raise an error when he see nan\n if not isinstance(mode, theano.compile.debugmode.DebugMode):\n assert numpy.isfinite(f(0))\n\n assert 
len(f.maker.fgraph.toposort()) == 2\n assert f.maker.fgraph.toposort()[0].op == T.sgn\n\n f = theano.function([x], [(4 * x) / abs(x / 2)], mode=mode)\n print(f.maker.fgraph.toposort())\n print()\n f(.1)\n f(-1)\n # some stabilization optimization make the output be finite instead of nan\n # debug_mode will raise an error when he see nan\n if not isinstance(mode, theano.compile.debugmode.DebugMode):\n assert numpy.isfinite(f(0))\n\n assert len(f.maker.fgraph.toposort()) == 2\n assert f.maker.fgraph.toposort()[0].op == T.sgn\n\n def test_multiple_case_that_fail(self):\n raise SkipTest(\"Current implementation of Canonizer does not \"\n \"implement all cases. Skip the corresponding test.\")\n\n shp = (4, 4)\n fx, fy, fz = fmatrices('xyz')\n dx, dy, dz = dmatrices('xyz')\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\n # We must be sure that the Canonizer is working, but that we don't have other\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\n mode = compile.mode.get_default_mode()\n\n opt = gof.Query([\"canonicalize\"])\n opt = opt.excluding(\n 'local_elemwise_fusion')\n mode = mode.__class__(linker=mode.linker, optimizer=opt)\n# test fail!\n # test x / y / z -> x / (y * z)\n for (g, sym_inputs, val_inputs, out_dtype) in [\n ((dx/dy)/dz, [dx, dy, dz], [dxv, dyv, dzv], 'float64'),\n ((fx/fy)/fz, [fx, fy, fz], [fxv, fyv, fzv], 'float32')\n ]:\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n out = f(*val_inputs)\n assert numpy.allclose(out, val_inputs[0] /\n val_inputs[1] / val_inputs[2])\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n assert isinstance(topo[0].op, (T.Elemwise, ))\n assert isinstance(topo[0].op.scalar_op,\n theano.scalar.basic.Inv)\n assert len(topo[0].inputs) == 1\n assert(out_dtype == out.dtype)\n\n # test x / (y / z) -> (x * z) / y\n for (g, sym_inputs, val_inputs, out_dtype) in [\n (dx/(dy/dz), [dx, dy, dz], [dxv, dyv, dzv], 'float64'),\n (fx/(fy/fz), [fx, fy, fz], [fxv, fyv, fzv], 'float32')\n ]:\n f = compile.function(list(sym_inputs), g,\n mode=mode)\n out = f(*val_inputs)\n assert numpy.allclose(out, val_inputs[0] / (\n val_inputs[1] / val_inputs[2]))\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n assert isinstance(topo[0].op, (T.Elemwise, ))\n assert isinstance(topo[0].op.scalar_op,\n theano.scalar.basic.Inv)\n assert len(topo[0].inputs) == 1\n assert(out_dtype == out.dtype)\n\n def test_dont_merge_if_multiple_client(self):\n \"\"\" test those case take from the comment in Canonizer\n \"\"\"\n raise SkipTest(\"Not implemented\")\n\n def test_canonicalize_nan(self):\n \"\"\"\n Regression test for bug in canonicalization of NaN values.\n\n This bug caused an infinite loop which was caught by the equilibrium\n optimizer, resulting in an error log message.\n \"\"\"\n sio = StringIO()\n handler = logging.StreamHandler(sio)\n handler.setLevel(logging.ERROR)\n logging.getLogger('theano.gof.opt').addHandler(handler)\n try:\n x = vector()\n f = theano.function([x], x + numpy.nan)\n finally:\n logging.getLogger('theano.gof.opt').removeHandler(handler)\n # Ideally this test would only 
catch the maxed out equilibrium\n # optimizer error message, but to be safe in case this message\n # is modified in the future, we assert that there is no error\n # at all.\n assert not sio.getvalue()\n\n\ndef test_local_merge_abs():\n x, y, z = T.matrices('xyz')\n x_val = numpy.random.rand(5, 5).astype(config.floatX)\n y_val = numpy.random.rand(5, 5).astype(config.floatX)\n z_val = numpy.random.rand(5, 5).astype(config.floatX)\n mode = theano.config.mode\n if mode == \"FAST_COMPILE\":\n mode = \"FAST_RUN\"\n mode = theano.compile.mode.get_mode(mode).excluding(\n \"local_elemwise_fusion\")\n\n f = theano.function([y, z], (abs(y * z * -2)), mode=mode)\n f(y_val, z_val)\n assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op, scal.Abs)\n assert len(f.maker.fgraph.toposort()) == 2\n\n f = theano.function([x, y], abs(x / y), mode=mode)\n f(x_val, y_val)\n assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op, scal.Abs)\n assert len(f.maker.fgraph.toposort()) == 2\n\n\ndef test_merge_abs_bugfix():\n # Test crash in optimization reported by Jeremiah Lowin at\n # https://groups.google.com/d/topic/theano-users/TaXfqXP2Mj0/discussion\n input = T.matrix()\n # normalize on cols\n step1 = input / input.sum(0)\n # normalize on rows\n step2 = step1 / step1.sum(1)\n # get l1 norm\n l1_norm = T.abs_(step2).sum()\n theano.function([input], T.grad(l1_norm, input))\n\n\ndef test_mixeddiv():\n \"\"\"Test that int division is preserved\"\"\"\n i = iscalar()\n d = dscalar()\n assert 0 == function([i, d], d * (i // (i + 1)))(3, 1.0)\n\n\ndef test_const_type_in_mul_canonizer():\n input = dmatrix()\n w = dmatrix()\n visb = dvector()\n hidb = dvector()\n betas = dvector()\n a = dvector()\n\n def sigm(x):\n return 1. / (1 + tensor.exp(-x))\n\n hid = sigm((tensor.dot(w, input) + hidb) * betas)\n\n vis_gauss1 = (tensor.dot(w.T, hid) + visb) * betas / (2 * a * a)\n vis_gauss2 = (tensor.dot(w.T, hid) + visb) * betas / (2. 
* a * a)\n\n f1 = function([input, w, visb, hidb, betas, a], vis_gauss1)\n f2 = function([input, w, visb, hidb, betas, a], vis_gauss2)\n\n ival = numpy.random.rand(5, 5)\n wval = numpy.random.rand(5, 5)\n visbval = numpy.random.rand(5)\n hidbval = numpy.random.rand(5)\n betaval = numpy.random.rand(5)\n aval = numpy.random.rand(5)\n\n assert numpy.allclose(\n f2(ival, wval, visbval, hidbval, betaval, aval),\n f1(ival, wval, visbval, hidbval, betaval, aval))\n\n\nclass test_fusion(unittest.TestCase):\n def do(self, mode, shared_fn, shp, gpu=False, nb_repeat=1, assert_len_topo=True, slice=None):\n \"\"\"\n param shared_fn: if None, will use compile.function\n verify that the elemwise fusion work\n Test with and without DimShuffle\n \"\"\"\n # TODO: disable the canonizer?\n def my_init(shp, dtype='float64', num=0):\n #ret = theano._asarray(numpy.random.rand(*shp),dtype=dtype)\n ret = numpy.zeros(shp, dtype=dtype) + num\n return ret\n fw, fx, fy, fz = [theano.tensor.tensor(dtype='float32',\n broadcastable=[False] * len(shp),\n name=n) for n in 'wxyz']\n dw, dx, dy, dz = [theano.tensor.tensor(dtype='float64',\n broadcastable=[False] * len(shp),\n name=n) for n in 'wxyz']\n ix, iy, iz = [theano.tensor.tensor(dtype='int32',\n broadcastable=[False] * len(shp),\n name=n) for n in 'xyz']\n fv = fvector('v')\n fs = fscalar('s')\n\n fwv = my_init(shp, 'float32', 1)\n fxv = my_init(shp, 'float32', 2)\n fyv = my_init(shp, 'float32', 3)\n fzv = my_init(shp, 'float32', 4)\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32')\n fsv = numpy.asarray(numpy.random.rand(), dtype='float32')\n dwv = my_init(shp, 'float64', 5)\n ixv = theano._asarray(my_init(shp, num=60), dtype='int32')\n iyv = theano._asarray(my_init(shp, num=70), dtype='int32')\n izv = theano._asarray(my_init(shp, num=70), dtype='int32')\n fwx = fw + fx\n ftanx = theano.tensor.tan(fx)\n cases = [\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv +\n fyv + fzv, 'float32'), # 0\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv *\n fyv * fzv, 'float32'), # 1\n (fx + fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv +\n fyv * fzv, 'float32'), # 2\n (fx * fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv *\n fyv + fzv, 'float32'), # 3\n (fw + fx + fy + fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,\n fwv + fxv + fyv + fzv, 'float32'),\n ((fw + fx) + (fy + fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,\n fwv + fxv + fyv + fzv, 'float32'), # 5\n (((fw + fx) + fy) + fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,\n fwv + fxv + fyv + fzv, 'float32'),\n ((fw + (fx + fy)) + fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,\n fwv + fxv + fyv + fzv, 'float32'),\n ((fw + (fx + fy) + fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,\n fwv + fxv + fyv + fzv, 'float32'),\n (fw + (fx + (fy + fz)), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,\n fwv + fxv + fyv + fzv, 'float32'),\n ((fw+fx)+(fy+fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,\n fwv+fxv+fyv+fzv, 'float32'), # 10\n (fw*fx*fy*fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,\n fwv * fxv * fyv * fzv, 'float32'),\n (fw+fx*fy*fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,\n fwv + fxv * fyv * fzv, 'float32'),\n (fx+fy*fz*fx, (fx, fy, fz), (fxv, fyv, fzv), 1,\n fxv + fyv * fzv * fxv, 'float32'),\n (fx*fy+fz+fy, (fx, fy, fz), (fxv, fyv, fzv), 1,\n fxv * fyv + fzv + fyv, 'float32'),\n (fx*fy*fz*fw+fx+fy+fz+fw, (fw, fx, fy, fz), (fwv, fxv,\n fyv, fzv), 1, fxv*fyv*fzv*fwv+fxv+fyv+fzv+fwv, 'float32'), # 15\n # test with constant\n ((fw+fx)+(fy+fz) + 2., (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),\n 1, 
fwv+fxv+fyv+fzv+2, 'float32'),\n (((fw+fx)+2.+fy)+fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),\n 1, fwv+fxv+fyv+fzv+2, 'float32'),\n ((fw+(fx+2.+fy))+fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),\n 1, fwv+fxv+fyv+fzv+2, 'float32'),\n ((fw+(fx+fy)+2+fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),\n 1, fwv+fxv+fyv+fzv+2, 'float32'),\n (fw+(fx+(fy+fz)+2.), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),\n 1, fwv+fxv+fyv+fzv+2, 'float32'), # 20\n (2+(fw+fx)+(fy+fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),\n 1, fwv+fxv+fyv+fzv+2, 'float32'),\n # mix float32 and float64\n (2+(dw+fx)+(fy+fz), (dw, fx, fy, fz), (dwv, fxv, fyv, fzv),\n 1, dwv+fxv+fyv+fzv+2, 'float64'),\n (2+(fw+dw)+(fy+fz), (fw, dw, fy, fz), (fwv, dwv, fyv, fzv),\n 1, fwv+dwv+fyv+fzv+2, 'float64'),\n (2+(fw+fx)+(dw+fz), (fw, fx, dw, fz), (fwv, fxv, dwv, fzv),\n 1, fwv+fxv+dwv+fzv+2, 'float64'),\n (2+(fw+fx)+(fy+dw), (fw, fx, fy, dw), (fwv, fxv, fyv, dwv),\n 1, fwv+fxv+fyv+dwv+2, 'float64'), # 25\n # test when their is other op then elemwise.\n # the good output for the next test.\n# (Pdb) p f.maker.fgraph.toposort()\n#[Elemwise{add,no_inplace}(w, x), Sum(Elemwise{add,no_inplace}.0), InplaceDimShuffle{x,x}(Sum.0), Elemwise{Composite{_impls=[<function <lambda> at 0x2c5c8c0>], nin=4, _c_code={\n# npy_float32 V%(id)s_tmp1;\n# V%(id)s_tmp1 = %(i2)s + %(i3)s;\n# npy_float32 V%(id)s_tmp2;\n# V%(id)s_tmp2 = %(i0)s + %(i1)s;\n#%(o0)s = V%(id)s_tmp2 + V%(id)s_tmp1;\n#}\n#, nout=1, fgraph=[add(add(<float32>, <float32>), add(<float32>, <float32>))]}}(InplaceDimShuffle{x,x}.0, Elemwise{add,no_inplace}.0, y, z)]\n ((fwx.sum())+(fwx)+(fy+fz), (fw, fx, fy, fz), (fwv, fxv,\n fyv, fzv), 4, (fwv+fxv).sum()+fwv+fxv+fyv+fzv, 'float32'),\n # test other elemwise op\n (fx+fy+tensor.cos(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,\n fxv+fyv+numpy.cos(fzv), 'float32'),\n (fx+fy+tensor.cosh(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,\n fxv+fyv+numpy.cosh(fzv), 'float32'),\n (fx+fy+abs(fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv+fyv +\n numpy.absolute(fzv), 'float32'),\n (ix+iy+abs(iz), (ix, iy, iz), (ixv, iyv, izv), 1, ixv+iyv +\n numpy.absolute(izv), 'int32'), # 30\n (fx+fy+theano.tensor.log(fz), (fx, fy, fz), (fxv, fyv,\n fzv), 1, fxv+fyv+numpy.log(fzv), 'float32'),\n (fx+fy+theano.tensor.log2(fz), (fx, fy, fz), (fxv, fyv,\n fzv), 1, fxv+fyv+numpy.log2(fzv), 'float32'),\n (fx+fy+theano.tensor.log10(fz), (fx, fy, fz), (fxv, fyv,\n fzv), 1, fxv+fyv+numpy.log10(fzv), 'float32'),\n (fx+fy**fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv+fyv**fzv,\n 'float32'), # pow\n (fx+fy+theano.tensor.exp(fz), (fx, fy, fz), (fxv, fyv,\n fzv), 1, fxv+fyv+numpy.exp(fzv), 'float32'), # 35\n (fx-fy-fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-fyv-fzv, 'float32'),\n (fx-(fy/fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv/fzv), 'float32'),\n (fx-theano.tensor.true_div(fy, 2), (fx, fy), (fxv, fyv),\n 1, fxv-(fyv/2), 'float32'),\n (fx-theano.tensor.true_div(fy, fz), (fx, fy, fz), (fxv,\n fyv, fzv), 1, fxv-(fyv/fzv), 'float32'),\n (fx-theano.tensor.int_div(ix*100, iy*1000), (fx, ix,\n iy), (fxv, ixv, iyv), 1, fxv-((ixv*100)//(iyv*1000)), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}), # 40\n (fx-(fy/2), (fx, fy), (fxv, fyv), 1, fxv-(fyv/2), 'float32'),\n (fx-(fy%fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv%fzv), 'float32'),\n (fx-(fy > fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv > fzv), 'float32'),\n (fx-(fy >= fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv >= fzv), 'float32'),\n (fx-(fy < fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv < fzv), 'float32'), # 45\n (fx-(fy <= fz), 
(fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv <= fzv), 'float32'),\n (fx-T.eq(fy, fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(\n fyv == fzv), 'float32'),\n (fx-T.neq(fy, fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(\n fyv != fzv), 'float32'),\n (fx-fy+tensor.tan(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,\n fxv-fyv+numpy.tan(fzv), 'float32'),\n (fx-fy+tensor.tanh(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,\n fxv-fyv+numpy.tanh(fzv), 'float32'), # 50\n (fx-fy+tensor.sin(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,\n fxv-fyv+numpy.sin(fzv), 'float32'),\n (fx-fy+tensor.sinh(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,\n fxv-fyv+numpy.sinh(fzv), 'float32'),\n (fx-fy+theano.tensor.sqr(fz), (fx, fy, fz), (fxv, fyv,\n fzv), 1, fxv-fyv+(fzv*fzv), 'float32'),\n (fx-fy+theano.tensor.sqrt(fz), (fx, fy, fz), (fxv, fyv,\n fzv), 1, fxv-fyv+numpy.sqrt(fzv), 'float32'),\n (fx-fy+theano.tensor.inv(fz), (fx, fy, fz), (fxv, fyv,\n fzv), 1, fxv-fyv+(1/fzv), 'float32'), # 55\n (fx-fy+theano.tensor.neg(fz), (fx, fy, fz), (fxv, fyv,\n fzv), 1, fxv-fyv+(-fzv), 'float32'),\n (fx-fy+theano.tensor.round(fz), (fx, fy, fz), (fxv, fyv,\n fzv), 1, fxv-fyv+numpy.round(fzv), 'float32'),\n (ix-iy+theano.tensor.iround(fz), (ix, iy, fz), (ixv,\n iyv, fzv), 1, ixv-iyv+numpy.round(fzv), 'int64'),\n # Bit op\n (fx-theano.tensor.or_(iy, iz), (fx, iy, iz), (fxv, iyv,\n izv), 1, fxv-(iyv|izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n (fx-theano.tensor.xor(iy, iz), (fx, iy, iz), (fxv, iyv,\n izv), 1, fxv-(iyv^izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}), # 60\n (fx-theano.tensor.and_(iy, iz), (fx, iy, iz), (fxv, iyv,\n izv), 1, fxv-(iyv&izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n (fx-theano.tensor.invert(iy), (fx, iy), (fxv, iyv), 1,\n fxv-(~iyv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\n\n (fx-theano.tensor.cast(fy, dtype='float64'), (fx, fy), (fxv, fyv), 1,\n fxv-numpy.asarray(fyv, 'float64'), 'float64'),\n (theano.tensor.pow(fx*fy+fz, fx*fy), (fx, fy, fz), (fxv,\n fyv, fzv), 1, numpy.power(fxv*fyv+fzv, fxv*fyv), 'float32'),\n (fv+fy**fz, (fv, fy, fz), (fvv, fyv, fzv), 2, fvv+fyv**fzv,\n 'float32'), # fused with a dimshuffle #65\n (fv-fy+tensor.tanh(fz), (fv, fy, fz), (fvv, fyv, fzv), 2,\n fvv-fyv+numpy.tanh(fzv), 'float32'), # fused with a dimshuffle\n\n # Cases where the same input is reused many times.\n (theano.tensor.mul(fx, fx, fx, fx), (fx,), (fxv,), 1, fxv *\n fxv*fxv*fxv, 'float32'),\n (theano.tensor.mul(fx, ftanx, ftanx), (fx,), (fxv,), 1,\n fxv*numpy.tan(fxv)*numpy.tan(fxv), 'float32'),\n (theano.tensor.mul(fx, ftanx, ftanx, fx), (fx,), (fxv,),\n 1, fxv*numpy.tan(fxv)*numpy.tan(fxv)*fxv, 'float32'),\n (theano.tensor.mul(ftanx, ftanx, fx+fy), (fx, fy), (fxv,\n fyv), 1, numpy.tan(fxv)*numpy.tan(fxv)*(fxv+fyv), 'float32'), # 70\n\n # Cases with different broadcast pattern. 
They should not\n # be merged as this would duplicate computation\n # The graph should have 2 elemwise and 1 dimshuffle\n (fx*theano.tensor.sin(fs), (fx, fs), (fxv,\n fsv), 3, fxv*numpy.sin(fsv), 'float32'),\n ]\n if slice:\n cases = cases[slice]\n times = numpy.zeros(len(cases))\n fail1 = []\n fail2 = []\n fail3 = []\n fail4 = []\n for id, [g, sym_inputs, val_inputs,\n nb_elemwise, answer, out_dtype] in enumerate(cases):\n if isinstance(out_dtype, dict):\n out_dtype = out_dtype[config.cast_policy]\n if (gpu and (out_dtype != 'float32' or\n any(i.dtype != 'float32' for i in g.owner.inputs))):\n print(\"Skip test %d as the gpu code currently supports only float32\" % id)\n continue\n print(\"new cases\", id)\n\n if shared_fn is None:\n assert gpu is False\n f = compile.function(list(sym_inputs), g, mode=mode)\n for x in xrange(nb_repeat):\n out = f(*val_inputs)\n t1 = time.time()\n else:\n out = shared_fn(numpy.zeros(shp, dtype=out_dtype), 'out')\n assert out.dtype == g.dtype\n f = function(sym_inputs, [], updates=[(out, g)], mode=mode)\n t0 = time.time()\n for x in xrange(nb_repeat):\n f(*val_inputs)\n t1 = time.time()\n out = out.get_value()\n\n # print \"CASE2/3\", f.maker.fgraph.toposort()\n # print 'CASE2/3', f.maker.fgraph\n # print 'CASE2/3', f.maker.fgraph.toposort()[3].op.scalar_op.fgraph\n\n times[id] = t1 - t0\n atol = 1e-8\n if out_dtype == 'float32':\n atol = 1e-6\n if not numpy.allclose(out, answer * nb_repeat, atol=atol):\n fail1.append(id)\n print(val_inputs)\n print(out)\n print(answer * nb_repeat)\n #assert 0\n topo = f.maker.fgraph.toposort()\n if gpu:\n import theano.sandbox.cuda as cuda\n topo_ = [x for x in topo if not isinstance(\n x.op, (cuda.basic_ops.GpuFromHost, cuda.basic_ops.HostFromGpu))]\n\n gpu_ = [x for x in topo if isinstance(x.op,\n cuda.basic_ops.GpuFromHost)]\n if not len(gpu_) == len(sym_inputs):\n fail2.append((id, gpu_, sym_inputs))\n else:\n topo_ = topo\n if assert_len_topo:\n if not len(topo_) == nb_elemwise:\n fail3.append((id, topo_, nb_elemwise))\n if nb_elemwise == 1:\n # if no variable appears multiple times in the\n # input of g,\n # check that the number of input to the Composite\n # Elemwise is ok\n if len(set(g.owner.inputs)) == len(g.owner.inputs):\n expected_len_sym_inputs = numpy.sum(\n [not isinstance(x, theano.gof.Constant)\n for x in topo_[0].inputs])\n assert expected_len_sym_inputs == len(sym_inputs)\n\n if not out_dtype == out.dtype:\n fail4.append((id, out_dtype, out.dtype))\n\n failed = len(fail1 + fail2 + fail3 + fail4)\n print(\"Executed\", len(cases), \"cases\", \"failed\", failed)\n if failed > 0:\n raise Exception(\"Failed %d cases\" % failed, fail1,\n fail2, fail3, fail4)\n\n return times\n\n def test_elemwise_fusion(self):\n shp = (5, 5)\n mode = copy.copy(compile.mode.get_default_mode())\n # we need the optimisation enabled and the canonicalize.\n # the canonicalize is needed to merge multiplication/addition by constant.\n mode._optimizer = mode._optimizer.including(\n 'local_elemwise_fusion', 'composite_elemwise_fusion',\n 'canonicalize')\n self.do(mode, shared, shp)\n\n @attr('slow')\n def test_elemwise_fusion_4d(self):\n shp = (3, 3, 3, 3)\n mode = copy.copy(compile.mode.get_default_mode())\n # we need the optimisation enabled and the canonicalize.\n # the canonicalize is needed to merge multiplication/addition by constant.\n mode._optimizer = mode._optimizer.including(\n 'local_elemwise_fusion', 'composite_elemwise_fusion',\n 'canonicalize')\n self.do(mode, shared, shp)\n\n def test_gpu_fusion(self):\n shp = (5, 5)\n 
# we need the optimisation enabled, debug do this.\n if theano.config.mode == \"FAST_COMPILE\":\n mode = theano.compile.mode.get_mode(\"FAST_RUN\").including(\n 'local_elemwise_fusion', 'composite_elemwise_fusion',\n 'canonicalize', 'gpu')\n else:\n mode = theano.compile.mode.get_default_mode().including(\n 'local_elemwise_fusion', 'composite_elemwise_fusion',\n 'canonicalize', 'gpu')\n import theano.sandbox.cuda as cuda\n if not cuda.cuda_available:\n raise SkipTest(\"cuda not available\")\n\n self.do(mode, cuda.float32_shared_constructor, shp, gpu=True)\n\n @attr('slow')\n def test_gpu_fusion_Xd(self):\n # we need the optimisation enabled, debug do this.\n if theano.config.mode == \"FAST_COMPILE\":\n mode = theano.compile.mode.get_mode(\"FAST_RUN\").including(\n 'local_elemwise_fusion', 'composite_elemwise_fusion',\n 'canonicalize', 'gpu')\n else:\n mode = theano.compile.mode.get_default_mode().including(\n 'local_elemwise_fusion', 'composite_elemwise_fusion',\n 'canonicalize', 'gpu')\n import theano.sandbox.cuda as cuda\n if not cuda.cuda_available:\n raise SkipTest(\"cuda not available\")\n sizes = cuda.opt.get_device_type_sizes()\n if sizes['int_size'] == 4:\n shp = (5, 5, 5, 5)\n else:\n shp = (5, 5, 5)\n self.do(mode, cuda.float32_shared_constructor, shp, gpu=True)\n\n def test_fusion_35inputs(self):\n # Make sure a fused graph with more than 35 inputs does not segfault\n # or error.\n inpts = vectors(['i%i' % i for i in xrange(35)])\n # Make an elemwise graph looking like:\n # sin(i34 + sin(i33 + sin(... i1 + sin(i0) ...)))\n out = tensor.sin(inpts[0])\n for idx in xrange(1, 35):\n out = tensor.sin(inpts[idx] + out)\n\n f = function(inpts, out)\n # Test it on some dummy values\n f(*[list(range(i, 4 + i)) for i in xrange(35)])\n\n def test_pickle_big_fusion(self):\n \"\"\"In the past, pickle of Composite generated in tha case\n crashed with max recusion limit. So we where not able to\n generate C code in that case.\n\n \"\"\"\n if not theano.config.cxx:\n raise SkipTest(\"no c compiler, so can't use big elemwise!\")\n factors = []\n sd = tensor.dscalar()\n means = tensor.dvector()\n\n cst_05 = theano.tensor.constant(.5)\n cst_m05 = theano.tensor.constant(-.5)\n cst_2 = theano.tensor.constant(2)\n cst_m2 = theano.tensor.constant(-2)\n ones = theano.tensor.constant(numpy.ones(10))\n n = 85\n if theano.config.mode in [\"DebugMode\", \"DEBUG_MODE\"]:\n n = 10\n\n for i in xrange(n):\n f = (cst_m05 * sd ** cst_m2 * (ones - means[i]) ** cst_2 +\n cst_05 * tensor.log(cst_05 * (sd ** cst_m2) / numpy.pi))\n factors.append(tensor.sum(f))\n\n logp = tensor.add(*factors)\n\n vars = [sd, means]\n dlogp = function(vars, [theano.grad(logp, v) for v in vars])\n dlogp(2, numpy.random.rand(n))\n\n def speed_fusion(self, shared_fn=shared, gpu=False, s=None):\n \"\"\"\n param type s: a slice object\n param s: a slice to apply to the case to execute. If None, exec all case.\n \"\"\"\n\n shp = (3000, 3000)\n shp = (1000, 1000)\n nb_repeat = 50\n# linker=gof.CLinker\n# linker=gof.OpWiseCLinker\n\n mode1 = copy.copy(compile.get_default_mode())\n mode1._optimizer = mode1._optimizer.including('local_elemwise_fusion')\n # TODO:clinker is much faster... but use to much memory\n # Possible cause: as their is do deletion of intermediate value when we don't keep the fct.\n # More plausible cause: we keep a link to the output data?\n # Follow up. Clinker do the same... 
second cause?\n mode2 = copy.copy(compile.get_default_mode())\n mode2._optimizer = mode2._optimizer.excluding('local_elemwise_fusion')\n print(\"test with linker\", str(mode1.linker))\n times1 = self.do(mode1, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,\n assert_len_topo=False, slice=s)\n times2 = self.do(mode2, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,\n assert_len_topo=False, slice=s)\n print(\"times1 with local_elemwise_fusion\")\n print(times1, times1.min(), times1.max(), times1.sum())\n print(\"times2 without local_elemwise_fusion\")\n print(times2, times2.min(), times2.max(), times2.sum())\n d = times2 / times1\n\n print(\"times2/times1\")\n print(d)\n print(\"min\", d.min(), \"argmin\", d.argmin(), \"max\", d.max(), \\\n \"mean\", d.mean(), \"std\", d.std())\n\n def test_fusion_inplace(self):\n mode = copy.copy(compile.mode.get_default_mode())\n # we need the optimisation enabled and the canonicalize.\n # the canonicalize is needed to merge multiplication/addition by constant.\n mode._optimizer = mode._optimizer.including(\n 'local_elemwise_fusion', 'composite_elemwise_fusion',\n 'canonicalize', 'inplace')\n\n x, y, z = dmatrices('xyz')\n f = theano.function([x, y, z], tensor.dot(x, y) + x + y + z, mode=mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n assert f.maker.fgraph.toposort()[-1].op.inplace_pattern\n f(numpy.random.random((5, 5)), numpy.random.random((5, 5)),\n numpy.random.random((5, 5)))\n\n def speed_fusion_gpu(self):\n import theano.sandbox.cuda as cuda\n self.speed_fusion(shared_fn=cuda.\n float32_shared_constructor, gpu=True, s=slice(0, 15))\n\n def speed_log_exp(self):\n s = slice(31, 36)\n# linker=gof.CLinker\n linker = gof.OpWiseCLinker\n mode = compile.Mode(linker(), copy.copy(compile.mode.OPT_FAST_RUN))\n mode = compile.ProfileMode()\n print(\"time\", self.do(mode, shared, shp=(1000, 1000), gpu=False,\n assert_len_topo=False, slice=s, nb_repeat=100))\n\n def tes_memory_leak(self, mode=compile.mode.Mode('c', 'merge'),\n shared_fn=shared, shp=(3000, 3000), gpu=False,\n nb_repeat=30, assert_len_topo=True, slice=None):\n \"\"\"\n param shared_fn: if None, will use compile.function\n verify that the elemwise fusion work\n Test with and without DimShuffle\n \"\"\"\n # TODO: disable the canonizer?\n fx = fmatrices('x')\n fy = fmatrices('y')\n fxv = numpy.zeros(shp, dtype='float32') + 2\n cases = [\n (fx, (fx), (fxv), 'float32'), # 1\n ]\n import gc\n import pdb\n import objgraph\n import weakref\n d = {}\n dl = []\n v1 = None\n mode = compile.mode.Mode('c', 'merge')\n # TODO: if mode is Mode('py','merge') then their is no memory leak!\n from theano.compile.function_module import orig_function\n for id, [g, sym_inputs, val_inputs, out_dtype] in enumerate(cases):\n for zzzz in xrange(nb_repeat):\n v = numpy.zeros(shp, dtype=out_dtype)\n gc.collect()\n gc.collect()\n gc.collect()\n# print 'v1',v1\n v1 = weakref.ref(v)\n pdb.set_trace()\n # f = orig_function([compile.In(fx),compile.In(variable=fy, value=None)],\n # [fy+fx],mode=mode)#no memory leak\n f = orig_function([compile.In(fx), compile.In(variable=fy, value=v)],\n [fy + fx], mode=mode) # memory leak\n del v\n gc.collect()\n gc.collect()\n gc.collect()\n pdb.set_trace()\n\n if False:\n gc.collect()\n gc.collect()\n gc.collect()\n nd = objgraph.typestats()\n print('key, old val, new val, diff')\n for key in set(d.keys() + nd.keys()):\n if key in d and key in nd and nd[key] != d[key]:\n print(key, d.get(key), nd.get(key), end=' ')\n if key in d and key in nd:\n print(nd[key] - d[key])\n else:\n 
print(None)\n gc.collect()\n gc.collect()\n gc.collect()\n d = nd\n\n# pdb.set_trace()\n if False:\n gc.collect()\n gc.collect()\n gc.collect()\n ndl = objgraph.by_type('list')\n ll = []\n if len(dl) > 0:\n nb = 0\n for x in ndl:\n cmp = not isinstance(x, list)\n if not cmp and x:\n cmp = x[0].__class__.__name__ != \\\n 'array_converter'\n if cmp:\n cmp = x[0] != 'Option'\n if cmp:\n cmp = x[0] != 270\n cmp = False\n if cmp and x in dl:\n nb += 1\n ll.append(x)\n# pdb.set_trace()\n pass\n pdb.set_trace()\n dl = ndl\n\n gc.collect()\n gc.collect()\n gc.collect()\n# objgraph.show_most_common_types(limit=40)\n# f(*val_inputs)\n gc.collect()\n gc.collect()\n gc.collect()\n\n# cases[id]=None #to remove g, that link to out that link to the ndarray!\n # g.owner.inputs[0] is out... make owner a weakref?\n\n\nclass TimesN(theano.scalar.basic.UnaryScalarOp):\n \"\"\"Used in test TestCompositeCodegen\n\n Must be outside of the class, otherwise, the c cache code can't\n pickle this class and this cause stuff printing during test.\n \"\"\"\n def __eq__(self, other):\n return super(TimesN, self).__eq__(other) and self.n == other.n\n\n def __hash__(self):\n return super(TimesN, self).__hash__() ^ hash(self.n)\n\n def __init__(self, n, *args, **kwargs):\n self.n = n\n theano.scalar.basic.UnaryScalarOp.__init__(self, *args, **kwargs)\n\n def impl(self, x):\n return x * self.n\n\n def c_support_code_apply(self, node, nodename):\n n = str(self.n)\n return \"\"\"\n float %(nodename)s_timesn(float x) { return x * %(n)s; }\n \"\"\" % locals()\n\n def c_code(self, node, name, inputs, outputs, sub):\n (x,) = inputs\n (z,) = outputs\n return \"%(z)s = %(name)s_timesn(%(x)s);\" % locals()\n\n\nclass TestCompositeCodegen(unittest.TestCase):\n \"\"\"\n Test The Composite Ops code generation in a case where there is multiple\n scalar ops with support code.\n \"\"\"\n def setUp(self):\n upgrade_to_float = theano.scalar.basic.upgrade_to_float\n\n self.scal_times_2 = TimesN(2, upgrade_to_float, name='times_2')\n self.times_2 = theano.tensor.elemwise.Elemwise(\n self.scal_times_2,\n name='times_2')\n\n self.scal_times_3 = TimesN(3, upgrade_to_float, name='times_3')\n self.times_3 = theano.tensor.elemwise.Elemwise(\n self.scal_times_3,\n name='times_3')\n\n self.x = fvector()\n\n def test_nested_composite(self):\n y = self.times_2(self.x)\n z = self.times_3(y)\n f = function([self.x], z)\n if config.mode != \"FAST_COMPILE\":\n assert len(f.maker.fgraph.toposort()) == 1\n fval = f([1, 2, 3])\n assert numpy.all(fval == [6, 12, 18])\n\n def test_nested_gpu(self):\n import theano.sandbox.cuda as cuda\n if not cuda.cuda_available:\n raise SkipTest(\"cuda not available\")\n\n import theano.sandbox.cuda.opt\n\n y = self.times_2(self.x)\n z = self.times_3(y)\n f = theano.function([self.x], cuda.gpu_from_host(z),\n mode=theano.compile.mode.get_default_mode().including('gpu'))\n topo = f.maker.fgraph.toposort()\n if config.mode != \"FAST_COMPILE\":\n assert len(topo) == 2\n assert topo[1].op == cuda.gpu_from_host\n # topo1 is doing the composite work on the CPU. 
Auto-generation of\n # GPU code for ops with support code is not possible.\n fval = numpy.asarray(f([1, 2, 3]))\n assert numpy.all(fval == [6, 12, 18]), fval\n\n\ndef test_log1p():\n m = theano.config.mode\n if m == 'FAST_COMPILE':\n m = 'FAST_RUN'\n m = compile.mode.get_mode(m)\n m = m.excluding('fusion')\n # check some basic cases\n x = dvector()\n f = function([x], T.log(1 + (x)), mode=m)\n assert [node.op for node in f.maker.fgraph.toposort()] == [T.log1p]\n f = function([x], T.log(1 + (-x)), mode=m)\n assert [node.op for node in f.maker.fgraph.toposort()] == [\n T.neg, inplace.log1p_inplace]\n f = function([x], -T.log(1 + (-x)), mode=m)\n assert [node.op for node in f.maker.fgraph.toposort()] == [\n T.neg, inplace.log1p_inplace, inplace.neg_inplace]\n\n # check trickier cases (and use different dtype)\n y = fmatrix()\n f = function([x, y], T.log(tensor.fill(y, 1) + (x)), mode=m)\n # the first three ops are Shape_i, Shape_i, and Dimshuffle\n assert [node.op for node in f.maker.fgraph.toposort()][3:] == [\n T.log1p, tensor.alloc]\n f = function([x, y], T.log(0 + (x) + tensor.fill(y, 1.0)), mode=m)\n assert [node.op for node in f.maker.fgraph.toposort()][3:] == [\n T.log1p, tensor.alloc]\n f = function([x, y], T.log(2 + (x) - tensor.fill(y, 1.0)), mode=m)\n assert [node.op for node in f.maker.fgraph.toposort()][3:] \\\n == [T.log1p, tensor.alloc]\n\n f([1e-7, 10], [[0, 0], [0, 0]]) # debugmode will verify values\n\n if 0:\n # at one point this worked, but it has been broken since\n # the constant up-casting made 1 -> 1.0+0.0j\n # I was never sure if this optimization should work on complex numbers or not.\n z = tensor.zmatrix()\n f = function([z], T.log(1 + (z)), mode=m)\n assert [node.op for node in f.maker.fgraph.toposort()] == [T.log1p]\n\n if 1:\n # should work for int\n z = tensor.imatrix()\n f = function([z], T.log(1 + (z)), mode=m)\n assert [node.op for node in f.maker.fgraph.toposort()] == [T.log1p]\n\n\ndef test_log_add():\n m = theano.config.mode\n if m == 'FAST_COMPILE':\n m = 'FAST_RUN'\n m = compile.mode.get_mode(m)\n m = m.excluding('fusion')\n m = copy.copy(m)\n # No need to put them back as we have a new object\n m.check_isfinite = False\n\n # check some basic cases\n x = dvector()\n y = dvector()\n f = function([x, y], T.log(T.exp(x) + T.exp(y)), mode=m)\n\n f([10000], [10000]) # causes overflow if handled incorrectly\n assert numpy.isfinite(f([10000], [10000]))\n assert numpy.allclose(f([10000], [10000]), 10000 + numpy.log1p(1))\n\n # test that it give the same result when it don't overflow\n f([10], [10]) # don't causes overflow\n assert numpy.allclose(f([10], [10]), 10 + numpy.log1p(1))\n\n # test that it also works with more than two args, (this currently fails)\n x = dvector()\n y = dvector()\n f = function([x, y], T.log(T.exp(x) + T.exp(y) + T.exp(x - y) + T.exp(\n x + y)), mode=m)\n\n try:\n f([10000], [10000]) # causes overflow if handled incorrectly\n assert numpy.allclose(f([10000], [10000]), 20000)\n except AssertionError:\n raise SkipTest(\"log(add(exp)) is not stabilized when adding \"\n \"more than 2 elements, see #623\")\n\n # TODO: test that the optimization works in the presence of broadcasting.\n\n # TODO: (write and) test that the optimization works with Sum in addition to working with Add.\n\n\ndef test_local_useless_slice():\n # test a simple matrix\n x = tensor.matrix('x')\n mode_unopt = compile.get_default_mode().excluding(\"local_useless_slice\")\n mode_opt = compile.get_default_mode().including(\"local_useless_slice\")\n\n # test with and without 
the useless slice\n o = 2 * x[0, :]\n f_unopt = theano.function([x], o, mode=mode_unopt)\n f_opt = theano.function([x], o, mode=mode_opt)\n test_inp = numpy.random.randint(-10, 10, (4, 4)).astype('float32')\n assert all(f_opt(test_inp) == f_unopt(test_inp)),\\\n \"The optimization caused a mismatch in the result\"\n # test to see if the slice is truly gone\n apply_node = f_opt.maker.fgraph.toposort()[0]\n subtens = apply_node.op\n assert not any(isinstance(idx, slice) for idx in subtens.idx_list), \"Slice should be gone\"\n\n # Now test that the stack trace is copied over properly,\n # before and after optimization.\n assert hasattr(f_unopt.outputs[0].variable.tag, 'trace')\n assert hasattr(f_opt.outputs[0].variable.tag, 'trace')\n\n # test a 4d tensor\n z = tensor.tensor4('z')\n o2 = z[1, :, :, 1]\n o3 = z[0, :, :, :]\n f_opt_check = theano.function([z], o2, mode=mode_opt)\n f_opt_check_apply = theano.function([z], o3, mode=mode_opt)\n\n # The optimization shouldn't apply here\n apply_node = f_opt_check.maker.fgraph.toposort()[0]\n subtens = apply_node.op\n assert [isinstance(idx, slice) for idx in subtens.idx_list].count(True) == 2\n # But it should here\n apply_node = f_opt_check_apply.maker.fgraph.toposort()[0]\n subtens = apply_node.op\n assert not any(isinstance(idx, slice) for idx in subtens.idx_list)\n\n # Finally, test that the stack trace is copied over properly,\n # before and after optimization.\n assert hasattr(f_opt_check.outputs[0].variable.tag, 'trace')\n assert hasattr(f_opt_check_apply.outputs[0].variable.tag, 'trace')\n\ndef test_local_useless_inc_subtensor():\n x = tensor.matrix('x')\n y = tensor.matrix('y')\n mode = compile.get_default_mode().including(\"local_useless_inc_subtensor\")\n for sub in [slice(None), slice(None, None, -1)]:\n o = tensor.set_subtensor(x[::, sub], y)\n f = theano.function([x, y], o, mode=mode)\n o_shape = tensor.set_subtensor(x[::, sub],\n tensor.specify_shape(y, x.shape))\n f_shape = theano.function([x, y], o_shape, mode=mode)\n\n # Test with shape info\n topo = f_shape.maker.fgraph.toposort()\n assert not any(isinstance(n.op, tensor.IncSubtensor) for n in topo)\n out = f_shape([[2, 3]], [[3, 4]])\n assert (out == numpy.asarray([[3, 4]])[::, sub]).all()\n\n # Test that without shape info, we don't apply the opt.\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, tensor.IncSubtensor)\n out = f([[2, 3]], [[3, 4]])\n assert (out == numpy.asarray([[3, 4]])[::, sub]).all()\n\n # Test that we don't remove shape errors\n try:\n f([[2, 3]], [[3, 4], [4, 5]])\n assert False\n except (ValueError, AssertionError):\n pass\n\n # Test that we don't remove broadcastability\n out = f([[2, 3], [3, 4]], [[5, 6]])\n assert (out == numpy.asarray([[5, 6], [5, 6]])[::, sub]).all()\n\n # Test that we do not optimize other strides even when sub and y\n # have the same shapes\n sub = x[::, ::2]\n o_shape = tensor.set_subtensor(sub,\n tensor.specify_shape(y, sub.shape))\n f_shape = theano.function([x, y], o_shape)\n topo = f_shape.maker.fgraph.toposort()\n # theano.printing.debugprint(f_shape)\n assert any(isinstance(n.op, tensor.IncSubtensor) for n in topo)\n out = f_shape([[2, 3, 6, 7]], [[8, 9]])\n assert (out == numpy.asarray([[8, 3, 9, 7]])).all()\n\n\ndef test_local_useless_subtensor():\n x = tensor.matrix('x')\n\n # Test default\n for dims in [(slice(0, None), ),\n (slice(0, None), slice(0, None)),\n ]:\n f = function([x], tensor.exp(x).__getitem__(dims), mode=mode_opt)\n # theano.printing.debugprint(f)\n prog 
= f.maker.fgraph.toposort()\n assert prog[0].op == tensor.exp\n assert len(prog) == 1\n f([[0, 1, 2], [3, 4, 5]]) # let debugmode test something\n\n x_c = tensor.specify_shape(x, (2, 3))\n # Test constant\n for dims, res in [((slice(0, 2), ), True),\n ((slice(0, 2), slice(0, None)), True),\n ((slice(0, 2), slice(0, 3)), True),\n ((slice(0, None), slice(0, 3)), True),\n ((slice(0, 3), slice(0, 13)), True),\n ((slice(0, 3), slice(0, 2)), False),\n ((slice(0, 1), slice(0, None)), False),\n ((slice(0, 1), 1), False),\n ]:\n f = function([x], tensor.exp(x_c).__getitem__(dims), mode=mode_opt)\n # theano.printing.debugprint(f)\n prog = f.maker.fgraph.toposort()\n if res:\n assert isinstance(prog[0].op, theano.tensor.SpecifyShape), dims\n assert prog[1].op == tensor.exp, dims\n assert len(prog) == 2, dims\n else:\n assert any([isinstance(node.op, Subtensor) for node in prog])\n f([[0, 1, 2], [3, 4, 5]]) # let debugmode test something\n\n # Test Variable\n for idx, (dims, res) in enumerate([\n ((slice(0, x.shape[0]), ), True),\n ((slice(0, x.shape[1]), ), False),\n ((slice(0, x.shape[0]), slice(0, x.shape[1]), ), True),\n ((slice(0, x.shape[0]), slice(0, x.shape[0]), ), False),\n ((slice(0, x.shape[1]), slice(0, x.shape[0]), ), False),\n ((slice(0, x.shape[1]), slice(0, x.shape[1]), ), False),\n ((slice(0, x.shape[1]), 2), False),\n ((slice(0, x.shape[1]), slice(x.shape[0] - x.shape[0],\n x.shape[1]),), False),\n ((slice(0, T.scalar_from_tensor(x.shape[0])), ), True),\n ]):\n f = function([x], tensor.exp(x).__getitem__(dims), mode=mode_opt)\n # theano.printing.debugprint(f)\n prog = f.maker.fgraph.toposort()\n if res:\n assert prog[0].op == tensor.exp, dims\n assert len(prog) == 1, dims\n else:\n assert any([isinstance(node.op, Subtensor) for node in prog])\n f([[0, 1, 2], [3, 4, 5]]) # let debugmode test something\n # Test mix Variable and Constant\n # Currently not supported\n for idx, (dims, res) in enumerate([\n ((slice(0, x.shape[0]), slice(0, 3)), False),\n ((slice(0, 3), slice(0, x.shape[1])), False),\n ]):\n f = function([x], tensor.exp(x_c).__getitem__(dims), mode=mode_opt)\n # theano.printing.debugprint(f)\n prog = f.maker.fgraph.toposort()\n if res:\n assert prog[0].op == tensor.exp, dims\n assert len(prog) == 1, dims\n else:\n assert any([isinstance(node.op, Subtensor) for node in prog])\n f([[0, 1, 2], [3, 4, 5]]) # let debugmode test something\n\n # Test scalar variable\n s = scal.int32('s')\n for idx, (dims, res) in enumerate([\n ((slice(0, s), ), False),\n ]):\n f = function([x, s], tensor.exp(x).__getitem__(dims), mode=mode_opt)\n # theano.printing.debugprint(f)\n prog = f.maker.fgraph.toposort()\n if res:\n assert prog[0].op == tensor.exp, dims\n assert len(prog) == 1, dims\n else:\n assert any([isinstance(node.op, Subtensor) for node in prog])\n f([[1, 2, 3], [4, 5, 6]], 1)\n f([[1, 2, 3], [4, 5, 6]], 3)\n\n # Test AdvancedSubtensor1 case when all rows are selected by a list/vector\n # or ARange op\n for dims, res in (([0, 1], True),\n ([1, 0], False),\n ([0, 0], False),\n ([0, 0, 1], False),\n (T.arange(2), True),\n (T.arange(0, 2), True),\n (T.arange(0, 2, 2), False),\n (T.arange(0, 2, -1), False),\n (T.arange(1, 2), False)):\n f = function([x], tensor.exp(x_c).__getitem__(dims), mode=mode_opt)\n # theano.printing.debugprint(f)\n prog = f.maker.fgraph.toposort()\n if res:\n assert isinstance(prog[0].op, theano.tensor.SpecifyShape), dims\n assert prog[1].op == tensor.exp, dims\n assert len(prog) == 2, dims\n else:\n assert any([isinstance(node.op, AdvancedSubtensor1)\n for node 
in prog])\n f([[0, 1, 2], [3, 4, 5]]) # let debugmode test something\n\n\nclass test_local_subtensor_make_vector(unittest.TestCase):\n def test_scalar_idx(self):\n x, y, z = tensor.lscalars('xyz')\n v = make_vector(x, y, z)\n f = function([x, y, z], v[0], mode=mode_opt)\n\n prog = f.maker.fgraph.toposort()\n assert len(prog) == 1\n assert isinstance(prog[0].op, theano.compile.ops.DeepCopyOp)\n assert f(0, 1, 2) == 0\n\n def test_slice_idx_stop(self):\n x, y, z = tensor.lscalars('xyz')\n v = make_vector(x, y, z)\n f = function([x, y, z], v[:2], mode=mode_opt)\n\n prog = f.maker.fgraph.toposort()\n assert len(prog) == 1\n assert isinstance(prog[0].op, MakeVector)\n assert len(prog[0].inputs) == 2\n r = f(0, 1, 2)\n assert r[0] == 0 and r[1] == 1\n\n def test_slice_idx_step(self):\n x, y, z = tensor.lscalars('xyz')\n v = make_vector(x, y, z)\n f = function([x, y, z], v[::2], mode=mode_opt)\n\n prog = f.maker.fgraph.toposort()\n assert len(prog) == 1\n assert isinstance(prog[0].op, MakeVector)\n assert len(prog[0].inputs) == 2\n r = f(0, 1, 2)\n assert r[0] == 0 and r[1] == 2\n\n def test_AdvancedSubtensor1_idx(self):\n x, y, z = tensor.lscalars('xyz')\n v = make_vector(x, y, z)\n f = function([x, y, z], v[[0, 2]], mode=mode_opt)\n\n prog = f.maker.fgraph.toposort()\n assert len(prog) == 1\n assert isinstance(prog[0].op, MakeVector)\n assert len(prog[0].inputs) == 2\n r = f(0, 1, 2)\n assert r[0] == 0 and r[1] == 2\n\n def test_stacktrace(self):\n x, y, z = tensor.lscalars('xyz')\n v = make_vector(x, y, z)\n\n # Compile function using only the 'local_subtensor_make_vector' optimization,\n # which requires us to add the 'canonicalize' phase.\n mode = theano.compile.mode.Mode(optimizer=None).including('canonicalize_db').including(\"local_subtensor_make_vector\")\n f = function([x, y, z], v[0], mode=mode)\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(f.outputs[0].variable.tag, 'trace'))\n #import ipdb; ipdb.set_trace()\n \n \n # Compile function using all optimizations in fast_compile mode, \n # including the 'local_subtensor_make_vector' optimization\n mode = theano.compile.mode.get_mode('FAST_COMPILE').including(\"local_subtensor_make_vector\")\n f = function([x, y, z], v[0], mode=mode)\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(f.outputs[0].variable.tag, 'trace'))\n \n\nclass test_local_subtensor_lift(unittest.TestCase):\n def _verify_stack_trace(self, f):\n for output in f.outputs:\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(output.variable.tag, 'trace'))\n\n def test0(self):\n # basic test that the Op works\n x = tensor.matrix('x')\n f = function([x], tensor.exp(x)[0], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n prog = f.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.Subtensor) # first subtensor\n assert prog[1].op == tensor.exp\n assert len(prog) == 2\n f([[0, 1], [2, 3]]) # let debugmode test something\n\n def test0b(self):\n # as test0, but we reuse the output of the elemwise\n # So we should not lift the subtensor\n x = tensor.matrix('x')\n f = function([x], [tensor.exp(x)[0], tensor.exp(x)], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n prog = f.maker.fgraph.toposort()\n assert prog[0].op == tensor.exp\n assert isinstance(prog[1].op, tensor.Subtensor) # first subtensor\n assert isinstance(prog[2].op, DeepCopyOp)\n assert len(prog) == 3\n f([[0, 1], [2, 3]]) # let debugmode test something\n\n def test1(self):\n 
# basic test that the optimization work with scalar broadcasted\n x = tensor.matrix('x')\n y = tensor.scalar('y')\n z = tensor.matrix('z')\n f = function([x, y, z], tensor.exp(x + y + z)[0], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n prog = f.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.DimShuffle)\n assert isinstance(prog[1].op, tensor.Subtensor) # first subtensor\n assert isinstance(prog[2].op, tensor.Subtensor) # first subtensor\n assert isinstance(prog[3].op.scalar_op, theano.scalar.\n Composite) # Composite{add,add}\n assert len(prog) == 4\n f([[0, 1], [2, 3]], 4, [[4, 5], [6, 7]])\n # let debugmode test something\n\n def test2(self):\n # as 1, but take a slice\n x = tensor.matrix('x')\n y = tensor.scalar('y')\n z = tensor.matrix('z')\n f = function([x, y, z], tensor.exp(x + y + z)[0:2], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n prog = f.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.DimShuffle)\n assert isinstance(prog[1].op, tensor.Subtensor) # first subtensor\n assert isinstance(prog[2].op, tensor.Subtensor) # first subtensor\n assert isinstance(prog[3].op.scalar_op, theano.scalar.\n Composite) # Composite{add,add}\n assert len(prog) == 4\n f([[0, 1], [2, 3]], 4, [[4, 5], [6, 7]])\n # let debugmode test something\n\n def test3(self):\n # basic test that the optimization does work with broadcasting\n # for unary elemwise.\n y = tensor.vector('y')\n f = function([y], tensor.exp(y.dimshuffle(0, 'x'))[0], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n prog = f.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.DimShuffle)\n assert isinstance(prog[1].op, tensor.Subtensor)\n assert prog[2].op == tensor.exp\n assert len(prog) == 3\n f([4, 5]) # let debugmode test something\n\n def test4(self):\n # basic test that the optimization doesn't work with broadcasting\n # ... It *could* be extended to,\n # ... 
but right now it doesn't, so it shouldn't try.\n x = tensor.matrix('x')\n y = tensor.vector('y')\n f = function([x, y], tensor.exp(x + y)[0], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n prog = f.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.DimShuffle)\n assert prog[1].op == tensor.add\n assert isinstance(prog[2].op, tensor.Subtensor) # first subtensor\n assert prog[3].op == inplace.exp_inplace\n assert len(prog) == 4\n f([[0, 1], [2, 3]], [4, 5]) # let debugmode test something\n\n def test5(self):\n # test that we don't lift when we reuse the output of the\n # elemwise for other computation.\n x = tensor.matrix('x')\n y = tensor.vector('y')\n f = function([x, y], [tensor.exp(x + y)[0], tensor.exp(x + y) + x],\n mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n prog = f.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.DimShuffle)\n assert isinstance(prog[1].op.scalar_op, theano.scalar.\n Composite) # Composite{add,exp}\n assert prog[2].op == tensor.add\n assert isinstance(prog[3].op, tensor.Subtensor) # first subtensor\n assert len(prog) == 4\n f([[0, 1], [2, 3]], [4, 5]) # let debugmode test something\n\n def test6(self):\n # basic test that the optimization works with a scalar as input,\n # and a scalar as output (no broadcasting of the scalar needed).\n # The optimization used to fail and display an ERROR message.\n\n x = tensor.vector('x')\n y = tensor.scalar('y')\n f = function([x, y], tensor.exp(x + y)[0], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n prog = f.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.Subtensor)\n # Composite{add,exp}\n assert isinstance(prog[1].op.scalar_op, theano.scalar.Composite)\n assert len(prog) == 2\n f([1, 2, 3], 4) # let debugmode test something\n\n def test7(self):\n # test that Subtensor(Rebroadcast(x)) gets optimized into\n # Rebroadcast(Subtensor(x)).\n\n # test basic case\n x = tensor.matrix('x')\n xval = numpy.random.rand(1, 10).astype(config.floatX)\n assert x.broadcastable == (False, False)\n newx = tensor.Rebroadcast((0, True), (1, False))(x)\n assert newx.broadcastable == (True, False)\n\n f1 = function([x], newx[:2, :5], mode=mode_opt)\n self._verify_stack_trace(f1)\n prog = f1.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.Subtensor)\n assert isinstance(prog[1].op, tensor.Rebroadcast)\n assert (f1(xval) == xval[:2, :5]).all()\n\n # corner case 1: rebroadcast changes dims which are dropped through subtensor\n y = tensor.tensor4('x')\n yval = numpy.random.rand(1, 10, 1, 3).astype(config.floatX)\n assert y.broadcastable == (False, False, False, False)\n newy = tensor.Rebroadcast((0, True), (2, True))(y)\n assert newy.broadcastable == (True, False, True, False)\n\n f2 = function([y], newy[:, 3, 0, :], mode=mode_opt)\n self._verify_stack_trace(f2)\n prog = f2.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.Subtensor)\n assert isinstance(prog[1].op, tensor.Rebroadcast)\n assert (f2(yval) == yval[:, 3, 0, :]).all()\n\n # corner case 2: subtensor idx_list is shorter than resulting broadcast pattern\n f3 = function([y], newy[:, 3, 0], mode=mode_opt)\n self._verify_stack_trace(f3)\n prog = f3.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.Subtensor)\n assert isinstance(prog[1].op, tensor.Rebroadcast)\n assert (f3(yval) == yval[:, 3, 0]).all()\n\n # corner case 3: subtensor idx_list is shorter than rebroadcast.axis\n z = tensor.tensor4('x')\n zval = numpy.random.rand(4, 10, 3, 1).astype(config.floatX)\n assert z.broadcastable == (False, False, 
False, False)\n newz = tensor.Rebroadcast((3, True))(z)\n assert newz.broadcastable == (False, False, False, True)\n\n out = newz[:, 3, 0]\n f4 = function([z], newz[:, 3, 0], mode=mode_opt)\n self._verify_stack_trace(f4)\n prog = f4.maker.fgraph.toposort()\n assert isinstance(prog[0].op, tensor.Subtensor)\n assert isinstance(prog[1].op, tensor.Rebroadcast)\n assert (f4(zval) == zval[:, 3, 0]).all()\n\n\nclass test_local_subtensor_merge(unittest.TestCase):\n def _verify_stack_trace(self, f):\n for output in f.outputs:\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(output.variable.tag, 'trace'))\n\n def setUp(self):\n utt.seed_rng()\n self.x_shapes = [(2, 2), (5, 3), (4, 1), (1, 2),\n (0, 2), (2, 0), (1, 0), (0, 0)]\n self.rng = numpy.random.RandomState(seed=utt.fetch_seed())\n\n def test_const(self):\n # var[const::][-1] -> var[-1]\n x = tensor.matrix('x')\n for idx in xrange(-7, 6):\n f = function([x], x[idx::][-1], mode=mode_opt)\n g = function([x], x[idx::][-1], mode=mode_opt.excluding(\n 'local_subtensor_merge'))\n\n self._verify_stack_trace(f)\n\n topo = f.maker.fgraph.toposort()\n assert len([t for t in topo\n if isinstance(t.op, tensor.Subtensor)]) == 1\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n\n if idx < x_s[0] and x_s[0] > 0:\n # The first subtensor is non-empty, so it makes sense\n f(x_val) # let debugmode test something\n else:\n # A non-empty subtensor of an empty one should be\n # an IndexError\n self.assertRaises(IndexError, f, x_val)\n self.assertRaises(IndexError, g, x_val)\n\n def test_scalar(self):\n # var[int::][-1] -> var[-1]\n x = tensor.matrix('x')\n y = tensor.iscalar('y')\n f = function([x, y], x[y::][-1], mode=mode_opt)\n g = function([x, y], x[y::][-1],\n mode=mode_opt.excluding('local_subtensor_merge'))\n #theano.printing.debugprint(f, print_type=True)\n\n self._verify_stack_trace(f)\n\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo\n if isinstance(t.op, tensor.Subtensor)]) == 1\n # print topo[-1].op\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n\n for idx in xrange(-9, 8):\n if (idx < x_s[0]) and (x_s[0] > 0):\n # The first subtensor is non-empty\n f(x_val, idx) # let debugmode test something\n else:\n self.assertRaises(IndexError, f, x_val, idx)\n self.assertRaises(IndexError, g, x_val, idx)\n\n @attr('slow')\n def test_const2(self):\n # var[::-1][const] -> var[-1]\n x = tensor.matrix('x')\n for idx in xrange(-8, 7):\n f = function([x], x[::-1][idx], mode=mode_opt)\n g = function([x], x[::-1][idx],\n mode=mode_opt.excluding('local_subtensor_merge'))\n\n self._verify_stack_trace(f)\n\n #theano.printing.debugprint(f, print_type=True)\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo\n if isinstance(t.op, tensor.Subtensor)]) == 1\n # print topo[-1].op\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n if (idx < x_s[0]) and (idx >= -x_s[0]):\n # The first subtensor is non-empty, so it makes sense\n f(x_val) # let debugmode test something\n else:\n # A non-empty subtensor of an empty one should be\n # an IndexError\n self.assertRaises(IndexError, f, x_val)\n self.assertRaises(IndexError, g, 
x_val)\n\n def test_scalar2(self):\n # var[::-1][int] -> var[-1]\n x = tensor.matrix('x')\n y = tensor.iscalar('y')\n f = function([x, y], x[::-1][y], mode=mode_opt)\n g = function([x, y], x[::-1][y],\n mode=mode_opt.excluding('local_subtensor_merge'))\n #theano.printing.debugprint(f, print_type=True)\n\n self._verify_stack_trace(f)\n\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo\n if isinstance(t.op, tensor.Subtensor)]) == 1\n # print topo[-1].op\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n\n for idx in xrange(-x_s[0], x_s[0]):\n f(x_val, idx) # let debugmode test something\n for idx in (list(range(x_s[0], 9)) + list(range(-9, -x_s[0]))):\n self.assertRaises(IndexError, f, x_val, idx)\n self.assertRaises(IndexError, g, x_val, idx)\n\n def test_const3(self):\n # var[::-1][:const] -> var[-1]\n x = tensor.matrix('x')\n for idx in xrange(-9, 8):\n f = function([x], x[::-1][:idx], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n #theano.printing.debugprint(f, print_type=True)\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo\n if isinstance(t.op, tensor.Subtensor)]) == 1\n # print topo[-1].op\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n f(x_val) # let debugmode test something\n\n def test_scalar3(self):\n # var[::-1][:int] -> var[-1]\n x = tensor.matrix('x')\n y = tensor.iscalar('y')\n f = function([x, y], x[::-1][:y], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n #theano.printing.debugprint(f, print_type=True)\n\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo\n if isinstance(t.op, tensor.Subtensor)]) == 1\n # print topo[-1].op\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n for idx in xrange(-7, 7):\n f(x_val, idx) # let debugmode test something\n\n def test_const4(self):\n # var[const1::][:const2]\n x = tensor.matrix('x')\n for idx1 in xrange(-7, 7):\n for idx2 in xrange(-7, 7):\n f = function([x], x[idx1:][:idx2], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n #theano.printing.debugprint(f, print_type=True)\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo\n if isinstance(t.op, tensor.Subtensor)]) == 1\n # print topo[-1].op\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n f(x_val) # let debugmode test something\n\n def test_scalar4(self):\n # var[int1:][:int2]\n x = tensor.matrix('x')\n y = tensor.iscalar('y')\n z = tensor.iscalar('y')\n f = function([x, y, z], x[y:][:z], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n #theano.printing.debugprint(f, print_type=True)\n\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo\n if isinstance(t.op, tensor.Subtensor)]) == 1\n # print topo[-1].op\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n for idx1 in xrange(-11, 11):\n for idx2 in xrange(-11, 11):\n f(x_val, idx1, idx2) # let debugmode test 
something\n\n def test_const_general(self):\n # Some cases of merge: shape, (start, stop, step) of first,\n # (start, stop, step) of second subtensor\n cases = [\n ((2, 3), (None, None, None), (None, None, -1)),\n ((12, 1), (None, None, -4), (None, None, 1)),\n ((5, 3), (1, 4, 2), (None, None, -1)),\n ]\n x = tensor.matrix('x')\n\n for shape, sl1, sl2 in cases:\n z = x[slice(*sl1)][slice(*sl2)]\n f = function([x], z, mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n\n x_val = self.rng.uniform(size=shape).astype(config.floatX)\n f(x_val)\n\n def test_scalar5(self):\n # General case with two real slices\n # var[b1:e1:s1][b2:e2:s2]\n x = tensor.matrix('x')\n b1 = tensor.iscalar('b1')\n e1 = tensor.iscalar('e1')\n s1 = tensor.iscalar('s1')\n b2 = tensor.iscalar('b2')\n e2 = tensor.iscalar('e2')\n s2 = tensor.iscalar('s2')\n f = function([x, b1, e1, s1, b2, e2, s2], x[b1:e1:s1][b2:e2:s2],\n mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n #theano.printing.debugprint(f, print_type=True)\n\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo if isinstance(t.op, tensor.\n Subtensor)]) == 1\n # print topo[-1].op\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n b1r = self.rng.permutation(list(range(-8, 8)))[:2]\n e1r = self.rng.permutation(list(range(-8, 8)))[:2]\n b2r = self.rng.permutation(list(range(-8, 8)))[:2]\n e2r = self.rng.permutation(list(range(-8, 8)))[:2]\n\n s1r = self.rng.permutation([-7, -6, -5, -4, -3, -2, -1, 1,\n 2, 3, 4, 5, 6, 7])[:2]\n s2r = self.rng.permutation([-7, -6, -5, -4, -3, -2, -1, 1,\n 2, 3, 4, 5, 6, 7])[:2]\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n for b1 in b1r:\n for e1 in e1r:\n for s1 in s1r:\n for b2 in b2r:\n for e2 in e2r:\n for s2 in s2r:\n f(x_val, b1, e1, s1, b2, e2, s2)\n\n def test_const4(self):\n # Bug reported by Razvan\n data = numpy.asarray(numpy.arange(8),\n dtype=theano.config.floatX)\n x = theano.tensor.vector('x')\n y = x[7:1:-1]\n t = theano.shared(numpy.int64(0))\n\n fun = theano.function([x], y[t])\n\n val = fun(data)\n assert val == data[7:1:-1][0]\n\n def test_const5(self):\n # Bug reported by Graham\n data = self.rng.uniform(size=(8, 8, 8)).astype(theano.config.floatX)\n x = theano.tensor.tensor3('x')\n\n nops = 1\n if theano.config.mode == \"FAST_COMPILE\":\n nops = 2\n\n # test 1)\n y = x[3:6, 2:6, 1:7][1]\n fun = theano.function([x], y)\n val = fun(data)\n assert numpy.all(val == data[3:6, 2:6, 1:7][1])\n assert len([n for n in fun.maker.fgraph.toposort()\n if isinstance(n.op, Subtensor)]) == nops\n\n # test 2)\n y = x[2, 3][1]\n fun = theano.function([x], y)\n val = fun(data)\n assert numpy.all(val == data[2, 3][1])\n assert len([n for n in fun.maker.fgraph.toposort()\n if isinstance(n.op, Subtensor)]) == nops\n\n # test 3)\n y = x[3:6, 2, 1:7][1]\n fun = theano.function([x], y)\n val = fun(data)\n assert numpy.all(val == data[3:6, 2, 1:7][1])\n assert len([n for n in fun.maker.fgraph.toposort()\n if isinstance(n.op, Subtensor)]) == nops\n\n def test_scalar6(self):\n # General case with one slice and one index\n # var[b:e:s][i]\n x = tensor.matrix('x')\n b = tensor.iscalar('b')\n e = tensor.iscalar('e')\n s = tensor.iscalar('s')\n i = tensor.iscalar('i')\n f = function([x, b, e, s, i], x[b:e:s][i], mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n #theano.printing.debugprint(f, print_type=True)\n\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t 
for t in topo if isinstance(t.op, tensor.\n Subtensor)]) == 1\n # print topo[-1].op\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n b_r = self.rng.permutation(list(range(-4, 4)))[:3]\n e_r = self.rng.permutation(list(range(-4, 4)))[:3]\n i_r = self.rng.permutation(list(range(-4, 4)))[:3]\n\n s_r = self.rng.permutation([-3, -2, -1, 1, 2, 3])[:3]\n\n for x_s in self.x_shapes:\n n_index_err = 0\n n_ok = 0\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n for b_v in b_r:\n for e_v in e_r:\n for s_v in s_r:\n for i_v in i_r:\n # The index could be out of bounds\n # In that case, an Exception should be raised,\n # otherwise, we let DebugMode check f\n try:\n x_val[b_v:e_v:s_v][i_v]\n except IndexError:\n n_index_err += 1\n self.assertRaises(IndexError,\n f, x_val, b_v, e_v, s_v, i_v)\n else:\n # Executed if the \"try\" clause did not raise\n # any exception\n n_ok += 1\n f(x_val, b_v, e_v, s_v, i_v)\n\n # print 'shape: %s' % (x_s,)\n # print '%% OK: %f' % (float(n_ok) * 100 / (n_ok + n_index_err))\n\n @attr('slow')\n def test_none_slice(self):\n # Test case of two slices, var[b1:e1:s1][b2:e2:s2]\n # where any of the b, e, and s can be None\n x = tensor.matrix('x')\n b1 = tensor.iscalar('b1')\n e1 = tensor.iscalar('e1')\n s1 = tensor.iscalar('s1')\n b2 = tensor.iscalar('b2')\n e2 = tensor.iscalar('e2')\n s2 = tensor.iscalar('s2')\n\n # Generate all possible lists of positions for None in those 6 slots\n # A 1 indicates None is present, 0 that there is a Theano scalar.\n none_positions = numpy.ndindex(2, 2, 2, 2, 2, 2)\n\n # Ranges to be used when not None\n b1r = self.rng.permutation(list(range(-4, 4)))[:]\n e1r = self.rng.permutation(list(range(-4, 4)))[:]\n b2r = self.rng.permutation(list(range(-4, 4)))[:]\n e2r = self.rng.permutation(list(range(-4, 4)))[:]\n s1r = self.rng.permutation([-4, -3, -2, -1, 1, 2, 3, 4])[:]\n s2r = self.rng.permutation([-4, -3, -2, -1, 1, 2, 3, 4])[:]\n\n scalar_vars = [b1, e1, s1, b2, e2, s2]\n scalar_ranges = [b1r, e1r, s1r, b2r, e2r, s2r]\n\n # For each case, we will build a graph, function, and list of values\n # Then, we test it on each input shape.\n for none_pos in none_positions:\n slice_inputs = []\n input_vars = []\n values = []\n if sum(none_pos) == 0:\n # Those case are already tested in test_scalar4\n continue\n\n for i, none_i in enumerate(none_pos):\n if none_i:\n slice_inputs.append(None)\n else:\n slice_inputs.append(scalar_vars[i])\n input_vars.append(scalar_vars[i])\n values.append(scalar_ranges[i])\n\n slice1 = slice(*slice_inputs[:3])\n slice2 = slice(*slice_inputs[3:])\n sub_x = x[slice1][slice2]\n f = theano.function([x] + input_vars, sub_x, mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo if isinstance(t.op,\n tensor.Subtensor)]) <= 1\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n for i_val in zip(*values):\n f(x_val, *i_val)\n\n def test_none_index(self):\n # Test the general case of indexing into a subvector,\n # like x[b:e:s][i], where any of b, e, and s can be None\n x = tensor.matrix('x')\n b = tensor.iscalar('b')\n e = tensor.iscalar('e')\n s = tensor.iscalar('s')\n i = tensor.iscalar('i')\n\n # Generate all possible lists of positions for None in those 6 slots\n # A 1 indicates None is present, 0 that there is a Theano scalar.\n # The last index (i) is never None\n none_positions = numpy.ndindex(2, 2, 2, 
1)\n\n # Ranges to be used when not None\n b_r = self.rng.permutation(list(range(-4, 4)))[:]\n e_r = self.rng.permutation(list(range(-4, 4)))[:]\n i_r = self.rng.permutation(list(range(-4, 4)))[:]\n s_r = self.rng.permutation([-4, -3, -2, -1, 1, 2, 3, 4])[:]\n\n scalar_vars = [b, e, s, i]\n scalar_ranges = [b_r, e_r, s_r, i_r]\n\n # For each case, we will build a graph, function, and list of values\n # Then, we test it on each input shape.\n for none_pos in none_positions:\n slice_inputs = []\n input_vars = []\n values = []\n if sum(none_pos) == 0:\n # Those case are already tested in test_scalar6\n continue\n\n for j, none_j in enumerate(none_pos):\n if none_j:\n slice_inputs.append(None)\n\n else:\n slice_inputs.append(scalar_vars[j])\n input_vars.append(scalar_vars[j])\n values.append(scalar_ranges[j])\n\n symbol_slice = slice(*slice_inputs[:3])\n sub_x = x[symbol_slice][i]\n f = theano.function([x] + input_vars, sub_x, mode=mode_opt)\n\n self._verify_stack_trace(f)\n\n topo = f.maker.fgraph.toposort()\n # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]\n assert len([t for t in topo if isinstance(t.op,\n tensor.Subtensor)]) <= 1\n assert isinstance(topo[-1].op, DeepCopyOp)\n\n for x_s in self.x_shapes:\n x_val = self.rng.uniform(size=x_s).astype(config.floatX)\n for i_val in zip(*values):\n # The index could be out of bounds\n # In that case, an Exception should be raised,\n # otherwise, we let DebugMode check f\n # For that, we need to create a numerical slice.\n i_val_idx = 0\n num_slice_inputs = []\n for none_j in none_pos:\n if none_j:\n num_slice_inputs.append(None)\n else:\n num_slice_inputs.append(i_val[i_val_idx])\n i_val_idx += 1\n num_slice = slice(*num_slice_inputs[:3])\n num_i = num_slice_inputs[3]\n\n try:\n x_val[num_slice][num_i]\n except IndexError:\n self.assertRaises(IndexError, f, x_val, *i_val)\n else:\n # Executed if the \"try\" clause did not raise\n # any exception\n f(x_val, *i_val)\n\n\nclass test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):\n def setUp(self):\n utt.seed_rng()\n mode = theano.compile.mode.get_default_mode()\n self.mode = mode.including(\"local_adv_sub1_adv_inc_sub1\").excluding(\"fusion\")\n self.mode_no_assert = self.mode.including(\"local_remove_all_assert\")\n\n def test0(self):\n for dtype1, dtype2 in [(\"float32\", \"float32\"),\n (\"float32\", \"float64\"),\n (\"float64\", \"float32\"),\n (\"float64\", \"float64\")]:\n x = tensor.matrix(dtype=dtype1)\n y = tensor.matrix(dtype=dtype2)\n idx = tensor.ivector()\n\n dx = numpy.random.rand(4, 5).astype(dtype1)\n dy = numpy.random.rand(2, 5).astype(dtype2)\n didx = numpy.asarray([1, 3], \"int32\")\n\n # set_subtensor\n inc = tensor.set_subtensor(x[idx], y)\n o = inc[idx]\n f = theano.function([x, y, idx], o, self.mode_no_assert)\n\n res = f(dx, dy, didx)\n assert numpy.allclose(dy, res)\n topo = f.maker.fgraph.toposort()\n if opt:\n assert len(topo) == 1\n assert isinstance(topo[0].op, (compile.DeepCopyOp, T.Elemwise))\n else:\n assert len(topo) == 2\n\n # inc_subtensor(data[idx], y)\n inc = tensor.inc_subtensor(x[idx], y)\n o = inc[idx]\n f = theano.function([x, y, idx], o, self.mode_no_assert)\n\n res = f(dx, dy, didx)\n assert numpy.allclose((dx[didx] + dy), res)\n topo = f.maker.fgraph.toposort()\n len(topo) == 2\n\n # inc_subtensor(0[idx], y)\n inc = tensor.inc_subtensor(x.zeros_like()[idx], y)\n o = inc[idx]\n f = theano.function([x, y, idx], o, self.mode_no_assert)\n\n res = f(dx, dy, didx)\n assert numpy.allclose(dy, res)\n topo = f.maker.fgraph.toposort()\n if opt:\n 
assert len(topo) == 1\n assert isinstance(topo[0].op, (compile.DeepCopyOp, T.Elemwise))\n else:\n assert len(topo) > 2\n\n def test_assert(self):\n x = tensor.matrix(\"x\")\n y = tensor.matrix(\"y\")\n idx = tensor.ivector()\n\n dx = numpy.random.rand(4, 5).astype(config.floatX)\n dy = numpy.random.rand(2, 5).astype(config.floatX)\n didx = numpy.asarray([1, 3], \"int32\")\n\n # set_subtensor\n inc = tensor.set_subtensor(x[idx], y)\n o = inc[idx]\n f = theano.function([x, y, idx], o, self.mode)\n # test wrong index\n for i in [dx.shape[0], -dx.shape[0] - 1]:\n self.assertRaises((AssertionError, IndexError),\n f, dx, dy, [i, i])\n # test wrong shape\n self.assertRaises((AssertionError, ValueError),\n f, dx, dy, [1])\n\n def test_stacktrace(self):\n x = tensor.matrix(\"x\")\n y = tensor.matrix(\"y\")\n idx = tensor.ivector()\n\n dx = numpy.random.rand(4, 5).astype(config.floatX)\n dy = numpy.random.rand(2, 5).astype(config.floatX)\n didx = numpy.asarray([1, 3], \"int32\")\n\n # set_subtensor\n inc = tensor.set_subtensor(x[idx], y)\n o = inc[idx]\n # Compile function using only the 'local_subtensor_make_vector' optimization,\n # which requires us to add the 'canonicalize' phase.\n mode = theano.compile.mode.Mode(optimizer=None).including('canonicalize').including(\"local_adv_sub1_adv_inc_sub1\")\n f = theano.function([x, y, idx], o, self.mode)\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(f.outputs[0].variable.tag, 'trace'))\n\n # Compile function using all optimizations in fast_compile mode, \n # including the 'local_subtensor_make_vector' optimization\n mode = theano.compile.mode.get_mode('FAST_COMPILE').including(\"local_adv_sub1_adv_inc_sub1\")\n f = theano.function([x, y, idx], o, self.mode)\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(f.outputs[0].variable.tag, 'trace'))\n\n\nclass Test_alloc_zero(unittest.TestCase):\n def setUp(self):\n mode = theano.compile.mode.get_default_mode()\n self.mode = mode.including(\"local_incsubtensor_of_zeros\",\n \"local_setsubtensor_of_constants\",\n \"local_0_dot_x\")\n\n def test_setsubtensor_allocs0(self):\n x = tensor.matrix()\n y = tensor.matrix()\n x0 = tensor.zeros_like(x)\n y0 = tensor.zeros_like(y)\n z = tensor.set_subtensor(x0[:4], y0)\n f = theano.function([x, y], z, mode=self.mode)\n assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in\n f.maker.fgraph.toposort()])\n\n def test_setsubtensor_allocs1(self):\n y = tensor.matrix()\n x0 = tensor.constant(numpy.asarray(numpy.zeros((4, 4)),\n dtype=config.floatX))\n y0 = tensor.zeros_like(y)\n z = tensor.set_subtensor(x0[:4], y0)\n f = theano.function([y], z, mode=self.mode)\n assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in\n f.maker.fgraph.toposort()])\n\n def test_setsubtensor_allocs1t(self):\n y = tensor.matrix()\n x0 = tensor.constant(numpy.asarray(numpy.zeros((4, 4)),\n dtype=config.floatX))\n y0 = tensor.zeros_like(y)\n z = tensor.set_subtensor(x0[:4], y0.T)\n f = theano.function([y], z, mode=mode_opt)\n assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in\n f.maker.fgraph.toposort()])\n\n def test_setsubtensor_allocs2(self):\n x = tensor.matrix()\n y0 = tensor.constant(numpy.asarray(numpy.zeros_like((4, 4)),\n dtype=config.floatX))\n x0 = tensor.zeros_like(x)\n z = tensor.set_subtensor(x0[:4], y0)\n f = theano.function([x], z, mode=self.mode)\n assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in\n 
f.maker.fgraph.toposort()])\n\n def test_incsubtensor_allocs0(self):\n x = tensor.matrix()\n y = tensor.matrix()\n y0 = tensor.zeros_like(y)\n z = tensor.inc_subtensor(x[:4], y0)\n f = theano.function([x, y], z, mode=self.mode)\n assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in\n f.maker.fgraph.toposort()])\n\n def test_incsubtensor_allocs0t(self):\n x = tensor.matrix()\n y = tensor.matrix()\n y0 = tensor.zeros_like(y)\n z = tensor.inc_subtensor(x[:4], y0.T)\n f = theano.function([x, y], z, mode=mode_opt)\n assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in\n f.maker.fgraph.toposort()])\n\n def test_incsubtensor_allocs1(self):\n x = tensor.matrix()\n y0 = tensor.constant(numpy.asarray(numpy.zeros_like((4, 4)),\n dtype=config.floatX))\n z = tensor.inc_subtensor(x[:4], y0)\n f = theano.function([x], z, mode=self.mode)\n assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in\n f.maker.fgraph.toposort()])\n\n def test_advancedincsubtensor1_allocs0(self):\n x = tensor.matrix()\n y = tensor.matrix()\n y0 = tensor.zeros_like(y)\n z = tensor.inc_subtensor(x[[0, 1, 2, 3]], y0)\n f = theano.function([x, y], z, mode=self.mode)\n assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor1)\n for n in f.maker.fgraph.toposort()])\n\n def test_advancedincsubtensor1_allocs0t(self):\n x = tensor.matrix()\n y = tensor.matrix()\n y0 = tensor.zeros_like(y)\n z = tensor.inc_subtensor(x[[0, 1, 2, 3]], y0.T)\n f = theano.function([x, y], z, mode=mode_opt)\n assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor1)\n for n in f.maker.fgraph.toposort()])\n\n def test_advancedincsubtensor1_allocs1(self):\n x = tensor.matrix()\n y0 = tensor.constant(numpy.asarray(numpy.zeros_like((4, 4)),\n dtype=config.floatX))\n z = tensor.inc_subtensor(x[[0, 1, 2, 3]], y0)\n f = theano.function([x], z, mode=self.mode)\n assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor1)\n for n in f.maker.fgraph.toposort()])\n\n def test_advancedincsubtensor_allocs0(self):\n if tensor.inplace_increment is None:\n raise SkipTest('NumPy version >= 1.8 not available')\n\n x = tensor.matrix()\n y = tensor.matrix()\n y0 = tensor.zeros_like(y)\n z = tensor.inc_subtensor(x[[[0, 0], [1, 1]], [[0, 1], [0, 1]]], y0)\n f = theano.function([x, y], z, mode=self.mode)\n assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor)\n for n in f.maker.fgraph.toposort()])\n\n def test_advancedincsubtensor_allocs0t(self):\n if tensor.inplace_increment is None:\n raise SkipTest('NumPy version >= 1.8 not available')\n\n x = tensor.matrix()\n y = tensor.matrix()\n y0 = tensor.zeros_like(y)\n z = tensor.inc_subtensor(x[[[0, 0], [1, 1]], [[0, 1], [0, 1]]], y0.T)\n f = theano.function([x, y], z, mode=mode_opt)\n assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor)\n for n in f.maker.fgraph.toposort()])\n\n def test_advancedincsubtensor_allocs1(self):\n if tensor.inplace_increment is None:\n raise SkipTest('NumPy version >= 1.8 not available')\n\n x = tensor.matrix()\n y0 = tensor.constant(numpy.asarray(numpy.zeros_like((2, 2)),\n dtype=config.floatX))\n z = tensor.inc_subtensor(x[[[0, 0], [1, 1]], [[0, 1], [0, 1]]], y0)\n f = theano.function([x], z, mode=self.mode)\n assert numpy.all([not isinstance(n.op, tensor.AdvancedIncSubtensor)\n for n in f.maker.fgraph.toposort()])\n\n def test_dot_allocs_0(self):\n v1 = tensor.vector('v1')\n v2 = tensor.vector('v2')\n m1 = tensor.matrix('m1')\n m2 = tensor.matrix('m2')\n vv2 = numpy.asarray([0, 1], 
dtype=theano.config.floatX)\n vm2 = numpy.asarray([[1, 2], [4, 5]],\n dtype=theano.config.floatX)\n vv3 = numpy.asarray([0, 1, 2], dtype=theano.config.floatX)\n vm3 = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n dtype=theano.config.floatX)\n for _e1 in [(v1, vv2, vv3), (m1, vm2, vm3)]:\n for _e2 in [(v2, vv2, vv3), (m2, vm2, vm3)]:\n for p in [0, 1]:\n if p == 0:\n e1 = tensor.zeros_like(_e1[0])\n e2 = _e2[0]\n else:\n e1 = _e1[0]\n e2 = tensor.zeros_like(_e2[0])\n o = tensor.dot(e1, e2)\n f = theano.function([_e1[0], _e2[0]], o, mode=self.mode)\n f(_e1[1], _e2[1])\n f(_e1[2], _e2[2])\n assert numpy.all([not isinstance(n.op, tensor.Dot) for n in\n f.maker.fgraph.toposort()])\n\n # test that we don't remove shape errors\n self.assertRaises((ValueError, AssertionError), f,\n _e1[1], _e2[2])\n self.assertRaises((ValueError, AssertionError), f,\n _e1[2], _e2[1])\n\n\ndef test_local_IncSubtensor_serialize():\n d = numpy.random.normal(0, 0.01, size=(100, 100))\n d = d.astype(theano.config.floatX)\n\n W = theano.shared(d, name='W')\n i = T.vector('i', dtype='int64')\n j = T.vector('j', dtype='int64')\n t = T.scalar('t')\n if theano.tensor.subtensor.inplace_increment:\n y = (W[i] + W[j] + W[1] + W[i, j]).sum()\n else:\n y = (W[i] + W[j] + W[1]).sum()\n cost = T.sqr(t - y)\n dW = theano.grad(cost, W)\n mode = theano.compile.mode.get_default_mode().excluding('fusion')\n mode = mode.including(\"local_IncSubtensor_serialize\")\n f = theano.function([i, j, t], updates=[(W, W - 0.01 * dW)], mode=mode)\n topo = f.maker.fgraph.toposort()\n adds = [n for n in topo if isinstance(n.op, T.Elemwise) and\n isinstance(n.op.scalar_op, theano.scalar.Add)]\n for a in adds:\n assert not any([inp.owner and\n isinstance(inp.owner.op,\n (tensor.IncSubtensor,\n tensor.AdvancedIncSubtensor,\n tensor.AdvancedIncSubtensor1))\n for inp in a.inputs])\n\n # Now test that the stack trace is copied over properly,\n # if we return the gradients. 
We need to use same mode as before.\n f = theano.function([i, j, t], dW, mode=mode)\n assert hasattr(f.outputs[0].variable.tag, 'trace')\n \ndef test_local_set_to_inc_subtensor():\n v = theano.tensor.fmatrix()\n s = v[[2, 1]]\n g = s + 3\n r = theano.tensor.set_subtensor(s, g)\n moder = compile.get_default_mode().excluding('local_set_to_inc_subtensor')\n modet = compile.get_default_mode().including('local_set_to_inc_subtensor')\n f1 = theano.function([v], r, mode=moder)\n f2 = theano.function([v], r, mode=modet)\n\n advi1 = [n for n in f1.maker.fgraph.toposort()\n if isinstance(n.op, tensor.AdvancedIncSubtensor1)]\n\n advi2 = [n for n in f2.maker.fgraph.toposort()\n if isinstance(n.op, tensor.AdvancedIncSubtensor1)]\n\n # We only have SetSubtensor in f1\n assert all(n.op.set_instead_of_inc for n in advi1)\n # We don't have any SetSubtensor in f2\n assert all(not n.op.set_instead_of_inc for n in advi2)\n\n val = numpy.random.randn(3, 2).astype('float32')\n\n r1 = f1(val)\n r2 = f2(val)\n\n utt.assert_allclose(r1, r2)\n\n # Finally, test that the stack trace is copied over properly,\n # before before and after optimization.\n assert hasattr(f1.outputs[0].variable.tag, 'trace')\n assert hasattr(f2.outputs[0].variable.tag, 'trace')\n \n \ndef test_local_subtensor_of_dot():\n m1 = theano.tensor.matrix()\n m2 = theano.tensor.matrix()\n d1 = numpy.arange(6).reshape((3, 2)).astype(config.floatX)\n d2 = numpy.arange(8).reshape((2, 4)).astype(config.floatX) + 10\n mode = compile.get_default_mode().including(\"local_subtensor_of_dot\")\n\n def test_equality(a, b):\n return a.shape == b.shape and numpy.allclose(a, b)\n\n # [cst]\n f = theano.function([m1, m2], theano.dot(m1, m2)[1], mode=mode)\n topo = f.maker.fgraph.toposort()\n assert test_equality(f(d1, d2), numpy.dot(d1, d2)[1])\n # DimShuffle happen in FAST_COMPILE\n assert isinstance(topo[-1].op, (T.blas_c.CGemv, T.blas.Gemv, T.DimShuffle))\n\n # slice\n f = theano.function([m1, m2], theano.dot(m1, m2)[1:2], mode=mode)\n topo = f.maker.fgraph.toposort()\n assert test_equality(f(d1, d2), numpy.dot(d1, d2)[1:2])\n assert isinstance(topo[-1].op, (T.blas.Dot22))\n\n m1 = theano.tensor.tensor3()\n m2 = theano.tensor.tensor3()\n idx = theano.tensor.iscalar()\n d1 = numpy.arange(30).reshape(2, 5, 3).astype(config.floatX)\n d2 = numpy.arange(72).reshape(4, 3, 6).astype(config.floatX) + 100\n\n f = theano.function([m1, m2, idx], theano.dot(m1, m2)[idx, 1:4, :, idx:], mode=mode)\n assert test_equality(f(d1, d2, 1), numpy.dot(d1, d2)[1, 1:4, :, 1:])\n # if we return the gradients. We need to use same mode as before.\n assert hasattr(f.outputs[0].variable.tag, 'trace')\n\n f = theano.function([m1, m2, idx], theano.dot(m1, m2)[1:4, :, idx:, idx], mode=mode)\n assert test_equality(f(d1, d2, 1), numpy.dot(d1, d2)[1:4, :, 1:, 1])\n\n # Now test that the stack trace is copied over properly,\n # if we return the gradients. 
We need to use same mode as before.\n assert hasattr(f.outputs[0].variable.tag, 'trace')\n\n\nclass Test_local_elemwise_alloc(unittest.TestCase):\n dtype = config.floatX\n\n def setUp(self):\n self.fast_compile_mode = 'FAST_COMPILE'\n self.fast_run_mode = 'FAST_RUN'\n\n self.vec = T.vector('vec', dtype=self.dtype)\n self.mat = T.matrix('mat', dtype=self.dtype)\n self.tens = T.tensor3('tens', dtype=self.dtype)\n\n self.alloc_wo_dep = T.alloc(self.vec, 2, 2)\n self.alloc_wo_dep_broad = T.alloc(self.vec, 1, 2)\n self.alloc_w_dep = T.alloc(self.vec, *self.mat.shape)\n self.alloc_w_dep_broad = T.alloc(self.vec, 1, *self.mat.shape)\n self.alloc_w_dep_broad2 = T.alloc(self.vec, self.mat.shape[0],\n self.mat.shape[1], 1)\n self.alloc_w_dep_tens = T.alloc(\n self.vec,\n self.tens.shape[0],\n self.tens.shape[1]\n )\n self.tv_wo_dep = T.alloc(self.vec, 5, 5)\n self.tm_wo_dep = T.alloc(self.mat, 5, 5, 5)\n self.s = T.iscalar('s')\n self.tv_w_dep = T.alloc(self.vec, self.s, self.s)\n self.tm_w_dep = T.alloc(self.mat, 5, 5, 5)\n self.row = theano.tensor.row(dtype=self.dtype)\n self.o = T.alloc(self.row, 5, 5)\n\n def _verify_alloc_count(self, f, count):\n assert(\n sum([isinstance(elem.op, T.Alloc)\n for elem in f.maker.fgraph.toposort()\n if elem.op is not None]) == count\n )\n\n def _verify_assert_count(self, f, count):\n assert(\n sum([isinstance(elem.op, T.opt.Assert)\n for elem in f.maker.fgraph.toposort()\n if elem.op is not None]) == count\n )\n\n def _verify_stack_trace(self, f):\n for output in f.outputs:\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(output.variable.tag, 'trace'))\n\n def test_remove_alloc_wo_dimshuffle(self):\n # No optimization on alloc\n func = function(\n [self.vec, self.mat],\n self.alloc_wo_dep + self.mat,\n mode=self.fast_compile_mode\n )\n self._verify_alloc_count(func, 1)\n self._verify_assert_count(func, 0)\n self._verify_stack_trace(func)\n\n # Optimization on alloc with assert\n func = function(\n [self.vec, self.mat],\n self.alloc_wo_dep + self.mat,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 0)\n self._verify_assert_count(func, 1)\n\n # Optimization on alloc with assert and broadcast\n func = function(\n [self.vec, self.mat],\n self.alloc_wo_dep_broad + self.mat,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 0)\n self._verify_assert_count(func, 1)\n\n # No optimization on alloc without assert\n func = function(\n [self.vec, self.mat],\n self.alloc_w_dep + self.mat,\n mode=self.fast_compile_mode\n )\n self._verify_alloc_count(func, 1)\n self._verify_assert_count(func, 0)\n\n # Optimization on alloc without assert\n func = function(\n [self.vec, self.mat],\n self.alloc_w_dep + self. mat,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 0)\n self._verify_assert_count(func, 0)\n\n # Optimization on alloc without assert and with broadcast\n func = function(\n [self.vec, self.mat],\n self.alloc_w_dep_broad + self. mat,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 0)\n self._verify_assert_count(func, 0)\n\n # Not optimized case on alloc and with broadcast\n func = function(\n [self.vec, self.mat],\n self.alloc_w_dep_broad2 + self. 
mat,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 1)\n self._verify_assert_count(func, 0)\n\n def test_remove_alloc_w_dimshuffle(self):\n # No optimization on dimshuffle with assert\n func = function(\n [self.vec, self.tens],\n self.alloc_wo_dep.dimshuffle(0, 1, 'x') + self.tens,\n mode=self.fast_compile_mode\n )\n self._verify_alloc_count(func, 1)\n self._verify_assert_count(func, 0)\n\n # Optimization on dimshuffle with assert\n func = function(\n [self.vec, self.tens],\n self.alloc_wo_dep.dimshuffle(0, 1, 'x') + self.tens,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 0)\n self._verify_assert_count(func, 1)\n\n # No optimization on dimshuffle without assert\n func = function(\n [self.vec, self.tens],\n self.alloc_w_dep_tens.dimshuffle(0, 1, 'x') + self.tens,\n mode=self.fast_compile_mode\n )\n self._verify_alloc_count(func, 1)\n self._verify_assert_count(func, 0)\n\n # Optimization on dimshuffle without assert\n func = function(\n [self.vec, self.tens],\n self.alloc_w_dep_tens.dimshuffle(0, 1, 'x') + self.tens,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 0)\n self._verify_assert_count(func, 0)\n\n def test_multi_input_single_alloc(self):\n # No optimization on dimshuffle with assert\n func = function(\n [self.vec, self.mat],\n self.tv_wo_dep + self.tm_wo_dep,\n mode=self.fast_compile_mode\n )\n self._verify_alloc_count(func, 2)\n self._verify_assert_count(func, 0)\n\n # Optimization on dimshuffle with assert\n func = function(\n [self.vec, self.mat],\n self.tv_wo_dep + self.tm_wo_dep,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 1)\n self._verify_assert_count(func, 0)\n\n # No optimization on dimshuffle without assert\n func = function(\n [self.vec, self.mat, self.s],\n self.tv_w_dep + self.tm_w_dep,\n mode=self.fast_compile_mode\n )\n self._verify_alloc_count(func, 2)\n self._verify_assert_count(func, 0)\n\n # Optimization on dimshuffle without assert\n func = function(\n [self.vec, self.mat, self.s],\n self.tv_w_dep + self.tm_w_dep,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 1)\n self._verify_assert_count(func, 1)\n\n def test_error(self):\n t3fft = theano.tensor.tensor(dtype=self.dtype,\n broadcastable=(False, False, True))\n o = self.o.dimshuffle(0, 1, 'x') + t3fft\n func = function(\n [t3fft, self.row],\n o,\n mode=self.fast_run_mode\n )\n self._verify_alloc_count(func, 0)\n self._verify_assert_count(func, 1)\n d = numpy.random.rand(5, 5, 1).astype(self.dtype)\n r = numpy.random.rand(1, 5).astype(self.dtype)\n func(d, r)\n\n\ndef test_local_subtensor_of_alloc():\n\n # DebugMode should detect if something goes wrong.\n # test shape combination of odd and event shape.\n for shape in [(3, 5), (4, 6), (3, 8), (4, 7),\n (1, 5), (5, 1)]:\n x = tensor.tensor(dtype=theano.config.floatX,\n broadcastable=(shape[0] == 1, shape[1] == 1))\n\n xval = numpy.zeros(shape, dtype=config.floatX)\n yval = numpy.arange(shape[1], dtype=config.floatX)\n\n for y in [theano.shared(yval), tensor.constant([1.])]:\n\n # The rows of yx are copies of y\n yx = tensor.alloc(y, x.shape[0], x.shape[1])\n\n # Slice of each row\n z_mat = yx[:, 3:]\n assert z_mat.ndim == 2\n\n # Only one column\n z_vec = yx[:, 3]\n assert z_vec.ndim == 1\n # results are vector\n slicess = []\n if shape[0] != 1:\n slicess.append((2, slice(None)))\n if shape[1] != 1:\n slicess.append((slice(None), 3))\n\n # results are matrix\n slicess += [\n (slice(None), slice(3, None)),\n (slice(3, None), ),\n (slice(3, None), slice(3, None)),\n (slice(1, 3), 
slice(None, -1)),\n (slice(None, None, 2)),\n (slice(1, None, 2)),\n ]\n for slices in slicess:\n z = yx.__getitem__(slices)\n f = theano.function([x], z)\n if theano.config.mode != 'FAST_COMPILE':\n # Subtensor can be in the input of Alloc\n assert not isinstance(f.maker.fgraph.toposort()[-1].op,\n Subtensor)\n val = f(xval)\n assert xval.__getitem__(slices).shape == val.shape\n\n\ndef test_local_fill_useless():\n # Test opt local_fill_cut\n x = dvector()\n y = dvector()\n z = lvector()\n m = dmatrix()\n\n x_ = numpy.random.rand(5,)\n y_ = numpy.random.rand(5,)\n z_ = (numpy.random.rand(5,) * 5).astype(\"int64\")\n m_ = numpy.random.rand(5, 5)\n\n # basic case\n f = function([x], T.fill(x, x) * 2, mode=mode_opt)\n assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]\n f(x_)\n\n # basic case\n f = function([x, y], T.second(y, x) * 2, mode=mode_opt)\n assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]\n f(x_, y_)\n\n # basic case\n f = function([x, y], T.fill(x, y) * 2, mode=mode_opt)\n assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]\n f(x_, y_)\n\n # now with different type(cast)\n f = function([x, z], T.fill(z, x) * 2, mode=mode_opt)\n assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]\n f(x_, z_)\n\n # now with different type(cast)\n f = function([x, z], T.fill(x, z) * 2, mode=mode_opt)\n assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]\n f(x_, z_)\n\n # now cutting out the input ??\n f = function([x, y], T.fill(x, y) * 2, mode=mode_opt)\n assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]\n f(x_, y_)\n\n # Test with different number of dimensions\n # The fill is not useless, so it should stay\n f = function([m, x], T.fill(m, x) * 2, mode=mode_opt)\n ops = [node.op.__class__ for node in f.maker.fgraph.toposort()]\n assert T.Alloc in ops\n f(m_, x_)\n\n\nclass Test_local_useless_elemwise_comparison(unittest.TestCase):\n def test_local_useless_elemwise_comparison(self):\n # TODO: test each case individually.\n # The following case is what made me discover those cases.\n X = T.matrix('X')\n Y = T.vector('Y')\n X_sum, updates = theano.scan(fn=lambda x: x.sum(),\n outputs_info=None,\n sequences=[X],\n non_sequences=None)\n Z = X_sum + Y\n theano.printing.debugprint(Z)\n # here is the output for the debug print:\n \"\"\"\n Elemwise{add,no_inplace} [id A] ''\n |for{cpu,scan_fn} [id B] ''\n | |Subtensor{int64} [id C] ''\n | | |Shape [id D] ''\n | | | |Subtensor{int64::} [id E] 'X[0:]'\n | | | |X [id F]\n | | | |Constant{0} [id G]\n | | |Constant{0} [id H]\n | |Subtensor{:int64:} [id I] ''\n | | |Subtensor{int64::} [id E] 'X[0:]'\n | | |ScalarFromTensor [id J] ''\n | | |Subtensor{int64} [id C] ''\n | |Subtensor{int64} [id C] ''\n |Y [id K]\n\n Inner graphs of the scan ops:\n\n for{cpu,scan_fn} [id B] ''\n >Sum{acc_dtype=float64} [id L] ''\n > |X[t] [id M] -> [id I]\n \"\"\"\n\n mode = theano.compile.get_default_mode().excluding('fusion')\n f = theano.function([X, Y], Z, mode=mode)\n theano.printing.debugprint(f, print_type=True)\n # here is the output for the debug print:\n \"\"\"\n Elemwise{Add}[(0, 0)] [id A] <TensorType(float64, vector)> '' 7\n |for{cpu,scan_fn} [id B] <TensorType(float64, vector)> '' 6\n | |Shape_i{0} [id C] <TensorType(int64, scalar)> '' 0\n | | |X [id D] <TensorType(float64, matrix)>\n | |Subtensor{int64:int64:int8} [id E] <TensorType(float64, matrix)> '' 5\n | | |X [id D] <TensorType(float64, matrix)>\n | | |ScalarFromTensor [id F] <int64> '' 4\n | | | |Elemwise{switch,no_inplace} 
[id G] <TensorType(int64, scalar)> '' 3\n | | | |Elemwise{le,no_inplace} [id H] <TensorType(int8, scalar)> '' 2\n | | | | |Shape_i{0} [id C] <TensorType(int64, scalar)> '' 0\n | | | | |TensorConstant{0} [id I] <TensorType(int8, scalar)>\n | | | |TensorConstant{0} [id I] <TensorType(int8, scalar)>\n | | | |TensorConstant{0} [id J] <TensorType(int64, scalar)>\n | | |ScalarFromTensor [id K] <int64> '' 1\n | | | |Shape_i{0} [id C] <TensorType(int64, scalar)> '' 0\n | | |Constant{1} [id L] <int8>\n | |Shape_i{0} [id C] <TensorType(int64, scalar)> '' 0\n |Y [id M] <TensorType(float64, vector)>\n\n Inner graphs of the scan ops:\n\n for{cpu,scan_fn} [id B] <TensorType(float64, vector)> ''\n >Sum{acc_dtype=float64} [id N] <TensorType(float64, scalar)> ''\n > |X[t] [id O] <TensorType(float64, vector)> -> [id E]\n \"\"\"\n\n def assert_eqs_const(self, f, val):\n topo = f.maker.fgraph.toposort()\n elem = topo[0]\n assert len(topo) == 1, topo\n assert elem.op == deep_copy_op, elem.op\n assert len(elem.inputs) == 1, elem.inputs\n assert isinstance(elem.inputs[0], T.TensorConstant), elem\n assert T.extract_constant(elem.inputs[0]) == val, val\n\n def assert_identity(self, f):\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert topo[0].op == deep_copy_op\n x_val = 10\n assert f(x_val) == x_val\n\n #def assert_returns\n\n def test_inequality_with_self(self):\n x = T.scalar('x', dtype=config.floatX)\n mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')\n\n f = theano.function([x], T.lt(x, x), mode=mode)\n self.assert_eqs_const(f, 0)\n\n f = theano.function([x], T.le(x, x), mode=mode)\n self.assert_eqs_const(f, 1)\n\n f = theano.function([x], T.gt(x, x), mode=mode)\n self.assert_eqs_const(f, 0)\n\n f = theano.function([x], T.ge(x, x), mode=mode)\n self.assert_eqs_const(f, 1)\n\n f = theano.function([x], T.minimum(x, x), mode=mode)\n self.assert_identity(f)\n\n f = theano.function([x], T.maximum(x, x), mode=mode)\n self.assert_identity(f)\n\n def test_shape_inequality_with_self(self):\n x = T.vector('x', dtype=config.floatX)\n mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',\n 'local_shape_to_shape_i',\n 'local_track_shape_i',\n 'local_subtensor_make_vector')\n f = theano.function([x], T.lt(x.shape[0], 0), mode=mode)\n self.assert_eqs_const(f, 0)\n\n f = theano.function([x], T.ge(x.shape[0], 0), mode=mode)\n self.assert_eqs_const(f, 1)\n\n f = theano.function([x], T.maximum(x.shape[0], 0), mode=mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, Shape_i), topo[0].op\n x_val = numpy.ones(100, dtype=config.floatX)\n assert f(x_val) == x_val.shape[0]\n\n f = theano.function([x], T.maximum(0, x.shape[0]), mode=mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, Shape_i), topo[0].op\n x_val = numpy.ones(100, dtype=config.floatX)\n assert f(x_val) == x_val.shape[0]\n\n f = theano.function([x], T.minimum(x.shape[0], 0), mode=mode)\n self.assert_eqs_const(f, 0)\n\n f = theano.function([x], T.minimum(0, x.shape[0]), mode=mode)\n self.assert_eqs_const(f, 0)\n\n def test_shape_add_inequality(self):\n x = T.vector('x', dtype=config.floatX)\n mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',\n 'local_shape_to_shape_i',\n 'local_track_shape_i',\n 'local_subtensor_make_vector')\n\n y = T.vector('y', dtype=config.floatX)\n\n f = theano.function([x, y], T.lt(x.shape[0]+y.shape[0], 0), mode=mode)\n 
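# The sum of two shape values can never be negative, so lt(x.shape[0] + y.shape[0], 0) is expected to fold to the constant 0 checked next.\n        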
self.assert_eqs_const(f, 0)\n\n f = theano.function([x, y], T.ge(x.shape[0]+y.shape[0], 0), mode=mode)\n self.assert_eqs_const(f, 1)\n\n def test_and(self):\n mode = theano.compile.get_default_mode().including('canonicalize')\n\n x = T.scalar('x', dtype='int8')\n\n f = theano.function([x], T.and_(x, 0), mode=mode)\n self.assert_eqs_const(f, 0)\n\n f = theano.function([x], T.and_(0, x), mode=mode)\n self.assert_eqs_const(f, 0)\n\n f = theano.function([x], T.and_(x, 1), mode=mode)\n self.assert_identity(f)\n\n f = theano.function([x], T.and_(1, x), mode=mode)\n self.assert_identity(f)\n\n def test_or(self):\n mode = theano.compile.get_default_mode().including('canonicalize')\n x = T.scalar('x', dtype='int8')\n\n f = theano.function([x], T.or_(x, 1), mode=mode)\n self.assert_eqs_const(f, 1)\n\n f = theano.function([x], T.or_(1, x), mode=mode)\n self.assert_eqs_const(f, 1)\n\n f = theano.function([x], T.or_(x, 0), mode=mode)\n self.assert_identity(f)\n\n f = theano.function([x], T.or_(0, x), mode=mode)\n self.assert_identity(f)\n\n def test_xor(self):\n mode = theano.compile.get_default_mode().including('canonicalize')\n x = T.scalar('x', dtype='int8')\n\n f = theano.function([x], T.xor(x, x), mode=mode)\n self.assert_eqs_const(f, 0)\n\n\nclass Test_local_useless_alloc(unittest.TestCase):\n def _verify_stack_trace(self, f):\n for output in f.outputs:\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(output.variable.tag, 'trace'))\n\n def setUp(self):\n self.rng = numpy.random.RandomState(utt.fetch_seed())\n\n def test0(self):\n x = shared(self.rng.randn(3, 7))\n a = tensor.alloc(x, 6, 7)\n\n # It is a bad idea to have tensor.alloc return x directly,\n # because the shape mismatch cannot be caught.\n assert a.owner and isinstance(a.owner.op, tensor.Alloc)\n\n f = function([], a, mode=mode_opt)\n # The optimization should then be applied, and remove Alloc\n assert ([node.op for node in f.maker.fgraph.toposort()]\n == [deep_copy_op])\n\n # In DebugMode, the shape mismatch should be detected\n if isinstance(mode_opt, compile.DebugMode):\n self.assertRaises(ValueError, f)\n\n self._verify_stack_trace(f)\n\n def test1(self):\n # Test that alloc never gets instantiated during optimization\n mode = mode_opt.excluding('local_useless_alloc')\n\n x = tensor.matrix('x')\n xx = tensor.fill(x, x)\n\n # The optimization 'locall_fill_to_alloc' should call tensor.alloc,\n # which should return x and not alloc(x, ...)\n f = function([x], [xx], mode=mode)\n op_classes = [node.op.__class__ for node in f.maker.fgraph.toposort()]\n assert tensor.Alloc not in op_classes\n\n self._verify_stack_trace(f)\n\n def test2(self):\n # Test that alloc never gets instantiated during optimization\n mode = mode_opt.excluding('local_useless_alloc')\n\n x = tensor.matrix('x')\n y = tensor.tile(x, (1,)*2)\n\n f = function([x], [y], mode=mode)\n op_classes = [node.op.__class__ for node in f.maker.fgraph.toposort()]\n print(op_classes)\n\n # We are supposed to test if tensr.Alloc is not in op_classes,\n # but since the proper proper optimization is not currently\n # implemented it will fail. 
Once the correct optimization is in place,\n # we have to change the following we should not see tensor.Alloc\n # in op_classes and we have to change the assert.\n assert tensor.Alloc in op_classes\n\n self._verify_stack_trace(f)\n\n\nclass Test_local_useless_inc_subtensor_alloc(unittest.TestCase):\n opt_name = 'local_useless_inc_subtensor_alloc'\n\n def _verify_stack_trace(self, f):\n for output in f.outputs:\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(output.variable.tag, 'trace'))\n\n def setUp(self):\n # The optimization requires the shape feature so we need to compile in\n # FAST_RUN mode.\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n self.mode = compile.mode.get_mode(mode)\n\n def test_advanced_inc_subtensor(self):\n if tensor.inplace_increment is None:\n raise SkipTest('NumPy version >= 1.8 not available')\n\n x = tensor.vector('x')\n y = tensor.scalar('y')\n i = tensor.matrix('i', dtype='int64')\n z = tensor.advanced_inc_subtensor(x, T.alloc(y, *i.shape), i)\n mode1 = self.mode.excluding(self.opt_name)\n mode2 = self.mode.including(self.opt_name)\n f1 = theano.function([x, i, y], z, mode=mode1)\n f2 = theano.function([x, i, y], z, mode=mode2)\n\n # the alloc op should still be there\n assert (len([n for n in f1.maker.fgraph.toposort()\n if isinstance(n.op, tensor.Alloc)]) == 1)\n # the alloc op should have been removed\n assert (len([n for n in f2.maker.fgraph.toposort()\n if isinstance(n.op, tensor.Alloc)]) == 0)\n\n x_value = numpy.random.randn(5).astype(config.floatX)\n y_value = numpy.random.randn()\n i_value = numpy.random.randint(0, 3, size=(2, 3))\n\n r1 = f1(x_value, i_value, y_value)\n r2 = f2(x_value, i_value, y_value)\n\n utt.assert_allclose(r1, r2)\n \n self._verify_stack_trace(f1)\n self._verify_stack_trace(f2)\n \n\n def test_advanced_inc_subtensor1(self):\n if tensor.inplace_increment is None:\n raise SkipTest('NumPy version >= 1.8 not available')\n\n x = tensor.vector('x')\n y = tensor.scalar('y')\n i = tensor.vector('i', dtype='int64')\n z = tensor.advanced_inc_subtensor1(x, T.alloc(y, *i.shape), i)\n mode1 = self.mode.excluding(self.opt_name)\n mode2 = self.mode.including(self.opt_name)\n f1 = theano.function([x, i, y], z, mode=mode1)\n f2 = theano.function([x, i, y], z, mode=mode2)\n\n # the alloc op should still be there\n assert (len([n for n in f1.maker.fgraph.toposort()\n if isinstance(n.op, tensor.Alloc)]) == 1)\n # the alloc op should have been removed\n assert (len([n for n in f2.maker.fgraph.toposort()\n if isinstance(n.op, tensor.Alloc)]) == 0)\n\n x_value = numpy.random.randn(5).astype(config.floatX)\n y_value = numpy.random.randn()\n i_value = numpy.random.randint(0, 3, size=2)\n\n r1 = f1(x_value, i_value, y_value)\n r2 = f2(x_value, i_value, y_value)\n\n utt.assert_allclose(r1, r2)\n \n self._verify_stack_trace(f1)\n self._verify_stack_trace(f2)\n\n def test_incsubtensor(self):\n x = tensor.vector('x')\n y = tensor.scalar('y')\n i = tensor.scalar('i', dtype='int64')\n z = tensor.inc_subtensor(x[:i], T.alloc(y, i))\n mode1 = self.mode.excluding(self.opt_name)\n mode2 = self.mode.including(self.opt_name)\n f1 = theano.function([x, i, y], z, mode=mode1)\n f2 = theano.function([x, i, y], z, mode=mode2)\n\n # the alloc op should still be there\n assert (len([n for n in f1.maker.fgraph.toposort()\n if isinstance(n.op, tensor.Alloc)]) == 1)\n # the alloc op should have been removed\n assert (len([n for n in f2.maker.fgraph.toposort()\n if isinstance(n.op, tensor.Alloc)]) == 
0)\n\n x_value = numpy.random.randn(5).astype(config.floatX)\n y_value = numpy.random.randn()\n i_value = 3\n\n r1 = f1(x_value, i_value, y_value)\n r2 = f2(x_value, i_value, y_value)\n\n utt.assert_allclose(r1, r2)\n \n self._verify_stack_trace(f1)\n self._verify_stack_trace(f2)\n\n\nclass test_shapeoptimizer(unittest.TestCase):\n def setUp(self):\n utt.seed_rng()\n\n def test0(self):\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n v = T.vector()\n m = T.matrix()\n f = function([v, m], (v + m).shape, mode=mode)\n for node in f.maker.fgraph.toposort():\n assert node.op != T.add\n\n def test_constant(self):\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n\n v = T.vector()\n f = function([v], v.dimshuffle('x', 'x', 0).shape[1], mode=mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert topo[0].op == deep_copy_op\n\n @staticmethod\n def max_pool_c01b(c01b, pool_shp, pool_stride, img_shp):\n \"\"\"Like max_pool but with input using axes ('c', 0, 1, 'b')\n (Alex Krizhevsky format)\n\n pool_shp, pool_stride and img_shp are int that represent\n the same shp in x and y.\n \"\"\"\n mx = None\n\n # Compute index in pooled space of last needed pool\n # (needed = each input pixel must appear in at least one pool)\n def last_pool(im_shp, p_shp, p_strd):\n rval = int(numpy.ceil(float(im_shp - p_shp) / p_strd))\n assert p_strd * rval + p_shp >= im_shp\n assert p_strd * (rval - 1) + p_shp < im_shp\n return rval\n # Compute starting row of the last pool\n last_pool_r = last_pool(img_shp, pool_shp, pool_stride) * pool_stride\n # Compute number of rows needed in img for all indexes to work out\n required_r = last_pool_r + pool_shp\n\n last_pool_c = last_pool(img_shp, pool_shp, pool_stride) * pool_stride\n required_c = last_pool_c + pool_shp\n\n wide_infinity = T.alloc(-numpy.inf, c01b.shape[0],\n required_r, required_c, c01b.shape[3])\n\n c01b = T.set_subtensor(wide_infinity[:, 0:img_shp, 0:img_shp, :], c01b)\n\n for row_within_pool in xrange(pool_shp):\n row_stop = last_pool_r + row_within_pool + 1\n for col_within_pool in xrange(pool_shp):\n col_stop = last_pool_c + col_within_pool + 1\n cur = c01b[:, row_within_pool:row_stop:pool_stride,\n col_within_pool:col_stop:pool_stride, :]\n if mx is None:\n mx = cur\n else:\n mx = T.maximum(mx, cur)\n return mx\n\n def test_broadcasted_dims(self):\n # This test a case that caused a crash during optimization\n shp = (1, 1, 1, 1)\n rng = numpy.random.RandomState(utt.fetch_seed())\n a = shared(rng.rand(*shp).astype(config.floatX))\n out = self.max_pool_c01b(a, 1, 1, 1)\n\n # max_pool_c01b use -inf and this will trigger DebugMode error.\n mode = copy.copy(theano.compile.get_default_mode())\n mode.check_isfinite = False\n f = theano.function([], out, mode=mode)\n f()\n\n def test_constant_merge(self):\n \"\"\"This test the error in gh-1122 that is a caused by the\n combination of merge optimizer and ShapeFeature.\n \"\"\"\n x = tensor.constant([0, 0])\n y = x[1:]\n x1 = x - tensor.join(0, y, y)\n x1.eval()\n\n def test_local_track_shape_i(self):\n class IdentityNoShape(gof.Op):\n '''Op that does not infer the output shape from the input one'''\n def make_node(self, x):\n x = as_tensor_variable(x)\n return gof.Apply(self, [x], [x.type()])\n\n def perform(self, node, inp, out_):\n x, = inp\n out, = out_\n out[0] = x.copy()\n # def infer_shape(self, node, (xshp,)):\n # return [tuple([self.shape_i(i)(r) for i in xrange(r.ndim)])]\n identity_noshape = IdentityNoShape()\n\n class 
IdentityShape(gof.Op):\n '''Op that does infer the output shape from the input one'''\n def make_node(self, x):\n x = as_tensor_variable(x)\n return gof.Apply(self, [x], [x.type()])\n\n def perform(self, node, inp, out_):\n x, = inp\n out, = out_\n out[0] = x.copy()\n\n def infer_shape(self, node, xshp_):\n # Could also just return.\n xshp, = xshp_\n return (xshp,)\n identity_shape = IdentityShape()\n\n @gof.local_optimizer([IdentityNoShape])\n def local_identity_noshape_to_identity_shape(node):\n '''Optimization transforming the first Op into the second'''\n if isinstance(node.op, IdentityNoShape):\n return [identity_shape(node.inputs[0])]\n\n mode = theano.compile.get_default_mode().including(\n 'ShapeOpt', 'specialize')\n rng = numpy.random.RandomState(utt.fetch_seed())\n x = T.tensor3('x')\n ins_x = identity_noshape(x)\n\n # Without the optimization\n f = theano.function([x], ins_x.shape, mode=mode)\n xval = rng.randn(3, 4, 7).astype(config.floatX)\n assert numpy.all(f(xval) == [3, 4, 7])\n f_ops = [node.op for node in f.maker.fgraph.toposort()]\n assert len(f_ops) == 5\n assert identity_noshape in f_ops\n assert identity_shape not in f_ops\n\n # Register the optimization\n opt.register_specialize(local_identity_noshape_to_identity_shape)\n\n mode = theano.compile.get_default_mode().including(\n 'ShapeOpt', 'specialize')\n # With the optimization\n # The identity_shape op should not be needed anymore to compute\n # the shape\n g = theano.function([x], ins_x.shape, mode=mode)\n xval = rng.randn(6, 1, 2).astype(config.floatX)\n assert numpy.all(g(xval) == [6, 1, 2])\n g_ops = [node.op for node in g.maker.fgraph.toposort()]\n assert len(g_ops) == 4\n assert identity_noshape not in g_ops\n assert identity_shape not in g_ops\n\n # test multiple level of op without infer_shape\n ins_x3 = identity_noshape(identity_noshape(identity_noshape(x)))\n h = theano.function([x], ins_x3.shape, mode=mode)\n xval = rng.randn(6, 1, 2).astype(config.floatX)\n assert numpy.all(h(xval) == [6, 1, 2])\n h_ops = [node.op for node in h.maker.fgraph.toposort()]\n assert len(h_ops) == 4\n assert identity_noshape not in h_ops\n assert identity_shape not in h_ops\n\n def test_no_shapeopt(self):\n # Test that a basic example works even when ShapeOpt is excluded\n X = T.matrix()\n expr = X.shape[0]\n\n mode = theano.compile.get_default_mode().excluding('ShapeOpt')\n f = theano.function([X], expr, mode=mode)\n print(f([[1, 2], [2, 3]]))\n\n def test_no_cycle(self):\n # Optimizing this graph resulted in a cycle, see gh-1549\n # This test depends on cuda\n import theano.sandbox.cuda as cuda\n if not cuda.cuda_available:\n raise SkipTest(\"cuda not available\")\n if sys.version_info[:2] < (2, 5):\n raise SkipTest(\"Test skipped due to a too old python\")\n\n # This pickle file has undergone manual surgery due to changes\n # in scan and may or may not run correctly. 
It does passes\n # the test below.\n pkl_filename = os.path.join(os.path.dirname(theano.__file__),\n 'tensor', 'tests', 'shape_opt_cycle.pkl')\n # Due to incompatibilities between python 2 and 3 in the format\n # of pickled numpy ndarray, we have to force an encoding\n from theano.misc.pkl_utils import CompatUnpickler\n with open(pkl_filename, \"rb\") as pkl_file:\n if PY3:\n u = CompatUnpickler(pkl_file, encoding=\"latin1\")\n else:\n u = CompatUnpickler(pkl_file)\n fn_args = u.load()\n theano.function(**fn_args)\n\n\nclass test_assert(utt.InferShapeTester):\n\n def setUp(self):\n super(test_assert, self).setUp()\n\n def test0(self):\n x = T.scalar()\n y = T.scalar()\n f = theano.function([x, y], theano.tensor.opt.assert_op(x, T.eq(x, y)))\n f(1, 1)\n self.assertRaises(AssertionError, f, 1, 0)\n\n def test_local_remove_useless_assert1(self):\n # remove assert that are always true\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n mode = compile.mode.get_mode(mode)\n\n x = T.scalar()\n f = theano.function([x], theano.tensor.opt.assert_op(x, 1), mode=mode)\n assert f(1) == 1\n assert f(5) == 5\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert topo[0].op == deep_copy_op\n\n def test_test_local_remove_useless_assert2(self):\n # remove assert condition that are always true\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n mode = compile.mode.get_mode(mode)\n\n x = T.scalar()\n y = T.scalar()\n f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 1),\n mode=mode)\n assert f(1, 1) == 1\n assert f(5, 1) == 5\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n assert len(topo[0].inputs) == 2\n assert topo[1].op == deep_copy_op\n\n def test_local_remove_useless_assert3(self):\n # don't remove assert condition that are always false\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n mode = compile.mode.get_mode(mode)\n\n x = T.scalar()\n y = T.scalar()\n f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 0),\n mode=mode)\n self.assertRaises(AssertionError, f, 1, 0)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n assert len(topo[0].inputs) == 3\n assert topo[1].op == deep_copy_op\n\n def test_local_remove_all_assert1(self):\n # remove assert condition that are unknown\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n mode = compile.mode.get_mode(mode).including('local_remove_all_assert')\n\n x = T.scalar()\n y = T.scalar()\n f = theano.function([x, y], theano.tensor.opt.assert_op(x, y),\n mode=mode)\n if isinstance(mode, theano.compile.debugmode.DebugMode):\n # DebugMode will run the original version with the Assert\n self.assertRaises(AssertionError, f, 1, 0)\n else:\n f(1, 0) # Without opt, it should fail.\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1, topo\n assert topo[0].op == deep_copy_op, topo\n\n mode = compile.mode.get_default_mode()\n a = theano.tensor.opt.assert_op(x, T.eq(x, 0).any())\n f = theano.function([x], a, mode=mode.excluding('unsafe'))\n topo = f.maker.fgraph.toposort()\n a_op = [n for n in topo if isinstance(n.op, T.opt.Assert)]\n assert len(a_op) == 1\n\n def test_infer_shape(self):\n\n adscal = dscalar()\n bdscal = dscalar()\n adscal_val = numpy.random.rand()\n bdscal_val = numpy.random.rand() + 1\n out = theano.tensor.opt.assert_op(adscal, bdscal)\n self._compile_and_check([adscal, bdscal], [out],\n [adscal_val, bdscal_val], Assert)\n\n admat = dmatrix()\n admat_val = numpy.random.rand(3, 4)\n 
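# adscal is reused below as an Assert condition; shifting its value above 1 keeps that condition nonzero, so the Assert op does not raise while the inferred shapes are checked.\n        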
adscal_val += 1\n out = theano.tensor.opt.assert_op(admat, adscal, bdscal)\n self._compile_and_check([admat, adscal, bdscal], [out],\n [admat_val, adscal_val, bdscal_val], Assert)\n\ndef test_local_mul_specialize():\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n mode = compile.mode.get_mode(mode)\n mode = mode.excluding('fusion')\n\n v = T.vector()\n m = T.vector()\n\n f = function([v], v * 1, mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n nodes == [deep_copy_op]\n\n f = function([v], v * 0, mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [Shape_i(0), T.alloc]\n\n f = function([v], v * (-1), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [T.neg]\n\n f = function([v, m], v * 1 * (-m), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [T.mul]\n\n f = function([v, m], v * 0 * (-m), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [Shape_i(0), T.alloc]\n\n f = function([v, m], v * (-1) * (-m), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [T.mul]\n\n f = function([v, m], v * (-1) * m, mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [T.mul]\n\n\nclass T_Tile(unittest.TestCase):\n def test_local_useless_tile(self):\n # Tile op is deprecated so the tile function doesn't use it\n # anymore, we'll test here the op directly\n v = T.vector()\n m = T.matrix()\n mode = None\n if theano.config.mode == \"FAST_COMPILE\":\n mode = \"FAST_RUN\"\n for var, data in [(v, [1, 2, 3]), (m, [[1, 2], [3, 4]])]:\n # Currently, only a repeat patter == ndim is supported.\n for ndim in [var.ndim]: # range(1, var.ndim):\n f = theano.function([var], Tile(ndim)(var, (1,)*ndim), mode=mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, compile.DeepCopyOp)\n f(data)\n \n # Check that stacktrace is copied over\n self.assertTrue(hasattr(f.outputs[0].variable.tag, 'trace'))\n self.assertTrue(len(f.outputs[0].variable.tag.trace)>0)\n\n\ndef speed_local_pow_specialize_range():\n val = numpy.random.rand(1e7)\n v = T.vector()\n mode = compile.mode.get_default_mode()\n mode_without_pow_opt = mode.excluding('local_pow_specialize')\n for i in xrange(500, 513):\n f1 = function([v], v ** i, mode=mode)\n f2 = function([v], v ** i, mode=mode_without_pow_opt)\n assert len(f1.maker.fgraph.toposort()) == 1\n t1 = time.time()\n f1(val)\n t2 = time.time()\n f2(val)\n t3 = time.time()\n print(i, t2 - t1, t3 - t2, t2 - t1 < t3 - t2)\n if not t2 - t1 < t3 - t2:\n print(\"WARNING WE ARE SLOWER\")\n for i in xrange(-3, -1500, -1):\n f1 = function([v], v ** i, mode=mode)\n f2 = function([v], v ** i, mode=mode_without_pow_opt)\n assert len(f1.maker.fgraph.toposort()) == 1\n t1 = time.time()\n f1(val)\n t2 = time.time()\n f2(val)\n t3 = time.time()\n print(i, t2 - t1, t3 - t2, t2 - t1 < t3 - t2)\n if not t2 - t1 < t3 - t2:\n print(\"WARNING WE ARE SLOWER\")\n\n\ndef test_local_pow_specialize():\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n mode = compile.mode.get_mode(mode)\n mode = mode.excluding('fusion')\n\n v = T.vector()\n val = numpy.arange(10, dtype=theano.config.floatX)\n val_no0 = numpy.arange(1, 10, dtype=theano.config.floatX)\n\n f = function([v], v ** 0, mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [Shape_i(0), T.alloc]\n assert 
numpy.allclose(f(val), val ** 0)\n\n f = function([v], v ** 1, mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n nodes == [deep_copy_op]\n assert numpy.allclose(f(val), val ** 1)\n\n f = function([v], v ** (-1), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [T.inv]\n assert numpy.allclose(f(val_no0), val_no0 ** (-1))\n\n f = function([v], v ** 2, mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [T.sqr]\n assert numpy.allclose(f(val), val ** 2)\n\n f = function([v], v ** (-2), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert len(nodes) == 2\n assert nodes[0] == T.sqr\n assert isinstance(nodes[1].scalar_op, theano.scalar.basic.Inv)\n# assert nodes == [T.sqr,T.inv]#Why this don't work?\n assert numpy.allclose(f(val_no0), val_no0 ** (-2))\n\n f = function([v], v ** (.5), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert nodes == [T.sqrt]\n assert numpy.allclose(f(val), val ** (.5))\n\n f = function([v], v ** (-.5), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert len(nodes) == 2\n assert nodes[0] == T.sqrt\n assert isinstance(nodes[1].scalar_op, theano.scalar.basic.Inv)\n# assert nodes == [T.sqrt,T.inv]#Why this don't work?\n assert numpy.allclose(f(val_no0), val_no0 ** (-.5))\n\n\ndef test_local_pow_specialize_device_more_aggressive_on_cpu():\n mode = theano.config.mode\n if mode == 'FAST_COMPILE':\n mode = 'FAST_RUN'\n mode = compile.mode.get_mode(mode)\n mode = mode.excluding('fusion').excluding('gpu')\n\n v = T.vector()\n val = numpy.arange(10, dtype=theano.config.floatX)\n val_no0 = numpy.arange(1, 10, dtype=theano.config.floatX)\n f = function([v], v ** (15), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert len(nodes) == 1\n assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 6\n assert isinstance(nodes[0].scalar_op, theano.scalar.Composite)\n assert numpy.allclose(f(val), val ** 15)\n\n f = function([v], v ** (-15), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert len(nodes) == 2\n assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 6\n assert isinstance(nodes[0].scalar_op, theano.scalar.Composite)\n assert isinstance(nodes[-1].scalar_op, theano.scalar.basic.Inv)\n assert numpy.allclose(f(val_no0), val_no0 ** (-15))\n\n f = function([v], v ** (16), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert len(nodes) == 1\n assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 4\n assert isinstance(nodes[0].scalar_op, theano.scalar.Composite)\n assert numpy.allclose(f(val), val ** 16)\n\n f = function([v], v ** (-16), mode=mode)\n nodes = [node.op for node in f.maker.fgraph.toposort()]\n assert len(nodes) == 2\n assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 4\n assert isinstance(nodes[0].scalar_op, theano.scalar.Composite)\n assert isinstance(nodes[-1].scalar_op, theano.scalar.basic.Inv)\n assert numpy.allclose(f(val_no0), val_no0 ** (-16))\n\n\nclass T_Rebroadcast(unittest.TestCase):\n def test_local_useless_rebroadcast(self):\n mode = theano.compile.get_default_mode().including('canonicalize')\n v1 = T.vector()\n v2 = T.vector()\n j = T.join(0, v1, v2)\n f = theano.function([v1, v2], j, mode=mode)\n f([1, 2], [3, 4, 5])\n e = f.maker.fgraph.toposort()\n assert len([n for n in e if isinstance(n.op, T.Rebroadcast)]) == 0\n\n 
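# Check that the stack trace was copied over correctly after the useless Rebroadcast was removed.\n        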
assert hasattr(f.outputs[0].variable.tag, 'trace')\n\n def test_rebroadcast_rebroadcast(self):\n mode = theano.compile.get_default_mode().including('canonicalize')\n m = T.matrix()\n s = T.addbroadcast(m, 0, 1)\n v = T.unbroadcast(s, 1)\n f = theano.function([m], v, mode=mode)\n f([[76]])\n e = f.maker.fgraph.toposort()\n rebroadcast_nodes = [n for n in e if isinstance(n.op, T.Rebroadcast)]\n assert len(rebroadcast_nodes) == 1\n assert rebroadcast_nodes[0].op.axis == {0: True}\n\n\nclass T_useless_elemwise(unittest.TestCase):\n def setUp(self):\n self.mode = theano.compile.get_default_mode().including(\n 'canonicalize', 'local_fill_to_alloc')\n\n def test_eq(self):\n x = T.dmatrix()\n y = T.dmatrix()\n f = theano.function([x, y], T.eq(x, y), mode=self.mode)\n vx = numpy.random.rand(5, 4)\n vy = numpy.random.rand(5, 4)\n f(vx, vy)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, T.Elemwise)\n assert isinstance(topo[0].op.scalar_op, theano.scalar.EQ)\n f2 = theano.function([x], T.eq(x, x), mode=self.mode)\n assert numpy.all(f2(vx) == numpy.ones((5, 4)))\n topo2 = f2.maker.fgraph.toposort()\n # Shape_i{1}(<TensorType(float64, matrix)>), Shape_i{0}(<TensorType(float64, matrix)>), Alloc([[1]], Shape_i{0}.0, Shape_i{1}.0\n assert len(topo2) == 3\n assert isinstance(topo2[-1].op, T.Alloc)\n\n def test_neq(self):\n x = T.dmatrix()\n y = T.dmatrix()\n f = theano.function([x, y], T.neq(x, y), mode=self.mode)\n vx = numpy.random.rand(5, 4)\n vy = numpy.random.rand(5, 4)\n f(vx, vy)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, T.Elemwise)\n assert isinstance(topo[0].op.scalar_op, theano.scalar.NEQ)\n f2 = theano.function([x], T.neq(x, x), mode=self.mode)\n assert numpy.all(f2(vx) == numpy.zeros((5, 4)))\n topo2 = f2.maker.fgraph.toposort()\n assert len(topo2) == 3\n assert isinstance(topo2[-1].op, T.Alloc)\n\n def test_mul(self):\n x = T.dmatrix()\n y = T.dmatrix()\n f = theano.function([x], T.mul(x), mode=self.mode)\n vx = numpy.random.rand(5, 4)\n vy = numpy.random.rand(5, 4)\n f(vx)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert topo[0].op == deep_copy_op\n f2 = theano.function([x, y], T.mul(x, y), mode=self.mode)\n assert numpy.all(f2(vx, vy) == vx * vy)\n topo2 = f2.maker.fgraph.toposort()\n assert len(topo2) == 1\n assert isinstance(topo2[0].op, T.Elemwise)\n assert isinstance(topo2[0].op.scalar_op, theano.scalar.Mul)\n\n def test_add(self):\n x = T.dmatrix()\n y = T.dmatrix()\n f = theano.function([x], T.add(x), mode=self.mode)\n vx = numpy.random.rand(5, 4)\n vy = numpy.random.rand(5, 4)\n f(vx)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert topo[0].op == deep_copy_op\n f2 = theano.function([x, y], T.add(x, y), mode=self.mode)\n assert numpy.all(f2(vx, vy) == vx + vy)\n topo2 = f2.maker.fgraph.toposort()\n assert len(topo2) == 1\n assert isinstance(topo2[0].op, T.Elemwise)\n assert isinstance(topo2[0].op.scalar_op, theano.scalar.Add)\n\n def test_identity(self):\n # scalar.identity is used in 2 Elemwise functions:\n # tensor_copy, and view\n x = T.matrix()\n f = theano.function([x], T.tensor_copy(x), mode=self.mode)\n vx = numpy.random.rand(5, 4).astype(config.floatX)\n f(vx)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert topo[0].op == deep_copy_op\n\n\nclass T_cast_cast(unittest.TestCase):\n def setUp(self):\n mode = theano.compile.get_default_mode()\n self.mode = mode.including('local_cast_cast')\n\n def test(self):\n x = T.fmatrix()\n o = 
T.Elemwise(scal.Cast(scal.Scalar(\"float64\")))(x.astype(\"float64\"))\n f = theano.function([x], o, mode=self.mode)\n dx = numpy.random.rand(5, 4).astype(\"float32\")\n f(dx)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, T.Elemwise)\n\n x = T.dmatrix()\n o = T.Elemwise(scal.Cast(scal.Scalar(\"float32\")))(x.astype(\"float32\"))\n f = theano.function([x], o, mode=self.mode)\n dx = numpy.random.rand(5, 4)\n f(dx)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, T.Elemwise)\n\n\nclass T_func_inverse(unittest.TestCase):\n\n def setUp(self):\n mode = theano.compile.get_default_mode()\n self.mode = mode.including('local_func_inv')\n\n\n def assert_func_pair_optimized(self, func1, func2, data,\n should_copy=True, is_complex=False):\n \"\"\"\n Check that a pair of funcs is optimized properly\n \"\"\"\n\n x = T.cmatrix() if is_complex else T.fmatrix()\n o = func2(func1(x))\n f = theano.function([x], o, mode=self.mode)\n delta = f(data) - data\n topo = f.maker.fgraph.toposort()\n\n if should_copy:\n acceptable_topo_lens = [1]\n else:\n # The 2 funcs can be split apart if they are not inverses\n acceptable_topo_lens = [1, 2]\n\n if should_copy:\n delta_condition = numpy.all(delta == 0)\n else:\n delta_condition = numpy.all(delta != 0)\n\n self.assertTrue(len(topo) in acceptable_topo_lens)\n self.assertTrue(delta_condition)\n self.assertEqual(isinstance(topo[0].op, DeepCopyOp), should_copy,\n \"Inverse functions not removed!\")\n\n def test(self):\n \"\"\"\n test optimization for consecutive functional inverses\n \"\"\"\n\n dx = numpy.random.rand(5, 4).astype(\"float32\")\n self.assert_func_pair_optimized(T.deg2rad, T.rad2deg, dx)\n dx = numpy.random.rand(5, 4).astype(\"float32\")*180\n self.assert_func_pair_optimized(T.rad2deg, T.deg2rad, dx)\n\n # Test the other functional inverses\n dx = numpy.random.rand(5, 4).astype(\"float32\")\n self.assert_func_pair_optimized(T.cosh, T.arccosh, dx)\n self.assert_func_pair_optimized(T.arcsinh, T.sinh, dx)\n self.assert_func_pair_optimized(T.arctanh, T.tanh, dx)\n self.assert_func_pair_optimized(T.inv, T.inv, dx)\n self.assert_func_pair_optimized(T.neg, T.neg, dx)\n cx = dx + complex(0, 1)*(dx + 0.01)\n self.assert_func_pair_optimized(T.conj, T.conj, cx, is_complex=True)\n\n # Test that non-inverse functions are ran normally\n self.assert_func_pair_optimized(T.conj, T.neg, cx,\n should_copy=False, is_complex=True)\n dx = numpy.random.rand(5, 4).astype(\"float32\")+0.01\n self.assert_func_pair_optimized(T.rad2deg, T.rad2deg, dx,\n should_copy=False)\n self.assert_func_pair_optimized(T.rad2deg, T.cosh, dx,\n should_copy=False)\n\n\ndef test_constant_folding():\n \"\"\" Test that constant folding get registered at fast_compile\n\n An error removed that registration during the registration.\n \"\"\"\n x = tensor.dvector()\n mode = theano.compile.get_mode(\"FAST_COMPILE\").excluding(\"fusion\")\n f = theano.function([x], [x * 2, x + x], mode=mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n\n # Test that we do not crash when constant folding elemwise scalar\n # as they should not generate c code.\n\n x = tensor.constant(3)\n assert x.ndim == 0\n mode = theano.compile.get_mode(\"FAST_COMPILE\").excluding(\"fusion\")\n f = theano.function([], [x * 2, x + x], mode=mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n assert all([isinstance(n.op, DeepCopyOp) for n in topo])\n\n\ndef test_constant_get_stabilized():\n \"\"\"\n Currently Theano enable the 
constant_folding optimization before stabilization optimization.\n This cause some stabilization optimization not being implemented and thus cause inf value to appear\n when it should not.\n\n .. note: we can't simply move the constant_folding optimization to specialize as this break other optimization!\n We will need to partially duplicate some canonicalize optimzation to specialize to fix this issue.\n \"\"\"\n x2 = T.scalar()\n y2 = T.log(1 + T.exp(x2))\n mode = theano.compile.get_default_mode()\n mode.check_isfinite = False\n f2 = theano.function([x2], y2, mode=mode)\n try:\n assert len(f2.maker.fgraph.toposort()) == 1\n assert f2.maker.fgraph.toposort()[0].op == \\\n theano.tensor.nnet.sigm.softplus\n assert f2(800) == 800\n\n x = T.as_tensor_variable(800)\n y = T.log(1 + T.exp(x))\n f = theano.function([], y, mode=mode)\n assert len(f.maker.fgraph.toposort()) == 0\n assert numpy.isinf(f())\n\n # When this error is fixed, the following line should be ok.\n assert f() == 800, f()\n\n except AssertionError:\n raise SkipTest('Theano optimizes constant before stabilization. '\n 'This breaks stabilization optimization in some '\n 'cases. See #504.')\n\n\nclass T_local_switch_sink(unittest.TestCase):\n def setUp(self):\n # condition values\n self.condm = numpy.asarray([[0.1, 0, 1, -1],\n [0., 0., 0., 0.],\n [1, 1, 1, 1]])\n self.condv = numpy.asarray([0.1, 0, 1, -1])\n self.conds = [0.1, 0, 1, -1]\n\n # x values\n self.xm = numpy.ones((3, 4))\n self.xv = numpy.ones((4,))\n self.xs = 1.\n\n # expected results\n self.resm = [numpy.asarray([[1, 0, 1, 0], [0, 0, 0, 0], [1, 1, 1, 1]])]*3 + [numpy.asarray([[1, 0, 1, 0], [1, 0, 1, 0], [1, 0, 1, 0]])] + \\\n 2*[numpy.asarray([[1, 0, 1, 0]])] + [[numpy.ones((3, 4)), numpy.zeros((3, 4)), numpy.ones((3, 4)), numpy.zeros((3, 4))]] + \\\n [[numpy.ones((4,)), numpy.zeros((4,)), numpy.ones((4,)), numpy.zeros((4,))]] + \\\n [[numpy.asarray(1.0), numpy.asarray(\n 0.0), numpy.asarray(1.0), numpy.asarray(0.0)]]\n\n self.mode = theano.compile.mode.get_default_mode().including(\n 'canonicalize', 'fast_run').excluding('gpu', 'fusion')\n self.mode = copy.copy(self.mode)\n self.mode.check_isfinite = False\n\n def test_local_mul_switch_sink(self):\n c = T.dscalar()\n idx = 0\n for condition in [(T.dmatrix('cond'), self.condm),\n (T.dvector('cond'), self.condv),\n (T.dscalar('cond'), self.conds)]:\n for x in [(T.dmatrix('x'), self.xm), (T.dvector('x'), self.xv),\n (T.dscalar('x'), self.xs)]:\n y = T.mul(T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]),\n T.switch(condition[0] > 0,\n 1. * x[0], T.log(c) * x[0]))\n f = theano.function([condition[0], x[0], c],\n [y], mode=self.mode)\n if type(condition[1]) is list:\n for i in xrange(len(condition[1])):\n res = f(condition[1][i], x[1], -1)\n assert (res == numpy.asarray(\n self.resm[idx][i])).sum() == self.resm[idx][i].size\n else:\n res = f(condition[1], x[1], -1)\n assert (res == numpy.asarray(self.\n resm[idx])).sum() == self.resm[idx].size\n idx += 1\n\n # This case caused a missed optimization in the past.\n x = T.dscalar('x')\n y = T.switch(x < 7, x, T.sqrt(x - 7))\n f = theano.function([x], T.grad(y, x), self.mode)\n assert f(5) == 1, f(5)\n\n @attr('slow')\n def test_local_div_switch_sink(self):\n c = T.dscalar()\n idx = 0\n for condition in [(T.dmatrix('cond'), self.condm), (T.dvector('cond'), self.condv), (T.dscalar('cond'), self.conds)]:\n for x in [(T.dmatrix('x'), self.xm), (T.dvector('x'), self.xv), (T.dscalar('x'), self.xs)]:\n y = T.true_div(T.switch(condition[0] > 0, 1. 
*\n x[0], 0.*x[0]), T.switch(condition[0] > 0, 1.*x[0], T.log(c)*x[0]))\n f = theano.function([condition[0], x[0], c]\n , [y], mode=self.mode)\n if type(condition[1]) is list:\n for i in xrange(len(condition[1])):\n res = f(condition[1][i], x[1], -1)\n assert (res == numpy.\n asarray(self.resm[idx][i])).sum() == self.resm[idx][i].size\n else:\n res = f(condition[1], x[1], -1)\n assert (res == numpy.asarray(self.\n resm[idx])).sum() == self.resm[idx].size\n idx += 1\n\n\nclass T_local_erf(unittest.TestCase):\n def setUp(self):\n self.mode = theano.compile.mode.get_default_mode().including(\n 'canonicalize', 'fast_run').excluding('gpu', 'fusion')\n self.mode._optimizer.position_cutoff = 1.50001\n if theano.config.cxx == '' and not theano.scalar.basic_scipy.imported_scipy_special:\n raise SkipTest(\"erf need a c++ compiler or scipy\")\n\n def test_local_one_plus_erf(self):\n val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],\n dtype=config.floatX)\n x = T.vector()\n\n f = theano.function([x], 1 + T.erf(x), mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [\n T.mul, T.erfc], f.maker.fgraph.toposort()\n f(val)\n\n f = theano.function([x], T.erf(x) + 1, mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [\n T.mul, T.erfc], f.maker.fgraph.toposort()\n f(val)\n\n f = theano.function([x], T.erf(x) + 2, mode=self.mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n assert topo[0].op == T.erf\n assert isinstance(topo[1].op, T.Elemwise)\n assert isinstance(topo[1].op.scalar_op, scal.Add)\n f(val)\n\n def test_local_one_minus_erf(self):\n val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],\n dtype=config.floatX)\n x = T.vector()\n\n f = theano.function([x], 1 - T.erf(x), mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc]\\\n , f.maker.fgraph.toposort()\n print(f(val))\n\n f = theano.function([x], 1 + (-T.erf(x)), mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc]\\\n , f.maker.fgraph.toposort()\n print(f(val))\n\n f = theano.function([x], (-T.erf(x)) + 1, mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc]\\\n , f.maker.fgraph.toposort()\n print(f(val))\n\n f = theano.function([x], 2 - T.erf(x), mode=self.mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2, f.maker.fgraph.toposort()\n assert topo[0].op == T.erf, f.maker.fgraph.toposort()\n assert isinstance(topo[1].op, T.Elemwise), f.maker.fgraph.toposort()\n assert isinstance(topo[1].op.scalar_op, scal.Add)\\\n or isinstance(topo[1].op.scalar_op, scal.Sub), f.maker.fgraph.toposort()\n print(f(val))\n\n def test_local_erf_minus_one(self):\n val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],\n dtype=config.floatX)\n x = T.vector()\n\n f = theano.function([x], T.erf(x) - 1, mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc, T.mul]\n print(f(val))\n\n f = theano.function([x], T.erf(x) + (-1), mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc, T.mul]\n print(f(val))\n\n f = theano.function([x], -1 + T.erf(x), mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc, T.mul]\n print(f(val))\n\n f = theano.function([x], T.erf(x) - 2, mode=self.mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2\n assert topo[0].op == T.erf\n assert isinstance(topo[1].op, T.Elemwise)\n assert isinstance(topo[1].op.scalar_op, scal.Add)\\\n or isinstance(topo[1].op.scalar_op, scal.Sub)\n print(f(val))\n\n\nclass 
T_local_erfc(unittest.TestCase):\n def setUp(self):\n self.mode_fusion = theano.compile.mode.get_default_mode().including(\n 'canonicalize').including('fast_run').excluding('gpu')\n self.mode = self.mode_fusion.excluding('fusion')\n self.mode._optimizer.position_cutoff = 1.50001\n if (theano.config.cxx == '' and\n not theano.scalar.basic_scipy.imported_scipy_special):\n raise SkipTest(\"erfc need a c++ compiler or scipy\")\n\n def test_local_one_minus_erfc(self):\n \"\"\" test opt: 1-erfc(x) => erf(x) and -erfc(x)+1 => erf(x)\n \"\"\"\n val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],\n dtype=config.floatX)\n x = T.vector('x')\n\n f = theano.function([x], 1 - T.erfc(x), mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\\\n , f.maker.fgraph.toposort()\n print(f(val))\n\n f = theano.function([x], (-T.erfc(x)) + 1, mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\\\n , f.maker.fgraph.toposort()\n print(f(val))\n\n f = theano.function([x], 2 - T.erfc(x), mode=self.mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 2, f.maker.fgraph.toposort()\n assert topo[0].op == T.erfc, f.maker.fgraph.toposort()\n assert isinstance(topo[1].op, T.Elemwise), f.maker.fgraph.toposort()\n assert isinstance(topo[1].op.scalar_op, scal.Sub)\\\n , f.maker.fgraph.toposort()\n print(f(val))\n\n def test_local_erf_neg_minus_one(self):\n \"\"\" test opt: (-1)+erfc(-x)=>erf(x)\"\"\"\n val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],\n dtype=config.floatX)\n x = T.vector('x')\n\n f = theano.function([x], -1 + T.erfc(-x), mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\\\n , f.maker.fgraph.toposort()\n print(f(val))\n\n f = theano.function([x], T.erfc(-x) - 1, mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\\\n , f.maker.fgraph.toposort()\n print(f(val))\n\n f = theano.function([x], T.erfc(-x) + (-1), mode=self.mode)\n assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\\\n , f.maker.fgraph.toposort()\n print(f(val))\n\n def test_local_log_erfc(self):\n val = [-30, -27, -26, -11, -10, -3, -2, -1, 0, 1, 2, 3, 10,\n 11, 26, 27, 28, 30]\n if theano.config.mode in [\"DebugMode\", \"DEBUG_MODE\", \"FAST_COMPILE\"]:\n # python mode don't like the inv(0)\n val.remove(0)\n val = numpy.asarray(val, dtype=config.floatX)\n x = T.vector('x')\n\n # their is some nan that will happear in the graph for the log of the negatives values\n mode = copy.copy(self.mode)\n mode.check_isfinite = False\n mode_fusion = copy.copy(self.mode_fusion)\n mode_fusion.check_isfinite = False\n\n f = theano.function([x], T.log(T.erfc(x)), mode=mode)\n assert len(f.maker.fgraph.apply_nodes) == 23, len(f.maker.fgraph.apply_nodes)\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n assert all(numpy.isfinite(f(val)))\n\n f = theano.function([x], T.log(T.erfc(-x)), mode=mode)\n assert len(f.maker.fgraph.apply_nodes) == 24, len(f.maker.fgraph.apply_nodes)\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n assert all(numpy.isfinite(f(-val)))\n\n f = theano.function([x], T.log(T.erfc(x)), mode=mode_fusion)\n assert len(f.maker.fgraph.apply_nodes) == 1, len(f.maker.fgraph.apply_nodes)\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n assert len(f.maker.fgraph.toposort()[0].fgraph.toposort()[\n 0].op.scalar_op.fgraph.apply_nodes) == 22, len(f.maker.fgraph.toposort()[0].fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes)\n # TODO: fix this problem\n if theano.config.floatX 
== \"float32\" and theano.config.mode in [\"DebugMode\", \"DEBUG_MODE\"]:\n raise SkipTest('The python code upcast somewhere internally '\n 'some value of float32 to python float for '\n 'part of its computation. That make that the '\n 'c and python code dont generate the same value. '\n 'You can ignore this error.')\n assert all(numpy.isfinite(f(val)))\n\n def test_local_grad_log_erfc_neg(self):\n val = [-100, -30, -27, -26.4, -26.2, -26, -11, -10, -9, -3, -2, -1, 0,\n 1, 2, 3, 9, 10, 11, 27, 26.4, 26.2, 26, 28, 30, 100]\n if theano.config.mode in [\"DebugMode\", \"DEBUG_MODE\", \"FAST_COMPILE\"]:\n# python mode don't like the inv(0) in computation, but the switch don't select this value. So it is computed for no good reason.\n val.remove(0)\n if theano.config.mode in [\"DebugMode\", \"DEBUG_MODE\"] and theano.config.floatX == 'float32':\n # In float32 their is a plage of values close to 10 that we stabilize as it give bigger error then the stabilized version.\n # The orig value in float32 -30.0, the stab value -20.1 the orig value in float64 -18.1.\n val.remove(10)\n val = numpy.asarray(val, dtype=config.floatX)\n x = T.vector('x')\n y = T.vector('y')\n\n # their is some nan that will happear in the graph for the log of the negatives values\n mode = copy.copy(self.mode)\n mode.check_isfinite = False\n mode_fusion = copy.copy(self.mode_fusion)\n mode_fusion.check_isfinite = False\n\n f = theano.function([x], T.grad(T.log(T.erfc(x)).sum(), x), mode=mode)\n assert len(f.maker.fgraph.apply_nodes) == 23, len(f.maker.fgraph.apply_nodes)\n assert all(numpy.isfinite(f(val)))\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n\n # test with a different mul constant\n f = theano.function([x], T.mul(T.exp(T.neg(T.sqr(x))), -\n 10.12837917) / T.erfc(x), mode=mode)\n assert len(f.maker.fgraph.apply_nodes) == 23, len(f.maker.fgraph.apply_nodes)\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n assert all(numpy.isfinite(f(val)))\n\n # test that we work without the mul\n f = theano.function([x], T.exp(T.neg(T.sqr(x))) / T.erfc(x), mode=mode)\n assert len(f.maker.fgraph.apply_nodes) == 22, len(f.maker.fgraph.apply_nodes)\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n assert all(numpy.isfinite(f(val)))\n\n # test that we don't work if x!=y\n f = theano.function([x, y], T.exp(T.neg(T.sqr(x))) / T.erfc(\n y), mode=mode)\n assert len(f.maker.fgraph.apply_nodes) == 5, len(f.maker.fgraph.apply_nodes)\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n f(val, val - 3)\n\n # test that we work without the sqr and neg\n f = theano.function([x], T.exp(T.mul(-1, x, x)) / T.erfc(x), mode=mode)\n assert len(f.maker.fgraph.apply_nodes) == 21, len(f.maker.fgraph.apply_nodes)\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n assert all(numpy.isfinite(f(val)))\n\n # test that it work correctly if x is x*2 in the graph.\n f = theano.function([x], T.grad(T.log(T.erfc(2 * x)).sum(),\n x), mode=mode)\n assert len(f.maker.fgraph.apply_nodes) == 23, len(f.maker.fgraph.apply_nodes)\n assert numpy.isfinite(f(val)).all()\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n\n f = theano.function([x], T.grad(T.log(T.erfc(x)).sum(), x),\n mode=mode_fusion)\n assert len(f.maker.fgraph.apply_nodes) == 1, len(f.maker.fgraph.apply_nodes)\n assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX\n\n # TODO: fix this problem\n if theano.config.floatX == \"float32\" and theano.config.mode in [\"DebugMode\", \"DEBUG_MODE\"]:\n # The 
python code upcast somewhere internally some value of float32\n # to python float for part of its computation. That make that the c\n # and python code do not generate the same value. You can ignore\n # this error. This happen in an intermediate step that don't show\n # in the final result.\n\n # Showing this test error is a duplicate of the one in test_local_log_erfc. We hide it.\n pass\n else:\n assert all(numpy.isfinite(f(val)))\n\n def speed_local_log_erfc(self):\n\n val = numpy.random.rand(1e6)\n x = T.vector()\n mode = theano.compile.mode.get_mode(\"FAST_RUN\")\n f1 = theano.function([x], T.log(T.erfc(x)), mode=mode.\n excluding(\"local_log_erfc\"))\n f2 = theano.function([x], T.log(T.erfc(x)), mode=mode)\n print(f1.maker.fgraph.toposort())\n print(f2.maker.fgraph.toposort())\n t0 = time.time()\n f1(val)\n t1 = time.time()\n f2(val)\n t2 = time.time()\n print(t1 - t0, t2 - t1)\n\n\nclass test_local_useless_switch(unittest.TestCase):\n def setUp(self):\n self.mode = mode_opt.excluding('constant_folding')\n\n def test_const0(self):\n\n for dtype1 in ['int32', 'int64']:\n for dtype2 in ['int32', 'int64']:\n x = theano.tensor.matrix('x', dtype=dtype1)\n y = theano.tensor.matrix('y', dtype=dtype2)\n z = theano.tensor.switch(0, x, y)\n f = theano.function([x, y], z, mode=self.mode)\n assert len([node.op for node in f.maker.fgraph.toposort() if\n (isinstance(node.op, theano.tensor.Elemwise)\n and isinstance(node.op.scalar_op,\n theano.scalar.basic.Switch))]) == 0\n vx = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype1)\n vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype=dtype2)\n assert numpy.all(f(vx, vy) == vy)\n\n def test_const1(self):\n\n for dtype1 in ['int32', 'int64']:\n for dtype2 in ['int32', 'int64']:\n x = theano.tensor.matrix('x', dtype=dtype1)\n y = theano.tensor.matrix('y', dtype=dtype2)\n z = theano.tensor.switch(1, x, y)\n f = theano.function([x, y], z, mode=self.mode)\n assert len([node.op for node in f.maker.fgraph.toposort() if\n (isinstance(node.op, theano.tensor.Elemwise)\n and isinstance(node.op.scalar_op,\n theano.scalar.basic.Switch))]) == 0\n vx = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype1)\n vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype=dtype2)\n assert numpy.all(f(vx, vy) == vx)\n\n def test_left_is_right(self):\n\n for dtype1 in ['int32', 'int64']:\n x = theano.tensor.matrix('x', dtype=dtype1)\n varc = theano.tensor.matrix('varc', dtype=dtype1)\n z1 = theano.tensor.switch(1, x, x)\n z0 = theano.tensor.switch(0, x, x)\n z2 = theano.tensor.switch(varc, x, x)\n f1 = theano.function([x], z1, mode=self.mode)\n f0 = theano.function([x], z0, mode=self.mode)\n f2 = theano.function([x,varc], z2, mode=self.mode)\n\n topo = f1.maker.fgraph.toposort()\n assert len(topo) == 1\n assert topo[0].op == deep_copy_op\n\n topo = f0.maker.fgraph.toposort()\n assert len(topo) == 1\n assert topo[0].op == deep_copy_op\n\n topo = f2.maker.fgraph.toposort()\n assert len(topo) == 1\n assert topo[0].op == deep_copy_op\n\n vx = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype1)\n vc = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype1)\n assert numpy.all(f1(vx) == vx)\n assert numpy.all(f0(vx) == vx)\n assert numpy.all(f2(vx,vc) == vx)\n\n def test_shape_le_0(self):\n\n for dtype1 in ['float32', 'float64']:\n x = theano.tensor.matrix('x', dtype=dtype1)\n z0 = theano.tensor.switch(theano.tensor.le(x.shape[0], 0), 0, x.shape[0])\n f0 = theano.function([x], z0, mode=self.mode)\n assert isinstance(f0.maker.fgraph.toposort()[0].op, Shape_i)\n\n z1 = 
theano.tensor.switch(theano.tensor.le(x.shape[1], 0), 0, x.shape[1])\n f1 = theano.function([x], z1, mode=self.mode)\n assert isinstance(f1.maker.fgraph.toposort()[0].op, Shape_i)\n\n vx = numpy.random.randn(0,5).astype(dtype1)\n assert f0(vx) == 0\n assert f1(vx) == 5\n\n\n def test_broadcast1(self):\n # test switch(cst, matrix, row)\n x = theano.tensor.matrix('x', dtype='int32')\n y = theano.tensor.vector('y', dtype='int64')\n\n z = theano.tensor.switch(1, x, y)\n f = theano.function([x, y], z, mode=self.mode)\n assert len([node.op for node in f.maker.fgraph.toposort() if\n isinstance(node.op, theano.tensor.Elemwise) and\n not isinstance(node.op.scalar_op, theano.scalar.basic.Cast)]) == 0\n vx = numpy.array([[1, 2, 3], [4, 5, 6]], dtype='int32')\n vy = numpy.array([10, 11, 12], dtype='int64')\n assert numpy.all(f(vx, vy) == vx)\n\n z = theano.tensor.switch(0, x, y)\n f = theano.function([x, y], z, mode=self.mode)\n assert len([node.op for node in f.maker.fgraph.toposort() if\n isinstance(node.op, theano.tensor.Elemwise)]) == 0\n vx = numpy.array([[1, 2, 3], [4, 5, 6]], dtype='int32')\n vy = numpy.array([10, 11, 12], dtype='int64')\n assert numpy.all(f(vx, vy) == vy)\n\n def test_broadcast2(self):\n # test switch(cst, vector, matrix)\n\n # This case is not optimized for now.\n x = theano.tensor.vector('x', dtype='int32')\n y = theano.tensor.matrix('y', dtype='int64')\n z = theano.tensor.switch(1, x, y)\n f = theano.function([x, y], z, mode=self.mode)\n assert len([node.op for node in f.maker.fgraph.toposort() if\n isinstance(node.op, theano.tensor.Elemwise) and\n not isinstance(node.op.scalar_op, theano.scalar.basic.Cast)]) == 0\n vx = numpy.array([4, 5, 6], dtype='int32')\n vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype='int64')\n assert numpy.all(f(vx, vy) == vx)\n\n z = theano.tensor.switch(0, x, y)\n f = theano.function([x, y], z, mode=self.mode)\n assert len([node.op for node in f.maker.fgraph.toposort() if\n isinstance(node.op, theano.tensor.Elemwise)]) == 0\n vx = numpy.array([4, 5, 6], dtype='int32')\n vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype='int64')\n assert numpy.all(f(vx, vy) == vy)\n\n def test_broadcast3(self):\n # test switch(matrix, same_vector, same_vector)\n\n x = theano.tensor.matrix('x', dtype='int32')\n y = theano.tensor.vector('y', dtype='int64')\n z = theano.tensor.switch(x, y, y)\n f = theano.function([x, y], z, mode=self.mode)\n vx = numpy.array([[0, 1], [1, 0]], dtype='int32')\n vy = numpy.array([7, 8], dtype='int64')\n utt.assert_allclose(f(vx, vy), numpy.where(vx, vy, vy))\n assert len([node.op for node in f.maker.fgraph.toposort() if\n isinstance(node.op, theano.tensor.Elemwise)]) == 0\n\n\nclass T_local_sum_prod(unittest.TestCase):\n \"\"\"\n Test sum/prod opts in opt.py\n \"\"\"\n def setUp(self):\n self.mode = theano.compile.get_default_mode().including('canonicalize',\n 'specialize')\n\n def test_local_sum_prod_mul_by_scalar(self):\n # Test the optimization local_sum_prod_mul_by_scalar for both Sum and\n # Prod ops in six cases each :\n # 1-the inputs to the mul contain a scalar and no non-scalar\n # 2-the inputs to the mul contain a scalar and one non-scalar\n # 3-the inputs to the mul contain a scalar and two non-scalars\n # 4-the inputs to the mul contain two scalars and no non-scalar\n # 5-the inputs to the mul contain two scalars and one non-scalar\n # 6-the inputs to the mul contain two scalars and two non-scalars\n\n vect = T.dvector()\n mat = T.dmatrix()\n scalar1 = T.dscalar()\n scalar2 = T.dscalar()\n\n v_val = numpy.random.rand(2)\n 
m_val = numpy.random.rand(2, 2)\n s1_val = numpy.random.rand()\n s2_val = numpy.random.rand()\n\n def test_reduction_opt(inputs, inputs_val, reduction_op,\n expected_output, nb_expected_sum_nodes):\n mul_out = T.mul(*inputs)\n f = theano.function(inputs, reduction_op()(mul_out),\n mode=self.mode)\n out = f(*inputs_val)\n utt.assert_allclose(out, expected_output)\n\n # Ensure that the optimization has been applied properly by\n # ensuring that the optimized graph contains the expected number\n # of apply nodes for the sum op\n prod_nodes = [n for n in f.maker.fgraph.toposort()\n if isinstance(n.op, reduction_op)]\n assert len(prod_nodes) == nb_expected_sum_nodes\n\n # Test sum\n\n # Case 1\n test_reduction_opt([scalar1], [s1_val], T.Sum, s1_val, 0)\n\n # Case 2\n test_reduction_opt([vect, scalar1], [v_val, s1_val], T.Sum,\n s1_val * v_val.sum(), 1)\n\n # Case 3\n test_reduction_opt([vect, mat, scalar1], [v_val, m_val, s1_val], T.Sum,\n s1_val * (v_val * m_val).sum(), 1)\n\n # Case 4\n test_reduction_opt([scalar1, scalar2], [s1_val, s2_val], T.Sum,\n s1_val * s2_val, 0)\n\n # Case 5\n test_reduction_opt([vect, scalar1, scalar2], [v_val, s1_val, s2_val],\n T.Sum, s1_val * s2_val * v_val.sum(), 1)\n\n # Case 6\n test_reduction_opt([vect, mat, scalar1, scalar2],\n [v_val, m_val, s1_val, s2_val], T.Sum,\n s1_val * s2_val * (v_val * m_val).sum(), 1)\n\n # Test prod\n\n # Case 1\n test_reduction_opt([scalar1], [s1_val], T.elemwise.Prod, s1_val, 0)\n\n # Case 2\n test_reduction_opt([vect, scalar1], [v_val, s1_val], T.elemwise.Prod,\n (s1_val * v_val).prod(), 1)\n\n # Case 3\n test_reduction_opt([vect, mat, scalar1], [v_val, m_val, s1_val],\n T.elemwise.Prod, (s1_val * v_val * m_val).prod(), 2)\n\n # Case 4\n test_reduction_opt([scalar1, scalar2], [s1_val, s2_val],\n T.elemwise.Prod, s1_val * s2_val, 0)\n\n # Case 5\n test_reduction_opt([vect, scalar1, scalar2], [v_val, s1_val, s2_val],\n T.elemwise.Prod, (s1_val * s2_val * v_val).prod(),\n 1)\n\n # Case 6\n test_reduction_opt([vect, mat, scalar1, scalar2],\n [v_val, m_val, s1_val, s2_val], T.elemwise.Prod,\n (s1_val * s2_val * v_val * m_val).prod(), 2)\n\n def test_local_sum_prod_all_to_none(self):\n a = T.tensor3()\n input = numpy.arange(3 * 4 * 5, dtype=config.floatX).reshape(3, 4, 5)\n # test sum\n f = theano.function([a], a.sum(), mode=self.mode)\n assert len(f.maker.fgraph.apply_nodes) == 1\n assert numpy.allclose(f(input), input.sum())\n # test prod\n f = theano.function([a], a.prod(), mode=self.mode)\n assert len(f.maker.fgraph.apply_nodes) == 1\n assert numpy.allclose(f(input), input.prod())\n # test sum\n f = theano.function([a], a.sum([0, 1, 2]), mode=self.mode)\n assert len(f.maker.fgraph.apply_nodes) == 1\n assert numpy.allclose(f(input), input.sum())\n # test prod\n f = theano.function([a], a.prod([0, 1, 2]), mode=self.mode)\n assert len(f.maker.fgraph.apply_nodes) == 1\n assert numpy.allclose(f(input), input.prod())\n\n backup = config.warn.sum_sum_bug\n config.warn.sum_sum_bug = False\n try:\n f = theano.function([a], a.sum(0).sum(0).sum(0), mode=self.mode)\n assert len(f.maker.fgraph.apply_nodes) == 1\n assert numpy.allclose(f(input), input.sum())\n finally:\n config.warn.sum_sum_bug = backup\n\n def test_local_sum_sum_prod_prod(self):\n a = T.tensor3()\n input = numpy.arange(3 * 4 * 5, dtype=config.floatX).reshape(3, 4, 5)\n dims = [(0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (2, 1),\n ((0, 1), 0), ((1, 2), 0), (0, (0, 1)),\n (1, (0, 1)), (2, (0, 1))]\n\n backup = config.warn.sum_sum_bug\n config.warn.sum_sum_bug = False\n\n def 
my_prod(data, d, dd):\n # This prod when d or dd is a tuple of 2 dimensions.\n if not isinstance(d, tuple) and not isinstance(dd, tuple):\n return data.prod(d).prod(dd)\n if isinstance(d, tuple):\n d = sorted(d)\n return data.prod(d[1]).prod(d[0]).prod(dd)\n else:\n dd = sorted(dd)\n return data.prod(d).prod(dd[1]).prod(dd[0])\n\n def my_sum(data, d, dd):\n # This sum when d or dd is a tuple of 2 dimensions.\n if not isinstance(d, tuple) and not isinstance(dd, tuple):\n return data.sum(d).sum(dd)\n if isinstance(d, tuple):\n d = sorted(d)\n return data.sum(d[1]).sum(d[0]).sum(dd)\n else:\n dd = sorted(dd)\n return data.sum(d).sum(dd[1]).sum(dd[0])\n\n def my_sum_prod(data, d, dd):\n # This sum when d or dd is a tuple of 2 dimensions.\n if not isinstance(d, tuple) and not isinstance(dd, tuple):\n return data.sum(d).prod(dd)\n if isinstance(d, tuple):\n d = sorted(d)\n return data.sum(d[1]).sum(d[0]).prod(dd)\n else:\n dd = sorted(dd)\n return data.sum(d).prod(dd[1]).prod(dd[0])\n\n try:\n for d, dd in dims:\n expected = my_sum(input, d, dd)\n f = theano.function([a], a.sum(d).sum(dd), mode=self.mode)\n assert numpy.allclose(f(input), expected)\n assert len(f.maker.fgraph.apply_nodes) == 1\n for d, dd in dims[:6]:\n f = theano.function([a], a.sum(d).sum(dd).\n sum(0), mode=self.mode)\n assert numpy.allclose(f(input), input.sum(d).sum(dd).sum(0))\n assert len(f.maker.fgraph.apply_nodes) == 1\n for d in [0, 1, 2]:\n f = theano.function([a], a.sum(d).sum(None), mode=self.mode)\n assert numpy.allclose(f(input), input.sum(d).sum())\n assert len(f.maker.fgraph.apply_nodes) == 1\n f = theano.function([a], a.sum(None).sum(), mode=self.mode)\n assert numpy.allclose(f(input), input.sum())\n assert len(f.maker.fgraph.apply_nodes) == 1\n finally:\n config.warn.sum_sum_bug = backup\n\n # test prod\n for d, dd in dims:\n expected = my_prod(input, d, dd)\n f = theano.function([a], a.prod(d).prod(dd), mode=self.mode)\n assert numpy.allclose(f(input), expected)\n assert len(f.maker.fgraph.apply_nodes) == 1\n for d, dd in dims[:6]:\n f = theano.function([a], a.prod(d).prod(dd).\n prod(0), mode=self.mode)\n assert numpy.allclose(f(input), input.prod(d).prod(dd).prod(0))\n assert len(f.maker.fgraph.apply_nodes) == 1\n for d in [0, 1, 2]:\n f = theano.function([a], a.prod(d).prod(None), mode=self.mode)\n assert numpy.allclose(f(input), input.prod(d).prod())\n assert len(f.maker.fgraph.apply_nodes) == 1\n f = theano.function([a], a.prod(None).prod(), mode=self.mode)\n assert numpy.allclose(f(input), input.prod())\n assert len(f.maker.fgraph.apply_nodes) == 1\n\n # test sum prod don't get opt.\n for d, dd in dims:\n expected = my_sum_prod(input, d, dd)\n f = theano.function([a], a.sum(d).prod(dd), mode=self.mode)\n assert numpy.allclose(f(input), expected)\n assert len(f.maker.fgraph.apply_nodes) == 2\n for d, dd in dims[:6]:\n f = theano.function([a], a.sum(d).prod(dd).\n prod(0), mode=self.mode)\n assert numpy.allclose(f(input), input.sum(d).prod(dd).prod(0))\n assert len(f.maker.fgraph.apply_nodes) == 2\n for d in [0, 1, 2]:\n f = theano.function([a], a.sum(d).prod(None), mode=self.mode)\n assert numpy.allclose(f(input), input.sum(d).prod())\n assert len(f.maker.fgraph.apply_nodes) == 2\n f = theano.function([a], a.sum(None).prod(), mode=self.mode)\n assert numpy.allclose(f(input), input.sum())\n assert len(f.maker.fgraph.apply_nodes) == 1\n\n\n def test_local_sum_prod_alloc(self):\n a = T.dtensor3()\n input = numpy.asarray(numpy.arange(2 * 3 * 4).reshape(2, 3, 4),\n dtype='float64')\n mode = 
self.mode.including('specialize').excluding('fusion')\n\n for t_like, n_like, nb_nodes in [(tensor.zeros_like, numpy.zeros_like, (1, 3, 3, 2)),\n (tensor.ones_like, numpy.ones_like, (5, 5, 5, 6))]:\n\n # test sum\n f = theano.function([a], t_like(a).sum(None), mode=mode)\n assert numpy.allclose(f(input), n_like(input).sum())\n assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0]\n\n f = theano.function([a], t_like(a).sum([0, 1, 2]), mode=mode)\n assert numpy.allclose(f(input), n_like(input).sum())\n assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0]\n\n for d in xrange(3):\n f = theano.function([a], t_like(a).sum(d), mode=mode)\n assert numpy.allclose(f(input), n_like(input).sum(d))\n assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1]\n topo = f.maker.fgraph.toposort()\n assert topo[-1].op == T.alloc\n assert not any([isinstance(node.op, T.Sum) for node in topo])\n for i in xrange(3):\n f = theano.function([a], t_like(a).sum(i), mode=mode)\n assert numpy.allclose(f(input), n_like(input).sum(i))\n assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2]\n topo = f.maker.fgraph.toposort()\n assert topo[-1].op == T.alloc\n assert not any([isinstance(node.op, T.Sum) for node in topo])\n\n # test prod\n f = theano.function([a], t_like(a).prod(None), mode=mode)\n assert numpy.allclose(f(input), n_like(input).prod())\n #assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0]\n\n f = theano.function([a], t_like(a).prod([0, 1, 2]), mode=mode)\n assert numpy.allclose(f(input), n_like(input).prod())\n #assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0]\n\n for d in range(3):\n f = theano.function([a], t_like(a).prod(d), mode=mode)\n assert numpy.allclose(f(input), n_like(input).prod(d))\n #assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1]\n topo = f.maker.fgraph.toposort()\n assert topo[-1].op == T.alloc\n assert not any([isinstance(node.op, T.elemwise.Prod) for node in topo])\n for i in range(3):\n f = theano.function([a], t_like(a).prod(i), mode=mode)\n assert numpy.allclose(f(input), n_like(input).prod(i))\n #assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2]\n topo = f.maker.fgraph.toposort()\n assert topo[-1].op == T.alloc\n assert not any([isinstance(node.op, T.elemwise.Prod) for node in topo])\n\n backup = config.warn.sum_sum_bug\n config.warn.sum_sum_bug = False\n try:\n for d, dd in [(0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (2, 1)]:\n f = theano.function([a], t_like(a).\n sum(d).sum(dd), mode=mode)\n assert numpy.allclose(f(input),\n n_like(input).sum(d).sum(dd))\n assert len(f.maker.fgraph.apply_nodes) == nb_nodes[3]\n topo = f.maker.fgraph.toposort()\n assert topo[-1].op == T.alloc\n assert not any([isinstance(node.op,\n T.Sum) for node in topo])\n finally:\n config.warn.sum_sum_bug = backup\n\n def test_local_sum_sum_int8(self):\n \"\"\"\n Test that local_sum_sum works when combining two sums on an int8 array.\n\n This is a regression test for ticket gh-356.\n \"\"\"\n x = tensor.tensor3(dtype='int8')\n y = x.sum(axis=0).sum(axis=1)\n backup = config.on_opt_error\n config.on_opt_error = 'raise'\n try:\n # This compilation would fail prior to fix.\n f = theano.function([x], y)\n finally:\n config.on_opt_error = backup\n\n def test_local_sum_sum_dtype(self):\n \"\"\"\n Test that local_sum_sum works when specifying dtypes manually.\n \"\"\"\n x = tensor.tensor3(dtype='int8')\n y = x.sum(axis=0, dtype='int32').sum(axis=1, dtype='int64')\n backup = config.on_opt_error\n config.on_opt_error = 'raise'\n try:\n # This compilation would fail prior to fix.\n f = theano.function([x], y)\n 
finally:\n config.on_opt_error = backup\n\n\nclass T_local_reduce(unittest.TestCase):\n def setUp(self):\n self.mode = theano.compile.get_default_mode().including(\n 'canonicalize',\n 'specialize',\n 'uncanonicalize', 'local_max_and_argmax')\n\n def test_local_reduce_broadcast_all_0(self):\n for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,\n tensor.max, tensor.min]:\n x = T.TensorType('int64', (True, True, True))()\n f = theano.function([x], [fct(x)], mode=self.mode)\n assert not any([\n isinstance(node.op, T.CAReduce)\n for node in f.maker.fgraph.toposort()])\n\n def test_local_reduce_broadcast_all_1(self):\n for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,\n tensor.max, tensor.min]:\n x = T.TensorType('int64', (True, True))()\n f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)\n assert not any([\n isinstance(node.op, T.CAReduce)\n for node in f.maker.fgraph.toposort()])\n\n def test_local_reduce_broadcast_some_0(self):\n for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,\n tensor.max, tensor.min]:\n x = T.TensorType('int64', (True, False, True))()\n f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)\n\n order = f.maker.fgraph.toposort()\n assert 1 == sum([isinstance(node.op, T.CAReduce)\n for node in order])\n\n node = [node for node in order if isinstance(node.op,\n tensor.CAReduce)][0]\n\n op = node.op\n assert isinstance(op, T.CAReduce)\n # -- the leading broadcastable dimension has been dropped\n # by the local_reduce_broadcastable optimization\n # now summation is over the original x's dimension 1.\n assert node.inputs[0].ndim == 2, node\n assert op.axis == (0,), op.axis\n\n def test_local_reduce_broadcast_some_1(self):\n for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,\n tensor.max, tensor.min]:\n x = T.TensorType('int64', (True, True, True))()\n f = theano.function([x], [fct(x, axis=[0, 2])], mode=self.mode)\n assert not any([\n isinstance(node.op, T.CAReduce)\n for node in f.maker.fgraph.toposort()])\n\n def test_local_reduce_join(self):\n vx = matrix()\n vy = matrix()\n vz = matrix()\n x = numpy.asarray([[1, 0], [3, 4]], dtype=config.floatX)\n y = numpy.asarray([[4, 0], [2, 1]], dtype=config.floatX)\n z = numpy.asarray([[5, 0], [1, 2]], dtype=config.floatX)\n # Test different reduction scalar operation\n for out, res in [\n (T.max((vx, vy), 0), numpy.max((x, y), 0)),\n (T.min((vx, vy), 0), numpy.min((x, y), 0)),\n (T.sum((vx, vy, vz), 0), numpy.sum((x, y, z), 0)),\n (T.prod((vx, vy, vz), 0), numpy.prod((x, y, z), 0)),\n (T.prod((vx, vy.T, vz), 0), numpy.prod((x, y.T, z), 0)),\n ]:\n f = theano.function([vx, vy, vz], out,\n on_unused_input='ignore', mode=self.mode)\n assert (f(x, y, z) == res).all(), out\n topo = f.maker.fgraph.toposort()\n assert len(topo) <= 2, out\n assert isinstance(topo[-1].op, T.Elemwise), out\n\n # Test different axis for the join and the reduction\n # We must force the dtype, of otherwise, this tests will fail\n # on 32 bit systems\n A = theano.shared(numpy.array([1, 2, 3, 4, 5], dtype='int64'))\n\n f = theano.function([], T.sum(T.stack([A, A]), axis=0), mode=self.mode)\n assert numpy.allclose(f(), [2, 4, 6, 8, 10])\n topo = f.maker.fgraph.toposort()\n assert isinstance(topo[-1].op, T.Elemwise)\n\n # Test a case that was bugged in a old Theano bug\n try:\n old = theano.config.warn.reduce_join\n theano.config.warn.reduce_join = False\n f = theano.function([], T.sum(T.stack([A, A]), axis=1),\n mode=self.mode)\n finally:\n theano.config.warn.reduce_join = old\n assert 
numpy.allclose(f(), [15, 15])\n topo = f.maker.fgraph.toposort()\n assert not isinstance(topo[-1].op, T.Elemwise)\n\n # This case could be optimized\n A = theano.shared(numpy.array([1, 2, 3, 4, 5]).reshape(5, 1))\n f = theano.function([], T.sum(T.concatenate((A, A), axis=1), axis=1),\n mode=self.mode)\n assert numpy.allclose(f(), [2, 4, 6, 8, 10])\n topo = f.maker.fgraph.toposort()\n assert not isinstance(topo[-1].op, T.Elemwise)\n\n A = theano.shared(numpy.array([1, 2, 3, 4, 5]).reshape(5, 1))\n f = theano.function([], T.sum(T.concatenate((A, A), axis=1), axis=0),\n mode=self.mode)\n assert numpy.allclose(f(), [15, 15])\n topo = f.maker.fgraph.toposort()\n assert not isinstance(topo[-1].op, T.Elemwise)\n\n # Test that the optimization does not crash in one case where it\n # is not applied. Reported at\n # https://groups.google.com/d/topic/theano-users/EDgyCU00fFA/discussion\n old = theano.config.warn.reduce_join\n try:\n theano.config.warn.reduce_join = False\n out = tensor.sum([vx, vy, vz], axis=None)\n f = theano.function([vx, vy, vz], out)\n finally:\n theano.config.warn.reduce_join = old\n\n\nclass T_local_sum_prod_dimshuffle(unittest.TestCase):\n def setUp(self):\n self.mode = theano.compile.get_default_mode().including('canonicalize')\n\n def test_local_sum_div_dimshuffle(self):\n a = T.matrix('a')\n b = T.vector('b')\n c = T.tensor3('c')\n d = T.scalar('d')\n sum = tensor.sum\n sums = [\n sum(a / d),\n sum(a / d.dimshuffle('x', 'x')),\n sum(a / d.dimshuffle('x', 'x'), axis=0),\n sum(a / d.dimshuffle('x', 'x'), axis=1),\n sum(b / d),\n sum(b / d.dimshuffle('x')),\n sum(c / d),\n sum(c / d.dimshuffle('x', 'x', 'x')),\n sum(c / d.dimshuffle('x', 'x', 'x'), axis=0),\n sum(c / d.dimshuffle('x', 'x', 'x'), axis=1),\n sum(c / d.dimshuffle('x', 'x', 'x'), axis=2),\n\n sum(a / b, axis=0),\n sum(a / b.dimshuffle(0, 'x'), axis=1),\n sum(a.dimshuffle(0, 1) / b.dimshuffle(0, 'x'), axis=1),\n sum(a.dimshuffle(1, 0) / b.dimshuffle(0, 'x'), axis=1),\n sum(c / a, axis=0),\n sum(c / a.dimshuffle(1, 0), axis=0),\n sum(c / a.dimshuffle(0, 'x', 1), axis=1),\n sum(c / a.dimshuffle(1, 'x', 0), axis=1),\n sum(c / a.dimshuffle(0, 1, 'x'), axis=2),\n sum(c / a.dimshuffle(1, 0, 'x'), axis=2),\n sum(c / b, axis=0),\n sum(c / b, axis=1),\n sum(c / b, axis=(0, 1)),\n sum(c / b.dimshuffle(0, 'x'), axis=0),\n sum(c / b.dimshuffle(0, 'x'), axis=2),\n sum(c / b.dimshuffle(0, 'x'), axis=(0, 2)),\n sum(c / b.dimshuffle(0, 'x', 'x'), axis=1),\n sum(c / b.dimshuffle(0, 'x', 'x'), axis=2),\n sum(c / b.dimshuffle(0, 'x', 'x'), axis=(1, 2)),\n sum(sum(c, axis=0) / b, axis=0),\n sum(sum(c, axis=1) / b, axis=0),\n ]\n\n rng = numpy.random.RandomState(utt.fetch_seed())\n a_val = rng.randn(2, 2).astype(config.floatX)\n b_val = rng.randn(2).astype(config.floatX)\n c_val = rng.randn(2, 2, 2).astype(config.floatX)\n d_val = numpy.asarray(rng.randn(), config.floatX)\n\n backup = config.warn.sum_sum_bug, config.warn.sum_div_dimshuffle_bug\n config.warn.sum_sum_bug = False\n config.warn.sum_div_dimshuffle_bug = False\n try:\n for i, s in enumerate(sums):\n print(i)\n f = theano.function([a, b, c, d], s, mode=self.mode,\n on_unused_input='ignore')\n g = f.maker.fgraph.toposort()\n assert isinstance(g[-1].op.scalar_op,\n theano.scalar.basic.TrueDiv)\n f(a_val, b_val, c_val, d_val)\n finally:\n config.warn.sum_sum_bug, config.warn.sum_div_dimshuffle_bug =\\\n backup\n\n def test_local_prod_div_dimshuffle(self):\n a = T.matrix('a')\n b = T.vector('b')\n c = T.tensor3('c')\n e = T.matrix('e')\n d = T.scalar('d')\n prod = T.prod\n prods 
= [\n prod(a / d),\n prod(a / d.dimshuffle('x', 'x')),\n prod(a / d.dimshuffle('x', 'x'), axis=0),\n prod(a / d.dimshuffle('x', 'x'), axis=1),\n prod(b / d),\n prod(b / d.dimshuffle('x')),\n prod(c / d),\n prod(c / d.dimshuffle('x', 'x', 'x')),\n prod(c / d.dimshuffle('x', 'x', 'x'), axis=0),\n prod(c / d.dimshuffle('x', 'x', 'x'), axis=1),\n prod(c / d.dimshuffle('x', 'x', 'x'), axis=2),\n\n prod(a / b, axis=0),\n prod(a / b.dimshuffle(0, 'x'), axis=1),\n prod(a.dimshuffle(0, 1) / b.dimshuffle(0, 'x'), axis=1),\n prod(a.dimshuffle(1, 0) / b.dimshuffle(0, 'x'), axis=1),\n prod(c / a, axis=0),\n prod(c / a.dimshuffle(1, 0), axis=0),\n prod(c / a.dimshuffle(0, 'x', 1), axis=1),\n prod(c / a.dimshuffle(1, 'x', 0), axis=1),\n prod(c / a.dimshuffle(0, 1, 'x'), axis=2),\n prod(c / a.dimshuffle(1, 0, 'x'), axis=2),\n prod(c / b, axis=0),\n prod(c / b, axis=1),\n prod(c / b, axis=(0, 1)),\n prod(c / b.dimshuffle(0, 'x'), axis=0),\n prod(c / b.dimshuffle(0, 'x'), axis=2),\n prod(c / b.dimshuffle(0, 'x'), axis=(0, 2)),\n prod(c / b.dimshuffle(0, 'x', 'x'), axis=1),\n prod(c / b.dimshuffle(0, 'x', 'x'), axis=2),\n prod(c / b.dimshuffle(0, 'x', 'x'), axis=(1, 2)),\n prod(c / b.dimshuffle(0, 'x', 'x'), axis=(0, 1)),\n prod(c / b.dimshuffle(0, 'x', 'x'), axis=(1, 0)),\n prod(prod(c, axis=0) / b, axis=0),\n prod(prod(c, axis=1) / b, axis=0)]\n\n rng = numpy.random.RandomState(utt.fetch_seed())\n a_val = rng.randn(2, 2).astype(config.floatX)\n b_val = rng.randn(2).astype(config.floatX)\n c_val = rng.randn(2, 2, 2).astype(config.floatX)\n d_val = numpy.asarray(rng.randn(), config.floatX)\n\n default_mode = theano.compile.mode.get_default_mode()\n # FusionOptimizer is included to make sure that expected_outer_operator\n # remains the same for all optimization modes.\n mode_with_opt = default_mode.including('local_sum_prod_div_dimshuffle',\n 'FusionOptimizer')\n mode_without_opt = default_mode.excluding('local_sum_prod_div_dimshuffle')\n\n # Numerical tests: tests whether the numerical values with and without\n # optimizer are equal or not.\n for i, s in enumerate(prods):\n f = theano.function([a, b, c, d], s,\n on_unused_input='ignore',\n mode=mode_without_opt)\n g = theano.function([a, b, c, d], s,\n on_unused_input='ignore',\n mode=mode_with_opt)\n\n utt.assert_allclose(f(a_val, b_val, c_val, d_val),\n g(a_val, b_val, c_val, d_val))\n\n # Logical tests: tests whether the optimizer has been appplied or not\n # by checking graph structure.\n prods = [\n prod(a / e),\n prod(a / d),\n prod(a / d.dimshuffle('x', 'x')),\n prod(c / d.dimshuffle('x', 'x', 'x'), axis=1),\n prod(a.dimshuffle(1, 0) / b.dimshuffle(0, 'x'), axis=1),\n prod(c / b.dimshuffle(0, 'x', 'x'), axis=(1, 0)),\n prod(prod(c, axis=1) / b, axis=0),\n prod(prod(c, axis=(1, 2)) / b, axis=0)]\n\n expected_outer_operator = [theano.scalar.basic.Mul,\n theano.scalar.basic.Composite,\n theano.scalar.basic.Composite,\n theano.scalar.basic.TrueDiv,\n theano.scalar.basic.Composite,\n theano.scalar.basic.Mul,\n theano.scalar.basic.Composite,\n theano.scalar.basic.Mul]\n\n for i, s in enumerate(prods):\n g = theano.function([a, b, c, d, e], s,\n on_unused_input='ignore',\n mode=mode_with_opt)\n assert isinstance(g.maker.fgraph.toposort()[-1].op.scalar_op,\n expected_outer_operator[i])\n\n # TODO:\n # test_local_sum_prod_dimshuffle (a * b * c)\n # test_local_sum_divprod_dimshuffle ((a * b) / (c * d))\n\n\nclass TestMakeVector(utt.InferShapeTester):\n\n def setUp(self):\n super(TestMakeVector, self).setUp()\n\n def test_make_vector(self):\n b = T.bscalar()\n i 
= T.iscalar()\n d = T.dscalar()\n\n # TODO: draw random values instead. Not really important.\n val = {b: 2,\n i: -3,\n d: 0.7}\n\n # Should work\n for (dtype, inputs) in [(\"int8\", (b, b)),\n (\"int32\", (i, b)),\n (\"int32\", (b, i)),\n (\"float64\", (b, i)),\n (\"float64\", (b, d)),\n (\"float64\", (d, i)),\n (\"float64\", ()),\n (\"int64\", ()),\n ]:\n mv = opt.MakeVector(dtype=dtype)(*inputs)\n assert mv.dtype == dtype\n f = theano.function([b, i, d], mv, on_unused_input='ignore')\n f_val = f(val[b], val[i], val[d])\n # print 'f_val =', f_val\n\n s = mv.sum()\n gb = T.grad(s, b, disconnected_inputs='ignore')\n gi = T.grad(s, i, disconnected_inputs='ignore')\n gd = T.grad(s, d, disconnected_inputs='ignore')\n # print 'gb =', gb\n # print 'gi =', gi\n # print 'gd =', gd\n\n g = theano.function([b, i, d], [gb, gi, gd])\n g_val = g(val[b], val[i], val[d])\n # print 'g_val =', g_val\n\n if dtype.startswith('int'):\n # The gradient should be 0\n assert numpy.allclose(g_val, 0)\n else:\n for var, grval in zip((b, i, d), g_val):\n float_inputs = []\n if var.dtype.startswith('int'):\n pass\n # Currently we don't do any checks on these variables\n # verify_grad doesn't support integer inputs yet\n # however, the gradient on them is *not* defined to\n # be 0\n elif var not in inputs:\n assert grval == 0\n else:\n float_inputs.append(var)\n\n # Build a function that takes float_inputs, use fix values for the\n # other inputs, and returns the MakeVector. Use it for verify_grad.\n if float_inputs:\n def fun(*fl_inputs):\n f_inputs = []\n for var in f_inputs:\n if var in fl_inputs:\n # use symbolic variable\n f_inputs.append(var)\n else:\n # use constant value\n f_inputs.append(val[var])\n return opt.MakeVector(dtype=dtype)(*f_inputs)\n\n utt.verify_grad(fun, [val[ri] for ri in float_inputs])\n\n # should fail\n for (dtype, inputs) in [(\"int8\", (b, i)),\n (\"int8\", (i, b)),\n (\"int8\", (b, d)),\n (\"int8\", (i, i)),\n (\"int32\", (d, i)),\n (\"int32\", (i, d)),\n (\"float32\", (i, d)),\n ]:\n try:\n opt.MakeVector(dtype=dtype)(*inputs)\n raise Exception(\"Theano should have raised an error\")\n except AssertionError:\n pass\n\n def test_infer_shape(self):\n adscal = dscalar()\n bdscal = dscalar()\n aiscal = iscalar()\n biscal = iscalar()\n ciscal = iscalar()\n discal = iscalar()\n adscal_val = numpy.random.rand()\n bdscal_val = numpy.random.rand()\n aiscal_val = numpy.random.randint(10)\n biscal_val = numpy.random.randint(10)\n ciscal_val = numpy.random.randint(10)\n discal_val = numpy.random.randint(10)\n self._compile_and_check([adscal, aiscal],\n [MakeVector('float64')(adscal, aiscal)],\n [adscal_val, aiscal_val], MakeVector)\n\n self._compile_and_check([adscal, bdscal, aiscal],\n [MakeVector('float64')(adscal, bdscal, aiscal)],\n [adscal_val, bdscal_val, aiscal_val], MakeVector)\n\n self._compile_and_check([aiscal, biscal, ciscal, discal],\n [MakeVector('int32')(aiscal, biscal, ciscal, discal)],\n [aiscal_val, biscal_val, ciscal_val, discal_val],\n MakeVector)\n\n\ndef test_local_join_1():\n # test for vector\n a = tensor.vector('a')\n s = tensor.stack([a])\n f = function([a], s, mode=mode_opt)\n val = f([1])\n assert numpy.all(val == [1])\n e = f.maker.fgraph.toposort()\n assert len([n for n in e if isinstance(n.op, Join)]) == 0\n assert f.maker.fgraph.outputs[0].dtype == config.floatX\n\n # test for matrix join(0,a)\n a = tensor.matrix('a')\n s = join(0, a)\n f = function([a], s, mode=mode_opt)\n val = f([[1]])\n assert numpy.all(val == [[1]])\n e = f.maker.fgraph.toposort()\n assert 
len([n for n in e if isinstance(n.op, Join)]) == 0\n assert f.maker.fgraph.outputs[0].dtype == config.floatX\n\n # test for matrix join(1,a)\n s = join(1, a)\n f = function([a], s, mode=mode_opt)\n val = f([[1]])\n assert numpy.all(val == [[1]])\n e = f.maker.fgraph.toposort()\n assert len([n for n in e if isinstance(n.op, Join)]) == 0\n assert f.maker.fgraph.outputs[0].dtype == config.floatX\n\n # test we don't apply when their is 2 inputs\n s = join(1, a, a)\n f = function([a], s, mode=mode_opt)\n val = f([[1]])\n assert numpy.all(val == [[1]])\n e = f.maker.fgraph.toposort()\n assert len([n for n in e if isinstance(n.op, Join)]) == 1\n assert f.maker.fgraph.outputs[0].dtype == config.floatX\n\n\ndef test_local_join_empty():\n # test for vector, vector, empty to vector\n empty_vec = numpy.asarray([], dtype=config.floatX)\n a = tensor.vector('a')\n s = tensor.join(0, a, a, empty_vec)\n f = function([a], s, mode=mode_opt)\n val = f([1])\n assert numpy.all(val == [1])\n e = f.maker.fgraph.toposort()\n assert len([n for n in e if isinstance(n.op, Join)]) == 1\n assert all([not isinstance(n.op, Join) or len(n.inputs) == 3\n for n in e if isinstance(n.op, Join)])\n assert f.maker.fgraph.outputs[0].dtype == config.floatX\n\n\n # test for matrix join(1,a)\n empty_mat = numpy.asarray([[]], dtype=config.floatX)\n m = tensor.matrix('m')\n s = join(1, empty_mat, m, m, m)\n f = function([m], s, mode=mode_opt)\n val = f([[1]])\n assert numpy.all(val == [[1]])\n e = f.maker.fgraph.toposort()\n assert len([n for n in e if isinstance(n.op, Join)]) == 1\n assert all([not isinstance(n.op, Join) or len(n.inputs) == 4\n for n in e if isinstance(n.op, Join)])\n assert f.maker.fgraph.outputs[0].dtype == config.floatX\n # test for vector, vector, empty to matrix\n # We can't optimize this case.\n s = tensor.stack([a, a, empty_vec])\n f = function([a], s, mode=mode_opt)\n val = f([])\n assert numpy.all(val == [1])\n e = f.maker.fgraph.toposort()\n assert len([n for n in e if isinstance(n.op, Join)]) == 1\n assert all([not isinstance(n.op, Join) or len(n.inputs) == 4\n for n in e if isinstance(n.op, Join)])\n assert f.maker.fgraph.outputs[0].dtype == config.floatX\n # test for matrix join(0,a)\n # We can't optimize this case.\n s = join(0, m, numpy.asarray([[2.]], dtype=config.floatX), m)\n f = function([m], s, mode=mode_opt)\n val = f([[1]])\n assert numpy.all(val == [[1], [2], [1]])\n e = f.maker.fgraph.toposort()\n assert len([n for n in e if isinstance(n.op, Join)]) == 1\n assert all([not isinstance(n.op, Join) or len(n.inputs) == 4\n for n in e if isinstance(n.op, Join)])\n assert f.maker.fgraph.outputs[0].dtype == config.floatX\n\n\ndef test_local_join_make_vector():\n a, b, c, d, e = tensor.scalars('abcde')\n v = tensor.vector('v')\n mv = MakeVector(config.floatX)\n s = tensor.join(0, mv(a), v, mv(b, c), mv(d, e))\n f = function([a, b, c, d, e, v], s, mode=mode_opt)\n theano.printing.debugprint(f)\n val = f(1, 2, 3, 4, 6, [7, 8])\n assert numpy.all(val == [1, 7, 8, 2, 3, 4, 6])\n e = f.maker.fgraph.toposort()\n assert len([n for n in e if isinstance(n.op, Join)]) == 1\n assert all([not isinstance(n.op, Join) or len(n.inputs) == 4\n for n in e if isinstance(n.op, Join)])\n assert f.maker.fgraph.outputs[0].dtype == config.floatX\n\n assert hasattr(f.outputs[0].variable, 'tag')\n assert hasattr(f.outputs[0].variable.tag, 'trace')\n\n\ndef test_local_add_specialize():\n # test of non-zero dimension\n a = tensor.vector()\n s = tensor.add(tensor.zeros_like(a))\n assert local_add_specialize.transform(s.owner)\n\n 
# test of 0-d\n a = tensor.scalar()\n s = tensor.add(tensor.zeros_like(a))\n assert local_add_specialize.transform(s.owner)\n\n # Test when the 0 input is forcing upcasting\n a = tensor.constant(0, dtype='int64')\n b = tensor.constant(1, dtype='int32')\n s = a + b\n transformed = local_add_specialize.transform(s.owner)\n assert transformed\n assert transformed[0].type == s.type\n\n\ndef test_local_tensor_scalar_tensor():\n dtypes = ['int8', 'int16', 'int32', 'int64',\n 'uint8', 'uint16', 'uint32', 'uint64',\n 'float32', 'float64',\n 'complex64', 'complex128'\n ]\n\n for dtype in dtypes:\n t_type = TensorType(dtype=dtype, broadcastable=())\n t = t_type()\n s = tensor.scalar_from_tensor(t)\n t2 = tensor.tensor_from_scalar(s)\n\n f = function([t], t2, mode=mode_opt)\n e = f.maker.fgraph.toposort()\n cast_nodes = [n for n in e\n if isinstance(n.op, (tensor.TensorFromScalar,\n tensor.ScalarFromTensor))]\n assert len(cast_nodes) == 0\n f(0)\n\n\ndef test_local_scalar_tensor_scalar():\n dtypes = ['int8', 'int16', 'int32', 'int64',\n 'uint8', 'uint16', 'uint32', 'uint64',\n 'float32', 'float64',\n 'complex64', 'complex128'\n ]\n\n for dtype in dtypes:\n s_type = theano.scalar.Scalar(dtype=dtype)\n s = s_type()\n t = tensor.tensor_from_scalar(s)\n s2 = tensor.scalar_from_tensor(t)\n\n f = function([s], s2, mode=mode_opt)\n e = f.maker.fgraph.toposort()\n cast_nodes = [n for n in e\n if isinstance(n.op, (tensor.TensorFromScalar,\n tensor.ScalarFromTensor))]\n assert len(cast_nodes) == 0\n f(0)\n\n\ndef test_local_div_to_inv():\n num_len_s = tensor.lscalar('num_len')\n denom_s = tensor.scalar('denom')\n\n num_v = tensor.alloc(1, num_len_s)\n denom_m = denom_s.dimshuffle('x', 'x')\n\n out = num_v / denom_m\n assert numpy.all(out.broadcastable == (True, False))\n\n f = theano.function([num_len_s, denom_s], out)\n out_val = f(3, 2.)\n assert out_val.shape == (1, 3)\n assert numpy.allclose(out_val, 0.5)\n\n\ndef test_local_useless_split():\n x = tensor.matrix('x')\n splits = tensor.ivector('splits')\n opt = tensor.split(x, splits, n_splits=1)\n nonopt = tensor.split(x, splits, n_splits=3)\n\n mode = compile.get_default_mode().including(\"local_useless_split\")\n f_opt = theano.function([x, splits], opt, mode=mode)\n f_nonopt = theano.function([x, splits], nonopt, mode=mode)\n\n f_opt(numpy.random.rand(4,4).astype(config.floatX), [4])\n f_nonopt(numpy.random.rand(4,4).astype(config.floatX), [1,2,1])\n graph_opt = f_opt.maker.fgraph.toposort()\n graph_nonopt = f_nonopt.maker.fgraph.toposort()\n\n assert isinstance(graph_opt[-1].op, DeepCopyOp)\n assert len(graph_nonopt)==1\n assert isinstance(graph_nonopt[0].op, tensor.Split)\n\n # Check that stacktraces have been copied over properly\n assert hasattr(f_opt.outputs[0].variable.tag, 'trace')\n assert len(f_opt.outputs[0].variable.tag.trace) > 0\n assert hasattr(f_nonopt.outputs[0].variable.tag, 'trace')\n assert len(f_nonopt.outputs[0].variable.tag.trace) > 0\n\n\ndef test_local_flatten_lift():\n for i in xrange(1, 4):\n x = tensor.tensor4()\n out = tensor.flatten(T.exp(x), i)\n assert out.ndim == i\n mode = compile.mode.get_default_mode()\n mode = mode.including('local_reshape_lift')\n f = theano.function([x], out, mode=mode)\n x_np = numpy.random.rand(5, 4, 3, 2).astype(config.floatX)\n out_np = f(x_np)\n topo = f.maker.fgraph.toposort()\n shape_out_np = tuple(x_np.shape[:i-1])+(numpy.prod(x_np.shape[i-1:]),)\n assert shape_out_np == out_np.shape\n\n reshape_nodes = [n for n in topo if isinstance(n.op, tensor.Reshape)]\n assert (len(reshape_nodes) == 1 
and\n tensor.is_flat(reshape_nodes[0].outputs[0], outdim=i))\n assert isinstance(topo[-1].op, tensor.Elemwise)\n\n\nclass Test_Reshape(unittest.TestCase):\n def setUp(self):\n self.mode = mode_opt\n self.op = tensor.Reshape\n\n def test_local_reshape(self):\n a = tensor.fmatrix()\n b = self.op(3)(a, [2, 3, 4])\n c = self.op(1)(b, [24])\n f = theano.function([a], c, mode=self.mode)\n topo = f.maker.fgraph.toposort()\n assert sum(isinstance(node.op, self.op) for node in topo) == 1\n\n\ndef test_local_useless_reshape():\n mode = theano.compile.get_default_mode().including(\n 'local_useless_reshape')\n i = T.iscalar('i')\n m = theano.tensor.mgrid[0:i,]\n f = theano.function([i], m, mode=mode)\n topo = f.maker.fgraph.toposort()\n assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)\n\n\ndef test_local_reshape_lift():\n x = tensor.tensor4()\n out = T.exp(x).reshape([x.size])\n assert out.ndim == 1\n mode = compile.mode.get_default_mode()\n mode = mode.including('local_reshape_lift')\n f = theano.function([x], out, mode=mode)\n f(numpy.random.rand(5, 4, 3, 2).astype(config.floatX))\n topo = f.maker.fgraph.toposort()\n assert isinstance(topo[-2].op, tensor.Reshape)\n assert isinstance(topo[-1].op, tensor.Elemwise)\n\n\nclass Test_lift_transpose_through_dot(unittest.TestCase):\n def simple_optimize(self, g):\n out2in(opt.local_useless_elemwise).optimize(g)\n out2in(opt.local_lift_transpose_through_dot).optimize(g)\n out2in(opt.local_useless_elemwise).optimize(g)\n return g\n\n def test_matrix_matrix(self):\n a, b = matrices('ab')\n g = self.simple_optimize(FunctionGraph([a, b], [tensor.dot(a, b).T]))\n sg = '[dot(DimShuffle{1,0}(b), DimShuffle{1,0}(a))]'\n assert str(g) == sg, (str(g), sg)\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))\n\n def test_row_matrix(self):\n a = vector('a')\n b = matrix('b')\n g = optimize(FunctionGraph(\n [a, b],\n [tensor.dot(a.dimshuffle('x', 0), b).T]),\n level='stabilize')\n sg = '[dot(DimShuffle{1,0}(b), DimShuffle{0,x}(a))]'\n assert str(g) == sg, (str(g), sg)\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))\n\n def test_matrix_col(self):\n a = vector('a')\n b = matrix('b')\n g = optimize(FunctionGraph(\n [a, b],\n [tensor.dot(b, a.dimshuffle(0, 'x')).T]),\n level='stabilize')\n sg = '[dot(DimShuffle{x,0}(a), DimShuffle{1,0}(b))]'\n assert str(g) == sg, (str(g), sg)\n # Check stacktrace was copied over correctly after opt was applied\n self.assertTrue(hasattr(g.outputs[0].tag, 'trace'))\n\n\ndef test_local_upcast_elemwise_constant_inputs():\n s = dvector(\"s\")\n x = tensor.sum(tensor.log(10 ** s))\n f = function([s], [tensor.grad(x, s)])\n f([-42, -2.1, -1, -0.5, 0, 0.2, 1, 2, 12])\n\n # This test a corner where the optimization should not be applied.\n old = theano.config.floatX\n theano.config.floatX = 'float32'\n try:\n v = lvector()\n function([v], theano.tensor.basic.true_div(v, 2))\n finally:\n theano.config.floatX = old\n\n\nclass TestShape_i(utt.InferShapeTester):\n\n def setUp(self):\n super(TestShape_i, self).setUp()\n\n def test_perform(self):\n\n advec = vector()\n advec_val = numpy.random.rand(3).astype(config.floatX)\n f = function([advec], Shape_i(0)(advec))\n out = f(advec_val)\n assert numpy.allclose(out, advec_val.shape[0])\n\n admat = matrix()\n admat_val = numpy.random.rand(4, 3).astype(config.floatX)\n for i in xrange(2):\n f = function([admat], Shape_i(i)(admat))\n out = 
f(admat_val)\n assert numpy.allclose(out, admat_val.shape[i])\n\n def test_infer_shape(self):\n admat = matrix()\n admat_val = numpy.random.rand(3, 4).astype(config.floatX)\n self._compile_and_check([admat], [Shape_i(0)(admat)],\n [admat_val], Shape_i)\n\n self._compile_and_check([admat], [Shape_i(1)(admat)],\n [admat_val], Shape_i)\n\n\nclass TestShapeFeature(unittest.TestCase):\n def test_scalar(self):\n x = scalar()\n cst = T.constant(1).clone()\n o = x + cst\n fgraph = FunctionGraph([x], [o], clone=False)\n shape_feature = opt.ShapeFeature()\n fgraph.attach_feature(shape_feature)\n assert shape_feature.same_shape(x, o)\n\n def test_vector(self):\n x = vector()\n cst = T.constant(1).clone()\n o = x + cst\n fgraph = FunctionGraph([x], [o], clone=False)\n shape_feature = opt.ShapeFeature()\n fgraph.attach_feature(shape_feature)\n assert shape_feature.same_shape(x, o)\n\n def test_vector2(self):\n x = vector()\n y = vector()\n o = x + y\n fgraph = FunctionGraph([x, y], [o], clone=False)\n shape_feature = opt.ShapeFeature()\n fgraph.attach_feature(shape_feature)\n assert shape_feature.same_shape(x, o)\n # The following case isn't implemented\n assert not shape_feature.same_shape(y, o)\n\n def test_vector_dim(self):\n x = vector()\n y = vector()\n o = x + y\n fgraph = FunctionGraph([x, y], [o], clone=False)\n shape_feature = opt.ShapeFeature()\n fgraph.attach_feature(shape_feature)\n assert shape_feature.same_shape(x, o, 0, 0)\n # The following case isn't implemented\n assert not shape_feature.same_shape(y, o, 0, 0)\n\n def test_vector_dim_err(self):\n x = vector()\n y = vector()\n o = x + y\n fgraph = FunctionGraph([x, y], [o], clone=False)\n shape_feature = opt.ShapeFeature()\n fgraph.attach_feature(shape_feature)\n self.assertRaises(IndexError, shape_feature.same_shape, x, o, 1, 0)\n self.assertRaises(IndexError, shape_feature.same_shape, x, o, 0, 1)\n\n\ndef test_assert_op_gradient():\n x = T.vector('x')\n assert_op = Assert()\n cost = T.sum(assert_op(x, x.size < 2))\n grad = T.grad(cost, x)\n func = theano.function([x], grad)\n\n x_val = numpy.ones(shape=(1,), dtype=theano.config.floatX)\n assert func(x_val) == 1\n\n\nclass TestIntDivByOne(unittest.TestCase):\n\n def setUp(self):\n self.mode = theano.compile.mode.get_default_mode()\n self.mode = self.mode.including('local_intdiv_by_one')\n\n def test1(self):\n \"\"\"Tests removing the extra floor_div by 1 introduced by\n local_subtensor_merge optimization\"\"\"\n y = T.tensor4('y')\n self.mode = self.mode.excluding('fusion')\n f = theano.function([y], y[::-1][::-1], mode=self.mode)\n\n graph = f.maker.fgraph.toposort()\n divs = [node for node in graph\n if isinstance(node.op, T.elemwise.Elemwise) and\n isinstance(node.op.scalar_op, theano.scalar.IntDiv)]\n assert len(divs) == 0\n\n def test2(self):\n \"\"\"Simple test case for removing dividing by 1\"\"\"\n y = T.tensor4('y')\n z = y // 1\n f = theano.function([y], z, mode = self.mode)\n graph = f.maker.fgraph.toposort()\n divs = [node for node in graph\n if isinstance(node.op, T.elemwise.Elemwise) and\n isinstance(node.op.scalar_op, theano.scalar.IntDiv)]\n assert len(divs) == 0\n\n def test3(self):\n \"\"\"Simple test case for removing dividing by a tensor of ones\"\"\"\n y = T.tensor4('y')\n z = y // numpy.ones((2,2,2,2))\n f = theano.function([y], z, mode=self.mode)\n graph = f.maker.fgraph.toposort()\n divs = [node for node in graph\n if isinstance(node.op, T.elemwise.Elemwise) and\n isinstance(node.op.scalar_op, theano.scalar.IntDiv)]\n assert len(divs) == 0\n\n\ndef 
test_local_merge_alloc():\n # Add this opt to the default mode,\n # otherwise, FAST_COMPILE fails.\n default_mode = theano.compile.mode.get_default_mode()\n opt_mode = default_mode.including(\"local_merge_alloc\")\n\n x = T.iscalar('x')\n y = T.iscalar('y')\n y2 = T.iscalar('y2')\n z = T.iscalar('z')\n w = T.iscalar('w')\n m = T.fscalar('m')\n # case 1\n # Alloc(Alloc(m, x, 1, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)\n output = T.alloc(T.alloc(m, 1, y, 1, 1), x, y, z, w)\n f = theano.function([m, x, y, z, w], output, mode=opt_mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, T.Alloc)\n o = f(0., 1, 2, 3, 4)\n assert o.shape == (1, 2, 3, 4)\n\n # case 2\n # Alloc(Alloc(m, y, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)\n output = T.alloc(T.alloc(m, y, 1, 1), x, y, z, w)\n f = theano.function([m, x, y, z, w], output, mode=opt_mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, T.Alloc)\n o = f(0., 1, 2, 3, 4)\n assert o.shape == (1, 2, 3, 4)\n\n # case 3\n # Alloc(Alloc(m, y1, 1, 1), x, y2, z, w) ->\n # Alloc(m, x, assert(y1, y1==y2), z, w)\n output = T.alloc(T.alloc(m, y, 1, 1), x, y2, z, w)\n f = theano.function([m, x, y, y2, z, w], output, mode=opt_mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 3\n assert isinstance(topo[-2].op, T.opt.Assert)\n assert isinstance(topo[-1].op, T.Alloc)\n o = f(0., 1, 2, 2, 3, 4)\n assert o.shape == (1, 2, 3, 4)\n assert_raises((AssertionError, ValueError), f, 0., 1, 2, 5, 3, 4)\n\n\nif __name__ == '__main__':\n t = TestMakeVector('setUp')\n t.setUp()\n # t.test_perform()\n t.test_infer_shape()\n\n \"\"\"\n# unittest.main()\n test_fusion().tes_memory_leak()\n \"\"\"\n",
"import numpy\nimport theano\nfrom numpy.testing import assert_allclose\nfrom theano import tensor\n\nfrom blocks.theano_expressions import l2_norm, hessian_times_vector\n\n\ndef test_l2_norm():\n assert_allclose(l2_norm([2]).eval(), 2.0)\n assert_allclose(l2_norm([3, 4]).eval(), 5.0)\n assert_allclose(l2_norm([3, [1, 2]]).eval(), 14.0 ** 0.5)\n assert_allclose(\n l2_norm([3, [1, 2], [[1, 2], [3, 4]]]).eval(), 44.0 ** 0.5)\n\n\ndef test_hessian_times_vector():\n x_y = tensor.vector('x_y')\n x, y = x_y[0], x_y[1]\n # The Hessian of this should be the identity\n c = 0.5 * (x ** 2 + y ** 2)\n g = tensor.grad(c, x_y)\n\n v = tensor.vector('v')\n Hv = hessian_times_vector(g, x_y, v)\n Hv_rop = hessian_times_vector(g, x_y, v, r_op=True)\n\n f = theano.function([x_y, v], Hv)\n f_rop = theano.function([x_y, v], Hv_rop)\n\n x_y_val = numpy.random.rand(2).astype(theano.config.floatX)\n v_val = numpy.random.rand(2).astype(theano.config.floatX)\n\n assert_allclose(v_val, f(x_y_val, v_val))\n assert_allclose(v_val, f_rop(x_y_val, v_val))\n",
"import logging\n\nfrom nose.plugins.skip import SkipTest\nimport numpy\nfrom itertools import product\n\nimport theano\nfrom six import StringIO\nimport theano.tensor as T\nimport theano.tests.unittest_tools as utt\nfrom theano.sandbox.neighbours import images2neibs\nfrom theano.tensor.signal.pool import pool_2d\nfrom theano.tensor.signal.pool import MaxPoolGrad, AveragePoolGrad\n\nfrom .. import dnn\nfrom ..basic_ops import GpuAllocEmpty\n\nfrom .config import mode_with_gpu, mode_without_gpu, test_ctx_name\nfrom . import test_nnet\n\n\ndef test_dnn_conv_desc_merge():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n kern_shp = T.as_tensor_variable(\n numpy.asarray([3, 1, 2, 2]).astype('int64'))\n desc1 = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(2, 2),\n conv_mode='conv')(kern_shp)\n desc2 = dnn.GpuDnnConvDesc(border_mode='full', subsample=(1, 1),\n conv_mode='cross')(kern_shp)\n # CDataType is not DeepCopyable so this will crash if we don't use\n # borrow=True\n f = theano.function([], [theano.Out(desc1, borrow=True),\n theano.Out(desc2, borrow=True)])\n\n d1, d2 = f()\n\n # This will be the case if they are merged, which would be bad.\n assert d1 != d2\n\n\ndef test_dnn_conv_merge():\n # This test that we merge correctly multiple dnn_conv.\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img_shp = [2, 5, 6, 8]\n kern_shp = [3, 5, 5, 6]\n img = T.ftensor4('img')\n kern = T.ftensor4('kern')\n out = T.ftensor4('out')\n desc = dnn.GpuDnnConvDesc(\n border_mode='valid')(kern.shape)\n\n # Test forward op\n o1 = dnn.dnn_conv(img, kern)\n o2 = dnn.dnn_conv(img, kern)\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),\n numpy.random.rand(*kern_shp).astype('float32'))\n topo = f.maker.fgraph.toposort()\n assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]) == 1\n\n # Test grad w op\n o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc)\n o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc)\n f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]) == 1\n\n # Test grad i op\n o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc)\n o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc)\n f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]) == 1\n\n\ndef test_dnn_conv_inplace():\n \"\"\"This test that we have inplace work correctly even when\n GpuAllocEmpty get merged together.\n\n \"\"\"\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img_shp = [2, 5, 6, 8]\n kern_shp = [3, 5, 5, 6]\n img = T.ftensor4('img')\n kern = T.ftensor4('kern')\n out = T.ftensor4('out')\n desc1 = dnn.GpuDnnConvDesc(border_mode='valid', conv_mode='conv')(\n kern.shape)\n desc2 = dnn.GpuDnnConvDesc(\n border_mode='valid', conv_mode='cross')(kern.shape)\n\n # Test forward op\n o1 = dnn.dnn_conv(img, kern, conv_mode='conv')\n o2 = dnn.dnn_conv(img, kern, conv_mode='cross')\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),\n numpy.random.rand(*kern_shp).astype('float32'))\n topo = f.maker.fgraph.toposort()\n convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]\n assert len(convs) == 2\n assert all([node.op.inplace for node in 
convs])\n assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2\n\n # Test grad w op\n out = GpuAllocEmpty(kern.dtype, test_ctx_name)(*kern.shape)\n o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc1)\n o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc2)\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]\n assert len(convs) == 2\n assert all([node.op.inplace for node in convs])\n assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2\n\n # Test grad i op\n out = GpuAllocEmpty(img.dtype, test_ctx_name)(*img.shape)\n o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc1)\n o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc2)\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]\n assert len(convs) == 2\n assert all([node.op.inplace for node in convs])\n assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2\n\n\ndef pool_2d_i2n(input, ds=(2, 2), strides=None,\n pad=(0, 0),\n pool_function=T.max, mode='ignore_borders'):\n if strides is None:\n strides = ds\n\n if strides[0] > ds[0] or strides[1] > ds[1]:\n raise RuntimeError(\n \"strides should be smaller than or equal to ds,\"\n \" strides=(%d, %d) and ds=(%d, %d)\" %\n (strides + ds))\n shape = input.shape\n if pad != (0, 0):\n assert pool_function is T.max\n pad_x = pad[0]\n pad_y = pad[1]\n a = T.alloc(-numpy.inf, shape[0], shape[1], shape[2] + pad_x * 2,\n shape[3] + pad_y * 2)\n input = T.set_subtensor(a[:, :,\n pad_x:pad_x + shape[2],\n pad_y:pad_y + shape[3]],\n input)\n shape = input.shape\n\n neibs = images2neibs(input, ds, strides, mode=mode)\n pooled_neibs = pool_function(neibs, axis=1)\n\n output_width = (shape[2] - ds[0]) // strides[0] + 1\n output_height = (shape[3] - ds[1]) // strides[1] + 1\n\n pooled_output = pooled_neibs.reshape((shape[0], shape[1],\n output_width, output_height))\n return pooled_output\n\n\ndef test_pooling():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n\n # 'average_exc_pad' is disabled for versions < 4004\n if dnn.version() < 4004:\n modes = ('max', 'average_inc_pad')\n else:\n modes = ('max', 'average_inc_pad', 'average_exc_pad')\n\n x = T.ftensor4()\n for mode, pad in product(modes,\n ((0, 0), (1, 0), (1, 0), (2, 3), (3, 2))):\n if mode == 'max':\n func = T.max\n else:\n func = T.mean\n\n if pad != (0, 0) and func is T.mean:\n continue\n\n for ws in (4, 2, 5):\n for stride in (2, 3):\n if stride > ws:\n continue\n if pad[0] > stride or pad[1] > stride:\n # Not implemented\n continue\n # We will check that the opt introduced it.\n out1 = pool_2d(x, (ws, ws),\n st=(stride, stride),\n ignore_border=True,\n padding=pad, mode=mode)\n out2 = pool_2d_i2n(x, ds=(ws, ws), strides=(stride, stride),\n pad=pad,\n pool_function=func)\n mode_without_gpu2 = mode_without_gpu.including()\n mode_without_gpu2.check_isfinite = False\n f1 = theano.function([x], out1, mode=mode_with_gpu)\n assert any([isinstance(node.op, dnn.GpuDnnPool)\n for node in f1.maker.fgraph.apply_nodes])\n f2 = theano.function([x], out2, mode=mode_without_gpu2)\n assert not any([isinstance(node.op, dnn.GpuDnnPool)\n for node in f2.maker.fgraph.apply_nodes])\n for shp in [(1, 10, 100, 100),\n (1, 3, 99, 99),\n (32, 1, 147, 197),\n ]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\")\n a = f1(data)\n b = f2(data)\n\n utt.assert_allclose(a, 
b)\n\n # Test the grad\n for shp in [(1, 1, 2, 2),\n (1, 1, 3, 3)]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\") * 10\n\n ws = 2\n stride = 2\n if pad[0] > stride or pad[1] > stride:\n # Not implemented\n continue\n\n # This test the CPU grad + opt + GPU implemtentation\n def fn(x):\n return pool_2d(x, (ws, ws), ignore_border=True,\n padding=pad, mode=mode)\n utt.verify_grad(fn, [data],\n cast_to_output_type=False,\n mode=mode_with_gpu)\n # Confirm that the opt would have inserted it.\n fg = theano.function([x], theano.grad(fn(x).sum(), x),\n mode=mode_with_gpu)\n assert any([isinstance(node.op, dnn.GpuDnnPoolGrad)\n for node in fg.maker.fgraph.toposort()])\n\n # Test the GPU grad + GPU implementation\n def fn(x):\n dnn_op = dnn.dnn_pool(\n x, ws=(ws, ws),\n stride=(stride, stride),\n pad=pad,\n mode=mode)\n return dnn_op\n utt.verify_grad(fn, [data],\n cast_to_output_type=False,\n mode=mode_with_gpu)\n # Confirm that we get the good op.\n fg = theano.function([x], theano.grad(fn(x).sum(), x),\n mode=mode_with_gpu)\n assert any([isinstance(node.op, dnn.GpuDnnPoolGrad)\n for node in fg.maker.fgraph.toposort()])\n g_out = fg(data)\n\n # Compare against the CPU result\n out = pool_2d(x, (ws, ws),\n padding=pad,\n ignore_border=True, mode=mode)\n fc = theano.function([x], theano.grad(out.sum(), x),\n mode=mode_without_gpu)\n if mode == 'max':\n assert any([isinstance(node.op, MaxPoolGrad)\n for node in fc.maker.fgraph.toposort()])\n else:\n assert any([isinstance(node.op, AveragePoolGrad)\n for node in fc.maker.fgraph.toposort()])\n c_out = fc(data)\n utt.assert_allclose(c_out, g_out)\n\n\ndef test_pooling_opt():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n\n x = T.fmatrix()\n\n f = theano.function(\n [x],\n pool_2d(x, ds=(2, 2), mode='average_inc_pad',\n ignore_border=True),\n mode=mode_with_gpu)\n\n assert any([isinstance(n.op, dnn.GpuDnnPool)\n for n in f.maker.fgraph.toposort()])\n\n f(numpy.zeros((10, 10), dtype='float32'))\n\n f = theano.function(\n [x],\n T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',\n ignore_border=True).sum(),\n x),\n mode=mode_with_gpu.including(\"cudnn\"))\n\n assert any([isinstance(n.op, dnn.GpuDnnPoolGrad)\n for n in f.maker.fgraph.toposort()])\n\n f(numpy.zeros((10, 10), dtype='float32'))\n\n\ndef test_dnn_tag():\n \"\"\"\n Test that if cudnn isn't avail we crash and that if it is avail, we use it.\n \"\"\"\n x = T.ftensor4()\n old = theano.config.on_opt_error\n theano.config.on_opt_error = \"raise\"\n\n sio = StringIO()\n handler = logging.StreamHandler(sio)\n logging.getLogger('theano.compile.tests.test_dnn').addHandler(handler)\n # Silence original handler when intentionnally generating warning messages\n logging.getLogger('theano').removeHandler(theano.logging_default_handler)\n raised = False\n try:\n f = theano.function(\n [x],\n pool_2d(x, ds=(2, 2), ignore_border=True),\n mode=mode_with_gpu.including(\"cudnn\"))\n except (AssertionError, RuntimeError):\n assert not dnn.dnn_available(test_ctx_name)\n raised = True\n finally:\n theano.config.on_opt_error = old\n logging.getLogger(\n 'theano.compile.tests.test_dnn').removeHandler(handler)\n logging.getLogger('theano').addHandler(theano.logging_default_handler)\n\n if not raised:\n assert dnn.dnn_available(test_ctx_name)\n assert any([isinstance(n.op, dnn.GpuDnnPool)\n for n in f.maker.fgraph.toposort()])\n\n\nclass TestDnnInferShapes(utt.InferShapeTester):\n def setUp(self):\n super(TestDnnInferShapes, self).setUp()\n self.mode = mode_with_gpu\n\n 
def test_softmax(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n t = T.ftensor4('t')\n rand_tensor = numpy.asarray(\n numpy.random.rand(5, 4, 3, 2),\n dtype='float32'\n )\n self._compile_and_check(\n [t],\n [dnn.GpuDnnSoftmax('accurate', 'channel')(t)],\n [rand_tensor],\n dnn.GpuDnnSoftmax\n )\n\n self._compile_and_check(\n [t],\n [\n T.grad(\n dnn.GpuDnnSoftmax(\n 'accurate',\n 'channel'\n )(t).mean(),\n t\n )\n ],\n [rand_tensor],\n dnn.GpuDnnSoftmaxGrad\n )\n\n def test_conv(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n kerns = T.ftensor4('kerns')\n out = T.ftensor4('out')\n img_val = numpy.asarray(\n numpy.random.rand(7, 2, 6, 4),\n dtype='float32'\n )\n kern_vals = numpy.asarray(\n numpy.random.rand(8, 2, 4, 3),\n dtype='float32'\n )\n\n for params in product(\n ['valid', 'full', 'half'],\n [(1, 1), (2, 2)],\n ['conv', 'cross']\n ):\n out_vals = numpy.zeros(\n dnn.GpuDnnConv.get_out_shape(img_val.shape, kern_vals.shape,\n border_mode=params[0],\n subsample=params[1]),\n dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(kerns.shape)\n conv = dnn.GpuDnnConv()(img, kerns, out, desc)\n self._compile_and_check(\n [img, kerns, out],\n [conv],\n [img_val, kern_vals, out_vals],\n dnn.GpuDnnConv\n )\n\n def test_conv_gradw(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n kerns = T.ftensor4('kerns')\n out = T.ftensor4('out')\n img_val = numpy.asarray(\n numpy.random.rand(2, 5, 6, 8),\n dtype='float32'\n )\n kern_vals = numpy.asarray(\n numpy.random.rand(2, 1, 5, 6),\n dtype='float32'\n )\n\n for params in product(\n ['valid', 'full', 'half'],\n [(1, 1)], # strides besides (1, 1)\n ['conv', 'cross']\n ):\n temp_img = img.dimshuffle(1, 0, 2, 3)\n temp_kerns = kerns\n if params[2] == 'conv':\n temp_kerns = temp_kerns[:, :, ::-1, ::-1]\n temp_kerns = temp_kerns.dimshuffle(1, 0, 2, 3)\n shape = (\n kern_vals.shape[1], img_val.shape[1],\n img_val.shape[2] - kern_vals.shape[2] + 1,\n img_val.shape[3] - kern_vals.shape[3] + 1\n )\n out_vals = numpy.zeros(shape, dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(out.shape)\n conv_grad_w = dnn.GpuDnnConvGradW()(\n temp_img,\n temp_kerns,\n out,\n desc,\n )\n self._compile_and_check(\n [temp_img, temp_kerns, out],\n [conv_grad_w],\n [img_val, kern_vals, out_vals],\n dnn.GpuDnnConvGradW\n )\n\n def test_conv_gradi(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n kerns = T.ftensor4('kerns')\n out = T.ftensor4('out')\n kern_vals = numpy.asarray(\n numpy.random.rand(13, 14, 15, 16),\n dtype='float32'\n )\n out_vals = numpy.asarray(\n numpy.random.rand(3, 13, 5, 6),\n dtype='float32'\n )\n\n for params in product(\n ['valid'], # Should this work for 'full'?\n [(1, 1)],\n ['conv', 'cross']\n ):\n shape = (\n out_vals.shape[0], kern_vals.shape[1],\n out_vals.shape[2] + kern_vals.shape[2] - 1,\n out_vals.shape[3] + kern_vals.shape[3] - 1\n )\n img_vals = numpy.zeros(shape, dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(kerns.shape)\n conv_grad_i = dnn.GpuDnnConvGradI()(\n kerns,\n out,\n img,\n desc,\n )\n self._compile_and_check(\n [kerns, img, out],\n [conv_grad_i],\n [kern_vals, img_vals, out_vals],\n 
dnn.GpuDnnConvGradI\n )\n\n def test_pool(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n img_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n\n # 'average_exc_pad' is disabled for versions < 4004\n if dnn.version() < 4004:\n modes = ['max', 'average_inc_pad']\n else:\n modes = ['max', 'average_inc_pad', 'average_exc_pad']\n\n for params in product(\n [(1, 1), (2, 2), (3, 3)],\n [(1, 1), (2, 2), (3, 3)],\n modes\n ):\n desc = dnn.GpuDnnPoolDesc(\n ws=params[0],\n stride=params[1],\n mode=params[2]\n )()\n self._compile_and_check(\n [img],\n [dnn.GpuDnnPool()(img, desc)],\n [img_val],\n dnn.GpuDnnPool\n )\n\n def test_pool_grad(self):\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n img_grad = T.ftensor4('img_grad')\n out = T.ftensor4('out')\n img_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n img_grad_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n out_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n\n for params in product(\n [(1, 1), (2, 2), (3, 3)],\n [(1, 1), (2, 2), (3, 3)],\n ['max', 'average_inc_pad']\n ):\n desc = dnn.GpuDnnPoolDesc(\n ws=params[0],\n stride=params[1],\n mode=params[2]\n )()\n pool_grad = dnn.GpuDnnPoolGrad()(\n img,\n out,\n img_grad,\n desc\n )\n self._compile_and_check(\n [img, img_grad, out],\n [pool_grad],\n [img_val, img_grad_val, out_val],\n dnn.GpuDnnPoolGrad\n )\n\n\n# this has been a problem in the past\ndef test_dnn_conv_border_mode():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4()\n kern = T.ftensor4()\n\n dnn.dnn_conv(img, kern, border_mode=1)\n dnn.dnn_conv(img, kern, border_mode=(2, 3))\n dnn.dnn_conv(img, kern, border_mode='full')\n dnn.dnn_conv(img, kern, border_mode='valid')\n dnn.dnn_conv(img, kern, border_mode='half')\n\n\ndef test_dnn_conv_alpha_output_merge():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4()\n kern = T.ftensor4()\n out = T.ftensor4()\n\n b = 1\n c = 4\n f = 3\n ih = 5\n iw = 8\n kh = 2\n kw = 6\n img_val = numpy.random.random((b, c, ih, iw)).astype('float32')\n kern_val = numpy.random.random((f, c, kh, kw)).astype('float32')\n out_val = numpy.random.random((b, f, ih - kh + 1,\n iw - kw + 1)).astype('float32')\n\n conv = dnn.dnn_conv(img, kern)\n gw = theano.grad(conv.sum(), kern)\n gi = theano.grad(conv.sum(), img)\n\n lr = numpy.asarray(0.05, dtype='float32')\n\n fr = lr * (conv + out)\n wr = kern + lr * gw\n ir = img + lr * gi\n\n f1 = theano.function([img, kern, out], [fr, wr, ir], mode=mode_with_gpu)\n assert isinstance(f1.maker.fgraph.outputs[0].owner.inputs[0].owner.op,\n dnn.GpuDnnConv)\n assert isinstance(f1.maker.fgraph.outputs[1].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradW)\n assert isinstance(f1.maker.fgraph.outputs[2].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradI)\n\n mode = mode_with_gpu\n mode = mode.excluding('local_dnn_conv_alpha_merge')\n mode = mode.excluding('local_dnn_convw_alpha_merge')\n mode = mode.excluding('local_dnn_convi_alpha_merge')\n mode = mode.excluding('local_dnn_conv_output_merge')\n mode = mode.excluding('local_dnn_convw_output_merge')\n mode = mode.excluding('local_dnn_convi_output_merge')\n\n f2 = theano.function([img, kern, out], [fr, wr, ir], mode=mode)\n\n assert not 
isinstance(f2.maker.fgraph.outputs[0].owner.inputs[0].owner.op,\n dnn.GpuDnnConv)\n assert not isinstance(f2.maker.fgraph.outputs[1].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradW)\n assert not isinstance(f2.maker.fgraph.outputs[2].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradI)\n\n out_f1 = f1(img_val, kern_val, out_val)\n out_f2 = f2(img_val, kern_val, out_val)\n\n assert len(out_f1) == len(out_f2)\n\n for v1, v2 in zip(out_f1, out_f2):\n utt.assert_allclose(v1, v2)\n\n\ndef test_dnn_conv_grad():\n b = 1\n c = 4\n f = 3\n ih = 2\n iw = 8\n kh = 2\n kw = 2\n img_val = numpy.random.random((b, c, ih, iw)).astype('float32')\n kern_val = numpy.random.random((f, c, kh, kw)).astype('float32')\n out_val = numpy.random.random((b, f, ih - kw + 1,\n iw - kw + 1)).astype('float32')\n\n def dconv(img, kern, out):\n desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode='conv')(kern.shape)\n return dnn.GpuDnnConv()(img, kern, out, desc, alpha=0.5, beta=0.75)\n\n def dconvi(img, kern, out):\n desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode='conv')(kern.shape)\n return dnn.GpuDnnConvGradI()(kern, out, img, desc, alpha=-1.0,\n beta=0.0)\n\n def dconvw(img, kern, out):\n desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode='conv')(kern.shape)\n return dnn.GpuDnnConvGradW()(img, out, kern, desc, alpha=0.75,\n beta=-1.0)\n\n utt.verify_grad(dconv, [img_val, kern_val, out_val])\n utt.verify_grad(dconvi, [img_val, kern_val, out_val])\n utt.verify_grad(dconvw, [img_val, kern_val, out_val])\n\n\ndef test_version():\n if not dnn.dnn_available(test_ctx_name):\n raise SkipTest(dnn.dnn_available.msg)\n assert isinstance(dnn.version(), int)\n\n\nclass test_SoftMax(test_nnet.test_SoftMax):\n gpu_op = dnn.GpuDnnSoftmax\n gpu_grad_op = dnn.GpuDnnSoftmaxGrad\n mode = mode_with_gpu\n\n def test_softmax_shape_0(self):\n raise SkipTest(\"Cudnn doesn't support 0 shapes\")\n\n def test_softmax_grad(self):\n def cmp(n, m, f, f_gpu):\n data = numpy.arange(n * m, dtype='float32').reshape(n, m)\n gdata = numpy.asarray(data)[:, :, None, None]\n\n out = f(data)\n gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]\n utt.assert_allclose(out, gout)\n\n x = T.matrix('x', 'float32')\n x_gpu = T.tensor4('x_gpu', 'float32')\n f_z = T.nnet.softmax_op\n f_gpu = dnn.GpuDnnSoftmax(\n 'accurate',\n 'channel'\n )\n\n # Verify the grad operation\n dims = (2, 3, 4, 5)\n gdata = numpy.arange(\n numpy.product(dims),\n dtype='float32'\n ).reshape(dims)\n T.verify_grad(f_gpu, [gdata], rng=numpy.random,\n mode=mode_with_gpu)\n\n # Verify that the CPU and GPU implementations return the same results\n # up to a tolerance.\n\n self._test_softmax(\n x,\n x_gpu,\n f_z,\n f_gpu,\n cmp\n )\n\n self._test_softmax(\n x, x, f_z, f_z, self._cmp\n )\n\n # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad\n # optimization is applied when cudnn is required\n y = T.fvector('y')\n f = theano.function(\n [y],\n T.grad(T.nnet.softmax(y).mean(), y),\n mode=mode_with_gpu\n )\n sorted_f = f.maker.fgraph.toposort()\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n self.gpu_grad_op)\n ]) == 1)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.tensor.nnet.SoftmaxGrad)\n ]) == 0)\n\n # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad\n # optimization is not applied when cudnn is excluded or not\n # available\n mode_wo_cudnn = mode_with_gpu.excluding(\"cudnn\")\n y = T.fvector('y')\n f = theano.function(\n [y],\n T.grad(T.nnet.softmax(y).mean(), y),\n 
mode=mode_wo_cudnn\n )\n sorted_f = f.maker.fgraph.toposort()\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n self.gpu_grad_op)\n ]) == 0)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.tensor.nnet.SoftmaxGrad)\n ]) == 1)\n\n # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad do not\n # crash with manual graph\n y = T.fvector('y')\n o = theano.tensor.nnet.SoftmaxGrad()(y, y * 2)\n f = theano.function([y], o, mode=mode_with_gpu)\n sorted_f = f.maker.fgraph.toposort()\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n self.gpu_grad_op)\n ]) == 1)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.tensor.nnet.SoftmaxGrad)\n ]) == 0)\n\n def test_log_softmax(self):\n # This is a test for an optimization that depends on CuDNN v3 or\n # more recent. Don't test if the CuDNN version is too old.\n if dnn.version() < 3000:\n raise SkipTest(\"Log-softmax is only in cudnn v3+\")\n\n x = T.ftensor4()\n softmax_out = dnn.GpuDnnSoftmax('accurate', 'channel')(x)\n log_out = T.log(T.as_tensor_variable(softmax_out))\n\n f = theano.function([x], log_out, mode=mode_with_gpu)\n\n # Ensure that the optimization has been applied\n dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, dnn.GpuDnnSoftmax)]\n assert len(dnn_softmax_nodes) == 1\n assert dnn_softmax_nodes[0].op.algo == \"log\"\n\n # Ensure that the output of the function is valid\n input_shapes = [(3, 4, 5, 6),\n (1025, 2, 3, 4),\n (2, 1025, 3, 4),\n (2, 3, 1025, 4),\n (2, 3, 4, 1025),\n (66000, 2, 3, 4),\n (2, 66000, 3, 4),\n (2, 3, 66000, 4),\n (2, 3, 4, 66000)]\n\n for inp_shape in input_shapes:\n input_val = numpy.random.normal(0, 1, inp_shape).astype(\"float32\")\n\n out = f(input_val)\n expected_out = numpy.log(numpy.exp(input_val) /\n numpy.exp(input_val).sum(1)[:, None, :, :])\n\n utt.assert_allclose(out, expected_out)\n\n def test_log_softmax2(self):\n # Test that the op LogSoftmax is correctly replaced by the op\n # DnnSoftmax with the 'log' mode.\n\n # This is a test for an optimization that depends on CuDNN v3 or\n # more recent. Don't test if the CuDNN version is too old.\n if dnn.version() < 3000:\n raise SkipTest(\"Log-softmax is only in cudnn v3+\")\n\n # Compile a reference function, on the CPU, to be used to validate the\n # results of the other function.\n x = T.fmatrix()\n f_ref = theano.function([x], T.nnet.LogSoftmax()(x))\n\n # Build the first graph and ensure that the optimization is applied\n log_softmax_out = T.nnet.LogSoftmax()(x)\n f = theano.function([x], log_softmax_out, mode=mode_with_gpu)\n\n dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, dnn.GpuDnnSoftmax)]\n assert len(dnn_softmax_nodes) == 1\n assert dnn_softmax_nodes[0].op.algo == \"log\"\n\n # Compare the output of the function with the reference function\n inp = numpy.random.normal(0, 1, (5, 6)).astype(\"float32\")\n utt.assert_allclose(f(inp), f_ref(inp))\n\n # Build the first graph and ensure that the optimization is applied\n log_softmax_out = T.log(T.nnet.Softmax()(x))\n f = theano.function([x], log_softmax_out, mode=mode_with_gpu)\n\n dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, dnn.GpuDnnSoftmax)]\n assert len(dnn_softmax_nodes) == 1\n assert dnn_softmax_nodes[0].op.algo == \"log\"\n\n # Compare the output of the function with the reference function\n inp = numpy.random.normal(0, 1, (5, 6)).astype(\"float32\")\n utt.assert_allclose(f(inp), f_ref(inp))\n",
"import os\nimport tarfile\n\nimport h5py\nimport numpy\nimport six\nfrom six.moves import cPickle\n\nfrom fuel.converters.base import fill_hdf5_file, check_exists\n\nDISTRIBUTION_FILE = 'cifar-100-python.tar.gz'\n\n\n@check_exists(required_files=[DISTRIBUTION_FILE])\ndef convert_cifar100(directory, output_directory,\n output_filename='cifar100.hdf5'):\n \"\"\"Converts the CIFAR-100 dataset to HDF5.\n\n Converts the CIFAR-100 dataset to an HDF5 dataset compatible with\n :class:`fuel.datasets.CIFAR100`. The converted dataset is saved as\n 'cifar100.hdf5'.\n\n This method assumes the existence of the following file:\n `cifar-100-python.tar.gz`\n\n Parameters\n ----------\n directory : str\n Directory in which the required input files reside.\n output_directory : str\n Directory in which to save the converted dataset.\n output_filename : str, optional\n Name of the saved dataset. Defaults to 'cifar100.hdf5'.\n\n Returns\n -------\n output_paths : tuple of str\n Single-element tuple containing the path to the converted dataset.\n\n \"\"\"\n output_path = os.path.join(output_directory, output_filename)\n h5file = h5py.File(output_path, mode=\"w\")\n input_file = os.path.join(directory, 'cifar-100-python.tar.gz')\n tar_file = tarfile.open(input_file, 'r:gz')\n\n file = tar_file.extractfile('cifar-100-python/train')\n try:\n if six.PY3:\n train = cPickle.load(file, encoding='latin1')\n else:\n train = cPickle.load(file)\n finally:\n file.close()\n\n train_features = train['data'].reshape(train['data'].shape[0],\n 3, 32, 32)\n train_coarse_labels = numpy.array(train['coarse_labels'],\n dtype=numpy.uint8)\n train_fine_labels = numpy.array(train['fine_labels'],\n dtype=numpy.uint8)\n\n file = tar_file.extractfile('cifar-100-python/test')\n try:\n if six.PY3:\n test = cPickle.load(file, encoding='latin1')\n else:\n test = cPickle.load(file)\n finally:\n file.close()\n\n test_features = test['data'].reshape(test['data'].shape[0],\n 3, 32, 32)\n test_coarse_labels = numpy.array(test['coarse_labels'], dtype=numpy.uint8)\n test_fine_labels = numpy.array(test['fine_labels'], dtype=numpy.uint8)\n\n data = (('train', 'features', train_features),\n ('train', 'coarse_labels', train_coarse_labels.reshape((-1, 1))),\n ('train', 'fine_labels', train_fine_labels.reshape((-1, 1))),\n ('test', 'features', test_features),\n ('test', 'coarse_labels', test_coarse_labels.reshape((-1, 1))),\n ('test', 'fine_labels', test_fine_labels.reshape((-1, 1))))\n fill_hdf5_file(h5file, data)\n h5file['features'].dims[0].label = 'batch'\n h5file['features'].dims[1].label = 'channel'\n h5file['features'].dims[2].label = 'height'\n h5file['features'].dims[3].label = 'width'\n h5file['coarse_labels'].dims[0].label = 'batch'\n h5file['coarse_labels'].dims[1].label = 'index'\n h5file['fine_labels'].dims[0].label = 'batch'\n h5file['fine_labels'].dims[1].label = 'index'\n\n h5file.flush()\n h5file.close()\n\n return (output_path,)\n\n\ndef fill_subparser(subparser):\n \"\"\"Sets up a subparser to convert the CIFAR100 dataset files.\n\n Parameters\n ----------\n subparser : :class:`argparse.ArgumentParser`\n Subparser handling the `cifar100` command.\n\n \"\"\"\n return convert_cifar100\n"
] |
[
[
"numpy.arange"
],
[
"numpy.dot",
"numpy.hstack",
"numpy.arange",
"numpy.ones",
"numpy.concatenate",
"numpy.testing.assert_raises",
"numpy.testing.assert_allclose",
"numpy.exp",
"numpy.tanh",
"numpy.array",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.asarray",
"numpy.all",
"numpy.max",
"numpy.round",
"numpy.random.randn",
"numpy.zeros_like",
"numpy.exp",
"numpy.where",
"numpy.random.randint",
"numpy.allclose",
"numpy.arange",
"numpy.sin",
"numpy.log1p",
"numpy.zeros",
"numpy.log",
"numpy.cosh",
"numpy.power",
"numpy.min",
"numpy.tan",
"numpy.int64",
"numpy.log10",
"numpy.random.rand",
"numpy.array",
"numpy.tanh",
"numpy.sum",
"numpy.absolute",
"numpy.random.random",
"numpy.log2",
"numpy.isfinite",
"numpy.cos",
"numpy.sinh",
"numpy.ones",
"numpy.sign",
"numpy.random.normal",
"numpy.prod",
"numpy.ndindex"
],
[
"numpy.random.rand"
],
[
"numpy.product",
"numpy.random.random",
"numpy.asarray",
"numpy.arange",
"numpy.random.normal",
"numpy.random.rand",
"numpy.exp",
"numpy.zeros"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Sessa93/continuous-auth-service
|
[
"e045bbbae1205629a4cc4bd369c0a0b5dd6fd93f"
] |
[
"python_scripts/general_purpose.py"
] |
[
"# pylint: disable = C0111, C0103, C0411, C0301, W0102, C0330, C0303\n\"\"\"General Purpose Functions\"\"\"\nimport numpy as np\nfrom random import randint\nfrom sklearn.covariance import EllipticEnvelope\nfrom sklearn.ensemble import IsolationForest\n\n\ndef is_not_extreme_outlier(x, _min, _max):\n \"\"\"Returns true if x >=min and x<=max.\"\"\"\n return x >= _min and x <= _max\n\n\ndef my_reshape(sel, filter_by_digraph=''):\n \"\"\"Reshapes the subject extracted data list.\n \\nAttributes:\n \\n sel: The list of dicts with digraphs timings: ['data']: [{'digraph', 'points'}]\n \\n filter_by_digraph: Specifies the digraph to filter\n \\n Returns: Object with keys ['subject'], ['track_code'], and ['points'] as x,y,z\n \"\"\"\n if filter_by_digraph != '':\n tmp = [v for v in sel['data'] if v['digraph'] == filter_by_digraph]\n if tmp == []:\n # exit('!!!Exiting: No digraph data found for subject:' +\n # sel['_subject'])\n return -1\n else:\n pts = tmp[0]['points']\n else:\n pts = sel['data'][0]['points']\n for v in sel['data'][1:]:\n pts = np.append(pts, v['points'], axis=0)\n return {\"subject\": sel['_subject'], \"track_code\": sel['_track_code'], \"points\": pts}\n\n\ndef is_inside_interval(point, m, t):\n \"\"\"Returns: true if point is bigger than m-t and less than m+t\"\"\"\n return point >= m - t and point <= m + t\n\n\ndef clean_with_std(points, n_stds_tolerance):\n \"\"\"Removes data that are too far away by n_stds of their mean\n \\npoints: n x 3 numpy array of x,y,z points\n \\nn_stds_tolerance: int How many stds tolerance\n \\nReturns: n x 3 numpy array with clean data\"\"\"\n means = {\"x\": np.mean(points[:, 0]), \"y\": np.mean(\n points[:, 1]), \"z\": np.mean(points[:, 2])}\n tols = {\"x\": n_stds_tolerance * np.std(points[:, 0]), \"y\": n_stds_tolerance * np.std(\n points[:, 1]), \"z\": n_stds_tolerance * np.std(points[:, 2])}\n return np.array([row for row in points if is_inside_interval(row[0], means['x'], tols['x']) and is_inside_interval(row[1], means['y'], tols['y']) and is_inside_interval(row[2], means['z'], tols['z'])])\n\n\ndef clean_with_algo(X, algorithm, contamination=0.1):\n \"\"\"Applies the specific algorithm to remove outliers from the data, something like outlier\n detection to remove noise from data coming from one class.\n \\n X: NxM numpy array\n \\n algorithm: Can be one of 'EllipticEnvelope'\n \\n contamination: If EllipticEnvelope is used, the contamination\n specifies how polluted the data are\n \\n Returns: Data without outliers, same shape as X\n \"\"\"\n\n # Generate Model\n if hasattr(globals()[algorithm](), 'contamination'):\n model = globals()[algorithm](contamination=contamination)\n else:\n model = globals()[algorithm]()\n\n # Fit & predict\n model.fit(X)\n labels_pred = model.predict(X)\n\n # Remove outliers\n _X = np.array([row for i, row in enumerate(X) if labels_pred[i] != -1])\n\n\ndef events_sample(events, samples):\n \"\"\"Samples a continuous amount of events defined by samples \"\"\"\n if samples > len(events):\n exit('*events_sample: Exiting -> sample > events length')\n start = randint(0, len(events) - samples - 1)\n return events[start:start + samples]\n"
] |
[
[
"numpy.std",
"numpy.append",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vvvm23/ddpm
|
[
"99482bb4edc4286b87e2a79cace0f43f95bdb297"
] |
[
"dataset.py"
] |
[
"import torch\nimport torchvision\n\nfrom ptpt.log import error\n\nfrom pathlib import Path\n\ndef get_dataset(task: str):\n if task in ['ffhq1024','ffhq1024-large']:\n transforms = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),\n ])\n dataset = torchvision.datasets.ImageFolder('data/ffhq1024', transform=transforms)\n train_idx, test_idx = torch.arange(0, 60_000), torch.arange(60_000, len(dataset))\n train_dataset, test_dataset = torch.utils.data.Subset(dataset, train_idx), torch.utils.data.Subset(dataset, test_idx)\n elif task == 'ffhq256':\n transforms = torchvision.transforms.Compose([\n torchvision.transforms.Resize(256),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),\n ])\n dataset = torchvision.datasets.ImageFolder('data/ffhq1024', transform=transforms)\n train_idx, test_idx = torch.arange(0, 60_000), torch.arange(60_000, len(dataset))\n train_dataset, test_dataset = torch.utils.data.Subset(dataset, train_idx), torch.utils.data.Subset(dataset, test_idx)\n elif task == 'ffhq128':\n transforms = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),\n ])\n dataset = torchvision.datasets.ImageFolder('data/ffhq128', transform=transforms)\n train_idx, test_idx = torch.arange(0, 60_000), torch.arange(60_000, len(dataset))\n train_dataset, test_dataset = torch.utils.data.Subset(dataset, train_idx), torch.utils.data.Subset(dataset, test_idx)\n elif task == 'cifar10':\n transforms = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),\n ])\n train_dataset = torchvision.datasets.CIFAR10('data', train=True, transform=transforms, download=True)\n test_dataset = torchvision.datasets.CIFAR10('data', train=False, transform=transforms, download=True)\n elif task == 'mnist':\n transforms = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n ])\n train_dataset = torchvision.datasets.MNIST('data', train=True, transform=transforms, download=True)\n test_dataset = torchvision.datasets.MNIST('data', train=False, transform=transforms, download=True)\n elif task == 'kmnist':\n transforms = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n ])\n train_dataset = torchvision.datasets.KMNIST('data', train=True, transform=transforms, download=True)\n test_dataset = torchvision.datasets.KMNIST('data', train=False, transform=transforms, download=True)\n else:\n msg = f\"unrecognised dataset '{task}'!\"\n error(msg)\n raise ValueError(msg)\n\n return train_dataset, test_dataset\n"
] |
[
[
"torch.utils.data.Subset",
"torch.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chahyon1998/sornet-ku
|
[
"87a4479368c95e4d3d59863a4329b2b77f184218"
] |
[
"train_leonardo.py"
] |
[
"'''\nMIT License\n\nCopyright (c) 2022 Wentao Yuan\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nfrom datasets import LeonardoDataset, build_predicates, denormalize_rgb\nfrom functools import partial\nfrom matplotlib import pyplot as plt\nfrom networks import EmbeddingNet, ReadoutNet\nfrom tensorboardX import SummaryWriter\nfrom train_utils import train_one_epoch, eval_one_epoch, train_ddp\nimport argparse\nimport json\nimport numpy as np\nimport os\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nunary_pred = [\n 'on_surface(%s, left)', 'on_surface(%s, right)', 'on_surface(%s, far)',\n 'on_surface(%s, center)', 'has_obj(robot, %s)', 'top_is_clear(%s)',\n 'in_approach_region(robot, %s)'\n]\nbinary_pred = ['stacked(%s, %s)', 'aligned_with(%s, %s)']\n\n\ndef step(use_gripper, data, model, head):\n img, obj_patches, gripper, target = data\n emb, attn = model(img, obj_patches)\n if use_gripper:\n emb = torch.cat(\n [emb, gripper[:, None, None].expand(-1, emb.shape[1], -1)], dim=-1\n )\n logits = head(emb)\n loss = torch.nn.functional.binary_cross_entropy_with_logits(logits, target)\n return logits, loss\n\n\ndef calc_acc(data, logits, pred_types):\n img, obj_patches, gripper, target = data\n pred = (logits.detach() > 0).int()\n acc = (pred == target.int()).sum(dim=0) / logits.shape[0] * 100\n return acc\n\n\ndef log(\n writer, global_step, split, epoch, idx, total,\n batch_time, data_time, avg_loss, avg_acc, pred_types=None\n ):\n print(\n f'Epoch {(epoch+1):02d} {split.capitalize()} {idx:04d}/{total:04d} '\n f'Batch time {batch_time:.3f} Data time {data_time:.3f} '\n f'Loss {avg_loss.item():.4f} Accuracy {avg_acc.mean().item():.2f}'\n )\n acc = [a.mean() for a in avg_acc.split(list(pred_types.values()))]\n writer.add_scalar(f'{split}/loss', avg_loss, global_step)\n writer.add_scalar(f'{split}/accuracy', avg_acc.mean().item(), global_step)\n for a, name in zip(acc, pred_types.keys()):\n writer.add_scalar(f'{split}/accuracy_{name}', a.item(), global_step)\n\n\ndef plot(predicates, n_plot, data, logits):\n img, obj_patches, gripper, target = data\n patch_size = obj_patches.shape[-1]\n img_with_obj = []\n for i, o in zip(img[:n_plot], obj_patches[:n_plot]):\n obj_panel = np.full((patch_size+4, i.shape[2], 3), 255, dtype=np.uint8)\n for j in range(obj_patches.shape[1]):\n obj_panel[4:, j*2*patch_size:(j*2+1)*patch_size] = \\\n np.array(denormalize_rgb(o[j]))\n img_with_obj.append(\n np.concatenate([np.array(denormalize_rgb(i)), obj_panel], axis=0)\n )\n pred = 
(logits.detach() > 0).int()\n\n fig = plt.figure(figsize=(n_plot * 4, 9))\n for i in range(n_plot):\n plt.subplot(2, n_plot, i + 1)\n plt.imshow(img_with_obj[i])\n plt.axis('off')\n plt.subplot(2, n_plot, n_plot + i + 1)\n j = 0\n for name, p, t in zip(predicates, pred[i], target[i]):\n if t or p:\n if t:\n c = 'k' if p else 'r'\n else:\n c = 'b'\n plt.text(\n 0.5, 0.9 - j * 0.08, name, color=c,\n fontsize=10, ha='center', va='center'\n )\n j += 1\n plt.axis('off')\n plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)\n return fig\n\n\ndef train(rank, args):\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = args.port\n dist.init_process_group('nccl', rank=rank, world_size=args.n_gpu)\n torch.cuda.set_device(rank)\n\n pred_types = {}\n for pred in unary_pred:\n prefix = pred.split('(')[0]\n if prefix in pred_types:\n pred_types[prefix] += args.n_objects\n else:\n pred_types[prefix] = args.n_objects\n for pred in binary_pred:\n prefix = pred.split('(')[0]\n pred_types[prefix] = args.n_objects * (args.n_objects - 1)\n\n objects = [f'object{i:02d}' for i in range(args.n_objects)]\n predicates = build_predicates(objects, unary_pred, binary_pred)\n\n train_data = LeonardoDataset(\n args.data_dir, 'train', predicates, 'train_objects.h5',\n randpatch=True, view=args.n_views, randview=True, gripper=args.gripper\n )\n\n valid_data = LeonardoDataset(\n args.data_dir, 'valid', predicates, 'train_objects.h5',\n randpatch=False, view=args.n_views, randview=True, gripper=args.gripper\n )\n\n model = EmbeddingNet(\n (args.img_w, args.img_h), args.patch_size, args.n_objects,\n args.width, args.layers, args.heads\n )\n out_dim = args.width + 1 if args.gripper else args.width\n head = ReadoutNet(out_dim, args.d_hidden, len(unary_pred), len(binary_pred))\n optimizer = torch.optim.Adam(\n list(model.parameters()) + list(head.parameters()), args.lr\n )\n\n init_epoch = 0\n if args.resume is not None:\n checkpoint = torch.load(args.resume, map_location='cpu')\n init_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['model'])\n head.load_state_dict(checkpoint['head'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n writer = None\n if rank == 0:\n writer = SummaryWriter(args.log_dir)\n json.dump(args.__dict__, open(f'{args.log_dir}/args.txt', 'w'), indent=4)\n\n step_fn = partial(step, args.gripper)\n plot_fn = partial(plot, predicates, args.n_plot)\n train_one = partial(\n train_one_epoch, pred_types, step_fn, calc_acc, log,\n args.print_freq, plot_fn, args.plot_freq, writer\n )\n eval_one = partial(\n eval_one_epoch, pred_types, step_fn, calc_acc, log, plot_fn, writer\n )\n train_ddp(\n train_data, valid_data, args.batch_size, args.n_worker, model, head,\n optimizer, init_epoch, args.n_epoch, train_one, eval_one,\n args.eval_freq, args.save_freq, args.log_dir\n )\n\n dist.destroy_process_group()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # Data\n parser.add_argument('--data_dir')\n parser.add_argument('--img_h', type=int, default=224)\n parser.add_argument('--img_w', type=int, default=224)\n parser.add_argument('--n_objects', type=int, default=4)\n parser.add_argument('--n_views', type=int, default=3)\n # Model\n parser.add_argument('--patch_size', type=int, default=32)\n parser.add_argument('--width', type=int, default=768)\n parser.add_argument('--layers', type=int, default=12)\n parser.add_argument('--heads', type=int, default=12)\n parser.add_argument('--d_hidden', type=int, default=512)\n 
parser.add_argument('--gripper', action='store_true')\n # Training\n parser.add_argument('--log_dir')\n parser.add_argument('--n_gpu', type=int, default=1)\n parser.add_argument('--n_worker', type=int, default=2)\n parser.add_argument('--port', default='12345')\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--lr', type=float, default=0.0001)\n parser.add_argument('--n_epoch', type=int, default=40)\n parser.add_argument('--print_freq', type=int, default=50)\n parser.add_argument('--plot_freq', type=int, default=500)\n parser.add_argument('--n_plot', type=int, default=4)\n parser.add_argument('--eval_freq', type=int, default=2)\n parser.add_argument('--save_freq', type=int, default=10)\n parser.add_argument('--resume')\n args = parser.parse_args()\n\n mp.spawn(\n train,\n args=(args,),\n nprocs=args.n_gpu,\n join=True\n )\n"
] |
[
[
"matplotlib.pyplot.imshow",
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.multiprocessing.spawn",
"torch.load",
"torch.nn.functional.binary_cross_entropy_with_logits",
"numpy.full",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"torch.distributed.destroy_process_group",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
whplh/mmediting
|
[
"9efe23bec5e07126ab945369670f026c2b7763d7",
"9efe23bec5e07126ab945369670f026c2b7763d7"
] |
[
"tests/test_models/test_restorers/test_basicvsr_model.py",
"tests/test_models/test_backbones/test_sr_backbones/test_tdan_net.py"
] |
[
"# Copyright (c) OpenMMLab. All rights reserved.\nimport tempfile\n\nimport mmcv\nimport pytest\nimport torch\nfrom mmcv.runner import obj_from_dict\n\nfrom mmedit.models import build_model\nfrom mmedit.models.backbones.sr_backbones import BasicVSRNet\nfrom mmedit.models.losses import MSELoss\n\n\ndef test_basicvsr_model():\n\n model_cfg = dict(\n type='BasicVSR',\n generator=dict(\n type='BasicVSRNet',\n mid_channels=64,\n num_blocks=30,\n spynet_pretrained=None),\n pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),\n )\n\n train_cfg = dict(fix_iter=1)\n train_cfg = mmcv.Config(train_cfg)\n test_cfg = None\n\n # build restorer\n restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)\n\n # test attributes\n assert restorer.__class__.__name__ == 'BasicVSR'\n assert isinstance(restorer.generator, BasicVSRNet)\n assert isinstance(restorer.pixel_loss, MSELoss)\n\n # prepare data\n inputs = torch.rand(1, 5, 3, 64, 64)\n targets = torch.rand(1, 5, 3, 256, 256)\n\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n targets = targets.cuda()\n restorer = restorer.cuda()\n\n # prepare data and optimizer\n data_batch = {'lq': inputs, 'gt': targets}\n optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))\n optimizer = {\n 'generator':\n obj_from_dict(optim_cfg, torch.optim,\n dict(params=getattr(restorer, 'generator').parameters()))\n }\n\n # train_step (wihout updating spynet)\n outputs = restorer.train_step(data_batch, optimizer)\n assert isinstance(outputs, dict)\n assert isinstance(outputs['log_vars'], dict)\n assert isinstance(outputs['log_vars']['loss_pix'], float)\n assert outputs['num_samples'] == 1\n assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())\n assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())\n assert torch.is_tensor(outputs['results']['output'])\n assert outputs['results']['output'].size() == (1, 5, 3, 256, 256)\n\n # train with spynet updated\n outputs = restorer.train_step(data_batch, optimizer)\n assert isinstance(outputs, dict)\n assert isinstance(outputs['log_vars'], dict)\n assert isinstance(outputs['log_vars']['loss_pix'], float)\n assert outputs['num_samples'] == 1\n assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())\n assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())\n assert torch.is_tensor(outputs['results']['output'])\n assert outputs['results']['output'].size() == (1, 5, 3, 256, 256)\n\n # test forward_dummy\n with torch.no_grad():\n output = restorer.forward_dummy(data_batch['lq'])\n assert torch.is_tensor(output)\n assert output.size() == (1, 5, 3, 256, 256)\n\n # forward_test\n with torch.no_grad():\n outputs = restorer(**data_batch, test_mode=True)\n assert torch.equal(outputs['lq'], data_batch['lq'].cpu())\n assert torch.equal(outputs['gt'], data_batch['gt'].cpu())\n assert torch.is_tensor(outputs['output'])\n assert outputs['output'].size() == (1, 5, 3, 256, 256)\n\n with torch.no_grad():\n outputs = restorer(inputs, test_mode=True)\n assert torch.equal(outputs['lq'], data_batch['lq'].cpu())\n assert torch.is_tensor(outputs['output'])\n assert outputs['output'].size() == (1, 5, 3, 256, 256)\n\n # test with metric and save image\n train_cfg = mmcv.ConfigDict(fix_iter=1)\n test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)\n test_cfg = mmcv.Config(test_cfg)\n\n data_batch = {\n 'lq': inputs,\n 'gt': targets,\n 'meta': [{\n 'gt_path': 'fake_path/fake_name.png',\n 'key': '000'\n }]\n }\n\n restorer = build_model(model_cfg, train_cfg=train_cfg, 
test_cfg=test_cfg)\n\n if torch.cuda.is_available():\n restorer = restorer.cuda()\n\n with pytest.raises(AssertionError):\n # evaluation with metrics must have gt images\n restorer(lq=inputs, test_mode=True)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n outputs = restorer(\n **data_batch,\n test_mode=True,\n save_image=True,\n save_path=tmpdir,\n iteration=None)\n assert isinstance(outputs, dict)\n assert isinstance(outputs['eval_result'], dict)\n assert isinstance(outputs['eval_result']['PSNR'], float)\n assert isinstance(outputs['eval_result']['SSIM'], float)\n\n outputs = restorer(\n **data_batch,\n test_mode=True,\n save_image=True,\n save_path=tmpdir,\n iteration=100)\n assert isinstance(outputs, dict)\n assert isinstance(outputs['eval_result'], dict)\n assert isinstance(outputs['eval_result']['PSNR'], float)\n assert isinstance(outputs['eval_result']['SSIM'], float)\n\n with pytest.raises(ValueError):\n # iteration should be number or None\n restorer(\n **data_batch,\n test_mode=True,\n save_image=True,\n save_path=tmpdir,\n iteration='100')\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmedit.models.backbones.sr_backbones.tdan_net import TDANNet\n\n\ndef test_tdan_net():\n \"\"\"Test TDANNet.\"\"\"\n\n # gpu (DCN is avaialble only on GPU)\n if torch.cuda.is_available():\n tdan = TDANNet().cuda()\n input_tensor = torch.rand(1, 5, 3, 64, 64).cuda()\n tdan.init_weights(pretrained=None)\n\n output = tdan(input_tensor)\n assert len(output) == 2 # (1) HR center + (2) aligned LRs\n assert output[0].shape == (1, 3, 256, 256) # HR center frame\n assert output[1].shape == (1, 5, 3, 64, 64) # aligned LRs\n\n with pytest.raises(TypeError):\n # pretrained should be str or None\n tdan.init_weights(pretrained=[1])\n"
] |
[
[
"torch.is_tensor",
"torch.no_grad",
"torch.rand",
"torch.cuda.is_available"
],
[
"torch.rand",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xiaoyangyang2/Paddle
|
[
"b1a4668c5ff39e44efcfea46d567a5c398fdf3dc",
"b1a4668c5ff39e44efcfea46d567a5c398fdf3dc",
"b1a4668c5ff39e44efcfea46d567a5c398fdf3dc"
] |
[
"python/paddle/fluid/tests/unittests/test_set_value_op.py",
"python/paddle/fluid/tests/unittests/test_tensor_register_hook.py",
"python/paddle/fluid/variable_index.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Test set_value op in static mode\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\n\nimport paddle\nfrom paddle.fluid.layer_helper import LayerHelper\nfrom functools import reduce\nfrom paddle.fluid.framework import _test_eager_guard, _in_eager_mode\n\n\nclass TestSetValueBase(unittest.TestCase):\n def setUp(self):\n paddle.enable_static()\n self.set_dtype()\n self.set_value()\n self.set_shape()\n self.data = np.ones(self.shape).astype(self.dtype)\n self.program = paddle.static.Program()\n\n def set_shape(self):\n self.shape = [2, 3, 4]\n\n def set_value(self):\n self.value = 6\n\n def set_dtype(self):\n self.dtype = \"float32\"\n\n def _call_setitem(self, x):\n x[0, 0] = self.value\n\n def _get_answer(self):\n self.data[0, 0] = self.value\n\n\nclass TestSetValueApi(TestSetValueBase):\n def _run_static(self):\n paddle.enable_static()\n with paddle.static.program_guard(self.program):\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n self._call_setitem(x)\n\n exe = paddle.static.Executor(paddle.CPUPlace())\n out = exe.run(self.program, fetch_list=[x])\n paddle.disable_static()\n return out\n\n def _run_dynamic(self):\n paddle.disable_static()\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n self._call_setitem(x)\n out = x.numpy()\n paddle.enable_static()\n return out\n\n def func_test_api(self):\n static_out = self._run_static()\n dynamic_out = self._run_dynamic()\n self._get_answer()\n\n error_msg = \"\\nIn {} mode: \\nExpected res = \\n{}, \\n\\nbut received : \\n{}\"\n self.assertTrue(\n (self.data == static_out).all(),\n msg=error_msg.format(\"static\", self.data, static_out))\n self.assertTrue(\n (self.data == dynamic_out).all(),\n msg=error_msg.format(\"dynamic\", self.data, dynamic_out))\n\n def test_api(self):\n with _test_eager_guard():\n self.func_test_api()\n self.func_test_api()\n\n\n# 1. 
Test different type of item: int, Python slice, Paddle Tensor\n# 1.1 item is int\nclass TestSetValueItemInt(TestSetValueApi):\n def _call_setitem(self, x):\n x[0] = self.value\n\n def _get_answer(self):\n self.data[0] = self.value\n\n\n# 1.2 item is slice\n# 1.2.1 step is 1\nclass TestSetValueItemSlice(TestSetValueApi):\n def _call_setitem(self, x):\n x[0:2] = self.value\n\n def _get_answer(self):\n self.data[0:2] = self.value\n\n\nclass TestSetValueItemSlice2(TestSetValueApi):\n def _call_setitem(self, x):\n x[0:-1] = self.value\n\n def _get_answer(self):\n self.data[0:-1] = self.value\n\n\nclass TestSetValueItemSlice3(TestSetValueApi):\n def _call_setitem(self, x):\n x[0:-1, 0:2] = self.value\n\n def _get_answer(self):\n self.data[0:-1, 0:2] = self.value\n\n\nclass TestSetValueItemSlice4(TestSetValueApi):\n def _call_setitem(self, x):\n x[0:, 1:2, :] = self.value\n\n def _get_answer(self):\n self.data[0:, 1:2, :] = self.value\n\n\nclass TestSetValueItemSlice5(TestSetValueApi):\n def _call_setitem(self, x):\n x[0:, 1:1, :] = self.value\n\n def _get_answer(self):\n self.data[0:, 1:1, :] = self.value\n\n\nclass TestSetValueItemSliceInWhile(TestSetValueApi):\n def _call_setitem(self, x):\n def cond(i, x):\n return i < 1\n\n def body(i, x):\n x[i] = self.value\n i = i + 1\n return i, x\n\n i = paddle.zeros(shape=(1, ), dtype='int32')\n i, x = paddle.fluid.layers.while_loop(cond, body, [i, x])\n\n def _get_answer(self):\n self.data[0] = self.value\n\n\n# 1.2.2 step > 1\nclass TestSetValueItemSliceStep(TestSetValueApi):\n def set_shape(self):\n self.shape = [5, 5, 5]\n\n def _call_setitem(self, x):\n x[0:2:2] = self.value\n\n def _get_answer(self):\n self.data[0:2:2] = self.value\n\n\nclass TestSetValueItemSliceStep2(TestSetValueApi):\n def set_shape(self):\n self.shape = [7, 5, 5]\n\n def _call_setitem(self, x):\n x[0:-1:3] = self.value\n\n def _get_answer(self):\n self.data[0:-1:3] = self.value\n\n\nclass TestSetValueItemSliceStep3(TestSetValueApi):\n def _call_setitem(self, x):\n x[0:-1, 0:2, ::2] = self.value\n\n def _get_answer(self):\n self.data[0:-1, 0:2, ::2] = self.value\n\n\nclass TestSetValueItemSliceStep4(TestSetValueApi):\n def _call_setitem(self, x):\n x[0:, 1:2:2, :] = self.value\n\n def _get_answer(self):\n self.data[0:, 1:2:2, :] = self.value\n\n\n# 1.2.3 step < 0\nclass TestSetValueItemSliceNegetiveStep(TestSetValueApi):\n def set_shape(self):\n self.shape = [5, 2]\n\n def set_value(self):\n self.value = np.array([3, 4])\n\n def _call_setitem(self, x):\n x[5:2:-1] = self.value\n\n def _get_answer(self):\n self.data[5:2:-1] = self.value\n\n\nclass TestSetValueItemSliceNegetiveStep2(TestSetValueApi):\n def set_shape(self):\n self.shape = [5]\n\n def set_value(self):\n self.value = np.array([3, 4])\n\n def _call_setitem(self, x):\n x[1::-1] = self.value\n\n def _get_answer(self):\n self.data[1::-1] = self.value\n\n\nclass TestSetValueItemSliceNegetiveStep3(TestSetValueApi):\n def set_shape(self):\n self.shape = [3]\n\n def set_value(self):\n self.value = np.array([3, 4, 5])\n\n def _call_setitem(self, x):\n x[::-1] = self.value\n\n def _get_answer(self):\n self.data[::-1] = self.value\n\n\nclass TestSetValueItemSliceNegetiveStep4(TestSetValueApi):\n def set_shape(self):\n self.shape = [3, 4, 5]\n\n def _call_setitem(self, x):\n x[2:0:-1, 0:2, ::-1] = self.value\n\n def _get_answer(self):\n self.data[2:0:-1, 0:2, ::-1] = self.value\n\n\n# 1.3 item is Ellipsis\n\n\nclass TestSetValueItemEllipsis1(TestSetValueApi):\n def _call_setitem(self, x):\n x[0:, ..., 1:] = self.value\n\n def 
_get_answer(self):\n self.data[0:, ..., 1:] = self.value\n\n\nclass TestSetValueItemEllipsis2(TestSetValueApi):\n def _call_setitem(self, x):\n x[0:, ...] = self.value\n\n def _get_answer(self):\n self.data[0:, ...] = self.value\n\n\nclass TestSetValueItemEllipsis3(TestSetValueApi):\n def _call_setitem(self, x):\n x[..., 1:] = self.value\n\n def _get_answer(self):\n self.data[..., 1:] = self.value\n\n\nclass TestSetValueItemEllipsis4(TestSetValueApi):\n def _call_setitem(self, x):\n x[...] = self.value\n\n def _get_answer(self):\n self.data[...] = self.value\n\n\n# 1.4 item is Paddle Tensor\nclass TestSetValueItemTensor(TestSetValueApi):\n def _call_setitem(self, x):\n zero = paddle.full([1], 0, dtype=\"int32\")\n x[zero] = self.value\n\n def _get_answer(self):\n self.data[0] = self.value\n\n\nclass TestSetValueItemTensor2(TestSetValueApi):\n def _call_setitem(self, x):\n zero = paddle.full([1], 0, dtype=\"int32\")\n two = paddle.full([1], 2, dtype=\"int64\")\n x[zero:two] = self.value\n\n def _get_answer(self):\n self.data[0:2] = self.value\n\n\nclass TestSetValueItemTensor3(TestSetValueApi):\n def _call_setitem(self, x):\n zero = paddle.full([1], 0, dtype=\"int32\")\n two = paddle.full([1], 2, dtype=\"int64\")\n x[zero:-1, 0:two] = self.value\n\n def _get_answer(self):\n self.data[0:-1, 0:2] = self.value\n\n\nclass TestSetValueItemTensor4(TestSetValueApi):\n def _call_setitem(self, x):\n zero = paddle.full([1], 0, dtype=\"int32\")\n two = paddle.full([1], 2, dtype=\"int64\")\n x[0:-1, zero:2, 0:6:two] = self.value\n\n def _get_answer(self):\n self.data[0:-1, 0:2, ::2] = self.value\n\n\nclass TestSetValueItemTensor5(TestSetValueApi):\n def _call_setitem(self, x):\n zero = paddle.full([1], 0, dtype=\"int32\")\n two = paddle.full([1], 2, dtype=\"int64\")\n x[zero:, 1:2:two, :] = self.value\n\n def _get_answer(self):\n self.data[0:, 1:2:2, :] = self.value\n\n\nclass TestSetValueItemTensor6(TestSetValueApi):\n def set_shape(self):\n self.shape = [3, 4, 5]\n\n def _call_setitem(self, x):\n minus1 = paddle.full([1], -1, dtype=\"int32\")\n zero = paddle.full([1], 0, dtype=\"int32\")\n x[2:zero:minus1, 0:2, 10:-6:minus1] = self.value\n\n def _get_answer(self):\n self.data[2:0:-1, 0:2, ::-1] = self.value\n\n\n# 1.5 item is None\nclass TestSetValueItemNone1(TestSetValueApi):\n def _call_setitem(self, x):\n x[None] = self.value\n\n def _get_answer(self):\n self.data[None] = self.value\n\n\nclass TestSetValueItemNone2(TestSetValueApi):\n def _call_setitem(self, x):\n x[0, None, 1] = self.value\n\n def _get_answer(self):\n self.data[0, None, 1] = self.value\n\n\nclass TestSetValueItemNone3(TestSetValueApi):\n def _call_setitem(self, x):\n x[:, None, None, 1] = self.value\n\n def _get_answer(self):\n self.data[:, None, None, 1] = self.value\n\n\nclass TestSetValueItemNone4(TestSetValueApi):\n def _call_setitem(self, x):\n x[0, 0, None, 1] = self.value\n\n def _get_answer(self):\n self.data[0, 0, None, 1] = self.value\n\n\nclass TestSetValueItemNone5(TestSetValueApi):\n def _call_setitem(self, x):\n x[0, None, 0, None, 1] = self.value\n\n def _get_answer(self):\n self.data[0, None, 0, None, 1] = self.value\n\n\nclass TestSetValueItemNone6(TestSetValueApi):\n def _call_setitem(self, x):\n x[None, 0, 0, None, 0] = self.value\n\n def _get_answer(self):\n self.data[None, 0, 0, None, 0] = self.value\n\n\nclass TestSetValueItemNone7(TestSetValueApi):\n def _call_setitem(self, x):\n x[:, None, 1] = np.zeros(self.shape)[:, None, 0]\n\n def _get_answer(self):\n self.data[:, None, 1] = np.zeros(self.shape)[:, 
None, 0]\n\n\nclass TestSetValueItemNone8(TestSetValueApi):\n def _call_setitem(self, x):\n x[:, 1, None] = np.zeros(self.shape)[:, 0, None]\n\n def _get_answer(self):\n self.data[:, 1, None] = np.zeros(self.shape)[:, 0, None]\n\n\nclass TestSetValueItemNone9(TestSetValueApi):\n def _call_setitem(self, x):\n x[None, :, 1, ..., None] = np.zeros(self.shape)[0, 0, :, None]\n\n def _get_answer(self):\n self.data[None, :, 1, ..., None] = np.zeros(self.shape)[0, 0, :, None]\n\n\nclass TestSetValueItemNone10(TestSetValueApi):\n def _call_setitem(self, x):\n x[..., None, :, None] = np.zeros(self.shape)[..., None, :, None]\n\n def _get_answer(self):\n self.data[..., None, :, None] = np.zeros(self.shape)[..., None, :, None]\n\n\n# 1.5 item is list or Tensor of bol\nclass TestSetValueItemBool1(TestSetValueApi):\n def _call_setitem(self, x):\n x[[True, False]] = self.value\n\n def _get_answer(self):\n self.data[[True, False]] = self.value\n\n\nclass TestSetValueItemBool2(TestSetValueApi):\n def _call_setitem(self, x):\n x[[False, False]] = self.value\n\n def _get_answer(self):\n self.data[[False, False]] = self.value\n\n\nclass TestSetValueItemBool3(TestSetValueApi):\n def _call_setitem(self, x):\n x[[False, True]] = np.zeros(self.shape[2])\n\n def _get_answer(self):\n self.data[[False, True]] = np.zeros(self.shape[2])\n\n\nclass TestSetValueItemBool4(TestSetValueApi):\n def _call_setitem(self, x):\n idx = paddle.assign(np.array([False, True]))\n x[idx] = np.zeros(self.shape[2])\n\n def _get_answer(self):\n self.data[np.array([False, True])] = np.zeros(self.shape[2])\n\n\nclass TestSetValueItemBool5(TestSetValueApi):\n def _call_setitem(self, x):\n idx = paddle.assign(\n np.array([[False, True, False], [True, True, False]]))\n x[idx] = self.value\n\n def _get_answer(self):\n self.data[np.array([[False, True, False], [True, True, False]\n ])] = self.value\n\n\nclass TestSetValueItemBool6(TestSetValueApi):\n def _call_setitem(self, x):\n x[0, ...] = 0\n x[x > 0] = self.value\n\n def _get_answer(self):\n self.data[0, ...] = 0\n self.data[self.data > 0] = self.value\n\n\n# 2. 
Test different type of value: int, float, numpy.ndarray, Tensor\n# 2.1 value is int32, int64, float32, float64, bool\n\n\ndef create_test_value_int32(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = 7\n\n def set_dtype(self):\n self.dtype = \"int32\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueInt32\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_int32(TestSetValueItemInt)\ncreate_test_value_int32(TestSetValueItemSlice)\ncreate_test_value_int32(TestSetValueItemSlice2)\ncreate_test_value_int32(TestSetValueItemSlice3)\ncreate_test_value_int32(TestSetValueItemSlice4)\n\n\ndef create_test_value_int64(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = 7\n\n def set_dtype(self):\n self.dtype = \"int64\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueInt64\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_int64(TestSetValueItemInt)\ncreate_test_value_int64(TestSetValueItemSlice)\ncreate_test_value_int64(TestSetValueItemSlice2)\ncreate_test_value_int64(TestSetValueItemSlice3)\ncreate_test_value_int64(TestSetValueItemSlice4)\n\n\ndef create_test_value_fp32(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = 3.3\n\n def set_dtype(self):\n self.dtype = \"float32\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueFp32\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_fp32(TestSetValueItemInt)\ncreate_test_value_fp32(TestSetValueItemSlice)\ncreate_test_value_fp32(TestSetValueItemSlice2)\ncreate_test_value_fp32(TestSetValueItemSlice3)\ncreate_test_value_fp32(TestSetValueItemSlice4)\n\n\ndef create_test_value_fp64(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = 2.0**127 # float32:[-2^128, 2^128)\n\n def set_dtype(self):\n self.dtype = \"float64\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueFp64\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_fp64(TestSetValueItemInt)\ncreate_test_value_fp64(TestSetValueItemSlice)\ncreate_test_value_fp64(TestSetValueItemSlice2)\ncreate_test_value_fp64(TestSetValueItemSlice3)\ncreate_test_value_fp64(TestSetValueItemSlice4)\n\n\ndef create_test_value_bool(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = 0\n\n def set_dtype(self):\n self.dtype = \"bool\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueBool\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_bool(TestSetValueItemInt)\ncreate_test_value_bool(TestSetValueItemSlice)\ncreate_test_value_bool(TestSetValueItemSlice2)\ncreate_test_value_bool(TestSetValueItemSlice3)\ncreate_test_value_bool(TestSetValueItemSlice4)\n\n\n# 2.2 value is numpy.array (int32, int64, float32, float64, bool)\ndef create_test_value_numpy_int32(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = np.array([5])\n\n def set_dtype(self):\n self.dtype = \"int32\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueNumpyInt32\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_numpy_int32(TestSetValueItemInt)\ncreate_test_value_numpy_int32(TestSetValueItemSlice)\ncreate_test_value_numpy_int32(TestSetValueItemSlice2)\ncreate_test_value_numpy_int32(TestSetValueItemSlice3)\ncreate_test_value_numpy_int32(TestSetValueItemSlice4)\n\n\ndef 
create_test_value_numpy_int64(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = np.array([1])\n\n def set_dtype(self):\n self.dtype = \"int64\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueNumpyInt64\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_numpy_int64(TestSetValueItemInt)\ncreate_test_value_numpy_int64(TestSetValueItemSlice)\ncreate_test_value_numpy_int64(TestSetValueItemSlice2)\ncreate_test_value_numpy_int64(TestSetValueItemSlice3)\ncreate_test_value_numpy_int64(TestSetValueItemSlice4)\n\n\ndef create_test_value_numpy_fp32(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = np.array([1])\n\n def set_dtype(self):\n self.dtype = \"float32\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueNumpyFp32\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_numpy_fp32(TestSetValueItemInt)\ncreate_test_value_numpy_fp32(TestSetValueItemSlice)\ncreate_test_value_numpy_fp32(TestSetValueItemSlice2)\ncreate_test_value_numpy_fp32(TestSetValueItemSlice3)\ncreate_test_value_numpy_fp32(TestSetValueItemSlice4)\n\n\ndef create_test_value_numpy_fp64(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = np.array([2**127]).astype(\"float64\")\n\n def set_dtype(self):\n self.dtype = \"float64\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueNumpyFp64\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_numpy_fp64(TestSetValueItemInt)\ncreate_test_value_numpy_fp64(TestSetValueItemSlice)\ncreate_test_value_numpy_fp64(TestSetValueItemSlice2)\ncreate_test_value_numpy_fp64(TestSetValueItemSlice3)\ncreate_test_value_numpy_fp64(TestSetValueItemSlice4)\n\n\ndef create_test_value_numpy_bool(parent):\n class TestValueInt(parent):\n def set_value(self):\n self.value = np.array([0])\n\n def set_dtype(self):\n self.dtype = \"bool\"\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueNumpyBool\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_numpy_bool(TestSetValueItemInt)\ncreate_test_value_numpy_bool(TestSetValueItemSlice)\ncreate_test_value_numpy_bool(TestSetValueItemSlice2)\ncreate_test_value_numpy_bool(TestSetValueItemSlice3)\ncreate_test_value_numpy_bool(TestSetValueItemSlice4)\n\n\n# 2.3 value is a Paddle Tensor (int32, int64, float32, float64, bool)\ndef create_test_value_tensor_int32(parent):\n class TestValueInt(parent):\n def set_dtype(self):\n self.dtype = \"int32\"\n\n def _call_setitem(self, x):\n value = paddle.full(shape=[1], fill_value=3, dtype=self.dtype)\n x[0, 1] = value\n\n def _get_answer(self):\n self.data[0, 1] = 3\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueTensorInt32\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_tensor_int32(TestSetValueItemInt)\ncreate_test_value_tensor_int32(TestSetValueItemSlice)\ncreate_test_value_tensor_int32(TestSetValueItemSlice2)\ncreate_test_value_tensor_int32(TestSetValueItemSlice3)\ncreate_test_value_tensor_int32(TestSetValueItemSlice4)\n\n\ndef create_test_value_tensor_int64(parent):\n class TestValueInt(parent):\n def set_dtype(self):\n self.dtype = \"int64\"\n\n def _call_setitem(self, x):\n value = paddle.full(shape=[1], fill_value=3, dtype=self.dtype)\n x[0, 1] = value\n\n def _get_answer(self):\n self.data[0, 1] = 3\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueTensorInt64\")\n 
TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_tensor_int64(TestSetValueItemInt)\ncreate_test_value_tensor_int64(TestSetValueItemSlice)\ncreate_test_value_tensor_int64(TestSetValueItemSlice2)\ncreate_test_value_tensor_int64(TestSetValueItemSlice3)\ncreate_test_value_tensor_int64(TestSetValueItemSlice4)\n\n\ndef create_test_value_tensor_fp32(parent):\n class TestValueInt(parent):\n def set_dtype(self):\n self.dtype = \"float32\"\n\n def _call_setitem(self, x):\n value = paddle.full(shape=[1], fill_value=3, dtype=self.dtype)\n x[0, 1] = value\n\n def _get_answer(self):\n self.data[0, 1] = 3\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueTensorFp32\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_tensor_fp32(TestSetValueItemInt)\ncreate_test_value_tensor_fp32(TestSetValueItemSlice)\ncreate_test_value_tensor_fp32(TestSetValueItemSlice2)\ncreate_test_value_tensor_fp32(TestSetValueItemSlice3)\ncreate_test_value_tensor_fp32(TestSetValueItemSlice4)\n\n\ndef create_test_value_tensor_fp64(parent):\n class TestValueInt(parent):\n def set_dtype(self):\n self.dtype = \"float64\"\n\n def _call_setitem(self, x):\n value = paddle.full(shape=[1], fill_value=3, dtype=self.dtype)\n x[0, 1] = value\n\n def _get_answer(self):\n self.data[0, 1] = 3\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueTensorFp64\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_tensor_fp64(TestSetValueItemInt)\ncreate_test_value_tensor_fp64(TestSetValueItemSlice)\ncreate_test_value_tensor_fp64(TestSetValueItemSlice2)\ncreate_test_value_tensor_fp64(TestSetValueItemSlice3)\ncreate_test_value_tensor_fp64(TestSetValueItemSlice4)\n\n\ndef create_test_value_tensor_bool(parent):\n class TestValueInt(parent):\n def set_dtype(self):\n self.dtype = \"bool\"\n\n def _call_setitem(self, x):\n value = paddle.full(shape=[1], fill_value=False, dtype=self.dtype)\n x[0, 1] = value\n\n def _get_answer(self):\n self.data[0, 1] = False\n\n cls_name = \"{0}_{1}\".format(parent.__name__, \"ValueTensorBool\")\n TestValueInt.__name__ = cls_name\n globals()[cls_name] = TestValueInt\n\n\ncreate_test_value_tensor_bool(TestSetValueItemInt)\ncreate_test_value_tensor_bool(TestSetValueItemSlice)\ncreate_test_value_tensor_bool(TestSetValueItemSlice2)\ncreate_test_value_tensor_bool(TestSetValueItemSlice3)\ncreate_test_value_tensor_bool(TestSetValueItemSlice4)\n\n\n# 3. 
Test different shape of value\nclass TestSetValueValueShape1(TestSetValueApi):\n def set_value(self):\n self.value = np.array([3, 4, 5, 6]) # shape is (4,)\n\n def _call_setitem(self, x):\n x[0] = self.value\n\n def _get_answer(self):\n self.data[0] = self.value\n\n\nclass TestSetValueValueShape2(TestSetValueApi):\n def set_value(self):\n self.value = np.array([[3, 4, 5, 6]]) # shape is (1,4)\n\n def _call_setitem(self, x):\n x[0:1] = self.value\n\n def _get_answer(self):\n self.data[0:1] = self.value\n\n\nclass TestSetValueValueShape3(TestSetValueApi):\n def set_value(self):\n self.value = np.array(\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]) # shape is (3,4)\n\n def _call_setitem(self, x):\n x[0] = self.value\n\n def _get_answer(self):\n self.data[0] = self.value\n\n\nclass TestSetValueValueShape4(TestSetValueApi):\n def set_value(self):\n self.value = np.array(\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]).astype(\n self.dtype) # shape is (3,4)\n\n def _call_setitem(self, x):\n x[0] = paddle.assign(self.value) # x is Paddle.Tensor\n\n def _get_answer(self):\n self.data[0] = self.value\n\n\nclass TestSetValueValueShape5(TestSetValueApi):\n def set_value(self):\n self.value = np.array([3, 3, 3]).astype(self.dtype)\n\n def set_shape(self):\n self.shape = [3, 4]\n\n def _call_setitem(self, x):\n x[:, 0] = paddle.assign(self.value) # x is Paddle.Tensor\n\n def _get_answer(self):\n self.data[:, 0] = self.value\n\n\n# 4. Test error\nclass TestError(TestSetValueBase):\n def _value_type_error(self):\n with self.assertRaisesRegexp(\n TypeError,\n \"Only support to assign an integer, float, numpy.ndarray or paddle.Tensor\"\n ):\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n value = [1]\n x[0] = value\n\n def _dtype_error(self):\n with self.assertRaisesRegexp(\n TypeError,\n \"When assign a numpy.ndarray, integer or float to a paddle.Tensor, \"\n ):\n y = paddle.ones(shape=self.shape, dtype=\"float16\")\n y[0] = 1\n\n def _step_error(self):\n with self.assertRaisesRegexp(ValueError, \"step can not be 0\"):\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n x[0:1:0] = self.value\n\n def _ellipsis_error(self):\n with self.assertRaisesRegexp(\n IndexError, \"An index can only have a single ellipsis\"):\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n x[..., ...] 
= self.value\n with self.assertRaisesRegexp(ValueError, \"the start or end is None\"):\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n one = paddle.ones([1])\n x[::one] = self.value\n\n def _bool_list_error(self):\n with self.assertRaises(TypeError):\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n x[[True, False, 0]] = 0\n\n with self.assertRaises(IndexError):\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n x[[True, False], [True, False]] = 0\n\n def _bool_tensor_error(self):\n with self.assertRaises(IndexError):\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n idx = paddle.assign([True, False, True])\n x[idx] = 0\n\n def _broadcast_mismatch(self):\n program = paddle.static.Program()\n with paddle.static.program_guard(program):\n x = paddle.ones(shape=self.shape, dtype=self.dtype)\n value = np.array([3, 4, 5, 6, 7])\n x[0] = value\n exe = paddle.static.Executor(paddle.CPUPlace())\n with self.assertRaises(ValueError):\n exe.run(program)\n\n def test_error(self):\n paddle.enable_static()\n with paddle.static.program_guard(self.program):\n self._value_type_error()\n self._dtype_error()\n self._step_error()\n self._bool_list_error()\n self._bool_tensor_error()\n self._broadcast_mismatch()\n\n\n# 5. Test backward\n\n\nclass Model(paddle.nn.Layer):\n def __init__(self):\n super(Model, self).__init__()\n self.conv = paddle.nn.Conv2D(12, 12, 3)\n\n def forward(self, x, y):\n x = self.conv(x)\n y = self.conv(y)\n var = y.flatten()\n\n x[0, :, 0, 0] = var\n loss = paddle.mean(x)\n return loss, var, x\n\n\nclass TestBackward(unittest.TestCase):\n def test_static(self):\n paddle.enable_static()\n main_program = paddle.static.Program()\n startup_program = paddle.static.Program()\n\n x_np = np.random.random(size=(4, 4)).astype('float32')\n y_np = np.random.random(size=(4, 4)).astype('float32')\n label_np = np.random.randint(2, size=(4, 1)).astype('int64')\n\n with paddle.static.program_guard(main_program, startup_program):\n x = paddle.static.data(name=\"x\", shape=[4, 4], dtype='float32')\n y = paddle.static.data(name=\"y\", shape=[4, 4], dtype='float32')\n\n label = paddle.static.data(\n name=\"label\", shape=[4, 1], dtype='int64')\n\n z = paddle.add(x, y)\n var = y[0, :]\n z[0, :] = var\n\n prediction = paddle.static.nn.fc(x=z, size=2, activation='softmax')\n\n cost = paddle.nn.functional.cross_entropy(\n input=prediction, label=label)\n loss = paddle.mean(cost)\n sgd = paddle.optimizer.SGD(learning_rate=0.01)\n sgd.minimize(loss)\n\n exe = paddle.static.Executor(paddle.CPUPlace())\n exe.run(startup_program)\n\n var_grad, z_grad = exe.run(\n main_program,\n feed={\"x\": x_np,\n \"y\": y_np,\n \"label\": label_np},\n fetch_list=[var.name + \"@GRAD\", z.name + \"@GRAD\"])\n\n self.assertTrue((var_grad == z_grad[0, :]).all())\n paddle.disable_static()\n\n def func_test_dynamic(self):\n model = Model()\n x = paddle.ones([1, 12, 3, 3]).astype(\"float32\")\n y = paddle.ones([1, 12, 3, 3]).astype(\"float32\")\n loss, var, x = model(x, y)\n loss.backward()\n\n self.assertTrue(var.grad.shape == x.grad[0, :, 0, 0].shape)\n # \n # TODO(pangyoki) add inplace and delete if\n if not _in_eager_mode():\n self.assertTrue((0 == x.grad[0, :, 0, 0]).all())\n\n def test_dynamic(self):\n with _test_eager_guard():\n self.func_test_dynamic()\n self.func_test_dynamic()\n\n\nclass TestGradientTruncated(unittest.TestCase):\n def func_test_consistent_with_competitor(self):\n paddle.disable_static()\n\n def set_value(t, value):\n a = t * t\n a[0, 1] = value\n y = a * a\n return y.sum()\n\n # case 
1\n array = np.arange(\n 1, 1 + 2 * 3 * 4, dtype=\"float32\").reshape([1, 2, 1, 3, 1, 4])\n value = np.arange(100, 104, dtype=\"float32\").reshape(1, 4)\n\n inps = paddle.to_tensor(array, stop_gradient=False)\n value = paddle.to_tensor(value, stop_gradient=False)\n\n loss = set_value(inps, value)\n loss.backward()\n\n value_grad = np.array([[600., 606., 612., 618.]])\n input_grad = np.array(\n [[[[[[4., 32., 108., 256.]], [[500., 864., 1372., 2048.]],\n [[2916., 4000., 5324., 6912.]]]],\n [[[[0., 0., 0., 0.]], [[0., 0., 0., 0.]], [[0., 0., 0., 0.]]]]]])\n self.assertTrue(\n np.array_equal(inps.grad.numpy(), input_grad),\n msg=\"The gradient of value should be \\n{},\\n but reveived {}\".\n format(input_grad, inps.grad.numpy()))\n self.assertTrue(\n np.array_equal(value.grad.numpy(), value_grad),\n msg=\"The gradient of input should be \\n{},\\n but reveived {}\".\n format(value_grad, value.grad.numpy()))\n\n # case 2\n array = np.arange(1, 2 * 3 * 4 + 1, dtype=\"float32\").reshape([4, 2, 3])\n value = np.arange(100, 100 + 1, dtype=\"float32\")\n\n inps2 = paddle.to_tensor(array, stop_gradient=False)\n value2 = paddle.to_tensor(value, stop_gradient=False)\n\n loss = set_value(inps2, value2)\n loss.backward()\n\n value_grad2 = np.array([600.])\n input_grad2 = np.array(\n [[[4., 32., 108.], [0., 0., 0.]], [[1372., 2048., 2916.],\n [4000., 5324., 6912.]],\n [[8788., 10976., 13500.], [16384., 19652., 23328.]],\n [[27436., 32000., 37044.], [42592., 48668., 55296.]]])\n self.assertTrue(\n np.array_equal(inps2.grad.numpy(), input_grad2),\n msg=\"The gradient of value should be \\n{},\\n but reveived {}\".\n format(input_grad, inps2.grad.numpy()))\n self.assertTrue(\n np.array_equal(value2.grad.numpy(), value_grad2),\n msg=\"The gradient of input should be \\n{},\\n but reveived {}\".\n format(value_grad, value2.grad.numpy()))\n\n # case 3\n def set_value3(t, value):\n a = t * t\n a[0, :, 0, :] = value\n y = a * a\n return y.sum()\n\n array = np.arange(\n 1, 1 + 2 * 3 * 4, dtype=\"float32\").reshape([4, 3, 1, 1, 2, 1])\n value = np.arange(100, 100 + 2, dtype=\"float32\").reshape(1, 2, 1)\n\n inps = paddle.to_tensor(array, stop_gradient=False)\n value = paddle.to_tensor(value, stop_gradient=False)\n\n loss = set_value3(inps, value)\n loss.backward()\n\n value_grad = np.array([[[600.], [606.]]])\n input_grad = np.array(\n [[[[[[0.], [0.]]]], [[[[0.], [0.]]]], [[[[0.], [0.]]]]],\n [[[[[1372.], [2048.]]]], [[[[2916.], [4000.]]]],\n [[[[5324.], [6912.]]]]], [[[[[8788.], [10976.]]]], [[[[13500.],\n [16384.]]]],\n [[[[19652.], [23328.]]]]],\n [[[[[27436.], [32000.]]]], [[[[37044.], [42592.]]]],\n [[[[48668.], [55296.]]]]]])\n self.assertTrue(\n np.array_equal(inps.grad.numpy(), input_grad),\n msg=\"The gradient of value should be \\n{},\\n but reveived {}\".\n format(input_grad, inps.grad.numpy()))\n self.assertTrue(\n np.array_equal(value.grad.numpy(), value_grad),\n msg=\"The gradient of input should be \\n{},\\n but reveived {}\".\n format(value_grad, value.grad.numpy()))\n\n #case 4: step >0\n def set_value4(t, value):\n a = t * t\n a[0, :, 0, ::3] = value\n y = a * a\n return y.sum()\n\n array = np.arange(\n 1, 1 + 2 * 3 * 4, dtype=\"float32\").reshape([2, 3, 1, 4, 1])\n value = np.arange(100, 100 + 2, dtype=\"float32\").reshape(1, 2, 1)\n\n inps = paddle.to_tensor(array, stop_gradient=False)\n value = paddle.to_tensor(value, stop_gradient=False)\n\n loss = set_value4(inps, value)\n loss.backward()\n\n value_grad = np.array([[[600.], [606.]]])\n input_grad = np.array([[[[[0.], [32.], [108.],\n 
[0.]]], [[[0.], [864.], [1372.], [0.]]],\n [[[0.], [4000.], [5324.], [0.]]]],\n [[[[8788.], [10976.], [13500.], [16384.]]],\n [[[19652.], [23328.], [27436.], [32000.]]],\n [[[37044.], [42592.], [48668.], [55296.]]]]])\n self.assertTrue(\n np.array_equal(inps.grad.numpy(), input_grad),\n msg=\"The gradient of value should be \\n{},\\n but reveived {}\".\n format(input_grad, inps.grad.numpy()))\n self.assertTrue(\n np.array_equal(value.grad.numpy(), value_grad),\n msg=\"The gradient of input should be \\n{},\\n but reveived {}\".\n format(value_grad, value.grad.numpy()))\n\n # case 5:a[0].shape==value.shape\n def set_value5(t, value):\n a = t * t\n a[0] = value\n y = a * a\n return y.sum()\n\n array = np.arange(1, 1 + 2 * 3 * 4, dtype=\"float32\").reshape([2, 3, 4])\n value = np.arange(100, 100 + 12, dtype=\"float32\").reshape(3, 4)\n\n inps = paddle.to_tensor(array, stop_gradient=False)\n value = paddle.to_tensor(value, stop_gradient=False)\n\n loss = set_value5(inps, value)\n loss.backward()\n\n value_grad = np.array([[200., 202., 204., 206.],\n [208., 210., 212., 214.],\n [216., 218., 220., 222.]])\n input_grad = np.array([[[0., 0., 0., 0.], [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n [[8788., 10976., 13500., 16384.],\n [19652., 23328., 27436., 32000.],\n [37044., 42592., 48668., 55296.]]])\n self.assertTrue(\n np.array_equal(inps.grad.numpy(), input_grad),\n msg=\"The gradient of value should be \\n{},\\n but reveived {}\".\n format(input_grad, inps.grad.numpy()))\n self.assertTrue(\n np.array_equal(value.grad.numpy(), value_grad),\n msg=\"The gradient of input should be \\n{},\\n but reveived {}\".\n format(value_grad, value.grad.numpy()))\n\n # case 6: pass stop_gradient from value to x\n x = paddle.zeros([8, 8], dtype='float32')\n value = paddle.to_tensor([10], dtype='float32', stop_gradient=False)\n\n self.assertTrue(x.stop_gradient)\n self.assertTrue(x.is_leaf)\n\n x[0, :] = value\n\n self.assertTrue(~x.stop_gradient)\n self.assertTrue(~x.is_leaf)\n\n def test_consistent_with_competitor(self):\n with _test_eager_guard():\n self.func_test_consistent_with_competitor()\n self.func_test_consistent_with_competitor()\n\n def test_static_graph(self):\n paddle.enable_static()\n\n to_string = lambda x, i, : x + '_' + str(i)\n numel = lambda input_shape: reduce(lambda x, y: x * y, input_shape)\n\n def op1(x):\n value = paddle.fluid.layers.fill_constant([1], \"float32\", 1)\n # test stop_gradient \n value.stop_gradient = True\n x.stop_gradient = False\n start = paddle.fluid.layers.fill_constant(\n [1], \"int32\", 5, force_cpu=True)\n end = paddle.fluid.layers.fill_constant(\n [1], \"int32\", 0, force_cpu=True)\n step = paddle.fluid.layers.fill_constant(\n [1], \"int32\", -2, force_cpu=True)\n\n inputs = {\n 'Input': x,\n 'ValueTensor': value,\n 'StartsTensorList': [start, ],\n 'EndsTensorList': [end, ],\n 'StepsTensorList': [step, ]\n }\n\n helper = LayerHelper(\"set_value\")\n y = helper.create_variable_for_type_inference(dtype=x.dtype)\n\n helper.append_op(\n type=\"set_value\",\n inputs=inputs,\n outputs={'Out': y},\n attrs={'axes': [0]})\n\n return y, value\n\n def op2(x):\n value = paddle.fluid.layers.fill_constant([1, 3, 2], \"float32\", 1)\n # test stop_gradient \n value.stop_gradient = False\n x.stop_gradient = False\n attrs = {\n 'axes': [0],\n 'starts': [6],\n 'ends': [0],\n 'steps': [-4],\n 'decrease_axes': [],\n 'none_axes': [],\n 'dtype': paddle.float32\n }\n inputs = {'Input': x, 'ValueTensor': value}\n\n helper = LayerHelper(\"set_value\")\n y = 
helper.create_variable_for_type_inference(dtype=x.dtype)\n\n helper.append_op(\n type=\"set_value\",\n inputs=inputs,\n outputs={'Out': y},\n attrs=attrs)\n\n return y, value\n\n def op3(x):\n value = paddle.fluid.layers.fill_constant([1], \"float32\", 1)\n x.stop_gradient = True\n value.stop_gradient = False\n start = paddle.fluid.layers.fill_constant(\n [1], \"int32\", 0, force_cpu=True)\n end = paddle.fluid.layers.fill_constant(\n [1], \"int32\", 5, force_cpu=True)\n step = paddle.fluid.layers.fill_constant(\n [1], \"int32\", 3, force_cpu=True)\n\n inputs = {\n 'Input': x,\n 'ValueTensor': value,\n 'StartsTensorList': [start, ],\n 'EndsTensorList': [end, ],\n 'StepsTensorList': [step, ]\n }\n\n helper = LayerHelper(\"set_value\")\n y = helper.create_variable_for_type_inference(dtype=x.dtype)\n\n helper.append_op(\n type=\"set_value\",\n inputs=inputs,\n outputs={'Out': y},\n attrs={'axes': [0]})\n\n return y, value\n\n def set_value(array, i, op):\n name_x = to_string('x', i)\n x = paddle.static.data(\n name=name_x, shape=array.shape, dtype='float32')\n\n # set_value_op in __get/setitem__ is an inplace operation. \n # When `input.stop_gradient = True` and `value.stop_gradient = False`, \n # set_value_grad_op will not be run during backward.\n y, value = op(x)\n\n y2 = y + 1\n loss = paddle.fluid.layers.reduce_sum(y2)\n sgd = paddle.optimizer.Adam()\n sgd.minimize(loss)\n place = paddle.fluid.CPUPlace(\n ) if not paddle.fluid.core.is_compiled_with_cuda(\n ) else paddle.fluid.CUDAPlace(0)\n\n prog = paddle.static.default_main_program()\n exe = paddle.static.Executor(place)\n exe.run(paddle.static.default_startup_program())\n fetch_list = []\n if not x.stop_gradient:\n fetch_list.append(x.grad_name)\n if not value.stop_gradient:\n fetch_list.append(value.grad_name)\n out = exe.run(prog, feed={x.name: array}, fetch_list=fetch_list)\n return out\n\n input_shape = [7, 6, 5, 4, 3, 2]\n\n array = np.arange(\n 0, numel(input_shape), dtype=\"float32\").reshape(input_shape)\n\n for i in range(len(input_shape)):\n program = paddle.static.Program()\n with paddle.static.program_guard(program):\n out1 = set_value(array, i, op1)\n self.assertTrue((out1[0][5:0:-2] == 0).all())\n\n if len(array.shape) > 2:\n program2 = paddle.static.Program()\n with paddle.static.program_guard(program2):\n out2 = set_value(array, i, op2)\n self.assertTrue((out2[0][6:0:-4] == 0).all())\n\n program3 = paddle.static.Program()\n with paddle.static.program_guard(program3):\n out3 = set_value(array, i, op3)\n self.assertTrue((numel(out1[0][0:5:3].shape) == out3[0]).all())\n\n array = array[0]\n paddle.disable_static()\n\n\nclass TestSetValueInplace(unittest.TestCase):\n def test_inplace(self):\n paddle.disable_static()\n with paddle.fluid.dygraph.guard():\n paddle.seed(100)\n a = paddle.rand(shape=[1, 4])\n a.stop_gradient = False\n b = a[:]\n c = b\n b[paddle.to_tensor(0)] = 1.0\n\n self.assertTrue(id(b) == id(c))\n self.assertTrue(np.array_equal(b.numpy(), c.numpy()))\n self.assertEqual(b.inplace_version, 1)\n\n paddle.enable_static()\n\n\nclass TestSetValueInplaceLeafVar(unittest.TestCase):\n def test_inplace_var_become_leaf_var(self):\n paddle.disable_static()\n\n a_grad_1, b_grad_1, a_grad_2, b_grad_2 = 0, 1, 2, 3\n with paddle.fluid.dygraph.guard():\n paddle.seed(100)\n a = paddle.rand(shape=[1, 4])\n b = paddle.rand(shape=[1, 4])\n a.stop_gradient = False\n b.stop_gradient = False\n c = a / b\n c.sum().backward()\n a_grad_1 = a.grad.numpy()\n b_grad_1 = b.grad.numpy()\n\n with paddle.fluid.dygraph.guard():\n 
paddle.seed(100)\n a = paddle.rand(shape=[1, 4])\n b = paddle.rand(shape=[1, 4])\n a.stop_gradient = False\n b.stop_gradient = False\n c = a / b\n d = paddle.zeros((4, 4))\n self.assertTrue(d.stop_gradient)\n d[0, :] = c\n self.assertFalse(d.stop_gradient)\n d[0, :].sum().backward()\n a_grad_2 = a.grad.numpy()\n b_grad_2 = b.grad.numpy()\n\n self.assertTrue(np.array_equal(a_grad_1, a_grad_2))\n self.assertTrue(np.array_equal(b_grad_1, b_grad_2))\n paddle.enable_static()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\n\nimport paddle\nimport paddle.nn as nn\nfrom paddle.fluid.framework import _test_eager_guard, _in_eager_mode\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\n\n\nclass SimpleNet(nn.Layer):\n def __init__(self, in_size, out_size):\n super(SimpleNet, self).__init__()\n self.linear1 = nn.Linear(in_size, in_size)\n self.linear2 = nn.Linear(in_size, out_size)\n\n def forward(self, x, hook=None, register=False, remove=False):\n ret1 = self.linear1(x)\n if hook is not None:\n if register:\n h = ret1.register_hook(hook)\n if remove:\n h.remove()\n ret2 = self.linear2(ret1)\n out = paddle.mean(ret2, axis=-1)\n return ret1, out\n\n\nclass SimpleNetForStatic(nn.Layer):\n def __init__(self, in_size, out_size):\n super(SimpleNetForStatic, self).__init__()\n self.linear1 = nn.Linear(in_size, in_size)\n self.linear2 = nn.Linear(in_size, out_size)\n\n def forward(self, x):\n ret1 = self.linear1(x)\n ret1.register_hook(lambda grad: grad * 2)\n\n ret2 = self.linear2(ret1)\n out = paddle.mean(ret2, axis=-1)\n return out\n\n\nclass TestTensorRegisterHook(unittest.TestCase):\n def setUp(self):\n self.seed = 2021\n self.in_size = 10\n self.out_size = 10\n self.batch_size = 4\n self.devices = [\"cpu\"]\n if paddle.is_compiled_with_cuda():\n self.devices.append(\"gpu\")\n\n def func_hook_for_interior_var(self):\n def run_double_hook_for_interior_var(double_hook, removed=False):\n for device in self.devices:\n paddle.set_device(device)\n\n x = paddle.to_tensor([0., 1., 2., 3.])\n y = paddle.to_tensor([4., 5., 6., 7.])\n x.stop_gradient = False\n y.stop_gradient = False\n\n w = x + y\n w.stop_gradient = False\n helper = w.register_hook(double_hook)\n\n z = paddle.to_tensor([1., 2., 3., 4.])\n z.stop_gradient = False\n\n o = z.matmul(w)\n\n # remove hook before backward\n if removed:\n helper.remove()\n\n o.backward()\n\n # z.grad is not affected\n self.assertTrue(np.array_equal(z.grad.numpy(), w.numpy()))\n # w.grad is not changed by hook\n self.assertTrue(np.array_equal(w.grad.numpy(), z.numpy()))\n # x.grad and y.grad are changed if run hook\n self.assertTrue(\n np.array_equal(x.grad.numpy(),\n z.numpy() * 2 if not removed else z.numpy()))\n self.assertTrue(\n np.array_equal(y.grad.numpy(),\n z.numpy() * 2 if not removed else z.numpy()))\n\n def run_print_hook_for_interior_var(print_hook, removed=False):\n for device in self.devices:\n paddle.set_device(device)\n\n x = paddle.to_tensor([0., 1., 2., 3.])\n y = paddle.to_tensor([4., 5., 6., 7.])\n x.stop_gradient = False\n y.stop_gradient = False\n\n w = x + y\n w.stop_gradient = False\n helper = w.register_hook(print_hook)\n\n z = paddle.to_tensor([1., 2., 3., 4.])\n z.stop_gradient = False\n\n o = z.matmul(w)\n\n # remove hook before backward\n if removed:\n helper.remove()\n\n o.backward()\n\n # all grads are not affected\n 
self.assertTrue(np.array_equal(z.grad.numpy(), w.numpy()))\n self.assertTrue(np.array_equal(w.grad.numpy(), z.numpy()))\n self.assertTrue(np.array_equal(x.grad.numpy(), z.numpy()))\n self.assertTrue(np.array_equal(y.grad.numpy(), z.numpy()))\n\n def double_hook(grad):\n grad = grad * 2\n print(grad)\n return grad\n\n def print_hook(grad):\n print(grad)\n\n # register hook\n run_double_hook_for_interior_var(double_hook)\n # register hook and removed\n run_double_hook_for_interior_var(double_hook, removed=True)\n\n # register hook\n run_double_hook_for_interior_var(lambda grad: grad * 2)\n # register hook and removed\n run_double_hook_for_interior_var(lambda grad: grad * 2, removed=True)\n\n # register hook\n run_print_hook_for_interior_var(print_hook)\n # register hook and removed\n run_print_hook_for_interior_var(print_hook, removed=True)\n\n def test_hook_for_interior_var(self):\n with _test_eager_guard():\n self.func_hook_for_interior_var()\n self.func_hook_for_interior_var()\n\n def func_hook_for_leaf_var(self):\n def run_double_hook_for_leaf_var(double_hook, removed=False):\n for device in self.devices:\n paddle.set_device(device)\n\n x = paddle.to_tensor([0., 1., 2., 3.])\n y = paddle.to_tensor([4., 5., 6., 7.])\n x.stop_gradient = False\n y.stop_gradient = False\n helper = y.register_hook(double_hook)\n\n w = x + y\n w.stop_gradient = False\n\n z = paddle.to_tensor([1., 2., 3., 4.])\n z.stop_gradient = False\n\n o = z.matmul(w)\n\n # remove hook before backward\n if removed:\n helper.remove()\n\n o.backward()\n\n # z.grad, w.grad, x.grad is not affected\n self.assertTrue(np.array_equal(z.grad.numpy(), w.numpy()))\n self.assertTrue(np.array_equal(w.grad.numpy(), z.numpy()))\n self.assertTrue(np.array_equal(x.grad.numpy(), z.numpy()))\n # y.grad are changed if run hook\n self.assertTrue(\n np.array_equal(y.grad.numpy(),\n z.numpy() * 2 if not removed else z.numpy()))\n\n # register hook\n run_double_hook_for_leaf_var(lambda grad: grad * 2)\n # register hook and removed\n run_double_hook_for_leaf_var(lambda grad: grad * 2, removed=True)\n\n def test_hook_for_leaf_var(self):\n with _test_eager_guard():\n self.func_hook_for_leaf_var()\n self.func_hook_for_leaf_var()\n\n def func_hook_for_accumulated_grad_interior_var(self):\n def run_double_hook_for_accumulated_grad_interior_var(double_hook,\n removed=False):\n for device in self.devices:\n paddle.set_device(device)\n\n a = paddle.to_tensor([0., 1., 1., 2.])\n b = paddle.to_tensor([0., 0., 1., 2.])\n a.stop_gradient = False\n b.stop_gradient = False\n\n helper1 = a.register_hook(double_hook)\n\n x = a + b\n x.stop_gradient = False\n\n helper2 = x.register_hook(double_hook)\n\n y = paddle.to_tensor([4., 5., 6., 7.])\n z = paddle.to_tensor([1., 2., 3., 4.])\n y.stop_gradient = False\n z.stop_gradient = False\n\n o1 = x + y\n o2 = x + z\n o1.stop_gradient = False\n o2.stop_gradient = False\n\n o = o1.matmul(o2)\n\n # remove hook before backward\n if removed:\n helper1.remove()\n helper2.remove()\n\n o.backward()\n\n base_grad = np.array([5., 9., 13., 19.])\n # x.grad is not changed\n self.assertTrue(np.array_equal(x.grad.numpy(), base_grad))\n # b.grad is changed by x.hook\n self.assertTrue(\n np.array_equal(b.grad.numpy(), base_grad * 2\n if not removed else base_grad))\n # a.grad is changed by x.hook and a.hook\n self.assertTrue(\n np.array_equal(a.grad.numpy(), base_grad * 4\n if not removed else base_grad))\n\n # register hook\n run_double_hook_for_accumulated_grad_interior_var(lambda grad: grad * 2)\n # register hook and removed\n 
run_double_hook_for_accumulated_grad_interior_var(\n lambda grad: grad * 2, removed=True)\n\n def test_hook_for_accumulated_grad_interior_var(self):\n with _test_eager_guard():\n self.func_hook_for_accumulated_grad_interior_var()\n self.func_hook_for_accumulated_grad_interior_var()\n\n def func_hook_for_accumulated_grad_leaf_var(self):\n def run_double_hook_for_accumulated_grad_leaf_var(double_hook,\n removed=False):\n for device in self.devices:\n paddle.set_device(device)\n\n x = paddle.to_tensor([0., 1., 2., 4.])\n x.stop_gradient = False\n\n helper = x.register_hook(double_hook)\n\n y = paddle.to_tensor([4., 5., 6., 7.])\n z = paddle.to_tensor([1., 2., 3., 4.])\n y.stop_gradient = False\n z.stop_gradient = False\n\n o1 = x + y\n o2 = x + z\n o1.stop_gradient = False\n o2.stop_gradient = False\n\n o = o1.matmul(o2)\n\n # remove hook before backward\n if removed:\n helper.remove()\n\n o.backward()\n\n base_grad = np.array([5., 9., 13., 19.])\n # x.grad is changed by x.hook\n self.assertTrue(\n np.array_equal(x.grad.numpy(), base_grad * 2\n if not removed else base_grad))\n\n # register hook\n run_double_hook_for_accumulated_grad_leaf_var(lambda grad: grad * 2)\n # register hook and removed\n run_double_hook_for_accumulated_grad_leaf_var(\n lambda grad: grad * 2, removed=True)\n\n def test_hook_for_accumulated_grad_leaf_var(self):\n with _test_eager_guard():\n self.func_hook_for_accumulated_grad_leaf_var()\n self.func_hook_for_accumulated_grad_leaf_var()\n\n def func_hook_in_model(self):\n def run_double_hook_in_model(data,\n label,\n hook=None,\n register=False,\n remove=False):\n for device in self.devices:\n paddle.seed(self.seed)\n paddle.set_device(device)\n\n net = SimpleNet(self.in_size, self.out_size)\n loss_fn = nn.MSELoss()\n\n data = paddle.to_tensor(data)\n label = paddle.to_tensor(label)\n\n ret1, out = net(data, hook, register, remove)\n loss = loss_fn(out, label)\n loss.backward()\n\n return (ret1.grad.numpy(), net.linear1.weight.grad.numpy(),\n net.linear1.bias.grad.numpy())\n\n data = np.random.uniform(\n size=[self.batch_size, self.in_size]).astype('float32')\n label = np.random.uniform(size=[self.batch_size, 1]).astype('float32')\n\n # get original value\n ret1_grad, linear1_w_grad, linear1_b_grad = run_double_hook_in_model(\n data, label)\n # get value changed by hook\n ret1_grad_hook, linear1_w_grad_hook, linear1_b_grad_hook = run_double_hook_in_model(\n data, label, lambda grad: grad * 2, True)\n # get value after removing hook\n ret1_grad_rm, linear1_w_grad_rm, linear1_b_grad_rm = run_double_hook_in_model(\n data, label, lambda grad: grad * 2, True, True)\n\n # compare original value and with hook\n self.assertTrue(np.array_equal(ret1_grad, ret1_grad_hook))\n self.assertTrue(np.array_equal(linear1_w_grad * 2, linear1_w_grad_hook))\n self.assertTrue(np.array_equal(linear1_b_grad * 2, linear1_b_grad_hook))\n\n # compare original value and remove hook\n self.assertTrue(np.array_equal(ret1_grad, ret1_grad_rm))\n self.assertTrue(np.array_equal(linear1_w_grad, linear1_w_grad_rm))\n self.assertTrue(np.array_equal(linear1_b_grad, linear1_b_grad_rm))\n\n def test_func_hook_in_model(self):\n with _test_eager_guard():\n self.func_hook_in_model()\n self.func_hook_in_model()\n\n def func_multiple_hooks_for_interior_var(self):\n def run_multiple_hooks_for_interior_var(device,\n hooks,\n remove1=False,\n remove2=False,\n remove3=False):\n paddle.set_device(device)\n\n x = paddle.to_tensor([0., 1., 2., 3.])\n y = paddle.to_tensor([4., 5., 6., 7.])\n x.stop_gradient = False\n 
y.stop_gradient = False\n\n w = x + y\n w.stop_gradient = False\n\n helpers = []\n for hook in hooks:\n helper = w.register_hook(hook)\n helpers.append(helper)\n\n z = paddle.to_tensor([1., 2., 3., 4.])\n z.stop_gradient = False\n\n o = z.matmul(w)\n\n if remove1:\n helpers[0].remove()\n if remove2:\n helpers[1].remove()\n if remove3:\n helpers[2].remove()\n\n o.backward()\n\n return z.numpy(), w.grad.numpy(), x.grad.numpy(), y.grad.numpy()\n\n def double_hook(grad):\n return grad * 2\n\n hooks = [double_hook, double_hook, double_hook]\n\n for device in self.devices:\n z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var(\n device, hooks)\n\n self.assertTrue(np.array_equal(w_grad, z))\n self.assertTrue(np.array_equal(x_grad, z * 8))\n self.assertTrue(np.array_equal(y_grad, z * 8))\n\n z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var(\n device, hooks, remove1=True)\n\n self.assertTrue(np.array_equal(w_grad, z))\n self.assertTrue(np.array_equal(x_grad, z * 4))\n self.assertTrue(np.array_equal(y_grad, z * 4))\n\n z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var(\n device, hooks, remove2=True)\n\n self.assertTrue(np.array_equal(w_grad, z))\n self.assertTrue(np.array_equal(x_grad, z * 4))\n self.assertTrue(np.array_equal(y_grad, z * 4))\n\n z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var(\n device, hooks, remove3=True)\n\n self.assertTrue(np.array_equal(w_grad, z))\n self.assertTrue(np.array_equal(x_grad, z * 4))\n self.assertTrue(np.array_equal(y_grad, z * 4))\n\n z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var(\n device, hooks, remove1=True, remove2=True, remove3=True)\n\n self.assertTrue(np.array_equal(w_grad, z))\n self.assertTrue(np.array_equal(x_grad, z))\n self.assertTrue(np.array_equal(y_grad, z))\n\n def test_multiple_hooks_for_interior_var(self):\n with _test_eager_guard():\n self.func_multiple_hooks_for_interior_var()\n self.func_multiple_hooks_for_interior_var()\n\n def func_hook_in_double_grad(self):\n def double_print_hook(grad):\n grad = grad * 2\n print(grad)\n return grad\n\n x = paddle.ones(shape=[1], dtype='float32')\n x.stop_gradient = False\n\n # hook only works in backward\n # for forward var x, the x.grad generated in\n # paddle.grad will not deal with by hook\n x.register_hook(double_print_hook)\n\n y = x * x\n fluid.set_flags({'FLAGS_retain_grad_for_all_tensor': False})\n # Since y = x * x, dx = 2 * x\n dx = paddle.grad(\n outputs=[y], inputs=[x], create_graph=True, retain_graph=True)[0]\n fluid.set_flags({'FLAGS_retain_grad_for_all_tensor': True})\n\n z = y + dx\n self.assertTrue(x.grad is None)\n\n # If create_graph = True, the gradient of dx\n # would be backpropagated. 
Therefore,\n # z = x * x + dx = x * x + 2 * x, and\n # x.gradient() = 2 * x + 2 = 4.0\n # after changed by hook: 8.0\n\n # TODO(wuweilong): enable this case when DoubleGrad in eager mode is ready\n if core._in_eager_mode():\n pass\n else:\n z.backward()\n self.assertTrue(np.array_equal(x.grad.numpy(), np.array([8.])))\n\n def test_hook_in_double_grad(self):\n with _test_eager_guard():\n self.func_hook_in_double_grad()\n self.func_hook_in_double_grad()\n\n def func_remove_one_hook_multiple_times(self):\n for device in self.devices:\n paddle.set_device(device)\n\n x = paddle.to_tensor([1., 2., 3., 4.])\n x.stop_gradient = False\n\n h = x.register_hook(lambda grad: grad * 2)\n self.assertTrue(h.remove())\n self.assertFalse(h.remove())\n\n def test_remove_one_hook_multiple_times(self):\n with _test_eager_guard():\n self.func_remove_one_hook_multiple_times()\n self.func_remove_one_hook_multiple_times()\n\n def func_register_hook_for_stop_gradient_var(self):\n for device in self.devices:\n paddle.set_device(device)\n\n x = paddle.to_tensor([1., 2., 3., 4.])\n\n with self.assertRaises(RuntimeError):\n x.register_hook(lambda grad: grad * 2)\n\n def test_register_hook_for_stop_gradient_var(self):\n with _test_eager_guard():\n self.func_register_hook_for_stop_gradient_var()\n self.func_register_hook_for_stop_gradient_var()\n\n def test_register_hook_in_static_mode(self):\n paddle.enable_static()\n\n startup_program = paddle.static.Program()\n main_program = paddle.static.Program()\n with paddle.static.scope_guard(paddle.static.Scope()):\n with paddle.static.program_guard(main_program, startup_program):\n x = paddle.static.data(\n name='x', shape=[None, self.in_size], dtype='float32')\n\n net = SimpleNetForStatic(self.in_size, self.out_size)\n with self.assertRaises(AssertionError):\n out = net(x)\n\n paddle.disable_static()\n\n def func_register_hook_in_dy2static_mode(self):\n net = SimpleNetForStatic(self.in_size, self.out_size)\n jit_net = paddle.jit.to_static(\n net, input_spec=[paddle.static.InputSpec([None, self.in_size])])\n\n data = np.random.uniform(\n size=[self.batch_size, self.in_size]).astype('float32')\n data_t = paddle.to_tensor(data)\n\n with self.assertRaises(AssertionError):\n out = jit_net(data_t)\n\n def test_register_hook_in_dy2static_mode(self):\n with _test_eager_guard():\n self.func_register_hook_in_dy2static_mode()\n self.func_register_hook_in_dy2static_mode()\n\n\nHOOK_INIT_VALUE = 10\nHOOK_IS_CALLED = False\n\n\ndef global_void_hook():\n global HOOK_INIT_VALUE\n global HOOK_IS_CALLED\n HOOK_INIT_VALUE *= 2\n HOOK_IS_CALLED = True\n\n\nclass TestTensorRegisterBackwardHook(unittest.TestCase):\n def setUp(self):\n self.devices = [\"cpu\"]\n if paddle.is_compiled_with_cuda():\n self.devices.append(\"gpu\")\n\n def func_register_backward_hook(self):\n global HOOK_INIT_VALUE\n global HOOK_IS_CALLED\n for device in self.devices:\n x = paddle.to_tensor(5., stop_gradient=False)\n x._register_backward_hook(global_void_hook)\n for i in range(5):\n y = paddle.pow(x, 4.0)\n y.backward()\n\n self.assertEqual(HOOK_INIT_VALUE, 320)\n self.assertTrue(HOOK_IS_CALLED)\n\n # reset initial value\n HOOK_INIT_VALUE = 10\n HOOK_IS_CALLED = False\n\n def test_register_backward_hook(self):\n with _test_eager_guard():\n self.func_register_backward_hook()\n self.func_register_backward_hook()\n\n def func_register_backward_hook_for_interior_var(self):\n x = paddle.to_tensor(5., stop_gradient=False)\n y = paddle.pow(x, 4.0)\n\n with self.assertRaises(ValueError):\n 
y._register_backward_hook(global_void_hook)\n\n def test_register_backward_hook_for_interior_var(self):\n with _test_eager_guard():\n self.func_register_backward_hook_for_interior_var()\n self.func_register_backward_hook_for_interior_var()\n\n def func_register_backward_hook_for_var_without_gradient(self):\n x = paddle.to_tensor(5.)\n y = paddle.pow(x, 4.0)\n\n with self.assertRaises(ValueError):\n x._register_backward_hook(global_void_hook)\n\n def test_register_backward_hook_for_var_without_gradient(self):\n with _test_eager_guard():\n self.func_register_backward_hook_for_var_without_gradient()\n self.func_register_backward_hook_for_var_without_gradient()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport numpy as np\nfrom . import unique_name\nfrom . import core\nimport paddle\n\nMAX_INTEGER = 2**31 - 1\n\n\ndef is_list_tuple(index, contain_type):\n def _is_list_tuple(item):\n if not (isinstance(item, (list, tuple)) or type(item) == contain_type):\n return False\n if isinstance(item, (tuple, list)):\n for s in item:\n if not _is_list_tuple(s):\n return False\n return True\n\n if not isinstance(index, (tuple, list)):\n return False\n for s in index:\n if not _is_list_tuple(s):\n return False\n return True\n\n\ndef is_one_dim_list(index, contain_type):\n if isinstance(index, list):\n for i in index:\n if not isinstance(i, contain_type):\n return False\n else:\n return False\n return True\n\n\ndef get_list_index_shape(var_dims, index_dims):\n var_dims_size = len(var_dims)\n index_dims_size = len(index_dims)\n\n out_dims_size = var_dims_size - index_dims[0] + index_dims_size - 1\n\n out_dims_shape = [1] * out_dims_size\n\n out_dims_shape[:index_dims_size - 1] = index_dims[1:]\n\n out_dims_shape[index_dims_size - 1:] = var_dims[index_dims[0]:]\n return out_dims_shape\n\n\nclass SliceInfo:\n def __init__(self):\n self.pre_shape = None\n self.indexes = []\n self.dtype = None\n\n def update(self, index):\n if is_list_tuple(index, int) or isinstance(index, (\n paddle.fluid.Variable, np.ndarray)):\n # convert index to Tensor\n if not isinstance(index, paddle.fluid.Variable):\n index = paddle.assign(index)\n\n if self.dtype is None:\n self.dtype = index.dtype\n else:\n if index.dtype != self.dtype:\n raise IndexError(\n \"Data type of Tensor/List index should be same. 
The current data type is {}, but the previous data type is {}.\".\n format(index.dtype, self.dtype))\n\n self.indexes.append(index)\n\n if self.pre_shape is None:\n self.pre_shape = index.shape\n else:\n if self.pre_shape != index.shape:\n # broadcast \n cur_shape = paddle.broadcast_shape(self.pre_shape,\n index.shape)\n for i in range(len(self.indexes)):\n self.indexes[i] = paddle.broadcast_to(self.indexes[i],\n cur_shape)\n self.pre_shape = self.indexes[-1].shape\n else:\n raise ValueError(\n \"Index should be list/tuple of int or Tensor, but received {}.\".\n format(index))\n\n def shape_stride(self, shape):\n s = [1] * len(shape)\n for i in range(len(shape) - 2, -1, -1):\n s[i] = shape[i + 1] * s[i + 1]\n\n return s\n\n def numel(self, shape):\n return reduce(lambda x, y: x * y, shape)\n\n def get_offset_stride(self, tensor_shape):\n for index in self.indexes:\n if not isinstance(index, paddle.fluid.Variable):\n raise ValueError(\n \"only support list/tensor index, but received {}.\".format(\n type(index)))\n\n if len(self.indexes) <= len(tensor_shape) or len(self.indexes) == 1:\n shape = paddle.stack(self.indexes)\n axes = list(range(1, len(self.pre_shape) + 1)) + [0, ]\n\n else:\n raise ValueError(\n \"too many indices for tensor: tensor is {}-dimensional, but {} were indexed\".\n format(len(tensor_shape), self.pre_shape[0]))\n\n shape_transpose = paddle.transpose(shape, axes)\n return shape_transpose\n\n def get_item(self, tensor):\n shape_transpose = self.get_offset_stride(tensor.shape)\n index = paddle.assign(shape_transpose)\n return paddle.gather_nd(tensor, index)\n\n def set_item(self, tensor_origin, value):\n\n if not isinstance(value, paddle.fluid.Variable):\n value = paddle.assign(value)\n tensor_type = None\n\n if tensor_origin.dtype in [\n core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64\n ]:\n tensor = tensor_origin\n else:\n tensor_type = tensor_origin.dtype\n tensor = tensor_origin.astype(core.VarDesc.VarType.FP32)\n\n if value.dtype != tensor.dtype:\n value = value.astype(tensor.dtype)\n\n shape_transpose = self.get_offset_stride(tensor_origin.shape)\n index = paddle.assign(shape_transpose)\n\n gather_tensor_shape = get_list_index_shape(\n tensor.shape, [len(self.indexes), ] + list(self.indexes[-1].shape))\n\n value_dims_bd = [1, ] * len(gather_tensor_shape)\n value_dims_bd[-len(value.shape):] = list(value.shape)\n\n for i in range(len(gather_tensor_shape)):\n if not (value_dims_bd[i] == gather_tensor_shape[i] or\n value_dims_bd[i] == 1):\n raise ValueError(\"{} can not broadcast into {}\".format(\n value.shape, gather_tensor_shape))\n\n value_broadcast = paddle.broadcast_to(value, gather_tensor_shape)\n\n value_1d = value_broadcast.reshape([-1] + gather_tensor_shape[len(\n index.shape) - 1:])\n\n index_1d = index.reshape([-1, index.shape[-1]])\n\n tensor_stride = paddle.assign(\n self.shape_stride(tensor.shape[:index.shape[-1]]))\n inds = []\n for i in range(index_1d.shape[0]):\n temp = (index_1d[i] * tensor_stride).sum()\n inds.append(temp)\n index_1d = paddle.stack(inds).reshape([-1])\n t_reshape = tensor.reshape([-1] + list(tensor.shape[index.shape[-1]:]))\n out = paddle.scatter(t_reshape, index_1d, value_1d)\n if tensor_type is not None:\n out = out.astype(tensor_type)\n tensor_origin[:] = out.reshape(tensor_origin.shape)\n\n return tensor_origin\n\n\ndef replace_ellipsis(var, item):\n from .framework import Variable\n # Use slice(None) to replace Ellipsis.\n # For var, var.shape = [3,4,5,6]\n #\n # var[..., 1:2] -> var[:, :, :, 1:2]\n # var[0, ...] 
-> var[0]\n # var[0, ..., 1:2] -> var[0, :, :, 1:2]\n\n item = list(item)\n\n # Remove Variable to skip bug when counting Ellipsis\n item_remove_var = [\n ele for ele in item\n if not isinstance(ele, (Variable, np.ndarray)) and ele is not None\n ]\n ell_count = item_remove_var.count(Ellipsis)\n if ell_count == 0:\n return item\n elif ell_count > 1:\n raise IndexError(\"An index can only have a single ellipsis ('...')\")\n\n ell_idx = item.index(Ellipsis)\n\n if ell_idx == len(item) - 1:\n return item[:-1]\n else:\n item[ell_idx:ell_idx + 1] = [slice(None)] * (\n len(var.shape) - len(item) + item.count(None) + 1)\n\n return item\n\n\ndef replace_ndarray(item):\n new_item = []\n for slice_item in item:\n if isinstance(slice_item, np.ndarray):\n new_item.append(paddle.assign(slice_item))\n else:\n new_item.append(slice_item)\n return new_item\n\n\ndef replace_none(item):\n new_item = []\n none_axes = []\n for i, slice_item in enumerate(item):\n if slice_item is None:\n none_axes.append(i)\n else:\n new_item.append(slice_item)\n return new_item, none_axes\n\n\ndef is_integer_or_scalar_tensor(ele):\n from .framework import Variable\n if isinstance(ele, int):\n return True\n elif isinstance(ele, Variable):\n if len(ele.shape) == 1 and ele.shape[0] == 1:\n return True\n return False\n\n\ndef deal_attrs(attrs, attr, attr_name, tensor_attr_name, inputs, infer_flags):\n from .framework import Variable\n from .layers import utils\n\n if utils._contain_var(attr):\n inputs[tensor_attr_name] = utils._convert_to_tensor_list(\n attr, dtype=\"int64\")\n for i, dim in enumerate(attr):\n if isinstance(dim, Variable):\n attrs[attr_name].append(-1)\n infer_flags[i] = -1\n else:\n attrs[attr_name].append(dim)\n else:\n attrs[attr_name] = attr\n\n\ndef _getitem_impl_(var, item):\n \"\"\"\n Slice the variable.\n\n Args:\n item(int/slice/tuple) : the index.\n\n Returns:\n Sliced variable\n \"\"\"\n from .framework import default_main_program, Variable\n if isinstance(item, list):\n if not is_one_dim_list(item, int):\n item = tuple(item)\n\n if not isinstance(item, tuple):\n item = (item, )\n\n decrease_axes = []\n axes = []\n starts = []\n ends = []\n steps = []\n reverse_axes = []\n\n use_strided_slice = False\n item = replace_ndarray(item)\n item = replace_ellipsis(var, item)\n item, none_axes = replace_none(item)\n slice_info = SliceInfo()\n\n for dim, slice_item in enumerate(item):\n if is_integer_or_scalar_tensor(slice_item):\n if isinstance(slice_item,\n int) and var.shape[dim] is not None and var.shape[\n dim] >= 0 and slice_item >= var.shape[dim]:\n # For python, if users write a, b = var, the __getitem__\n # method will iterate through 0, 1, 2 ... until __getitem__\n # throws an IndexError, then stop. The var[0], var[1] will\n # be given to a, b respectively. 
If more values are given,\n # the unpack size would cause error.\n #\n # We raises IndexError here to support grammar like `a, b = var`\n raise IndexError(\n \"slice_item %d at dim %d should be >= 0 and < var.shape[%d]: %d\"\n % (slice_item, dim, dim, var.shape[dim]))\n decrease_axes.append(dim)\n start = slice_item\n step = 1\n end = slice_item + 1 if slice_item != -1 else MAX_INTEGER\n\n elif isinstance(slice_item, slice):\n start = slice_item.start\n end = slice_item.stop\n step = slice_item.step\n\n if start is None and end is None and step is None:\n continue\n\n step = 1 if step is None else step\n\n if start is None:\n start = 0 if step > 0 else MAX_INTEGER\n if end is None:\n end = MAX_INTEGER if step > 0 else -1\n\n elif isinstance(slice_item, list):\n all_bool = True\n\n if is_list_tuple(slice_item, int):\n slice_info.update(slice_item)\n continue\n\n for i in slice_item:\n if type(i) is int:\n all_bool = False\n elif not isinstance(i, bool):\n raise TypeError(\"Only support int or bool in index list.\")\n\n if len(item) != 1:\n raise IndexError(\n \"When index contains a list, its length must be 1, but received {}.\".\n format(len(item)))\n new_slice_item = []\n if all_bool:\n if len(slice_item) != var.shape[0]:\n raise IndexError(\n \"The dimension of bool index doesn't match indexed array along \"\\\n \"dimension 0, the target dimension is {}, but received {}.\".\n format(var.shape[0], len(slice_item)))\n for idx, ele in enumerate(slice_item):\n if ele is True:\n new_slice_item.append(idx)\n slice_item = new_slice_item\n else:\n for idx, ele in enumerate(slice_item):\n if type(ele) is int:\n new_slice_item.append(ele)\n elif ele is True:\n new_slice_item.append(1)\n else:\n new_slice_item.append(0)\n slice_item = new_slice_item\n\n from .layers import assign\n from ..tensor import index_select\n\n idx = assign(np.array(slice_item).astype(\"int32\"))\n return index_select(var, index=idx, axis=0)\n\n elif isinstance(slice_item, (Variable, core.eager.Tensor)):\n if len(item) == 1:\n\n from ..tensor import index_select, gather_nd\n from .layers.nn import where\n\n if slice_item.dtype == paddle.bool:\n if len(slice_item.shape) > len(var.shape):\n raise IndexError(\n \"The dims of bool index doesn't match indexed array, \"\n \"the dims of bool index except to be equal or less \"\n \"than {}, but received {}.\".format(\n len(var.shape), len(slice_item.shape)))\n for i, dim_len in enumerate(slice_item.shape):\n if dim_len != var.shape[i]:\n raise IndexError(\n \"The dimension of bool index doesn't match indexed array along \"\\\n \"dimension {}, the target dimension is {}, but received {}.\".\n format(i, var.shape[i], dim_len))\n bool_2_idx = where(slice_item == True)\n return gather_nd(var, bool_2_idx)\n else:\n if len(slice_item.shape) == 1:\n return index_select(var, index=slice_item, axis=0)\n else:\n slice_info.update(slice_item)\n continue\n else:\n slice_info.update(slice_item)\n continue\n\n else:\n raise IndexError(\n \"Valid index accept int or slice or ellipsis or list, but received {}.\".\n format(slice_item))\n\n axes.append(dim)\n starts.append(start)\n ends.append(end)\n steps.append(step)\n use_strided_slice = True if step != 1 else use_strided_slice\n\n if slice_info.indexes:\n if len(slice_info.indexes) != len(item):\n raise IndexError(\n \"Valid index accept int or slice or ellipsis or list, but received {}.\".\n format(item))\n return slice_info.get_item(var)\n\n inputs = {'Input': [var]}\n attrs = {\n 'axes': axes,\n 'starts': [],\n 'ends': [],\n 'decrease_axis': 
decrease_axes\n }\n if use_strided_slice:\n attrs['strides'] = []\n\n infer_flags = [1] * len(axes)\n deal_attrs(attrs, starts, \"starts\", \"StartsTensorList\", inputs, infer_flags)\n deal_attrs(attrs, ends, \"ends\", \"EndsTensorList\", inputs, infer_flags)\n deal_attrs(attrs, steps, \"strides\", \"StridesTensorList\", inputs,\n infer_flags)\n attrs['infer_flags'] = infer_flags\n\n out = var\n if len(axes) > 0:\n target_block = default_main_program().current_block()\n op_type = \"strided_slice\" if use_strided_slice else \"slice\"\n\n slice_out_var = target_block.create_var(\n name=unique_name.generate_with_ignorable_key(var.name + \"_\" +\n op_type),\n dtype=var.dtype)\n target_block.append_op(\n type=op_type,\n inputs=inputs,\n outputs={'Out': [slice_out_var]},\n attrs=attrs)\n out = slice_out_var\n\n if len(reverse_axes) > 0:\n from .layers.tensor import reverse\n out = reverse(out, axis=reverse_axes)\n\n # Deal with cases when all axes are decreased.\n # After slice, the shape of out is [1], which should have been [], but Paddle doesn't support scalar.\n # In order to ensure the correctness of the final shape of out, one dimension of out needs to be decreased.\n # For example:\n # # x.shape: (2,3,4)\n # out = x[0, 1, 1, None] # out.shape : (1)\n if len(decrease_axes) == len(var.shape):\n none_axes = none_axes[1:]\n\n if len(none_axes) > 0:\n # Deal with cases that decrease_axes is not empty\n # For example:\n # # x.shape: (2,3,4)\n # out = x[0, 0:2, None] # out.shape : (2, 1, 4)\n for idx, axis in enumerate(none_axes):\n l = len([i for i in decrease_axes if i < axis])\n new_axis = axis - l\n none_axes[idx] = new_axis\n\n # Deal with cases when all axes are decreased.\n # After slice, the shape of out is [1], which should have been [], but Paddle doesn't support scalar.\n # In order to ensure the correctness of the final shape of out, one dimension of out needs to be decreased.\n # For example:\n # # x.shape: (2,3,4)\n # out = x[0, 1, 1, None] # out.shape : (1)\n\n from ..tensor import unsqueeze\n out = unsqueeze(out, axis=none_axes)\n\n return out\n\n\ndef _setitem_impl_(var, item, value):\n from .framework import default_main_program, Variable\n\n inputs = {'Input': var}\n if isinstance(item, list):\n if not is_one_dim_list(item, int):\n item = tuple(item)\n # 1. 
Parse item\n if not isinstance(item, tuple):\n item = (item, )\n\n decrease_axes = []\n axes = []\n starts = []\n ends = []\n steps = []\n\n item = replace_ndarray(item)\n item = replace_ellipsis(var, item)\n item, none_axes = replace_none(item)\n slice_info = SliceInfo()\n dim = 0\n for _, slice_item in enumerate(item):\n if is_integer_or_scalar_tensor(slice_item):\n decrease_axes.append(dim)\n start = slice_item\n end = slice_item + 1 if slice_item != -1 else MAX_INTEGER\n step = 1\n\n elif isinstance(slice_item, slice):\n start = slice_item.start\n end = slice_item.stop\n step = slice_item.step\n\n if start is None and end is None and step is None:\n dim += 1\n continue\n\n step = 1 if step is None else step\n\n if not isinstance(step, Variable) and step == 0:\n raise ValueError(\n \"When assign a value to a paddle.Tensor, step can not be 0, \"\n \"but received step is {}.\".format(step))\n\n if isinstance(step, Variable) and (start is None or end is None):\n raise ValueError(\n \"When assign a value to a paddle.Tensor, it's not supported that \"\n \"the start or end is None when the type of step is paddle.Tensor.\"\n )\n\n if start is None:\n start = 0 if step > 0 else MAX_INTEGER\n\n if end is None:\n end = MAX_INTEGER if step > 0 else (0 - MAX_INTEGER)\n elif isinstance(slice_item, list):\n if is_list_tuple(slice_item, int):\n slice_info.update(slice_item)\n continue\n\n for i in slice_item:\n if not isinstance(i, bool):\n raise TypeError(\"Doesn't support {} in index list.\".format(\n type(i)))\n\n if len(item) != 1:\n raise IndexError(\n \"When index contains a bool list, its length must be 1, but received {}.\".\n format(len(item)))\n\n from .layers import assign\n idx_tensor = assign(slice_item)\n return set_value_for_bool_tensor(var, idx_tensor, value)\n\n elif isinstance(slice_item, Variable):\n if slice_item.dtype == core.VarDesc.VarType.BOOL:\n if len(item) != 1:\n raise IndexError(\n \"When index contains a bool tensor, its length must be 1, but received {}.\".\n format(len(item)))\n return set_value_for_bool_tensor(var, slice_item, value)\n else:\n slice_info.update(slice_item)\n continue\n else:\n raise IndexError(\n \"Valid index accept int, slice, ellipsis, None, list of bool, Variable, \"\n \"but received {}.\".format(slice_item))\n\n axes.append(dim)\n starts.append(start)\n ends.append(end)\n steps.append(step)\n\n dim += 1\n if slice_info.indexes:\n if len(slice_info.indexes) != len(item):\n raise IndexError(\n \"Valid index accept int or slice or ellipsis or list, but received {}.\".\n format(item))\n return slice_info.set_item(var, value)\n attrs = {\n 'axes': axes,\n 'starts': starts,\n 'ends': ends,\n 'steps': steps,\n 'decrease_axes': decrease_axes,\n 'none_axes': none_axes\n }\n\n from .layers import utils\n if utils._contain_var(starts):\n inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)\n del attrs['starts']\n if utils._contain_var(ends):\n inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)\n del attrs['ends']\n if utils._contain_var(steps):\n inputs['StepsTensorList'] = utils._convert_to_tensor_list(steps)\n del attrs['steps']\n\n # 2. 
Parse value\n dtype = var.dtype\n attrs['dtype'] = dtype\n\n from .data_feeder import convert_dtype\n # 2.1 value is an integer of float\n if isinstance(value, (int, float)):\n value = np.array([value]).astype(convert_dtype(dtype))\n\n # 2.2 value is a np.ndarray\n if isinstance(value, np.ndarray):\n shape = list(value.shape)\n if dtype == core.VarDesc.VarType.BOOL:\n value_name = \"bool_values\"\n values = [int(v) for v in value.flat]\n elif dtype == core.VarDesc.VarType.FP32:\n value_name = \"fp32_values\"\n values = [float(v) for v in value.flat]\n elif dtype == core.VarDesc.VarType.FP64:\n value_name = \"fp64_values\"\n values = [float(v) for v in value.flat]\n elif dtype == core.VarDesc.VarType.INT32:\n value_name = \"int32_values\"\n values = [int(v) for v in value.flat]\n elif dtype == core.VarDesc.VarType.INT64:\n value_name = \"int64_values\"\n values = [int(v) for v in value.flat]\n else:\n raise TypeError(\n \"When assign a numpy.ndarray, integer or float to a paddle.Tensor, \"\n \"the data type of the paddle.Tensor must be bool, float32, int32 or int64, but \"\n \"received %s.\" % convert_dtype(dtype))\n attrs[value_name] = values\n attrs[\"shape\"] = shape\n\n elif isinstance(value, (Variable, core.eager.Tensor)):\n inputs[\"ValueTensor\"] = value\n else:\n raise TypeError(\n \"Only support to assign an integer, float, numpy.ndarray or \"\n \"paddle.Tensor to a paddle.Tensor, but received {}\".format(\n type(value)))\n\n if paddle.fluid.framework.in_dygraph_mode(\n ) and not paddle.fluid.framework._in_eager_mode():\n # TODO(pangyoki) add inplace(BumpInplaceVersion) if need\n var._bump_inplace_version()\n\n cur_block = default_main_program().current_block()\n cur_block.append_op(\n type=\"set_value\",\n inputs=inputs,\n outputs={'Out': var},\n attrs=attrs,\n inplace_map={\"Input\": \"Out\"})\n\n return var\n\n\n# the item is a tensor of bool \ndef set_value_for_bool_tensor(var, item, value):\n if len(item.shape) > len(var.shape):\n raise IndexError(\"The dims of bool index doesn't match indexed array, \"\n \"the dims of bool index except to be equal or less \"\n \"than {}, but received {}.\".format(\n len(var.shape), len(item.shape)))\n for i, dim_len in enumerate(item.shape):\n if dim_len != var.shape[i]:\n raise IndexError(\n \"The dimension of bool index doesn't match indexed array along \"\n \"dimension {}, the target dimension is {}, but received {}.\".\n format(i, var.shape[i], dim_len))\n\n def idx_not_empty(var, item, value):\n from .framework import Variable\n from .layers import assign\n from .layers.nn import where\n from ..tensor import gather_nd, scatter_nd_add\n\n if not isinstance(value, Variable):\n value = assign(value).cast(var.dtype)\n\n idx = where(item)\n gather_val = gather_nd(var, idx)\n gather_val_new = value - gather_val\n out = scatter_nd_add(var, idx, gather_val_new)\n var[:] = out\n\n from .layers.control_flow import cond\n # If all the bool index is False, just do nothing\n cond(item.any(), lambda: idx_not_empty(var, item, value))\n\n return var\n"
] |
[
[
"numpy.random.random",
"numpy.array_equal",
"numpy.arange",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.random.uniform",
"numpy.array",
"numpy.array_equal"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EmmaNguyen/feature_adversarial_with_topology_signatures
|
[
"efa7db6d0fdf5b2505d67d4341dcdb2ab05a97a7"
] |
[
"models/architecture/neural_topo_nets.py"
] |
[
"import torchvision.transforms as transforms\nfrom torchvision.utils import save_image\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\n\ncuda = True if torch.cuda.is_available() else False\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\nclass Topo_Generator(nn.Module):\n def __init__(self, latent_dim, img_size, channels, subscripted_views):\n super(Generator, self).__init__()\n self.latent_dim = latent_dim\n self.img_size = img_size\n self.channels = channels\n self.subscripted_views = subscripted_views\n self.transform = UpperDiagonalThresholdedLogTransform(0.1)\n\n def get_init(num_elements):\n transform = UpperDiagonalThresholdedLogTransform(0.1)\n return transform(pers_dgm_center_init(num_elements))\n\n # self.model = nn.Sequential(\n # nn.Linear(self.latent_dim, 128),\n # nn.LeakyReLU(0.2, inplace=True),\n # nn.Linear(128, 256),\n # nn.BatchNorm1d(256),\n # nn.LeakyReLU(0.2, inplace=True),\n # nn.Linear(256, 512),\n # nn.BatchNorm1d(512),\n # nn.LeakyReLU(0.2, inplace=True),\n # nn.Linear(512, 1024),\n # nn.BatchNorm1d(1024),\n # nn.LeakyReLU(0.2, inplace=True),\n # nn.Linear(1024, self.img_size**2),\n # nn.Tanh()\n # )\n\n def get_init(n_elements):\n transform = UpperDiagonalThresholdedLogTransform(0.1)\n return transform(pers_dgm_center_init(n_elements))\n\n self.dim_0 = SLayer(150, 2, get_init(150), torch.ones(150, 2) * 3)\n self.dim_0_ess = SLayer(50, 1)\n self.dim_1_ess = SLayer(50, 1)\n self.slayers = [self.dim_0,\n self.dim_0_ess,\n self.dim_1_ess\n ]\n\n self.stage_1 = []\n stage_1_outs = [75, 25, 25]\n\n for i, (n_in, n_out) in enumerate(zip([150, 50, 50], stage_1_outs)):\n seq = nn.Sequential()\n seq.add_module('linear_1', nn.Linear(n_in, n_out))\n seq.add_module('batch_norm', nn.BatchNorm1d(n_out))\n seq.add_module('drop_out_1', nn.Dropout(0.1))\n seq.add_module('linear_2', nn.Linear(n_out, n_out))\n seq.add_module('relu', nn.ReLU())\n seq.add_module('drop_out_2', nn.Dropout(0.1))\n\n self.stage_1.append(seq)\n self.add_module('stage_1_{}'.format(i), seq)\n\n linear_1 = nn.Sequential()\n linear_1.add_module('linear_1', nn.Linear(sum(stage_1_outs), 200))\n linear_1.add_module('batchnorm_1', torch.nn.BatchNorm1d(200))\n linear_1.add_module('relu_1', nn.ReLU())\n linear_1.add_module('linear_2', nn.Linear(200, 100))\n linear_1.add_module('batchnorm_2', torch.nn.BatchNorm1d(100))\n linear_1.add_module('drop_out_2', torch.nn.Dropout(0.1))\n linear_1.add_module('relu_2', nn.ReLU())\n linear_1.add_module('linear_3', nn.Linear(100, 50))\n linear_1.add_module('batchnorm_3', nn.BatchNorm1d(50))\n linear_1.add_module('relu_3', nn.ReLU())\n linear_1.add_module('linear_4', nn.Linear(50, 5))\n linear_1.add_module('batchnorm_4', nn.BatchNorm1d(5))\n self.linear_1 = linear_1\n\n def forward(self, noise):\n # import pdb;\n # img = self.model(noise)\n # # pdb.set_trace(im)\n # img = img.view(img.size()[0], self.channels, self.img_size, self.img_size)\n #\n # return img\n x = [batch[n] for n in self.subscripted_views]\n\n x = [\n [self.transform(dgm) for dgm in x[0]],\n [reduce_essential_dgm(dgm) for dgm in x[1]],\n [reduce_essential_dgm(dgm) for dgm in x[2]]\n ]\n\n x_sl = [l(xx) for l, xx in zip(self.slayers, x)]\n\n x = [l(xx) for l, xx in zip(self.stage_1, x_sl)]\n\n x = torch.cat(x, 1)\n\n x = self.linear_1(x)\n\n return x\n\nclass Discriminator(nn.Module):\n def __init__(self, img_size, latent_dim):\n super(Discriminator, 
self).__init__()\n self.img_size = img_size\n self.latent_dim = latent_dim\n self.model = nn.Sequential(\n nn.Linear(self.img_size**2 + self.latent_dim, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 1),\n nn.Sigmoid()\n )\n\n def forward(self, img, latent_vector):\n img_flat = img.view(img.size()[0], -1)\n validity = self.model(torch.cat([img_flat, latent_vector],1))\n return validity\n\nclass Topo_Decoder(nn.Module):\n def __init__(self, img_size, latent_dim):\n super(Decoder, self).__init__()\n self.img_size = img_size\n self.latent_dim = latent_dim\n self.model = nn.Sequential(\n nn.Linear(self.img_size**2, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, self.latent_dim),\n nn.Sigmoid()\n )\n def forward(self, img):\n # import pdb; pdb.set_trace()\n img_flat = img.view(img.size()[0], -1)\n validity = self.model(img_flat) #64x784\n return validity\n\ndef train_discriminator(discriminator, imgs, latent_vector):\n # imgs = imgs.view(imgs.size()[0], -1)\n # vector = torch.cat([imgs, latent_vector], 1)\n # return discriminator(vector)\n return discriminator(imgs, latent_vector)\n\ndef get_loss_discriminator(discriminator, fake_imgs, z, real_imgs, fake_z):\n adversarial_loss = nn.BCELoss()\n # minibatch_size = discriminator_real.size()[0]\n minibatch_size = real_imgs.size()[0]\n valid = Variable(Tensor(minibatch_size, 1).fill_(1.0), requires_grad=False)\n fake = Variable(Tensor(minibatch_size, 1).fill_(0.0), requires_grad=False)\n real_loss = adversarial_loss(train_discriminator(discriminator, real_imgs, fake_z), valid)\n fake_loss = adversarial_loss(train_discriminator(discriminator, fake_imgs.detach(), z), fake)\n return (real_loss + fake_loss) / 2\n\ndef get_loss_generator(discriminator, fake_imgs, z, real_imgs, fake_z):\n objection = nn.BCELoss()\n minibatch_size = fake_imgs.size()[0]\n # minibatch_size = self.batch_size\n valid = Variable(Tensor(minibatch_size, 1).fill_(1.0), requires_grad=False)\n valid_prediction = train_discriminator(discriminator, fake_imgs, z)\n # import pdb; pdb.set_trace()\n return objection(valid_prediction, valid)\n\ndef get_loss_wasserstein_discriminator(discriminator, fake_imgs, z, real_imgs, fake_z):\n real_validity = discriminator(real_imgs, fake_z)\n fake_validity = discriminator(fake_imgs, z)\n return real_validity - fake_validity\n\ndef get_loss_wasserstein_generator(discriminator, fake_imgs, z, real_imgs, fake_z):\n return torch.mean(discriminator(fake_imgs, z))\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.ones",
"torch.cat",
"torch.nn.BCELoss",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.cuda.is_available",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
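The get_loss_discriminator / get_loss_generator functions in the neural_topo_nets.py entry above implement the standard BCE adversarial objectives, with the discriminator scoring (image, latent) pairs. Below is a minimal sketch of how such losses are typically wired into one training step, using plain MLP stand-ins rather than the SLayer-based networks defined above; the layer sizes, learning rates, and the unconditional discriminator are illustrative assumptions, not the repository's models.

import torch
import torch.nn as nn

latent_dim, data_dim, batch_size = 16, 32, 8

# Illustrative stand-ins; the entry's generator is built from SLayer blocks and
# its discriminator scores (image, latent) pairs jointly.
G = nn.Sequential(nn.Linear(latent_dim, 64), nn.ReLU(), nn.Linear(64, data_dim))
D = nn.Sequential(nn.Linear(data_dim, 64), nn.LeakyReLU(0.2), nn.Linear(64, 1), nn.Sigmoid())

opt_g = torch.optim.Adam(G.parameters(), lr=2e-4)
opt_d = torch.optim.Adam(D.parameters(), lr=2e-4)
bce = nn.BCELoss()

real = torch.randn(batch_size, data_dim)      # placeholder batch of real samples
valid = torch.ones(batch_size, 1)
fake_label = torch.zeros(batch_size, 1)

# Discriminator step: real -> 1, generated -> 0, averaged as in get_loss_discriminator.
z = torch.randn(batch_size, latent_dim)
fake = G(z)
d_loss = 0.5 * (bce(D(real), valid) + bce(D(fake.detach()), fake_label))
opt_d.zero_grad()
d_loss.backward()
opt_d.step()

# Generator step: push D towards labelling generated samples as real, as in get_loss_generator.
g_loss = bce(D(fake), valid)
opt_g.zero_grad()
g_loss.backward()
opt_g.step()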
fennel-labs/connect4
|
[
"974103db0d489bb1631fd33cc2caf577ce40cbce"
] |
[
"connect4/test/test_field.py"
] |
[
"from connect4.field import Field\nimport unittest\nimport numpy as np\n\nclass TestField(unittest.TestCase):\n\n def setUp(self):\n self.field = Field()\n\n def test_legal_placement(self):\n self.field.place(1,Field.Player.P1)\n self.field.place(1,Field.Player.P2)\n self.field.place(2,Field.Player.P1)\n self.field.place(1,Field.Player.P1)\n self.field.place(1,Field.Player.P2)\n self.field.place(1,Field.Player.P1)\n\n grid_expected = np.zeros_like(self.field.grid)\n grid_expected[0,1] = Field.Player.P1\n grid_expected[1,1] = Field.Player.P2\n grid_expected[0,2] = Field.Player.P1\n grid_expected[2,1] = Field.Player.P1\n grid_expected[3,1] = Field.Player.P2\n grid_expected[4,1] = Field.Player.P1\n\n self.assertTrue(np.array_equal(self.field.grid, grid_expected))\n\n def test_illegal_placement(self):\n with self.assertRaises(ValueError):\n self.field.place(-1, Field.Player.P1)\n with self.assertRaises(ValueError):\n self.field.place(Field.WIDTH + 1, Field.Player.P1)\n with self.assertRaises(ValueError):\n self.field.place(0, 20)\n \n def test_rejected_placement(self):\n for col in range(0,self.field.WIDTH):\n # fill up one row and check whether return values fit\n for _ in range(0,self.field.HEIGHT):\n self.assertTrue(self.field.place(col,Field.Player.P1))\n self.assertFalse(self.field.place(col,Field.Player.P1))\n self.assertFalse(self.field.place(col,Field.Player.P2))\n\n def test_checks(self):\n # horizontal test\n increment = (0,1)\n for col in range(0,Field.WIDTH - Field.WIN_LENGTH + 1):\n for row in range(0, Field.HEIGHT):\n self.field = Field()\n pos = (row, col)\n # put 4 chips in a winning condtion into the field\n for _ in range(0,Field.WIN_LENGTH):\n player,_,_ = self.field.check()\n self.assertEqual(player,0)\n self.field.grid[pos] = Field.Player.P1\n pos = tuple(np.add(pos,increment))\n #print(self.field)\n player,_,_ = self.field.check()\n self.assertEqual(player,Field.Player.P1)\n\n # vertical test\n increment = (1,0)\n for col in range(0,Field.WIDTH):\n for row in range(0, Field.HEIGHT - Field.WIN_LENGTH + 1):\n self.field = Field()\n pos = (row, col)\n # put 4 chips in a winning condtion into the field\n for _ in range(0,Field.WIN_LENGTH):\n player,_,_ = self.field.check()\n self.assertEqual(player,0)\n self.field.grid[pos] = Field.Player.P2\n pos = tuple(np.add(pos,increment))\n #print(self.field)\n player,_,_ = self.field.check()\n self.assertEqual(player,Field.Player.P2) \n\n # diagnoal 1 test\n increment = (1,1)\n for col in range(0,Field.WIDTH - Field.WIN_LENGTH + 1):\n for row in range(0, Field.HEIGHT - Field.WIN_LENGTH + 1):\n self.field = Field()\n pos = (row, col)\n # put 4 chips in a winning condtion into the field\n for _ in range(0,Field.WIN_LENGTH):\n player,_,_ = self.field.check()\n self.assertEqual(player,0)\n self.field.grid[pos] = Field.Player.P1\n pos = tuple(np.add(pos,increment))\n #print(self.field)\n player,_,_ = self.field.check()\n self.assertEqual(player,Field.Player.P1) \n\n # diagnoal 2 test\n increment = (-1,1)\n for col in range(0,Field.WIDTH - Field.WIN_LENGTH + 1):\n for row in range(Field.WIN_LENGTH-1, Field.HEIGHT):\n self.field = Field()\n pos = (row, col)\n # put 4 chips in a winning condtion into the field\n for _ in range(0,Field.WIN_LENGTH):\n player,_,_ = self.field.check()\n self.assertEqual(player,0)\n self.field.grid[pos] = Field.Player.P2\n pos = tuple(np.add(pos,increment))\n #print(self.field)\n player,_,_ = self.field.check()\n self.assertEqual(player,Field.Player.P2) \n\n def test_player_switch(self):\n 
self.assertEqual(self.field.getPlayer(), Field.Player.P1)\n # switch 1->2\n self.assertEqual(self.field.switchPlayer(), Field.Player.P2)\n self.assertEqual(self.field.getPlayer(), Field.Player.P2)\n # switch 2->1\n self.assertEqual(self.field.switchPlayer(), Field.Player.P1)\n self.assertEqual(self.field.getPlayer(), Field.Player.P1)\n\n\nif __name__ == '__main__':\n unittest.main()\n "
] |
[
[
"numpy.add",
"numpy.zeros_like",
"numpy.array_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
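The test_checks cases above repeatedly step a (row, col) position along a direction increment with np.add and expect Field.check() to report the winner once WIN_LENGTH chips line up. Below is a minimal sketch of that direction-scan idea on a bare numpy grid; the board dimensions and the check function are an illustrative stand-in, not the repository's Field implementation.

import numpy as np

HEIGHT, WIDTH, WIN_LENGTH = 6, 7, 4
DIRECTIONS = [(0, 1), (1, 0), (1, 1), (-1, 1)]  # horizontal, vertical, two diagonals

def check(grid: np.ndarray) -> int:
    """Return the winning player id, or 0 if nobody has WIN_LENGTH in a row."""
    for row in range(HEIGHT):
        for col in range(WIDTH):
            player = grid[row, col]
            if player == 0:
                continue
            for inc in DIRECTIONS:
                pos = (row, col)
                count = 0
                while (0 <= pos[0] < HEIGHT and 0 <= pos[1] < WIDTH
                       and grid[pos] == player):
                    count += 1
                    if count == WIN_LENGTH:
                        return int(player)
                    pos = tuple(np.add(pos, inc))
    return 0

grid = np.zeros((HEIGHT, WIDTH), dtype=int)
grid[0, 1:5] = 1            # four in a row for player 1
assert check(grid) == 1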
syn8228/met
|
[
"268325eaf91cafe1a56f11a322762391f3f8d027"
] |
[
"code/classifiers/knn_classifier.py"
] |
[
"import faiss\nimport itertools\nimport numpy as np\n\nfrom code.utils.evaluation_metrics import *\n\n\n\n\nclass KNN_Classifier():\n\n\n def __init__(self,K,t):\n\n self.K = K #number of neighbors to take into account\n self.t = t #temperature of the softmax\n\n\n def fit(self, train_descrs, train_labels):\n\n self.index = faiss.IndexFlatIP(np.shape(train_descrs)[1])\n self.index.add(train_descrs)\n\n self.train_labels = train_labels\n self.n = np.unique(train_labels).shape[0] #total number of classes in the train set\n\n\n def predict(self, test_descrs):\n\n similarities_k_sorted, idx = self.index.search(test_descrs,self.K)\n train_labels_k_sorted = self.train_labels[idx]\n\n preds,confs = [],[]\n \n for i in range(np.shape(similarities_k_sorted)[0]):\n\n unique_neighbors = np.unique(train_labels_k_sorted[i])\n count_neighbors = np.zeros((1,len(unique_neighbors)))[0]\n total_sims = np.zeros((1,len(unique_neighbors)))[0]\n\n for j in range(len(train_labels_k_sorted[i])):\n\n idx_total = np.where(unique_neighbors==train_labels_k_sorted[i][j])[0]\n \n if len(idx_total)==0:\n continue\n \n total_sims[idx_total] = max(total_sims[idx_total], similarities_k_sorted[i][j])\n\n \n total_sims = np.exp(self.t*total_sims)\n total_sims /= (total_sims.sum()+(self.n-total_sims.shape[0])*np.exp(0))\n\n test_label_pred = unique_neighbors[total_sims.argmax()]\n confidence = total_sims.max()\n\n preds.append(test_label_pred)\n confs.append(confidence)\n\n return preds,confs\n\n\n\ndef tune_KNN(param_grid,train_descr,train_labels,val_descr,val_labels,verbose = True):\n '''Tuning is performed on the GAP metric\n '''\n combinations = itertools.product(*(param_grid[p] for p in param_grid))\n best_score = -np.Inf\n best_params = {}\n\n for param_set in combinations:\n\n clf = KNN_Classifier(K=int(param_set[0]),t=float(param_set[1]))\n clf.fit(train_descr,train_labels)\n val_preds,val_confs = clf.predict(val_descr)\n print(param_set)\n gap,_,_ = evaluate(val_preds,val_confs,val_labels,verbose=verbose)\n\n score = gap\n if score > best_score:\n best_score = score\n best_params = param_set\n\n return best_score,dict(zip(param_grid, best_params))\n"
] |
[
[
"numpy.exp",
"numpy.shape",
"numpy.where",
"numpy.unique"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
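A minimal usage sketch for the KNN_Classifier defined above, assuming the package is importable from the repository root (the module itself imports from code.utils) and that descriptors are float32 and L2-normalized so the inner-product faiss index behaves like cosine similarity; the descriptor dimension, class count, and K/t values are illustrative.

import numpy as np
from code.classifiers.knn_classifier import KNN_Classifier  # assumes repo root on sys.path

rng = np.random.default_rng(0)

def l2_normalize(x):
    return x / np.linalg.norm(x, axis=1, keepdims=True)

# faiss.IndexFlatIP expects float32 descriptors; L2-normalizing makes inner product = cosine.
train_descrs = l2_normalize(rng.standard_normal((1000, 128)).astype(np.float32))
train_labels = rng.integers(0, 10, size=1000)        # 10 hypothetical classes
test_descrs = l2_normalize(rng.standard_normal((50, 128)).astype(np.float32))

clf = KNN_Classifier(K=5, t=10.0)
clf.fit(train_descrs, train_labels)
preds, confs = clf.predict(test_descrs)
print(preds[:5], confs[:5])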
siddhu001/slue-toolkit
|
[
"b8a62ef941a812ce277cf6a4af08d6065af8bec6",
"b8a62ef941a812ce277cf6a4af08d6065af8bec6",
"b8a62ef941a812ce277cf6a4af08d6065af8bec6"
] |
[
"slue_toolkit/eval/infer_asr.py",
"slue_toolkit/prepare/prepare_voxceleb_asr_pred.py",
"slue_toolkit/prepare/prepare_voxpopuli.py"
] |
[
"#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nRun inference for pre-processed data with a trained model.\n\"\"\"\n\nimport logging\nimport math\nimport os\nimport sys\n\nimport editdistance\nimport numpy as np\nimport torch\nfrom fairseq import checkpoint_utils, options, progress_bar, tasks, utils\nfrom fairseq.data.data_utils import post_process\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq.logging.meters import StopwatchMeter, TimeMeter\n\n\nlogging.basicConfig()\nlogging.root.setLevel(logging.INFO)\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef add_asr_eval_argument(parser):\n parser.add_argument(\"--kspmodel\", default=None, help=\"sentence piece model\")\n parser.add_argument(\n \"--wfstlm\", default=None, help=\"wfstlm on dictonary output units\"\n )\n parser.add_argument(\n \"--rnnt_decoding_type\",\n default=\"greedy\",\n help=\"wfstlm on dictonary\\\noutput units\",\n )\n try:\n parser.add_argument(\n \"--lm-weight\",\n type=float,\n default=0.2,\n help=\"weight for lm while interpolating with neural score\",\n )\n except:\n pass\n parser.add_argument(\n \"--len-penalty\", type=float, default=1.0, help=\"length penalty on word level\"\n )\n parser.add_argument(\n \"--w2l-decoder\",\n # choices=[\"viterbi\", \"kenlm\", \"fairseqlm\"],\n help=\"use a w2l decoder\",\n )\n parser.add_argument(\"--lexicon\", help=\"lexicon for w2l decoder\")\n parser.add_argument(\"--unit-lm\", action=\"store_true\", help=\"if using a unit lm\")\n parser.add_argument(\"--kenlm-model\", \"--lm-model\", help=\"lm model for w2l decoder\")\n parser.add_argument(\"--beam-threshold\", type=float, default=25.0)\n parser.add_argument(\"--beam-size-token\", type=float, default=100)\n parser.add_argument(\"--word-score\", type=float, default=1.0)\n parser.add_argument(\"--unk-weight\", type=float, default=-math.inf)\n parser.add_argument(\"--sil-weight\", type=float, default=0.0)\n parser.add_argument(\n \"--dump-emissions\",\n type=str,\n default=None,\n help=\"if present, dumps emissions into this file and exits\",\n )\n parser.add_argument(\n \"--dump-features\",\n type=str,\n default=None,\n help=\"if present, dumps features into this file and exits\",\n )\n parser.add_argument(\n \"--load-emissions\",\n type=str,\n default=None,\n help=\"if present, loads emissions from this file\",\n )\n parser.add_argument(\n \"--eval-temperature\",\n type=float,\n default=1.0,\n help=\"temperature scaling of the logits\",\n )\n parser.add_argument(\n \"--eval-upsample\",\n type=float,\n default=1.0,\n help=\"upsample factor\",\n )\n return parser\n\n\ndef check_args(args):\n # assert args.path is not None, \"--path required for generation!\"\n # assert args.results_path is not None, \"--results_path required for generation!\"\n assert (\n not args.sampling or args.nbest == args.beam\n ), \"--sampling requires --nbest to be equal to --beam\"\n assert (\n args.replace_unk is None or args.raw_text\n ), \"--replace-unk requires a raw text dataset (--raw-text)\"\n\n\ndef get_dataset_itr(args, task, models):\n return task.get_batch_iterator(\n dataset=task.dataset(args.gen_subset),\n max_tokens=args.max_tokens,\n max_sentences=args.batch_size,\n max_positions=(sys.maxsize, sys.maxsize),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n 
required_batch_size_multiple=args.required_batch_size_multiple,\n num_shards=args.num_shards,\n shard_id=args.shard_id,\n num_workers=args.num_workers,\n data_buffer_size=args.data_buffer_size,\n ).next_epoch_itr(shuffle=False)\n\n\ndef process_predictions(\n args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id\n):\n for hypo in hypos[: min(len(hypos), args.nbest)]:\n hyp_pieces = tgt_dict.string(hypo[\"tokens\"].int().cpu())\n\n if \"words\" in hypo:\n hyp_words = \" \".join(hypo[\"words\"])\n else:\n hyp_words = post_process(hyp_pieces, args.post_process)\n\n if res_files is not None:\n print(\n \"{} ({}-{})\".format(hyp_pieces, speaker, id),\n file=res_files[\"hypo.units\"],\n )\n print(\n \"{} ({}-{})\".format(hyp_words, speaker, id),\n file=res_files[\"hypo.words\"],\n )\n\n tgt_pieces = tgt_dict.string(target_tokens)\n tgt_words = post_process(tgt_pieces, args.post_process)\n\n if res_files is not None:\n print(\n \"{} ({}-{})\".format(tgt_pieces, speaker, id),\n file=res_files[\"ref.units\"],\n )\n print(\n \"{} ({}-{})\".format(tgt_words, speaker, id), file=res_files[\"ref.words\"]\n )\n # only score top hypothesis\n if not args.quiet:\n logger.debug(\"HYPO:\" + hyp_words)\n logger.debug(\"TARGET:\" + tgt_words)\n logger.debug(\"___________________\")\n\n hyp_words = hyp_words.split()\n tgt_words = tgt_words.split()\n return editdistance.eval(hyp_words, tgt_words), len(tgt_words)\n\n\ndef prepare_result_files(args):\n def get_res_file(file_prefix):\n if args.num_shards > 1:\n file_prefix = f\"{args.shard_id}_{file_prefix}\"\n path = os.path.join(\n args.results_path,\n \"{}-{}-{}.txt\".format(\n file_prefix, os.path.basename(args.path), args.gen_subset\n ),\n )\n return open(path, \"w\", buffering=1)\n\n if not args.results_path:\n return None\n\n return {\n \"hypo.words\": get_res_file(\"hypo.word\"),\n \"hypo.units\": get_res_file(\"hypo.units\"),\n \"ref.words\": get_res_file(\"ref.word\"),\n \"ref.units\": get_res_file(\"ref.units\"),\n }\n\n\ndef load_models_and_criterions(\n filenames, data_path, arg_overrides=None, task=None, model_state=None\n):\n models = []\n criterions = []\n\n if arg_overrides is None:\n arg_overrides = {}\n\n arg_overrides[\"wer_args\"] = None\n arg_overrides[\"data\"] = data_path\n\n if filenames is None:\n assert model_state is not None\n filenames = [0]\n else:\n filenames = filenames.split(\":\")\n\n for filename in filenames:\n if model_state is None:\n if not os.path.exists(filename):\n raise IOError(\"Model file not found: {}\".format(filename))\n state = checkpoint_utils.load_checkpoint_to_cpu(filename, arg_overrides)\n else:\n state = model_state\n\n if \"cfg\" in state:\n cfg = state[\"cfg\"]\n else:\n cfg = convert_namespace_to_omegaconf(state[\"args\"])\n\n if task is None:\n if hasattr(cfg.task, \"data\"):\n cfg.task.data = data_path\n task = tasks.setup_task(cfg.task)\n\n model = task.build_model(cfg.model)\n model.load_state_dict(state[\"model\"], strict=True)\n models.append(model)\n\n criterion = task.build_criterion(cfg.criterion)\n if state.get(\"criterion\", None) is not None:\n criterion.load_state_dict(state[\"criterion\"], strict=True)\n criterions.append(criterion)\n return models, criterions, task\n\n\ndef optimize_models(args, use_cuda, models):\n \"\"\"Optimize ensemble for generation\"\"\"\n for model in models:\n model.make_generation_fast_(\n beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,\n need_attn=args.print_alignment,\n )\n if args.fp16:\n model.half()\n if use_cuda:\n 
model.cuda()\n\n\nclass ExistingEmissionsDecoder(object):\n def __init__(self, decoder, emissions):\n self.decoder = decoder\n self.emissions = emissions\n\n def generate(self, models, sample, **unused):\n ids = sample[\"id\"].cpu().numpy()\n try:\n emissions = np.stack(self.emissions[ids])\n except:\n print([x.shape for x in self.emissions[ids]])\n raise Exception(\"invalid sizes\")\n emissions = torch.from_numpy(emissions)\n return self.decoder.decode(emissions)\n\n\ndef get_num_param(model):\n return sum([p.numel() for p in model.parameters()])\n\n\ndef main(args, task=None, model_state=None):\n check_args(args)\n\n if args.max_tokens is None and args.batch_size is None:\n args.max_tokens = 4000000\n logger.info(args)\n\n use_cuda = torch.cuda.is_available() and not args.cpu\n\n logger.info(\"| decoding with criterion {}\".format(args.criterion))\n\n # Load ensemble\n if args.load_emissions:\n models, criterions = [], []\n task = tasks.setup_task(args)\n else:\n # task = tasks.setup_task(args)\n logger.info(\"| loading model(s) from {}\".format(args.path))\n models, criterions, task = load_models_and_criterions(\n args.path,\n data_path=args.data,\n arg_overrides=eval(args.model_overrides), # noqa\n # task=task,\n model_state=model_state,\n )\n optimize_models(args, use_cuda, models)\n for i, model in enumerate(models):\n logger.info(f\"| model {i} size: {get_num_param(model)}\")\n for name, m in model.named_children():\n logger.info(f\"| | model {i} {name} size: {get_num_param(m)}\")\n for name2, m2 in m.named_children():\n logger.info(\n f\"| | | model {i} {name}.{name2} size: {get_num_param(m2)}\"\n )\n for name3, m3 in m2.named_children():\n logger.info(\n f\"| | | | model {i} {name}.{name2}.{name3} size: {get_num_param(m3)}\"\n )\n\n # Load dataset splits\n task.load_dataset(args.gen_subset)\n\n # Set dictionary\n tgt_dict = task.target_dictionary\n\n logger.info(\n \"| {} {} {} examples\".format(\n args.data, args.gen_subset, len(task.dataset(args.gen_subset))\n )\n )\n\n # hack to pass transitions to W2lDecoder\n if args.criterion == \"asg_loss\":\n trans = criterions[0].asg.trans.data\n args.asg_transitions = torch.flatten(trans).tolist()\n\n # Load dataset (possibly sharded)\n itr = get_dataset_itr(args, task, models)\n\n # Initialize generator\n gen_timer = StopwatchMeter()\n\n def build_generator(args):\n w2l_decoder = getattr(args, \"w2l_decoder\", None)\n if w2l_decoder == \"viterbi\":\n # from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder\n from slue_toolkit.fairseq_addon.decoder.w2l_decoder_old import (\n W2lViterbiDecoder,\n )\n\n return W2lViterbiDecoder(args, task.target_dictionary)\n elif w2l_decoder == \"kenlm\":\n # from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder\n from slue_toolkit.fairseq_addon.decoder.w2l_decoder_old import (\n W2lKenLMDecoder,\n )\n\n return W2lKenLMDecoder(args, task.target_dictionary)\n elif w2l_decoder == \"fairseqlm\":\n from slue_toolkit.fairseq_addon.decoder.w2l_decoder_old import (\n W2lFairseqLMDecoder,\n )\n\n return W2lFairseqLMDecoder(args, task.target_dictionary)\n elif w2l_decoder == \"argmax\":\n from slue_toolkit.fairseq_addon.decoder.ctc_decoder import CTCArgMaxDecoder\n\n return CTCArgMaxDecoder(args, task.target_dictionary)\n elif w2l_decoder == \"s2s\":\n from fairseq.dataclass.configs import GenerationConfig\n\n gen_cfg = GenerationConfig(beam=args.beam)\n if args.kenlm_model:\n overrides = dict()\n lms, _ = checkpoint_utils.load_model_ensemble(\n [args.kenlm_model], 
arg_overrides=overrides, task=None\n )\n lms[0].eval()\n optimize_models(args, use_cuda, lms)\n extra_gen_cls_kwargs = {\"lm_model\": lms[0], \"lm_weight\": args.lm_weight}\n generator = task.build_generator(\n [model], gen_cfg, extra_gen_cls_kwargs=extra_gen_cls_kwargs\n )\n print(f\"lm model: {generator.lm_model}\")\n else:\n generator = task.build_generator([model], gen_cfg)\n return generator\n else:\n print(\n \"only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment\"\n )\n\n # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task\n generator = build_generator(args)\n\n if args.load_emissions:\n generator = ExistingEmissionsDecoder(\n generator, np.load(args.load_emissions, allow_pickle=True)\n )\n logger.info(\"loaded emissions from \" + args.load_emissions)\n\n num_sentences = 0\n\n if args.results_path is not None and not os.path.exists(args.results_path):\n os.makedirs(args.results_path)\n\n max_source_pos = (\n utils.resolve_max_positions(\n task.max_positions(), *[model.max_positions() for model in models]\n ),\n )\n\n if max_source_pos is not None:\n max_source_pos = max_source_pos[0]\n if max_source_pos is not None:\n max_source_pos = max_source_pos[0] - 1\n\n if args.dump_emissions:\n emissions = {}\n if args.dump_features:\n features = {}\n models[0].bert.proj = None\n else:\n res_files = prepare_result_files(args)\n errs_t = 0\n lengths_t = 0\n with progress_bar.build_progress_bar(args, itr) as t:\n wps_meter = TimeMeter()\n full_timer = StopwatchMeter()\n full_timer.start()\n for sample in t:\n sample = utils.move_to_cuda(sample) if use_cuda else sample\n\n def apply_half(t):\n if t.dtype is torch.float32:\n return t.half()\n return t\n\n if args.fp16:\n sample = utils.apply_to_sample(apply_half, sample)\n\n if \"net_input\" not in sample:\n continue\n\n prefix_tokens = None\n if args.prefix_size > 0:\n prefix_tokens = sample[\"target\"][:, : args.prefix_size]\n\n gen_timer.start()\n if args.dump_emissions:\n with torch.no_grad():\n encoder_out = models[0](**sample[\"net_input\"])\n emm = models[0].get_normalized_probs(encoder_out, log_probs=True)\n emm = emm.transpose(0, 1).cpu().numpy()\n for i, id in enumerate(sample[\"id\"]):\n emissions[id.item()] = emm[i]\n continue\n elif args.dump_features:\n with torch.no_grad():\n encoder_out = models[0](**sample[\"net_input\"])\n feat = encoder_out[\"encoder_out\"].transpose(0, 1).cpu().numpy()\n for i, id in enumerate(sample[\"id\"]):\n padding = (\n encoder_out[\"encoder_padding_mask\"][i].cpu().numpy()\n if encoder_out[\"encoder_padding_mask\"] is not None\n else None\n )\n features[id.item()] = (feat[i], padding)\n continue\n hypos = task.inference_step(generator, models, sample, prefix_tokens)\n num_generated_tokens = sum(len(h[0][\"tokens\"]) for h in hypos)\n gen_timer.stop(num_generated_tokens)\n\n for i, sample_id in enumerate(sample[\"id\"].tolist()):\n speaker = None\n # id = task.dataset(args.gen_subset).ids[int(sample_id)]\n id = sample_id\n toks = (\n sample[\"target\"][i, :]\n if \"target_label\" not in sample\n else sample[\"target_label\"][i, :]\n )\n target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()\n # Process top predictions\n errs, length = process_predictions(\n args,\n hypos[i],\n None,\n tgt_dict,\n target_tokens,\n res_files,\n speaker,\n id,\n )\n errs_t += errs\n lengths_t += length\n\n wps_meter.update(num_generated_tokens)\n t.log({\"wps\": round(wps_meter.avg)})\n num_sentences += (\n 
sample[\"nsentences\"] if \"nsentences\" in sample else sample[\"id\"].numel()\n )\n\n full_timer.stop()\n\n wer = None\n if args.dump_emissions:\n emm_arr = []\n for i in range(len(emissions)):\n emm_arr.append(emissions[i])\n np.save(args.dump_emissions, np.array(emm_arr, dtype=\"object\"))\n logger.info(f\"saved {len(emissions)} emissions to {args.dump_emissions}\")\n elif args.dump_features:\n feat_arr = []\n for i in range(len(features)):\n feat_arr.append(features[i])\n np.save(args.dump_features, np.array(feat_arr, dtype=\"object\"))\n logger.info(f\"saved {len(features)} emissions to {args.dump_features}\")\n else:\n if lengths_t > 0:\n wer = errs_t * 100.0 / lengths_t\n logger.info(f\"WER: {wer}\")\n logger.info(f\"full time used: {full_timer.sum}\")\n logger.info(f\"time used: {gen_timer.sum}\")\n\n logger.info(\n \"| Processed {} sentences ({} tokens) in {:.2f} s ({:.2f}\"\n \" sentences/s, {:.2f} tokens/s)\".format(\n num_sentences,\n gen_timer.n,\n gen_timer.sum,\n num_sentences / gen_timer.sum,\n 1.0 / gen_timer.avg,\n )\n )\n logger.info(\"| Generate {} with beam={}\".format(args.gen_subset, args.beam))\n # write the WER info into file in case it's called by infer_multiprocess\n if args.results_path is not None and not os.path.exists(args.results_path):\n with open(os.path.join(args.results_path, \"wer\"), \"w\") as fw_wer:\n fw_wer.write(f\"{errs_t} {lengths_t}\")\n return task, wer\n\n\ndef make_parser():\n parser = options.get_generation_parser()\n parser = add_asr_eval_argument(parser)\n return parser\n\n\ndef cli_main():\n parser = make_parser()\n args = options.parse_args_and_arch(parser)\n main(args)\n\n\nif __name__ == \"__main__\":\n cli_main()\n",
"import argparse, os\nimport pandas as pd\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Get evaluation result for sentiment analysis task\",\n )\n parser.add_argument(\n \"--data\",\n type=str,\n required=True,\n default=\"manifest/slue-voxceleb\",\n help=\"Root directory containing voxceleb1_slue data files,\"\n \"This dir should contain audio/ voxceleb1_slue_{finetune,dev,test} folders \",\n )\n parser.add_argument(\n \"--pred-data\",\n type=str,\n required=True,\n default=\"datasets/slue-voxceleb/preds/vc1/w2v2-large-lv60k-ft-slue-vc1-12h-lr1e-5-s1-mt800000-8gpu-update280000\",\n help=\"Root directory containing voxceleb1_slue data files,\"\n \"This dir should contain audio/ voxceleb1_slue_{finetune,dev,test} folders \",\n )\n args, _ = parser.parse_known_args()\n\n for subset in [\"dev\", \"test\"]:\n pred_csv = os.path.join(args.pred_data, f\"{subset}.asr-pred.tsv\")\n data = pd.read_csv(pred_csv, delimiter=\"\\t\")\n manifest_tsv = os.path.join(args.data, subset) + \".tsv\"\n output_tsv = os.path.join(args.data, subset) + \".pred.wrd\"\n\n try:\n fid = open(output_tsv, \"w\")\n for line in open(manifest_tsv).readlines()[1:]:\n fileid, _ = line.strip().split(\"\\t\")\n fileid = (\n f\"{fileid.split('.flac')[0]}-1.flac\" # temp. need to delete future\n )\n fid.write(f\"{list(data.pred_text[data.filename==fileid])[0]}\\n\")\n fid.close()\n print(f\"Successfully generated file at {output_tsv}\")\n\n except:\n print(f\"something wrong when generating {output_tsv}\")\n return\n\n\nif __name__ == \"__main__\":\n main()\n",
"import os\nimport fire\nimport numpy as np\nimport pandas as pd\nimport shutil\nimport soundfile\nimport re\n\nfrom slue_toolkit.prepare import data_utils\n\nsplits = {\"fine-tune\", \"dev\", \"test\"}\n\n\ndef create_split(\n input_dir=\"/persist/data/voxpopuli\", output_dir=\"/persist/data/slue-voxpopuli\"\n):\n os.makedirs(output_dir, exist_ok=True)\n for split in splits:\n print(f\"processing {split}\")\n split_dir = os.path.join(output_dir, split)\n os.makedirs(split_dir, exist_ok=True)\n metafile = os.path.join(input_dir, f\"voxpopuli_asr_en_{split}_ner.tsv\")\n shutil.copy(metafile, os.path.join(output_dir, f\"{split}.tsv\"))\n df = pd.read_csv(metafile, sep=\"\\t\")\n for id in df[\"id\"].array:\n filename = os.path.join(\n input_dir, f\"voxpopuli_asr_en/transcribed_data/en/{id[:4]}/{id}.ogg\"\n )\n shutil.copy(filename, split_dir)\n\n\ndef create_manifest(\n data_dir=\"datasets/slue-voxpopuli\",\n manifest_dir=\"manifest/slue-voxpopuli\",\n is_blind=True,\n):\n os.makedirs(manifest_dir, exist_ok=True)\n for split in splits:\n if (split == \"test\") and is_blind:\n df = pd.read_csv(\n os.path.join(data_dir, f\"slue-voxpopuli_{split}_blind.tsv\"), sep=\"\\t\"\n )\n else:\n df = pd.read_csv(\n os.path.join(data_dir, f\"slue-voxpopuli_{split}.tsv\"), sep=\"\\t\"\n )\n\n with open(os.path.join(manifest_dir, f\"{split}.tsv\"), \"w\") as f:\n print(os.path.abspath(os.path.join(data_dir, split)), file=f)\n for uid in df[\"id\"].array:\n frames = soundfile.info(\n os.path.join(data_dir, split, f\"{uid}.ogg\")\n ).frames\n print(f\"{uid}.ogg\\t{frames}\", file=f)\n\n if not (split == \"test\") and is_blind:\n with open(os.path.join(manifest_dir, f\"{split}.wrd\"), \"w\") as f:\n for text in df[\"normalized_text\"].array:\n text = re.sub(r\"[\\.;?!]\", \"\", text)\n text = re.sub(r\"\\s+\", \" \", text)\n print(text, file=f)\n\n with open(os.path.join(manifest_dir, f\"{split}.ltr\"), \"w\") as f:\n for text in df[\"normalized_text\"].array:\n text = re.sub(r\"[\\.;?!]\", \"\", text)\n text = re.sub(r\"\\s+\", \" \", text)\n print(\" \".join(text.replace(\" \", \"|\")), file=f)\n\n # prepare NER files (for Fairseq and HugginFace)\n for sub_dir_name in [\"e2e_ner\", \"nlp_ner\"]:\n os.makedirs(os.path.join(manifest_dir, sub_dir_name), exist_ok=True)\n for label_type in [\"raw\", \"combined\"]:\n wrd_fn = os.path.join(\n manifest_dir, \"e2e_ner\", f\"{split}_{label_type}.wrd\"\n )\n ltr_fn = os.path.join(\n manifest_dir, \"e2e_ner\", f\"{split}_{label_type}.ltr\"\n )\n tsv_fn = os.path.join(\n manifest_dir, \"nlp_ner\", f\"{split}_{label_type}.tsv\"\n )\n with open(wrd_fn, \"w\") as f_wrd, open(ltr_fn, \"w\") as f_ltr, open(\n tsv_fn, \"w\"\n ) as f_tsv:\n for data_sample in df.iterrows():\n entity_pair_str = data_utils.prep_text_ner_tsv(\n data_sample[1].normalized_text,\n data_sample[1].normalized_ner,\n label_type,\n )\n print(entity_pair_str, file=f_tsv, end=\"\")\n wrd_str, ltr_str = data_utils.prep_e2e_ner_files(\n entity_pair_str, label_type\n )\n print(wrd_str, file=f_wrd)\n print(ltr_str, file=f_ltr)\n\n\nif __name__ == \"__main__\":\n fire.Fire()\n"
] |
[
[
"torch.from_numpy",
"numpy.stack",
"torch.no_grad",
"torch.cuda.is_available",
"torch.flatten",
"numpy.load",
"numpy.array"
],
[
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
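process_predictions in infer_asr.py above returns editdistance.eval(hyp_words, tgt_words) alongside the reference length, and main() accumulates these into errs_t / lengths_t before reporting WER = errs_t * 100.0 / lengths_t. Below is a minimal sketch of that corpus-level WER computation; the example sentence pairs are illustrative.

import editdistance

def corpus_wer(pairs):
    """pairs: iterable of (reference, hypothesis) strings. Returns WER in percent."""
    errs, lengths = 0, 0
    for ref, hyp in pairs:
        ref_words, hyp_words = ref.split(), hyp.split()
        errs += editdistance.eval(hyp_words, ref_words)   # word-level edit distance
        lengths += len(ref_words)
    return errs * 100.0 / lengths if lengths > 0 else None

pairs = [
    ("the cat sat on the mat", "the cat sat on mat"),
    ("hello world", "hello word"),
]
print(corpus_wer(pairs))  # (1 deletion + 1 substitution) / 8 reference words = 25.0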
wwarriner/RVGAN
|
[
"d76042da976fb8de7e0e6984cc9d61ba0f4b9970"
] |
[
"eval.py"
] |
[
"import argparse\nfrom pathlib import Path, PurePath\n\nimport numpy as np\n\nimport src.data\nimport src.file_util\nimport src.image_util\nimport src.model\n\n\ndef eval(\n image_chunks: np.ndarray,\n mask_chunks: np.ndarray,\n downscale_factor: int,\n g_c: src.data.ModelFile,\n g_f: src.data.ModelFile,\n) -> np.ndarray:\n dummy_label_chunks = np.zeros_like(mask_chunks)\n dummy_images_per_batch = 1\n dataset = src.data.Dataset(\n XA_fr=image_chunks,\n XB_fr=mask_chunks,\n XC_fr=dummy_label_chunks,\n downscale_factor=downscale_factor,\n images_per_batch=dummy_images_per_batch,\n g_f_arch=g_f.model,\n g_c_arch=g_c.model,\n )\n\n g_c.model.trainable = False\n g_f.model.trainable = False\n data = dataset.get_full_data()\n out = data[\"XC_fx\"]\n return out # type: ignore\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_folder\", type=str, required=True)\n parser.add_argument(\"--input_folder\", type=str, required=True)\n parser.add_argument(\"--output_folder\", type=str, required=True)\n parser.add_argument(\"--config_file\", type=str, default=\"config.yaml\")\n parser.add_argument(\"--fov_scale_factor\", type=float, default=1.0)\n parser.add_argument(\"--image_extension\", type=str, default=\".png\")\n parser.add_argument(\"--mask_extension\", type=str, default=\".png\")\n args = parser.parse_args()\n\n model_folder = PurePath(args.model_folder)\n assert src.file_util.check_folder(model_folder)\n\n input_folder = PurePath(args.input_folder)\n assert src.file_util.check_folder(input_folder)\n\n output_folder = PurePath(args.output_folder)\n # no check, this gets created\n\n config_file = PurePath(args.config_file)\n assert src.file_util.check_file(config_file)\n\n fov_scale_factor = args.fov_scale_factor\n assert 0.0 < fov_scale_factor\n\n image_extension = src.file_util.fix_ext(args.image_extension)\n mask_extension = src.file_util.fix_ext(args.mask_extension)\n\n config = src.file_util.read_yaml(config_file)\n input_shape_px = np.array(config[\"arch\"][\"input_size\"])\n downscale_factor = config[\"arch\"][\"downscale_factor\"]\n\n # LOAD MODELS\n print(\"loading models...\")\n arch_factory = src.model.ArchFactory(\n input_shape_px=input_shape_px, downscale_factor=downscale_factor\n )\n\n g_c_arch = arch_factory.build_generator(scale_type=\"coarse\")\n g_c = src.data.ModelFile(name=\"g_c\", folder=model_folder, arch=g_c_arch)\n g_c.load(version=\"latest\")\n\n g_f_arch = arch_factory.build_generator(scale_type=\"fine\")\n g_f = src.data.ModelFile(name=\"g_f\", folder=model_folder, arch=g_f_arch)\n g_f.load(version=\"latest\")\n\n # LOAD AND PROCESS IMAGES\n print(\"evaluating...\")\n image_files = src.file_util.glob(\n folder=input_folder / \"image\", pattern=\"*\" + image_extension\n )\n mask_files = src.file_util.glob(\n folder=input_folder / \"mask\", pattern=\"*\" + mask_extension\n )\n\n for image_file, mask_file in zip(image_files, mask_files):\n print(str(image_file))\n\n image = src.image_util.load_image(path=image_file)\n image = src.image_util.rescale(image=image, factor=fov_scale_factor)\n image = src.image_util.rescaled_to_intensity(image=image)\n image = src.image_util.intensity_to_input(image=image)\n image_chunks = src.image_util.image_to_chunks(\n image, chunk_shape_px=input_shape_px, stride_px=input_shape_px\n )\n\n mask = src.image_util.load_image(path=mask_file)\n mask = src.image_util.rescale(image=mask, factor=fov_scale_factor)\n mask = src.image_util.rescaled_to_binary(image=mask)\n mask = 
src.image_util.binary_to_input(image=mask)\n mask_chunks = src.image_util.image_to_chunks(\n mask, chunk_shape_px=input_shape_px, stride_px=input_shape_px\n )\n\n label_chunks = eval(\n image_chunks=image_chunks,\n mask_chunks=mask_chunks,\n downscale_factor=downscale_factor,\n g_c=g_c,\n g_f=g_f,\n )\n\n image_shape_space_px = src.image_util.get_shape_space_px(image=image)\n label = src.image_util.chunks_to_image(\n chunks=label_chunks,\n image_shape_space_px=image_shape_space_px,\n stride_px=input_shape_px,\n )\n label = src.image_util.output_to_binary(image=label, threshold=0.5)\n Path(output_folder).mkdir(parents=True, exist_ok=True)\n label_file = output_folder / (image_file.stem + \".png\")\n src.image_util.save_image(path=label_file, image=label)\n"
] |
[
[
"numpy.array",
"numpy.zeros_like"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
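eval.py above tiles each input with src.image_util.image_to_chunks using stride_px equal to the chunk size and stitches predictions back with chunks_to_image. Below is a minimal numpy sketch of that non-overlapping tiling and reassembly, assuming image dimensions divisible by the chunk size; it is an illustrative stand-in, not the repository's src.image_util implementation.

import numpy as np

def image_to_chunks(image: np.ndarray, chunk: int) -> np.ndarray:
    """Split an (H, W, C) image into (N, chunk, chunk, C) non-overlapping tiles."""
    h, w, c = image.shape
    return (image
            .reshape(h // chunk, chunk, w // chunk, chunk, c)
            .swapaxes(1, 2)
            .reshape(-1, chunk, chunk, c))

def chunks_to_image(tiles: np.ndarray, image_shape: tuple) -> np.ndarray:
    """Inverse of image_to_chunks for the same chunk size and image shape."""
    h, w, c = image_shape
    chunk = tiles.shape[1]
    return (tiles
            .reshape(h // chunk, w // chunk, chunk, chunk, c)
            .swapaxes(1, 2)
            .reshape(h, w, c))

img = np.arange(4 * 6 * 1).reshape(4, 6, 1)
tiles = image_to_chunks(img, 2)           # 6 tiles of shape (2, 2, 1)
assert np.array_equal(chunks_to_image(tiles, img.shape), img)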
irrelevantRyan/leafmap
|
[
"20c349b69c057881846a7c174c1970edcba11d81"
] |
[
"leafmap/common.py"
] |
[
"\"\"\"This module contains some common functions for both folium and ipyleaflet.\n\"\"\"\n\nimport csv\nimport os\nimport requests\nimport shutil\nimport tarfile\nimport urllib.request\nimport zipfile\nimport folium\nimport ipyleaflet\nimport ipywidgets as widgets\nimport whitebox\nfrom IPython.display import display, IFrame\n\n\nclass TitilerEndpoint:\n \"\"\"This class contains the methods for the titiler endpoint.\"\"\"\n\n def __init__(\n self,\n endpoint=\"https://titiler.xyz\",\n name=\"stac\",\n TileMatrixSetId=\"WebMercatorQuad\",\n ):\n \"\"\"Initialize the TitilerEndpoint object.\n\n Args:\n endpoint (str, optional): The endpoint of the titiler server. Defaults to \"https://titiler.xyz\".\n name (str, optional): The name to be used in the file path. Defaults to \"stac\".\n TileMatrixSetId (str, optional): The TileMatrixSetId to be used in the file path. Defaults to \"WebMercatorQuad\".\n \"\"\"\n self.endpoint = endpoint\n self.name = name\n self.TileMatrixSetId = TileMatrixSetId\n\n def url_for_stac_item(self):\n return f\"{self.endpoint}/{self.name}/{self.TileMatrixSetId}/tilejson.json\"\n\n def url_for_stac_assets(self):\n return f\"{self.endpoint}/{self.name}/assets\"\n\n def url_for_stac_bounds(self):\n return f\"{self.endpoint}/{self.name}/bounds\"\n\n def url_for_stac_info(self):\n return f\"{self.endpoint}/{self.name}/info\"\n\n def url_for_stac_info_geojson(self):\n return f\"{self.endpoint}/{self.name}/info.geojson\"\n\n def url_for_stac_statistics(self):\n return f\"{self.endpoint}/{self.name}/statistics\"\n\n def url_for_stac_pixel_value(self, lon, lat):\n return f\"{self.endpoint}/{self.name}/point/{lon},{lat}\"\n\n def url_for_stac_wmts(self):\n return (\n f\"{self.endpoint}/{self.name}/{self.TileMatrixSetId}/WMTSCapabilities.xml\"\n )\n\n\nclass PlanetaryComputerEndpoint(TitilerEndpoint):\n \"\"\"This class contains the methods for the Microsoft Planetary Computer endpoint.\"\"\"\n\n def __init__(\n self,\n endpoint=\"https://planetarycomputer.microsoft.com/api/data/v1\",\n name=\"item\",\n TileMatrixSetId=\"WebMercatorQuad\",\n ):\n \"\"\"Initialize the PlanetaryComputerEndpoint object.\n\n Args:\n endpoint (str, optional): The endpoint of the titiler server. Defaults to \"https://planetarycomputer.microsoft.com/api/data/v1\".\n name (str, optional): The name to be used in the file path. Defaults to \"item\".\n TileMatrixSetId (str, optional): The TileMatrixSetId to be used in the file path. 
Defaults to \"WebMercatorQuad\".\n \"\"\"\n super().__init__(endpoint, name, TileMatrixSetId)\n\n def url_for_stac_collection(self):\n return f\"{self.endpoint}/collection/{self.TileMatrixSetId}/tilejson.json\"\n\n def url_for_collection_assets(self):\n return f\"{self.endpoint}/collection/assets\"\n\n def url_for_collection_bounds(self):\n return f\"{self.endpoint}/collection/bounds\"\n\n def url_for_collection_info(self):\n return f\"{self.endpoint}/collection/info\"\n\n def url_for_collection_info_geojson(self):\n return f\"{self.endpoint}/collection/info.geojson\"\n\n def url_for_collection_pixel_value(self, lon, lat):\n return f\"{self.endpoint}/collection/point/{lon},{lat}\"\n\n def url_for_collection_wmts(self):\n return f\"{self.endpoint}/collection/{self.TileMatrixSetId}/WMTSCapabilities.xml\"\n\n def url_for_collection_lat_lon_assets(self, lng, lat):\n return f\"{self.endpoint}/collection/{lng},{lat}/assets\"\n\n def url_for_collection_bbox_assets(self, minx, miny, maxx, maxy):\n return f\"{self.endpoint}/collection/{minx},{miny},{maxx},{maxy}/assets\"\n\n def url_for_stac_mosaic(self, searchid):\n return f\"{self.endpoint}/mosaic/{searchid}/{self.TileMatrixSetId}/tilejson.json\"\n\n def url_for_mosaic_info(self, searchid):\n return f\"{self.endpoint}/mosaic/{searchid}/info\"\n\n def url_for_mosaic_lat_lon_assets(self, searchid, lon, lat):\n return f\"{self.endpoint}/mosaic/{searchid}/{lon},{lat}/assets\"\n\n\ndef check_titiler_endpoint(titiler_endpoint=None):\n \"\"\"Returns the default titiler endpoint.\n\n Returns:\n object: A titiler endpoint.\n \"\"\"\n if titiler_endpoint is None:\n if os.environ.get(\"TITILER_ENDPOINT\") == \"planetary-computer\":\n titiler_endpoint = PlanetaryComputerEndpoint()\n else:\n titiler_endpoint = TitilerEndpoint()\n elif titiler_endpoint in [\"planetary-computer\", \"pc\"]:\n titiler_endpoint = PlanetaryComputerEndpoint()\n\n return titiler_endpoint\n\n\nclass WhiteboxTools(whitebox.WhiteboxTools):\n \"\"\"This class inherits the whitebox WhiteboxTools class.\"\"\"\n\n def __init__(self, **kwargs):\n\n super().__init__(**kwargs)\n\n\ndef whiteboxgui(verbose=True, tree=False, reset=False, sandbox_path=None):\n \"\"\"Shows the WhiteboxTools GUI.\n\n Args:\n verbose (bool, optional): Whether to show progress info when the tool is running. Defaults to True.\n tree (bool, optional): Whether to use the tree mode toolbox built using ipytree rather than ipywidgets. Defaults to False.\n reset (bool, optional): Whether to regenerate the json file with the dictionary containing the information for all tools. Defaults to False.\n sandbox_path (str, optional): The path to the sandbox folder. Defaults to None.\n\n Returns:\n object: A toolbox GUI.\n \"\"\"\n import whiteboxgui\n\n return whiteboxgui.show(verbose, tree, reset, sandbox_path)\n\n\ndef _in_colab_shell():\n \"\"\"Tests if the code is being executed within Google Colab.\"\"\"\n import sys\n\n if \"google.colab\" in sys.modules:\n return True\n else:\n return False\n\n\ndef _is_drive_mounted():\n \"\"\"Checks whether Google Drive is mounted in Google Colab.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n \"\"\"\n drive_path = \"/content/drive/My Drive\"\n if os.path.exists(drive_path):\n return True\n else:\n return False\n\n\ndef set_proxy(port=1080, ip=\"http://127.0.0.1\"):\n \"\"\"Sets proxy if needed. This is only needed for countries where Google services are not available.\n\n Args:\n port (int, optional): The proxy port number. 
Defaults to 1080.\n ip (str, optional): The IP address. Defaults to 'http://127.0.0.1'.\n \"\"\"\n\n try:\n\n if not ip.startswith(\"http\"):\n ip = \"http://\" + ip\n proxy = \"{}:{}\".format(ip, port)\n\n os.environ[\"HTTP_PROXY\"] = proxy\n os.environ[\"HTTPS_PROXY\"] = proxy\n\n a = requests.get(\"https://google.com\")\n\n if a.status_code != 200:\n print(\n \"Failed to connect to Google services. Please double check the port number and ip address.\"\n )\n\n except Exception as e:\n raise Exception(e)\n\n\ndef _check_install(package):\n \"\"\"Checks whether a package is installed. If not, it will install the package.\n\n Args:\n package (str): The name of the package to check.\n \"\"\"\n import subprocess\n\n try:\n __import__(package)\n # print('{} is already installed.'.format(package))\n except ImportError:\n print(\"{} is not installed. Installing ...\".format(package))\n try:\n subprocess.check_call([\"python\", \"-m\", \"pip\", \"install\", package])\n except Exception as e:\n print(\"Failed to install {}\".format(package))\n print(e)\n print(\"{} has been installed successfully.\".format(package))\n\n\ndef update_package():\n \"\"\"Updates the leafmap package from the leafmap GitHub repository without the need to use pip or conda.\n In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.\n\n \"\"\"\n\n try:\n download_dir = os.path.join(os.path.expanduser(\"~\"), \"Downloads\")\n if not os.path.exists(download_dir):\n os.makedirs(download_dir)\n _clone_repo(out_dir=download_dir)\n\n pkg_dir = os.path.join(download_dir, \"leafmap-master\")\n work_dir = os.getcwd()\n os.chdir(pkg_dir)\n\n if shutil.which(\"pip\") is None:\n cmd = \"pip3 install .\"\n else:\n cmd = \"pip install .\"\n\n os.system(cmd)\n os.chdir(work_dir)\n\n print(\n \"\\nPlease comment out 'leafmap.update_package()' and restart the kernel to take effect:\\nJupyter menu -> Kernel -> Restart & Clear Output\"\n )\n\n except Exception as e:\n raise Exception(e)\n\n\ndef check_package(name, URL=\"\"):\n\n try:\n __import__(name.lower())\n except Exception:\n raise ImportError(\n f\"{name} is not installed. Please install it before proceeding. {URL}\"\n )\n\n\ndef _clone_repo(out_dir=\".\", unzip=True):\n \"\"\"Clones the leafmap GitHub repository.\n\n Args:\n out_dir (str, optional): Output folder for the repo. Defaults to '.'.\n unzip (bool, optional): Whether to unzip the repository. 
Defaults to True.\n \"\"\"\n url = \"https://github.com/giswqs/leafmap/archive/master.zip\"\n filename = \"leafmap-master.zip\"\n download_from_url(url, out_file_name=filename, out_dir=out_dir, unzip=unzip)\n\n\ndef __install_from_github(url):\n \"\"\"Install a package from a GitHub repository.\n\n Args:\n url (str): The URL of the GitHub repository.\n \"\"\"\n\n try:\n download_dir = os.path.join(os.path.expanduser(\"~\"), \"Downloads\")\n if not os.path.exists(download_dir):\n os.makedirs(download_dir)\n\n repo_name = os.path.basename(url)\n zip_url = os.path.join(url, \"archive/master.zip\")\n filename = repo_name + \"-master.zip\"\n download_from_url(\n url=zip_url, out_file_name=filename, out_dir=download_dir, unzip=True\n )\n\n pkg_dir = os.path.join(download_dir, repo_name + \"-master\")\n pkg_name = os.path.basename(url)\n work_dir = os.getcwd()\n os.chdir(pkg_dir)\n print(\"Installing {}...\".format(pkg_name))\n cmd = \"pip install .\"\n os.system(cmd)\n os.chdir(work_dir)\n print(\"{} has been installed successfully.\".format(pkg_name))\n # print(\"\\nPlease comment out 'install_from_github()' and restart the kernel to take effect:\\nJupyter menu -> Kernel -> Restart & Clear Output\")\n\n except Exception as e:\n raise Exception(e)\n\n\ndef _check_git_install():\n \"\"\"Checks if Git is installed.\n\n Returns:\n bool: Returns True if Git is installed, otherwise returns False.\n \"\"\"\n import webbrowser\n\n cmd = \"git --version\"\n output = os.popen(cmd).read()\n\n if \"git version\" in output:\n return True\n else:\n url = \"https://git-scm.com/downloads\"\n print(\n \"Git is not installed. Please download Git from {} and install it.\".format(\n url\n )\n )\n webbrowser.open_new_tab(url)\n return False\n\n\ndef _clone_github_repo(url, out_dir):\n \"\"\"Clones a GitHub repository.\n\n Args:\n url (str): The link to the GitHub repository\n out_dir (str): The output directory for the cloned repository.\n \"\"\"\n\n repo_name = os.path.basename(url)\n # url_zip = os.path.join(url, 'archive/master.zip')\n url_zip = url + \"/archive/master.zip\"\n\n if os.path.exists(out_dir):\n print(\n \"The specified output directory already exists. Please choose a new directory.\"\n )\n return\n\n parent_dir = os.path.dirname(out_dir)\n out_file_path = os.path.join(parent_dir, repo_name + \".zip\")\n\n try:\n urllib.request.urlretrieve(url_zip, out_file_path)\n except Exception:\n print(\"The provided URL is invalid. Please double check the URL.\")\n return\n\n with zipfile.ZipFile(out_file_path, \"r\") as zip_ref:\n zip_ref.extractall(parent_dir)\n\n src = out_file_path.replace(\".zip\", \"-master\")\n os.rename(src, out_dir)\n os.remove(out_file_path)\n\n\ndef _is_tool(name):\n \"\"\"Check whether `name` is on PATH and marked as executable.\"\"\"\n\n return shutil.which(name) is not None\n\n\ndef random_string(string_length=3):\n \"\"\"Generates a random string of fixed length.\n\n Args:\n string_length (int, optional): Fixed length. 
Defaults to 3.\n\n Returns:\n str: A random string\n \"\"\"\n import random\n import string\n\n # random.seed(1001)\n letters = string.ascii_lowercase\n return \"\".join(random.choice(letters) for i in range(string_length))\n\n\ndef open_image_from_url(url):\n \"\"\"Loads an image from the specified URL.\n\n Args:\n url (str): URL of the image.\n\n Returns:\n object: Image object.\n \"\"\"\n from PIL import Image\n\n from io import BytesIO\n\n # from urllib.parse import urlparse\n\n try:\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n return img\n except Exception as e:\n print(e)\n\n\ndef show_image(img_path, width=None, height=None):\n \"\"\"Shows an image within Jupyter notebook.\n\n Args:\n img_path (str): The image file path.\n width (int, optional): Width of the image in pixels. Defaults to None.\n height (int, optional): Height of the image in pixels. Defaults to None.\n\n \"\"\"\n from IPython.display import display\n\n try:\n out = widgets.Output()\n # layout={'border': '1px solid black'})\n # layout={'border': '1px solid black', 'width': str(width + 20) + 'px', 'height': str(height + 10) + 'px'},)\n out.clear_output(wait=True)\n display(out)\n with out:\n file = open(img_path, \"rb\")\n image = file.read()\n if (width is None) and (height is None):\n display(widgets.Image(value=image))\n elif (width is not None) and (height is not None):\n display(widgets.Image(value=image, width=width, height=height))\n else:\n print(\"You need set both width and height.\")\n return\n except Exception as e:\n raise Exception(e)\n\n\ndef has_transparency(img):\n \"\"\"Checks whether an image has transparency.\n\n Args:\n img (object): a PIL Image object.\n\n Returns:\n bool: True if it has transparency, False otherwise.\n \"\"\"\n\n if img.mode == \"P\":\n transparent = img.info.get(\"transparency\", -1)\n for _, index in img.getcolors():\n if index == transparent:\n return True\n elif img.mode == \"RGBA\":\n extrema = img.getextrema()\n if extrema[3][0] < 255:\n return True\n\n return False\n\n\ndef upload_to_imgur(in_gif):\n \"\"\"Uploads an image to imgur.com\n\n Args:\n in_gif (str): The file path to the image.\n \"\"\"\n import subprocess\n\n pkg_name = \"imgur-uploader\"\n if not _is_tool(pkg_name):\n _check_install(pkg_name)\n\n try:\n IMGUR_API_ID = os.environ.get(\"IMGUR_API_ID\", None)\n IMGUR_API_SECRET = os.environ.get(\"IMGUR_API_SECRET\", None)\n credentials_path = os.path.join(\n os.path.expanduser(\"~\"), \".config/imgur_uploader/uploader.cfg\"\n )\n\n if (\n (IMGUR_API_ID is not None) and (IMGUR_API_SECRET is not None)\n ) or os.path.exists(credentials_path):\n\n proc = subprocess.Popen([\"imgur-uploader\", in_gif], stdout=subprocess.PIPE)\n for _ in range(0, 2):\n line = proc.stdout.readline()\n print(line.rstrip().decode(\"utf-8\"))\n # while True:\n # line = proc.stdout.readline()\n # if not line:\n # break\n # print(line.rstrip().decode(\"utf-8\"))\n else:\n print(\n \"Imgur API credentials could not be found. Please check https://pypi.org/project/imgur-uploader/ for instructions on how to get Imgur API credentials\"\n )\n return\n\n except Exception as e:\n raise Exception(e)\n\n\ndef rgb_to_hex(rgb=(255, 255, 255)):\n \"\"\"Converts RGB to hex color. In RGB color R stands for Red, G stands for Green, and B stands for Blue, and it ranges from the decimal value of 0 – 255.\n\n Args:\n rgb (tuple, optional): RGB color code as a tuple of (red, green, blue). 
Defaults to (255, 255, 255).\n\n Returns:\n str: hex color code\n \"\"\"\n return \"%02x%02x%02x\" % rgb\n\n\ndef hex_to_rgb(value=\"FFFFFF\"):\n \"\"\"Converts hex color to RGB color.\n\n Args:\n value (str, optional): Hex color code as a string. Defaults to 'FFFFFF'.\n\n Returns:\n tuple: RGB color as a tuple.\n \"\"\"\n value = value.lstrip(\"#\")\n lv = len(value)\n return tuple(int(value[i : i + lv // 3], 16) for i in range(0, lv, lv // 3))\n\n\ndef check_color(in_color):\n \"\"\"Checks the input color and returns the corresponding hex color code.\n\n Args:\n in_color (str or tuple): It can be a string (e.g., 'red', '#ffff00') or tuple (e.g., (255, 127, 0)).\n\n Returns:\n str: A hex color code.\n \"\"\"\n import colour\n\n out_color = \"#000000\" # default black color\n if isinstance(in_color, tuple) and len(in_color) == 3:\n if all(isinstance(item, int) for item in in_color):\n rescaled_color = [x / 255.0 for x in in_color]\n out_color = colour.Color(rgb=tuple(rescaled_color))\n return out_color.hex_l\n else:\n print(\n \"RGB color must be a tuple with three integer values ranging from 0 to 255.\"\n )\n return\n else:\n try:\n out_color = colour.Color(in_color)\n return out_color.hex_l\n except Exception as e:\n print(\"The provided color is invalid. Using the default black color.\")\n print(e)\n return out_color\n\n\ndef system_fonts(show_full_path=False):\n \"\"\"Gets a list of system fonts\n\n # Common font locations:\n # Linux: /usr/share/fonts/TTF/\n # Windows: C:/Windows/Fonts\n # macOS: System > Library > Fonts\n\n Args:\n show_full_path (bool, optional): Whether to show the full path of each system font. Defaults to False.\n\n Returns:\n list: A list of system fonts.\n \"\"\"\n try:\n import matplotlib.font_manager\n\n font_list = matplotlib.font_manager.findSystemFonts(\n fontpaths=None, fontext=\"ttf\"\n )\n font_list.sort()\n\n font_names = [os.path.basename(f) for f in font_list]\n font_names.sort()\n\n if show_full_path:\n return font_list\n else:\n return font_names\n\n except Exception as e:\n raise Exception(e)\n\n\ndef download_from_url(url, out_file_name=None, out_dir=\".\", unzip=True, verbose=True):\n \"\"\"Download a file from a URL (e.g., https://github.com/giswqs/whitebox/raw/master/examples/testdata.zip)\n\n Args:\n url (str): The HTTP URL to download.\n out_file_name (str, optional): The output file name to use. Defaults to None.\n out_dir (str, optional): The output directory to use. Defaults to '.'.\n unzip (bool, optional): Whether to unzip the downloaded file if it is a zip file. Defaults to True.\n verbose (bool, optional): Whether to display or not the output of the function\n \"\"\"\n in_file_name = os.path.basename(url)\n\n if out_file_name is None:\n out_file_name = in_file_name\n out_file_path = os.path.join(os.path.abspath(out_dir), out_file_name)\n\n if verbose:\n print(\"Downloading {} ...\".format(url))\n\n try:\n urllib.request.urlretrieve(url, out_file_path)\n except Exception:\n raise Exception(\"The URL is invalid. 
Please double check the URL.\")\n\n final_path = out_file_path\n\n if unzip:\n # if it is a zip file\n if \".zip\" in out_file_name:\n if verbose:\n print(\"Unzipping {} ...\".format(out_file_name))\n with zipfile.ZipFile(out_file_path, \"r\") as zip_ref:\n zip_ref.extractall(out_dir)\n final_path = os.path.join(\n os.path.abspath(out_dir), out_file_name.replace(\".zip\", \"\")\n )\n\n # if it is a tar file\n if \".tar\" in out_file_name:\n if verbose:\n print(\"Unzipping {} ...\".format(out_file_name))\n with tarfile.open(out_file_path, \"r\") as tar_ref:\n tar_ref.extractall(out_dir)\n final_path = os.path.join(\n os.path.abspath(out_dir), out_file_name.replace(\".tart\", \"\")\n )\n\n if verbose:\n print(\"Data downloaded to: {}\".format(final_path))\n\n\ndef download_from_gdrive(gfile_url, file_name, out_dir=\".\", unzip=True, verbose=True):\n \"\"\"Download a file shared via Google Drive\n (e.g., https://drive.google.com/file/d/18SUo_HcDGltuWYZs1s7PpOmOq_FvFn04/view?usp=sharing)\n\n Args:\n gfile_url (str): The Google Drive shared file URL\n file_name (str): The output file name to use.\n out_dir (str, optional): The output directory. Defaults to '.'.\n unzip (bool, optional): Whether to unzip the output file if it is a zip file. Defaults to True.\n verbose (bool, optional): Whether to display or not the output of the function\n \"\"\"\n from google_drive_downloader import GoogleDriveDownloader as gdd\n\n file_id = gfile_url.split(\"/\")[5]\n if verbose:\n print(\"Google Drive file id: {}\".format(file_id))\n\n dest_path = os.path.join(out_dir, file_name)\n gdd.download_file_from_google_drive(file_id, dest_path, True, unzip)\n\n\ndef create_download_link(filename, title=\"Click here to download: \"):\n \"\"\"Downloads a file from voila. Adopted from https://github.com/voila-dashboards/voila/issues/578\n\n Args:\n filename (str): The file path to the file to download\n title (str, optional): str. Defaults to \"Click here to download: \".\n\n Returns:\n str: HTML download URL.\n \"\"\"\n import base64\n from IPython.display import HTML\n\n data = open(filename, \"rb\").read()\n b64 = base64.b64encode(data)\n payload = b64.decode()\n basename = os.path.basename(filename)\n html = '<a download=\"{filename}\" href=\"data:text/csv;base64,{payload}\" style=\"color:#0000FF;\" target=\"_blank\">{title}</a>'\n html = html.format(payload=payload, title=title + f\" {basename}\", filename=basename)\n return HTML(html)\n\n\ndef edit_download_html(htmlWidget, filename, title=\"Click here to download: \"):\n \"\"\"Downloads a file from voila. Adopted from https://github.com/voila-dashboards/voila/issues/578#issuecomment-617668058\n\n Args:\n htmlWidget (object): The HTML widget to display the URL.\n filename (str): File path to download.\n title (str, optional): Download description. 
Defaults to \"Click here to download: \".\n \"\"\"\n\n # from IPython.display import HTML\n # import ipywidgets as widgets\n import base64\n\n # Change widget html temporarily to a font-awesome spinner\n htmlWidget.value = '<i class=\"fa fa-spinner fa-spin fa-2x fa-fw\"></i><span class=\"sr-only\">Loading...</span>'\n\n # Process raw data\n data = open(filename, \"rb\").read()\n b64 = base64.b64encode(data)\n payload = b64.decode()\n\n basename = os.path.basename(filename)\n\n # Create and assign html to widget\n html = '<a download=\"{filename}\" href=\"data:text/csv;base64,{payload}\" target=\"_blank\">{title}</a>'\n htmlWidget.value = html.format(\n payload=payload, title=title + basename, filename=basename\n )\n\n\ndef csv_points_to_shp(in_csv, out_shp, latitude=\"latitude\", longitude=\"longitude\"):\n \"\"\"Converts a csv file containing points (latitude, longitude) into a shapefile.\n\n Args:\n in_csv (str): File path or HTTP URL to the input csv file. For example, https://raw.githubusercontent.com/giswqs/data/main/world/world_cities.csv\n out_shp (str): File path to the output shapefile.\n latitude (str, optional): Column name for the latitude column. Defaults to 'latitude'.\n longitude (str, optional): Column name for the longitude column. Defaults to 'longitude'.\n\n \"\"\"\n\n if in_csv.startswith(\"http\") and in_csv.endswith(\".csv\"):\n out_dir = os.path.join(os.path.expanduser(\"~\"), \"Downloads\")\n out_name = os.path.basename(in_csv)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n download_from_url(in_csv, out_dir=out_dir)\n in_csv = os.path.join(out_dir, out_name)\n\n wbt = whitebox.WhiteboxTools()\n in_csv = os.path.abspath(in_csv)\n out_shp = os.path.abspath(out_shp)\n\n if not os.path.exists(in_csv):\n raise Exception(\"The provided csv file does not exist.\")\n\n with open(in_csv, encoding=\"utf-8\") as csv_file:\n reader = csv.DictReader(csv_file)\n fields = reader.fieldnames\n xfield = fields.index(longitude)\n yfield = fields.index(latitude)\n\n wbt.csv_points_to_vector(in_csv, out_shp, xfield=xfield, yfield=yfield, epsg=4326)\n\n\ndef csv_to_shp(in_csv, out_shp, latitude=\"latitude\", longitude=\"longitude\"):\n \"\"\"Converts a csv file with latlon info to a point shapefile.\n\n Args:\n in_csv (str): The input csv file containing longitude and latitude columns.\n out_shp (str): The file path to the output shapefile.\n latitude (str, optional): The column name of the latitude column. Defaults to 'latitude'.\n longitude (str, optional): The column name of the longitude column. 
Defaults to 'longitude'.\n \"\"\"\n import shapefile as shp\n\n if in_csv.startswith(\"http\") and in_csv.endswith(\".csv\"):\n out_dir = os.path.join(os.path.expanduser(\"~\"), \"Downloads\")\n out_name = os.path.basename(in_csv)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n download_from_url(in_csv, out_dir=out_dir)\n in_csv = os.path.join(out_dir, out_name)\n\n out_dir = os.path.dirname(out_shp)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n try:\n points = shp.Writer(out_shp, shapeType=shp.POINT)\n with open(in_csv, encoding=\"utf-8\") as csvfile:\n csvreader = csv.DictReader(csvfile)\n header = csvreader.fieldnames\n [points.field(field) for field in header]\n for row in csvreader:\n points.point((float(row[longitude])), (float(row[latitude])))\n points.record(*tuple([row[f] for f in header]))\n\n out_prj = out_shp.replace(\".shp\", \".prj\")\n with open(out_prj, \"w\") as f:\n prj_str = 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"Degree\",0.0174532925199433]] '\n f.write(prj_str)\n\n except Exception as e:\n raise Exception(e)\n\n\ndef pandas_to_geojson(\n df,\n out_geojson=None,\n latitude=\"latitude\",\n longitude=\"longitude\",\n encoding=\"utf-8\",\n):\n \"\"\"Creates points for a Pandas DataFrame and exports data as a GeoJSON.\n\n Args:\n df (pandas.DataFrame): The input Pandas DataFrame.\n out_geojson (str): The file path to the exported GeoJSON. Default to None.\n latitude (str, optional): The name of the column containing latitude coordinates. Defaults to \"latitude\".\n longitude (str, optional): The name of the column containing longitude coordinates. Defaults to \"longitude\".\n encoding (str, optional): The encoding of characters. Defaults to \"utf-8\".\n\n \"\"\"\n\n import json\n from geojson import Feature, FeatureCollection, Point\n\n if out_geojson is not None:\n out_dir = os.path.dirname(os.path.abspath(out_geojson))\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n features = df.apply(\n lambda row: Feature(\n geometry=Point((float(row[longitude]), float(row[latitude]))),\n properties=dict(row),\n ),\n axis=1,\n ).tolist()\n\n geojson = FeatureCollection(features=features)\n\n if out_geojson is None:\n return geojson\n else:\n with open(out_geojson, \"w\", encoding=encoding) as f:\n f.write(json.dumps(geojson))\n\n\ndef csv_to_geojson(\n in_csv,\n out_geojson=None,\n latitude=\"latitude\",\n longitude=\"longitude\",\n encoding=\"utf-8\",\n):\n \"\"\"Creates points for a CSV file and exports data as a GeoJSON.\n\n Args:\n in_csv (str): The file path to the input CSV file.\n out_geojson (str): The file path to the exported GeoJSON. Default to None.\n latitude (str, optional): The name of the column containing latitude coordinates. Defaults to \"latitude\".\n longitude (str, optional): The name of the column containing longitude coordinates. Defaults to \"longitude\".\n encoding (str, optional): The encoding of characters. 
Defaults to \"utf-8\".\n\n \"\"\"\n\n import json\n import pandas as pd\n\n if out_geojson is not None:\n out_dir = os.path.dirname(os.path.abspath(out_geojson))\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n df = pd.read_csv(in_csv)\n geojson = pandas_to_geojson(\n df, latitude=latitude, longitude=longitude, encoding=encoding\n )\n\n if out_geojson is None:\n return geojson\n else:\n with open(out_geojson, \"w\", encoding=encoding) as f:\n f.write(json.dumps(geojson))\n\n\ndef csv_to_gdf(in_csv, latitude=\"latitude\", longitude=\"longitude\", encoding=\"utf-8\"):\n \"\"\"Creates points for a CSV file and converts them to a GeoDataFrame.\n\n Args:\n in_csv (str): The file path to the input CSV file.\n latitude (str, optional): The name of the column containing latitude coordinates. Defaults to \"latitude\".\n longitude (str, optional): The name of the column containing longitude coordinates. Defaults to \"longitude\".\n encoding (str, optional): The encoding of characters. Defaults to \"utf-8\".\n\n Returns:\n object: GeoDataFrame.\n \"\"\"\n\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n\n import geopandas as gpd\n\n out_dir = os.getcwd()\n\n out_geojson = os.path.join(out_dir, random_string() + \".geojson\")\n csv_to_geojson(in_csv, out_geojson, latitude, longitude, encoding)\n\n gdf = gpd.read_file(out_geojson)\n os.remove(out_geojson)\n return gdf\n\n\ndef create_code_cell(code=\"\", where=\"below\"):\n \"\"\"Creates a code cell in the IPython Notebook.\n\n Args:\n code (str, optional): Code to fill the new code cell with. Defaults to ''.\n where (str, optional): Where to add the new code cell. It can be one of the following: above, below, at_bottom. Defaults to 'below'.\n \"\"\"\n\n import base64\n from IPython.display import Javascript, display\n\n encoded_code = (base64.b64encode(str.encode(code))).decode()\n display(\n Javascript(\n \"\"\"\n var code = IPython.notebook.insert_cell_{0}('code');\n code.set_text(atob(\"{1}\"));\n \"\"\".format(\n where, encoded_code\n )\n )\n )\n\n\ndef cog_tile(url, titiler_endpoint=\"https://titiler.xyz\", **kwargs):\n \"\"\"Get a tile layer from a Cloud Optimized GeoTIFF (COG).\n Source code adapted from https://developmentseed.org/titiler/examples/notebooks/Working_with_CloudOptimizedGeoTIFF_simple/\n\n Args:\n url (str): HTTP URL to a COG, e.g., https://opendata.digitalglobe.com/events/mauritius-oil-spill/post-event/2020-08-12/105001001F1B5B00/105001001F1B5B00.tif\n titiler_endpoint (str, optional): Titiler endpoint. Defaults to \"https://titiler.xyz\".\n\n Returns:\n tuple: Returns the COG Tile layer URL and bounds.\n \"\"\"\n\n kwargs[\"url\"] = url\n\n TileMatrixSetId = \"WebMercatorQuad\"\n if \"TileMatrixSetId\" in kwargs.keys():\n TileMatrixSetId = kwargs[\"TileMatrixSetId\"]\n kwargs.pop(\"TileMatrixSetId\")\n\n r = requests.get(\n f\"{titiler_endpoint}/cog/{TileMatrixSetId}/tilejson.json\", params=kwargs\n ).json()\n\n return r[\"tiles\"][0]\n\n\ndef cog_mosaic(\n links,\n titiler_endpoint=\"https://titiler.xyz\",\n username=\"anonymous\",\n layername=None,\n overwrite=False,\n verbose=True,\n **kwargs,\n):\n \"\"\"Creates a COG mosaic from a list of COG URLs.\n\n Args:\n links (list): A list containing COG HTTP URLs.\n titiler_endpoint (str, optional): Titiler endpoint. Defaults to \"https://titiler.xyz\".\n username (str, optional): User name for the titiler endpoint. Defaults to \"anonymous\".\n layername ([type], optional): Layer name to use. 
\n\n if layername is None:\n layername = \"layer_\" + random_string(5)\n\n try:\n if verbose:\n print(\"Creating COG mosaic ...\")\n\n # Create token\n r = requests.post(\n f\"{titiler_endpoint}/tokens/create\",\n json={\"username\": username, \"scope\": [\"mosaic:read\", \"mosaic:create\"]},\n ).json()\n token = r[\"token\"]\n\n # Create mosaic\n requests.post(\n f\"{titiler_endpoint}/mosaicjson/create\",\n json={\n \"username\": username,\n \"layername\": layername,\n \"files\": links,\n # \"overwrite\": overwrite\n },\n params={\n \"access_token\": token,\n },\n ).json()\n\n r2 = requests.get(\n f\"{titiler_endpoint}/mosaicjson/{username}.{layername}/tilejson.json\",\n ).json()\n\n return r2[\"tiles\"][0]\n\n except Exception as e:\n raise Exception(e)\n\n\ndef cog_mosaic_from_file(\n filepath,\n skip_rows=0,\n titiler_endpoint=\"https://titiler.xyz\",\n username=\"anonymous\",\n layername=None,\n overwrite=False,\n verbose=True,\n **kwargs,\n):\n \"\"\"Creates a COG mosaic from a csv/txt file stored locally or accessible through an HTTP URL.\n\n Args:\n filepath (str): Local path or HTTP URL to the csv/txt file containing COG URLs.\n skip_rows (int, optional): The number of rows to skip in the file. Defaults to 0.\n titiler_endpoint (str, optional): Titiler endpoint. Defaults to \"https://titiler.xyz\".\n username (str, optional): User name for the titiler endpoint. Defaults to \"anonymous\".\n layername (str, optional): Layer name to use. Defaults to None.\n overwrite (bool, optional): Whether to overwrite the layer name if existing. Defaults to False.\n verbose (bool, optional): Whether to print out descriptive information. Defaults to True.\n\n Returns:\n str: The tile URL for the COG mosaic.\n \"\"\"\n import urllib.request\n\n links = []\n if filepath.startswith(\"http\"):\n data = urllib.request.urlopen(filepath)\n for line in data:\n links.append(line.decode(\"utf-8\").strip())\n\n else:\n with open(filepath) as f:\n links = [line.strip() for line in f.readlines()]\n\n links = links[skip_rows:]\n # print(links)\n mosaic = cog_mosaic(\n links, titiler_endpoint, username, layername, overwrite, verbose, **kwargs\n )\n return mosaic\n\n\ndef cog_bounds(url, titiler_endpoint=\"https://titiler.xyz\"):\n \"\"\"Get the bounding box of a Cloud Optimized GeoTIFF (COG).\n\n Args:\n url (str): HTTP URL to a COG, e.g., https://opendata.digitalglobe.com/events/mauritius-oil-spill/post-event/2020-08-12/105001001F1B5B00/105001001F1B5B00.tif\n titiler_endpoint (str, optional): Titiler endpoint. Defaults to \"https://titiler.xyz\".\n\n Returns:\n list: A list of values representing [left, bottom, right, top]\n \"\"\"\n\n r = requests.get(f\"{titiler_endpoint}/cog/bounds\", params={\"url\": url}).json()\n\n if \"bounds\" in r.keys():\n bounds = r[\"bounds\"]\n else:\n bounds = None\n return bounds\n\n\ndef cog_center(url, titiler_endpoint=\"https://titiler.xyz\"):\n \"\"\"Get the centroid of a Cloud Optimized GeoTIFF (COG).\n\n Args:\n url (str): HTTP URL to a COG, e.g., https://opendata.digitalglobe.com/events/mauritius-oil-spill/post-event/2020-08-12/105001001F1B5B00/105001001F1B5B00.tif\n titiler_endpoint (str, optional): Titiler endpoint. 
Defaults to \"https://titiler.xyz\".\n\n Returns:\n tuple: A tuple representing (longitude, latitude)\n \"\"\"\n bounds = cog_bounds(url, titiler_endpoint)\n center = ((bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2) # (lat, lon)\n return center\n\n\ndef cog_bands(url, titiler_endpoint=\"https://titiler.xyz\"):\n \"\"\"Get band names of a Cloud Optimized GeoTIFF (COG).\n\n Args:\n url (str): HTTP URL to a COG, e.g., https://opendata.digitalglobe.com/events/mauritius-oil-spill/post-event/2020-08-12/105001001F1B5B00/105001001F1B5B00.tif\n titiler_endpoint (str, optional): Titiler endpoint. Defaults to \"https://titiler.xyz\".\n\n Returns:\n list: A list of band names\n \"\"\"\n\n r = requests.get(\n f\"{titiler_endpoint}/cog/info\",\n params={\n \"url\": url,\n },\n ).json()\n\n bands = [b[0] for b in r[\"band_descriptions\"]]\n return bands\n\n\ndef cog_stats(url, titiler_endpoint=\"https://titiler.xyz\"):\n \"\"\"Get band statistics of a Cloud Optimized GeoTIFF (COG).\n\n Args:\n url (str): HTTP URL to a COG, e.g., https://opendata.digitalglobe.com/events/mauritius-oil-spill/post-event/2020-08-12/105001001F1B5B00/105001001F1B5B00.tif\n titiler_endpoint (str, optional): Titiler endpoint. Defaults to \"https://titiler.xyz\".\n\n Returns:\n list: A dictionary of band statistics.\n \"\"\"\n\n r = requests.get(\n f\"{titiler_endpoint}/cog/statistics\",\n params={\n \"url\": url,\n },\n ).json()\n\n return r\n\n\ndef cog_info(url, titiler_endpoint=\"https://titiler.xyz\", return_geojson=False):\n \"\"\"Get band statistics of a Cloud Optimized GeoTIFF (COG).\n\n Args:\n url (str): HTTP URL to a COG, e.g., https://opendata.digitalglobe.com/events/mauritius-oil-spill/post-event/2020-08-12/105001001F1B5B00/105001001F1B5B00.tif\n titiler_endpoint (str, optional): Titiler endpoint. Defaults to \"https://titiler.xyz\".\n\n Returns:\n list: A dictionary of band info.\n \"\"\"\n\n info = \"info\"\n if return_geojson:\n info = \"info.geojson\"\n\n r = requests.get(\n f\"{titiler_endpoint}/cog/{info}\",\n params={\n \"url\": url,\n },\n ).json()\n\n return r\n\n\ndef cog_pixel_value(\n lon,\n lat,\n url,\n bidx=None,\n titiler_endpoint=\"https://titiler.xyz\",\n verbose=True,\n **kwargs,\n):\n \"\"\"Get pixel value from COG.\n\n Args:\n lon (float): Longitude of the pixel.\n lat (float): Latitude of the pixel.\n url (str): HTTP URL to a COG, e.g., 'https://opendata.digitalglobe.com/events/california-fire-2020/pre-event/2018-02-16/pine-gulch-fire20/1030010076004E00.tif'\n bidx (str, optional): Dataset band indexes (e.g bidx=1, bidx=1&bidx=2&bidx=3). Defaults to None.\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"planetary-computer\", \"pc\". Defaults to None.\n verbose (bool, optional): Print status messages. 
Defaults to True.\n\n Returns:\n list: A dictionary of band info.\n \"\"\"\n\n titiler_endpoint = check_titiler_endpoint(titiler_endpoint)\n kwargs[\"url\"] = url\n if bidx is not None:\n kwargs[\"bidx\"] = bidx\n\n r = requests.get(f\"{titiler_endpoint}/cog/point/{lon},{lat}\", params=kwargs).json()\n bands = cog_bands(url, titiler_endpoint)\n # if isinstance(titiler_endpoint, str):\n # r = requests.get(f\"{titiler_endpoint}/cog/point/{lon},{lat}\", params=kwargs).json()\n # else:\n # r = requests.get(\n # titiler_endpoint.url_for_stac_pixel_value(lon, lat), params=kwargs\n # ).json()\n\n if \"detail\" in r:\n if verbose:\n print(r[\"detail\"])\n return None\n else:\n values = r[\"values\"]\n result = dict(zip(bands, values))\n return result\n\n\ndef stac_tile(\n url=None,\n collection=None,\n items=None,\n assets=None,\n bands=None,\n titiler_endpoint=None,\n **kwargs,\n):\n\n \"\"\"Get a tile layer from a single SpatialTemporal Asset Catalog (STAC) item.\n\n Args:\n url (str): HTTP URL to a STAC item, e.g., https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json\n collection (str): The Microsoft Planetary Computer STAC collection ID, e.g., landsat-8-c2-l2.\n items (str): The Microsoft Planetary Computer STAC item ID, e.g., LC08_L2SP_047027_20201204_02_T1.\n assets (str | list): The Microsoft Planetary Computer STAC asset ID, e.g., [\"SR_B7\", \"SR_B5\", \"SR_B4\"].\n bands (list): A list of band names, e.g., [\"SR_B7\", \"SR_B5\", \"SR_B4\"]\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"https://planetarycomputer.microsoft.com/api/data/v1\", \"planetary-computer\", \"pc\". Defaults to None.\n\n Returns:\n str: Returns the STAC Tile layer URL.\n \"\"\"\n\n if url is None and collection is None:\n raise ValueError(\"Either url or collection must be specified.\")\n\n if collection is not None and titiler_endpoint is None:\n titiler_endpoint = \"planetary-computer\"\n\n if url is not None:\n kwargs[\"url\"] = url\n if collection is not None:\n kwargs[\"collection\"] = collection\n if items is not None:\n kwargs[\"items\"] = items\n\n titiler_endpoint = check_titiler_endpoint(titiler_endpoint)\n\n if isinstance(titiler_endpoint, PlanetaryComputerEndpoint):\n if isinstance(bands, list):\n bands = \",\".join(bands)\n if isinstance(assets, list):\n assets = \",\".join(assets)\n if assets is None and (bands is not None):\n assets = bands\n else:\n kwargs[\"bidx\"] = bands\n\n kwargs[\"assets\"] = assets\n\n if (\"expression\" in kwargs) and (\"rescale\" not in kwargs):\n stats = stac_stats(\n collection=collection,\n items=items,\n expression=kwargs[\"expression\"],\n titiler_endpoint=titiler_endpoint,\n )\n kwargs[\n \"rescale\"\n ] = f\"{stats[0]['percentile_2']},{stats[0]['percentile_98']}\"\n\n if (\"asset_expression\" in kwargs) and (\"rescale\" not in kwargs):\n stats = stac_stats(\n collection=collection,\n items=items,\n expression=kwargs[\"asset_expression\"],\n titiler_endpoint=titiler_endpoint,\n )\n kwargs[\n \"rescale\"\n ] = f\"{stats[0]['percentile_2']},{stats[0]['percentile_98']}\"\n\n if (\n (assets is not None)\n and (\"asset_expression\" not in kwargs)\n and (\"expression\" not in kwargs)\n and (\"rescale\" not in kwargs)\n ):\n stats = stac_stats(\n collection=collection,\n items=items,\n assets=assets,\n titiler_endpoint=titiler_endpoint,\n )\n percentile_2 = min([s[\"percentile_2\"] for s in stats])\n percentile_98 = 
max([s[\"percentile_98\"] for s in stats])\n kwargs[\"rescale\"] = f\"{percentile_2},{percentile_98}\"\n\n else:\n if isinstance(bands, str):\n bands = bands.split(\",\")\n if isinstance(assets, str):\n assets = assets.split(\",\")\n\n if assets is None and (bands is not None):\n assets = bands\n else:\n kwargs[\"asset_bidx\"] = bands\n kwargs[\"assets\"] = assets\n\n TileMatrixSetId = \"WebMercatorQuad\"\n if \"TileMatrixSetId\" in kwargs.keys():\n TileMatrixSetId = kwargs[\"TileMatrixSetId\"]\n kwargs.pop(\"TileMatrixSetId\")\n\n if isinstance(titiler_endpoint, str):\n r = requests.get(\n f\"{titiler_endpoint}/stac/{TileMatrixSetId}/tilejson.json\",\n params=kwargs,\n ).json()\n else:\n r = requests.get(titiler_endpoint.url_for_stac_item(), params=kwargs).json()\n\n return r[\"tiles\"][0]\n\n\ndef stac_bounds(url=None, collection=None, items=None, titiler_endpoint=None, **kwargs):\n \"\"\"Get the bounding box of a single SpatialTemporal Asset Catalog (STAC) item.\n\n Args:\n url (str): HTTP URL to a STAC item, e.g., https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json\n collection (str): The Microsoft Planetary Computer STAC collection ID, e.g., landsat-8-c2-l2.\n items (str): The Microsoft Planetary Computer STAC item ID, e.g., LC08_L2SP_047027_20201204_02_T1.\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"planetary-computer\", \"pc\". Defaults to None.\n\n Returns:\n list: A list of values representing [left, bottom, right, top]\n \"\"\"\n\n if url is None and collection is None:\n raise ValueError(\"Either url or collection must be specified.\")\n\n if collection is not None and titiler_endpoint is None:\n titiler_endpoint = \"planetary-computer\"\n\n if url is not None:\n kwargs[\"url\"] = url\n if collection is not None:\n kwargs[\"collection\"] = collection\n if items is not None:\n kwargs[\"items\"] = items\n\n titiler_endpoint = check_titiler_endpoint(titiler_endpoint)\n if isinstance(titiler_endpoint, str):\n r = requests.get(f\"{titiler_endpoint}/stac/bounds\", params=kwargs).json()\n else:\n r = requests.get(titiler_endpoint.url_for_stac_bounds(), params=kwargs).json()\n\n bounds = r[\"bounds\"]\n return bounds\n\n\ndef stac_center(url=None, collection=None, items=None, titiler_endpoint=None, **kwargs):\n \"\"\"Get the centroid of a single SpatialTemporal Asset Catalog (STAC) item.\n\n Args:\n url (str): HTTP URL to a STAC item, e.g., https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json\n collection (str): The Microsoft Planetary Computer STAC collection ID, e.g., landsat-8-c2-l2.\n items (str): The Microsoft Planetary Computer STAC item ID, e.g., LC08_L2SP_047027_20201204_02_T1.\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"planetary-computer\", \"pc\". 
Defaults to None.\n\n Returns:\n tuple: A tuple representing (longitude, latitude)\n \"\"\"\n bounds = stac_bounds(url, collection, items, titiler_endpoint, **kwargs)\n center = ((bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2) # (lon, lat)\n return center\n\n\ndef stac_bands(url=None, collection=None, items=None, titiler_endpoint=None, **kwargs):\n \"\"\"Get band names of a single SpatialTemporal Asset Catalog (STAC) item.\n\n Args:\n url (str): HTTP URL to a STAC item, e.g., https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json\n collection (str): The Microsoft Planetary Computer STAC collection ID, e.g., landsat-8-c2-l2.\n items (str): The Microsoft Planetary Computer STAC item ID, e.g., LC08_L2SP_047027_20201204_02_T1.\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"planetary-computer\", \"pc\". Defaults to None.\n\n Returns:\n list: A list of band names\n \"\"\"\n\n if url is None and collection is None:\n raise ValueError(\"Either url or collection must be specified.\")\n\n if collection is not None and titiler_endpoint is None:\n titiler_endpoint = \"planetary-computer\"\n\n if url is not None:\n kwargs[\"url\"] = url\n if collection is not None:\n kwargs[\"collection\"] = collection\n if items is not None:\n kwargs[\"items\"] = items\n\n titiler_endpoint = check_titiler_endpoint(titiler_endpoint)\n if isinstance(titiler_endpoint, str):\n r = requests.get(f\"{titiler_endpoint}/stac/assets\", params=kwargs).json()\n else:\n r = requests.get(titiler_endpoint.url_for_stac_assets(), params=kwargs).json()\n\n return r\n\n\ndef stac_stats(\n url=None, collection=None, items=None, assets=None, titiler_endpoint=None, **kwargs\n):\n \"\"\"Get band statistics of a STAC item.\n\n Args:\n url (str): HTTP URL to a STAC item, e.g., https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json\n collection (str): The Microsoft Planetary Computer STAC collection ID, e.g., landsat-8-c2-l2.\n items (str): The Microsoft Planetary Computer STAC item ID, e.g., LC08_L2SP_047027_20201204_02_T1.\n assets (str | list): The Microsoft Planetary Computer STAC asset ID, e.g., [\"SR_B7\", \"SR_B5\", \"SR_B4\"].\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"planetary-computer\", \"pc\". 
Defaults to None.\n\n Returns:\n list: A dictionary of band statistics.\n \"\"\"\n\n if url is None and collection is None:\n raise ValueError(\"Either url or collection must be specified.\")\n\n if collection is not None and titiler_endpoint is None:\n titiler_endpoint = \"planetary-computer\"\n\n if url is not None:\n kwargs[\"url\"] = url\n if collection is not None:\n kwargs[\"collection\"] = collection\n if items is not None:\n kwargs[\"items\"] = items\n if assets is not None:\n kwargs[\"assets\"] = assets\n\n titiler_endpoint = check_titiler_endpoint(titiler_endpoint)\n if isinstance(titiler_endpoint, str):\n r = requests.get(f\"{titiler_endpoint}/stac/statistics\", params=kwargs).json()\n else:\n r = requests.get(\n titiler_endpoint.url_for_stac_statistics(), params=kwargs\n ).json()\n\n return r\n\n\ndef stac_info(\n url=None, collection=None, items=None, assets=None, titiler_endpoint=None, **kwargs\n):\n \"\"\"Get band info of a STAC item.\n\n Args:\n url (str): HTTP URL to a STAC item, e.g., https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json\n collection (str): The Microsoft Planetary Computer STAC collection ID, e.g., landsat-8-c2-l2.\n items (str): The Microsoft Planetary Computer STAC item ID, e.g., LC08_L2SP_047027_20201204_02_T1.\n assets (str | list): The Microsoft Planetary Computer STAC asset ID, e.g., [\"SR_B7\", \"SR_B5\", \"SR_B4\"].\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"planetary-computer\", \"pc\". Defaults to None.\n\n Returns:\n list: A dictionary of band info.\n \"\"\"\n\n if url is None and collection is None:\n raise ValueError(\"Either url or collection must be specified.\")\n\n if collection is not None and titiler_endpoint is None:\n titiler_endpoint = \"planetary-computer\"\n\n if url is not None:\n kwargs[\"url\"] = url\n if collection is not None:\n kwargs[\"collection\"] = collection\n if items is not None:\n kwargs[\"items\"] = items\n if assets is not None:\n kwargs[\"assets\"] = assets\n\n titiler_endpoint = check_titiler_endpoint(titiler_endpoint)\n if isinstance(titiler_endpoint, str):\n r = requests.get(f\"{titiler_endpoint}/stac/info\", params=kwargs).json()\n else:\n r = requests.get(titiler_endpoint.url_for_stac_info(), params=kwargs).json()\n\n return r\n\n\ndef stac_info_geojson(\n url=None, collection=None, items=None, assets=None, titiler_endpoint=None, **kwargs\n):\n \"\"\"Get band info of a STAC item.\n\n Args:\n url (str): HTTP URL to a STAC item, e.g., https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json\n collection (str): The Microsoft Planetary Computer STAC collection ID, e.g., landsat-8-c2-l2.\n items (str): The Microsoft Planetary Computer STAC item ID, e.g., LC08_L2SP_047027_20201204_02_T1.\n assets (str | list): The Microsoft Planetary Computer STAC asset ID, e.g., [\"SR_B7\", \"SR_B5\", \"SR_B4\"].\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"planetary-computer\", \"pc\". 
Defaults to None.\n\n Returns:\n list: A dictionary of band info.\n \"\"\"\n\n if url is None and collection is None:\n raise ValueError(\"Either url or collection must be specified.\")\n\n if collection is not None and titiler_endpoint is None:\n titiler_endpoint = \"planetary-computer\"\n\n if url is not None:\n kwargs[\"url\"] = url\n if collection is not None:\n kwargs[\"collection\"] = collection\n if items is not None:\n kwargs[\"items\"] = items\n if assets is not None:\n kwargs[\"assets\"] = assets\n\n titiler_endpoint = check_titiler_endpoint(titiler_endpoint)\n if isinstance(titiler_endpoint, str):\n r = requests.get(f\"{titiler_endpoint}/stac/info.geojson\", params=kwargs).json()\n else:\n r = requests.get(\n titiler_endpoint.url_for_stac_info_geojson(), params=kwargs\n ).json()\n\n return r\n\n\ndef stac_assets(url=None, collection=None, items=None, titiler_endpoint=None, **kwargs):\n \"\"\"Get all assets of a STAC item.\n\n Args:\n url (str): HTTP URL to a STAC item, e.g., https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json\n collection (str): The Microsoft Planetary Computer STAC collection ID, e.g., landsat-8-c2-l2.\n items (str): The Microsoft Planetary Computer STAC item ID, e.g., LC08_L2SP_047027_20201204_02_T1.\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"planetary-computer\", \"pc\". Defaults to None.\n\n Returns:\n list: A list of assets.\n \"\"\"\n\n if url is None and collection is None:\n raise ValueError(\"Either url or collection must be specified.\")\n\n if collection is not None and titiler_endpoint is None:\n titiler_endpoint = \"planetary-computer\"\n\n if url is not None:\n kwargs[\"url\"] = url\n if collection is not None:\n kwargs[\"collection\"] = collection\n if items is not None:\n kwargs[\"items\"] = items\n\n titiler_endpoint = check_titiler_endpoint(titiler_endpoint)\n if isinstance(titiler_endpoint, str):\n r = requests.get(f\"{titiler_endpoint}/stac/assets\", params=kwargs).json()\n else:\n r = requests.get(titiler_endpoint.url_for_stac_assets(), params=kwargs).json()\n\n return r\n\n\ndef stac_pixel_value(\n lon,\n lat,\n url=None,\n collection=None,\n items=None,\n assets=None,\n titiler_endpoint=None,\n verbose=True,\n **kwargs,\n):\n \"\"\"Get pixel value from STAC assets.\n\n Args:\n lon (float): Longitude of the pixel.\n lat (float): Latitude of the pixel.\n url (str): HTTP URL to a STAC item, e.g., https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json\n collection (str): The Microsoft Planetary Computer STAC collection ID, e.g., landsat-8-c2-l2.\n items (str): The Microsoft Planetary Computer STAC item ID, e.g., LC08_L2SP_047027_20201204_02_T1.\n assets (str | list): The Microsoft Planetary Computer STAC asset ID, e.g., [\"SR_B7\", \"SR_B5\", \"SR_B4\"].\n titiler_endpoint (str, optional): Titiler endpoint, e.g., \"https://titiler.xyz\", \"planetary-computer\", \"pc\". Defaults to None.\n verbose (bool, optional): Print out the error message. 
Defaults to True.\n\n Returns:\n list: A dictionary of pixel values for each asset.\n \"\"\"\n\n if url is None and collection is None:\n raise ValueError(\"Either url or collection must be specified.\")\n\n if collection is not None and titiler_endpoint is None:\n titiler_endpoint = \"planetary-computer\"\n\n if url is not None:\n kwargs[\"url\"] = url\n if collection is not None:\n kwargs[\"collection\"] = collection\n if items is not None:\n kwargs[\"items\"] = items\n\n if assets is None:\n assets = stac_assets(\n url=url,\n collection=collection,\n items=items,\n titiler_endpoint=titiler_endpoint,\n )\n assets = \",\".join(assets)\n kwargs[\"assets\"] = assets\n\n titiler_endpoint = check_titiler_endpoint(titiler_endpoint)\n if isinstance(titiler_endpoint, str):\n r = requests.get(f\"{titiler_endpoint}/stac/{lon},{lat}\", params=kwargs).json()\n else:\n r = requests.get(\n titiler_endpoint.url_for_stac_pixel_value(lon, lat), params=kwargs\n ).json()\n\n if \"detail\" in r:\n if verbose:\n print(r[\"detail\"])\n return None\n else:\n values = [v[0] for v in r[\"values\"]]\n result = dict(zip(assets.split(\",\"), values))\n return result\n\n\ndef bbox_to_geojson(bounds):\n \"\"\"Convert coordinates of a bounding box to a geojson.\n\n Args:\n bounds (list): A list of coordinates representing [left, bottom, right, top].\n\n Returns:\n dict: A geojson feature.\n \"\"\"\n return {\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [bounds[0], bounds[3]],\n [bounds[0], bounds[1]],\n [bounds[2], bounds[1]],\n [bounds[2], bounds[3]],\n [bounds[0], bounds[3]],\n ]\n ],\n },\n \"type\": \"Feature\",\n }\n\n\ndef coords_to_geojson(coords):\n \"\"\"Convert a list of bbox coordinates representing [left, bottom, right, top] to geojson FeatureCollection.\n\n Args:\n coords (list): A list of bbox coordinates representing [left, bottom, right, top].\n\n Returns:\n dict: A geojson FeatureCollection.\n \"\"\"\n\n features = []\n for bbox in coords:\n features.append(bbox_to_geojson(bbox))\n return {\"type\": \"FeatureCollection\", \"features\": features}\n\n\ndef explode(coords):\n \"\"\"Explode a GeoJSON geometry's coordinates object and yield\n coordinate tuples. As long as the input is conforming, the type of\n the geometry doesn't matter. From Fiona 1.4.8\n\n Args:\n coords (list): A list of coordinates.\n\n Yields:\n [type]: [description]\n \"\"\"\n\n for e in coords:\n if isinstance(e, (float, int)):\n yield coords\n break\n else:\n for f in explode(e):\n yield f\n\n\ndef get_bounds(geometry, north_up=True, transform=None):\n \"\"\"Bounding box of a GeoJSON geometry, GeometryCollection, or FeatureCollection.\n left, bottom, right, top\n *not* xmin, ymin, xmax, ymax\n If not north_up, y will be switched to guarantee the above.\n Source code adapted from https://github.com/mapbox/rasterio/blob/master/rasterio/features.py#L361\n\n Args:\n geometry (dict): A GeoJSON dict.\n north_up (bool, optional): . Defaults to True.\n transform ([type], optional): . 
Defaults to None.\n\n Returns:\n list: A list of coordinates representing [left, bottom, right, top]\n \"\"\"\n\n if \"bbox\" in geometry:\n return tuple(geometry[\"bbox\"])\n\n geometry = geometry.get(\"geometry\") or geometry\n\n # geometry must be a geometry, GeometryCollection, or FeatureCollection\n if not (\n \"coordinates\" in geometry or \"geometries\" in geometry or \"features\" in geometry\n ):\n raise ValueError(\n \"geometry must be a GeoJSON-like geometry, GeometryCollection, \"\n \"or FeatureCollection\"\n )\n\n if \"features\" in geometry:\n # Input is a FeatureCollection\n xmins = []\n ymins = []\n xmaxs = []\n ymaxs = []\n for feature in geometry[\"features\"]:\n xmin, ymin, xmax, ymax = get_bounds(feature[\"geometry\"])\n xmins.append(xmin)\n ymins.append(ymin)\n xmaxs.append(xmax)\n ymaxs.append(ymax)\n if north_up:\n return min(xmins), min(ymins), max(xmaxs), max(ymaxs)\n else:\n return min(xmins), max(ymaxs), max(xmaxs), min(ymins)\n\n elif \"geometries\" in geometry:\n # Input is a geometry collection\n xmins = []\n ymins = []\n xmaxs = []\n ymaxs = []\n for geometry in geometry[\"geometries\"]:\n xmin, ymin, xmax, ymax = get_bounds(geometry)\n xmins.append(xmin)\n ymins.append(ymin)\n xmaxs.append(xmax)\n ymaxs.append(ymax)\n if north_up:\n return min(xmins), min(ymins), max(xmaxs), max(ymaxs)\n else:\n return min(xmins), max(ymaxs), max(xmaxs), min(ymins)\n\n elif \"coordinates\" in geometry:\n # Input is a singular geometry object\n if transform is not None:\n xyz = list(explode(geometry[\"coordinates\"]))\n xyz_px = [transform * point for point in xyz]\n xyz = tuple(zip(*xyz_px))\n return min(xyz[0]), max(xyz[1]), max(xyz[0]), min(xyz[1])\n else:\n xyz = tuple(zip(*list(explode(geometry[\"coordinates\"]))))\n if north_up:\n return min(xyz[0]), min(xyz[1]), max(xyz[0]), max(xyz[1])\n else:\n return min(xyz[0]), max(xyz[1]), max(xyz[0]), min(xyz[1])\n\n # all valid inputs returned above, so whatever falls through is an error\n raise ValueError(\n \"geometry must be a GeoJSON-like geometry, GeometryCollection, \"\n \"or FeatureCollection\"\n )\n\n\ndef get_center(geometry, north_up=True, transform=None):\n \"\"\"Get the centroid of a GeoJSON.\n\n Args:\n geometry (dict): A GeoJSON dict.\n north_up (bool, optional): . Defaults to True.\n transform ([type], optional): . 
Defaults to None.\n\n Returns:\n list: [lon, lat]\n \"\"\"\n bounds = get_bounds(geometry, north_up, transform)\n center = ((bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2) # (lat, lon)\n return center\n\n\ndef adjust_longitude(in_fc):\n \"\"\"Adjusts longitude if it is less than -180 or greater than 180.\n\n Args:\n in_fc (dict): The input dictionary containing coordinates.\n\n Returns:\n dict: A dictionary containing the converted longitudes\n \"\"\"\n try:\n\n keys = in_fc.keys()\n\n if \"geometry\" in keys:\n\n coordinates = in_fc[\"geometry\"][\"coordinates\"]\n\n if in_fc[\"geometry\"][\"type\"] == \"Point\":\n longitude = coordinates[0]\n if longitude < -180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc[\"geometry\"][\"coordinates\"][0] = longitude\n\n elif in_fc[\"geometry\"][\"type\"] == \"Polygon\":\n for index1, item in enumerate(coordinates):\n for index2, element in enumerate(item):\n longitude = element[0]\n if longitude < -180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc[\"geometry\"][\"coordinates\"][index1][index2][0] = longitude\n\n elif in_fc[\"geometry\"][\"type\"] == \"LineString\":\n for index, element in enumerate(coordinates):\n longitude = element[0]\n if longitude < -180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc[\"geometry\"][\"coordinates\"][index][0] = longitude\n\n elif \"type\" in keys:\n\n coordinates = in_fc[\"coordinates\"]\n\n if in_fc[\"type\"] == \"Point\":\n longitude = coordinates[0]\n if longitude < -180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc[\"coordinates\"][0] = longitude\n\n elif in_fc[\"type\"] == \"Polygon\":\n for index1, item in enumerate(coordinates):\n for index2, element in enumerate(item):\n longitude = element[0]\n if longitude < -180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc[\"coordinates\"][index1][index2][0] = longitude\n\n elif in_fc[\"type\"] == \"LineString\":\n for index, element in enumerate(coordinates):\n longitude = element[0]\n if longitude < -180:\n longitude = 360 + longitude\n elif longitude > 180:\n longitude = longitude - 360\n in_fc[\"coordinates\"][index][0] = longitude\n\n return in_fc\n\n except Exception as e:\n print(e)\n return None\n\n\ndef is_GCS(in_shp):\n\n import warnings\n import pycrs\n\n if not os.path.exists(in_shp):\n raise FileNotFoundError(\"The input shapefile could not be found.\")\n\n if not in_shp.endswith(\".shp\"):\n raise TypeError(\"The input shapefile is invalid.\")\n\n in_prj = in_shp.replace(\".shp\", \".prj\")\n\n if not os.path.exists(in_prj):\n warnings.warn(\n f\"The projection file {in_prj} could not be found. 
Assuming the dataset is in a geographic coordinate system (GCS).\"\n )\n return True\n else:\n\n with open(in_prj) as f:\n esri_wkt = f.read()\n epsg4326 = pycrs.parse.from_epsg_code(4326).to_proj4()\n try:\n crs = pycrs.parse.from_esri_wkt(esri_wkt).to_proj4()\n if crs == epsg4326:\n return True\n else:\n return False\n except Exception:\n return False\n\n\ndef kml_to_shp(in_kml, out_shp):\n \"\"\"Converts a KML to shapefile.\n\n Args:\n in_kml (str): The file path to the input KML.\n out_shp (str): The file path to the output shapefile.\n\n Raises:\n FileNotFoundError: The input KML could not be found.\n TypeError: The output must be a shapefile.\n \"\"\"\n import warnings\n\n warnings.filterwarnings(\"ignore\")\n\n in_kml = os.path.abspath(in_kml)\n if not os.path.exists(in_kml):\n raise FileNotFoundError(\"The input KML could not be found.\")\n\n out_shp = os.path.abspath(out_shp)\n if not out_shp.endswith(\".shp\"):\n raise TypeError(\"The output must be a shapefile.\")\n\n out_dir = os.path.dirname(out_shp)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n\n import geopandas as gpd\n\n # import fiona\n # print(fiona.supported_drivers)\n gpd.io.file.fiona.drvsupport.supported_drivers[\"KML\"] = \"rw\"\n df = gpd.read_file(in_kml, driver=\"KML\")\n df.to_file(out_shp)\n\n\ndef kml_to_geojson(in_kml, out_geojson=None):\n \"\"\"Converts a KML to GeoJSON.\n\n Args:\n in_kml (str): The file path to the input KML.\n out_geojson (str): The file path to the output GeoJSON. Defaults to None.\n\n Raises:\n FileNotFoundError: The input KML could not be found.\n TypeError: The output must be a GeoJSON.\n \"\"\"\n import warnings\n\n warnings.filterwarnings(\"ignore\")\n\n in_kml = os.path.abspath(in_kml)\n if not os.path.exists(in_kml):\n raise FileNotFoundError(\"The input KML could not be found.\")\n\n if out_geojson is not None:\n out_geojson = os.path.abspath(out_geojson)\n ext = os.path.splitext(out_geojson)[1].lower()\n if ext not in [\".json\", \".geojson\"]:\n raise TypeError(\"The output file must be a GeoJSON.\")\n\n out_dir = os.path.dirname(out_geojson)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n\n import geopandas as gpd\n\n # import fiona\n # print(fiona.supported_drivers)\n gpd.io.file.fiona.drvsupport.supported_drivers[\"KML\"] = \"rw\"\n gdf = gpd.read_file(in_kml, driver=\"KML\")\n\n if out_geojson is not None:\n gdf.to_file(out_geojson, driver=\"GeoJSON\")\n else:\n return gdf.__geo_interface__\n\n\ndef csv_to_pandas(in_csv, **kwargs):\n \"\"\"Converts a CSV file to pandas dataframe.\n\n Args:\n in_csv (str): File path to the input CSV.\n\n Returns:\n pd.DataFrame: pandas DataFrame\n \"\"\"\n import pandas as pd\n\n try:\n return pd.read_csv(in_csv, **kwargs)\n except Exception as e:\n raise Exception(e)\n\n\ndef shp_to_gdf(in_shp):\n \"\"\"Converts a shapefile to Geopandas dataframe.\n\n Args:\n in_shp (str): File path to the input shapefile.\n\n Raises:\n FileNotFoundError: The provided shp could not be found.\n\n Returns:\n gpd.GeoDataFrame: geopandas.GeoDataFrame\n \"\"\"\n import warnings\n\n warnings.filterwarnings(\"ignore\")\n\n in_shp = os.path.abspath(in_shp)\n if not os.path.exists(in_shp):\n raise FileNotFoundError(\"The provided shp could not be found.\")\n\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n\n import geopandas as gpd\n\n try:\n return gpd.read_file(in_shp)\n except 
Exception as e:\n raise Exception(e)\n\n\ndef shp_to_geojson(in_shp, out_json=None, **kwargs):\n \"\"\"Converts a shapefile to GeoJSON.\n\n Args:\n in_shp (str): File path of the input shapefile.\n out_json (str, optional): File path of the output GeoJSON. Defaults to None.\n\n Returns:\n object: The json object representing the shapefile.\n \"\"\"\n try:\n import shapefile\n from datetime import date\n\n in_shp = os.path.abspath(in_shp)\n\n if out_json is not None:\n ext = os.path.splitext(out_json)[1]\n print(ext)\n if ext.lower() not in [\".json\", \".geojson\"]:\n raise TypeError(\"The output file extension must the .json or .geojson.\")\n\n if not os.path.exists(os.path.dirname(out_json)):\n os.makedirs(os.path.dirname(out_json))\n\n if not is_GCS(in_shp):\n try:\n import geopandas as gpd\n\n except Exception:\n raise ImportError(\n \"Geopandas is required to perform reprojection of the data. See https://geopandas.org/install.html\"\n )\n\n try:\n in_gdf = gpd.read_file(in_shp)\n out_gdf = in_gdf.to_crs(epsg=\"4326\")\n out_shp = in_shp.replace(\".shp\", \"_gcs.shp\")\n out_gdf.to_file(out_shp)\n in_shp = out_shp\n except Exception as e:\n raise Exception(e)\n\n if \"encoding\" in kwargs:\n reader = shapefile.Reader(in_shp, encoding=kwargs.pop(\"encoding\"))\n else:\n reader = shapefile.Reader(in_shp)\n out_dict = reader.__geo_interface__\n\n if out_json is not None:\n import json\n\n with open(out_json, \"w\") as geojson:\n geojson.write(json.dumps(out_dict, indent=2) + \"\\n\")\n else:\n return out_dict\n\n except Exception as e:\n raise Exception(e)\n\n\ndef delete_shp(in_shp, verbose=False):\n \"\"\"Deletes a shapefile.\n\n Args:\n in_shp (str): The input shapefile to delete.\n verbose (bool, optional): Whether to print out descriptive text. Defaults to True.\n \"\"\"\n from pathlib import Path\n\n in_shp = os.path.abspath(in_shp)\n in_dir = os.path.dirname(in_shp)\n basename = os.path.basename(in_shp).replace(\".shp\", \"\")\n\n files = Path(in_dir).rglob(basename + \".*\")\n\n for file in files:\n filepath = os.path.join(in_dir, str(file))\n os.remove(filepath)\n if verbose:\n print(f\"Deleted {filepath}\")\n\n\ndef vector_to_geojson(\n filename, out_geojson=None, bbox=None, mask=None, rows=None, epsg=\"4326\", **kwargs\n):\n \"\"\"Converts any geopandas-supported vector dataset to GeoJSON.\n\n Args:\n filename (str): Either the absolute or relative path to the file or URL to be opened, or any object with a read() method (such as an open file or StringIO).\n out_geojson (str, optional): The file path to the output GeoJSON. Defaults to None.\n bbox (tuple | GeoDataFrame or GeoSeries | shapely Geometry, optional): Filter features by given bounding box, GeoSeries, GeoDataFrame or a shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Cannot be used with mask. Defaults to None.\n mask (dict | GeoDataFrame or GeoSeries | shapely Geometry, optional): Filter for features that intersect with the given dict-like geojson geometry, GeoSeries, GeoDataFrame or shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Cannot be used with bbox. Defaults to None.\n rows (int or slice, optional): Load in specific rows by passing an integer (first n rows) or a slice() object.. Defaults to None.\n epsg (str, optional): The EPSG number to convert to. 
Defaults to \"4326\".\n\n Raises:\n ValueError: When the output file path is invalid.\n\n Returns:\n dict: A dictionary containing the GeoJSON.\n \"\"\"\n import warnings\n\n warnings.filterwarnings(\"ignore\")\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n import geopandas as gpd\n\n if not filename.startswith(\"http\"):\n filename = os.path.abspath(filename)\n if filename.endswith(\".zip\"):\n filename = \"zip://\" + filename\n ext = os.path.splitext(filename)[1].lower()\n if ext == \".kml\":\n gpd.io.file.fiona.drvsupport.supported_drivers[\"KML\"] = \"rw\"\n df = gpd.read_file(\n filename, bbox=bbox, mask=mask, rows=rows, driver=\"KML\", **kwargs\n )\n else:\n\n df = gpd.read_file(filename, bbox=bbox, mask=mask, rows=rows, **kwargs)\n gdf = df.to_crs(epsg=epsg)\n\n if out_geojson is not None:\n\n if not out_geojson.lower().endswith(\".geojson\"):\n raise ValueError(\"The output file must have a geojson file extension.\")\n\n out_geojson = os.path.abspath(out_geojson)\n out_dir = os.path.dirname(out_geojson)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n gdf.to_file(out_geojson, driver=\"GeoJSON\")\n\n else:\n return gdf.__geo_interface__\n\n\ndef screen_capture(outfile, monitor=1):\n \"\"\"Takes a full screenshot of the selected monitor.\n\n Args:\n outfile (str): The output file path to the screenshot.\n monitor (int, optional): The monitor to take the screenshot. Defaults to 1.\n \"\"\"\n from mss import mss\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n if not isinstance(monitor, int):\n print(\"The monitor number must be an integer.\")\n return\n\n try:\n with mss() as sct:\n sct.shot(output=outfile, mon=monitor)\n return outfile\n\n except Exception as e:\n raise Exception(e)\n\n\ndef gdf_to_geojson(gdf, out_geojson=None, epsg=None):\n \"\"\"Converts a GeoDataFame to GeoJSON.\n\n Args:\n gdf (GeoDataFrame): A GeoPandas GeoDataFrame.\n out_geojson (str, optional): File path to he output GeoJSON. Defaults to None.\n epsg (str, optional): An EPSG string, e.g., \"4326\". Defaults to None.\n\n Raises:\n TypeError: When the output file extension is incorrect.\n Exception: When the conversion fails.\n\n Returns:\n dict: When the out_json is None returns a dict.\n \"\"\"\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n\n try:\n if epsg is not None:\n gdf = gdf.to_crs(epsg=epsg)\n geojson = gdf.__geo_interface__\n\n if out_geojson is None:\n return geojson\n else:\n ext = os.path.splitext(out_geojson)[1]\n if ext.lower() not in [\".json\", \".geojson\"]:\n raise TypeError(\n \"The output file extension must be either .json or .geojson\"\n )\n out_dir = os.path.dirname(out_geojson)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n gdf.to_file(out_geojson, driver=\"GeoJSON\")\n except Exception as e:\n raise Exception(e)\n\n\ndef connect_postgis(\n database, host=\"localhost\", user=None, password=None, port=5432, use_env_var=False\n):\n \"\"\"Connects to a PostGIS database.\n\n Args:\n database (str): Name of the database\n host (str, optional): Hosting server for the database. Defaults to \"localhost\".\n user (str, optional): User name to access the database. Defaults to None.\n password (str, optional): Password to access the database. Defaults to None.\n port (int, optional): Port number to connect to at the server host. Defaults to 5432.\n use_env_var (bool, optional): Whether to use environment variables. 
\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n check_package(\n name=\"sqlalchemy\",\n URL=\"https://docs.sqlalchemy.org/en/14/intro.html#installation\",\n )\n\n from sqlalchemy import create_engine\n\n if use_env_var:\n if user is not None:\n user = os.getenv(user)\n else:\n user = os.getenv(\"SQL_USER\")\n\n if password is not None:\n password = os.getenv(password)\n else:\n password = os.getenv(\"SQL_PASSWORD\")\n\n if user is None:\n raise ValueError(\"user is not specified.\")\n if password is None:\n raise ValueError(\"password is not specified.\")\n\n connection_string = f\"postgresql://{user}:{password}@{host}:{port}/{database}\"\n engine = create_engine(connection_string)\n\n return engine\n\n\ndef read_postgis(sql, con, geom_col=\"geom\", crs=None, **kwargs):\n \"\"\"Reads data from a PostGIS database and returns a GeoDataFrame.\n\n Args:\n sql (str): SQL query to execute in selecting entries from database, or name of the table to read from the database.\n con (sqlalchemy.engine.Engine): Active connection to the database to query.\n geom_col (str, optional): Column name to convert to shapely geometries. Defaults to \"geom\".\n crs (str | dict, optional): CRS to use for the returned GeoDataFrame; if not set, tries to determine CRS from the SRID associated with the first geometry in the database, and assigns that to all geometries. Defaults to None.\n\n Returns:\n geopandas.GeoDataFrame: A GeoDataFrame containing the query results.\n \"\"\"\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n\n import geopandas as gpd\n\n gdf = gpd.read_postgis(sql, con, geom_col, crs, **kwargs)\n return gdf\n\n\ndef vector_col_names(filename, **kwargs):\n \"\"\"Retrieves the column names of a vector attribute table.\n\n Args:\n filename (str): The input file path.\n\n Returns:\n list: The list of column names.\n \"\"\"\n import warnings\n\n warnings.filterwarnings(\"ignore\")\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n import geopandas as gpd\n\n if not filename.startswith(\"http\"):\n filename = os.path.abspath(filename)\n ext = os.path.splitext(filename)[1].lower()\n if ext == \".kml\":\n gpd.io.file.fiona.drvsupport.supported_drivers[\"KML\"] = \"rw\"\n gdf = gpd.read_file(filename, driver=\"KML\", **kwargs)\n else:\n gdf = gpd.read_file(filename, **kwargs)\n col_names = gdf.columns.values.tolist()\n return col_names\n\n\ndef get_api_key(token_name, m=None):\n \"\"\"Retrieves an API key based on a system environment variable.\n\n Args:\n token_name (str): The token name.\n m (ipyleaflet.Map | folium.Map, optional): A Map instance. Defaults to None.\n\n Returns:\n str: The API key.\n \"\"\"\n api_key = os.environ.get(token_name)\n if m is not None and token_name in m.api_keys:\n api_key = m.api_keys[token_name]\n\n return api_key\n\n\ndef set_api_key(token_name, api_key, m=None):\n \"\"\"Sets an API key as an environment variable.\n\n Args:\n token_name (str): The token name.\n api_key (str): The API key.\n m (ipyleaflet.Map | folium.Map, optional): A Map instance. 
Defaults to None.\n \"\"\"\n os.environ[token_name] = api_key\n if m is not None:\n m.api_keys[token_name] = api_key\n\n\ndef planet_monthly_tropical(api_key=None, token_name=\"PLANET_API_KEY\"):\n \"\"\"Generates Planet monthly imagery URLs based on an API key. See https://assets.planet.com/docs/NICFI_UserGuidesFAQ.pdf\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n\n Raises:\n ValueError: If the API key could not be found.\n\n Returns:\n list: A list of tile URLs.\n \"\"\"\n from datetime import date\n\n if api_key is None:\n api_key = os.environ.get(token_name)\n if api_key is None:\n raise ValueError(\"The Planet API Key must be provided.\")\n\n today = date.today()\n year_now = int(today.strftime(\"%Y\"))\n month_now = int(today.strftime(\"%m\"))\n\n links = []\n prefix = \"https://tiles.planet.com/basemaps/v1/planet-tiles/planet_medres_normalized_analytic_\"\n subfix = \"_mosaic/gmap/{z}/{x}/{y}.png?api_key=\"\n\n for year in range(2020, year_now + 1):\n\n for month in range(1, 13):\n m_str = str(year) + \"-\" + str(month).zfill(2)\n\n if year == 2020 and month < 9:\n continue\n if year == year_now and month >= month_now:\n break\n\n url = f\"{prefix}{m_str}{subfix}{api_key}\"\n links.append(url)\n\n return links\n\n\ndef planet_biannual_tropical(api_key=None, token_name=\"PLANET_API_KEY\"):\n \"\"\"Generates Planet bi-annual imagery URLs based on an API key. See https://assets.planet.com/docs/NICFI_UserGuidesFAQ.pdf\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n\n Raises:\n ValueError: If the API key could not be found.\n\n Returns:\n list: A list of tile URLs.\n \"\"\"\n\n if api_key is None:\n api_key = os.environ.get(token_name)\n if api_key is None:\n raise ValueError(\"The Planet API Key must be provided.\")\n\n dates = [\n \"2015-12_2016-05\",\n \"2016-06_2016-11\",\n \"2016-12_2017-05\",\n \"2017-06_2017-11\",\n \"2017-12_2018-05\",\n \"2018-06_2018-11\",\n \"2018-12_2019-05\",\n \"2019-06_2019-11\",\n \"2019-12_2020-05\",\n \"2020-06_2020-08\",\n ]\n\n link = []\n prefix = \"https://tiles.planet.com/basemaps/v1/planet-tiles/planet_medres_normalized_analytic_\"\n subfix = \"_mosaic/gmap/{z}/{x}/{y}.png?api_key=\"\n\n for d in dates:\n url = f\"{prefix}{d}{subfix}{api_key}\"\n link.append(url)\n\n return link\n\n\ndef planet_catalog_tropical(api_key=None, token_name=\"PLANET_API_KEY\"):\n \"\"\"Generates Planet bi-annual and monthly imagery URLs based on an API key. See https://assets.planet.com/docs/NICFI_UserGuidesFAQ.pdf\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n\n Returns:\n list: A list of tile URLs.\n \"\"\"\n biannual = planet_biannual_tropical(api_key, token_name)\n monthly = planet_monthly_tropical(api_key, token_name)\n return biannual + monthly\n\n\ndef planet_monthly_tiles_tropical(\n api_key=None, token_name=\"PLANET_API_KEY\", tile_format=\"ipyleaflet\"\n):\n \"\"\"Generates Planet monthly imagery TileLayer based on an API key. See https://assets.planet.com/docs/NICFI_UserGuidesFAQ.pdf\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. 
Defaults to \"PLANET_API_KEY\".\n tile_format (str, optional): The TileLayer format, can be either ipyleaflet or folium. Defaults to \"ipyleaflet\".\n\n Raises:\n ValueError: If the tile layer format is invalid.\n\n Returns:\n dict: A dictionary of TileLayer.\n \"\"\"\n\n if tile_format not in [\"ipyleaflet\", \"folium\"]:\n raise ValueError(\"The tile format must be either ipyleaflet or folium.\")\n\n tiles = {}\n link = planet_monthly_tropical(api_key, token_name)\n for url in link:\n index = url.find(\"20\")\n name = \"Planet_\" + url[index : index + 7]\n\n if tile_format == \"ipyleaflet\":\n tile = ipyleaflet.TileLayer(url=url, attribution=\"Planet\", name=name)\n else:\n tile = folium.TileLayer(\n tiles=url,\n attr=\"Planet\",\n name=name,\n overlay=True,\n control=True,\n )\n\n tiles[name] = tile\n\n return tiles\n\n\ndef planet_biannual_tiles_tropical(\n api_key=None, token_name=\"PLANET_API_KEY\", tile_format=\"ipyleaflet\"\n):\n \"\"\"Generates Planet bi-annual imagery TileLayer based on an API key. See https://assets.planet.com/docs/NICFI_UserGuidesFAQ.pdf\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n tile_format (str, optional): The TileLayer format, can be either ipyleaflet or folium. Defaults to \"ipyleaflet\".\n\n Raises:\n ValueError: If the tile layer format is invalid.\n\n Returns:\n dict: A dictionary of TileLayer.\n \"\"\"\n\n if tile_format not in [\"ipyleaflet\", \"folium\"]:\n raise ValueError(\"The tile format must be either ipyleaflet or folium.\")\n\n tiles = {}\n link = planet_biannual_tropical(api_key, token_name)\n for url in link:\n index = url.find(\"20\")\n name = \"Planet_\" + url[index : index + 15]\n if tile_format == \"ipyleaflet\":\n tile = ipyleaflet.TileLayer(url=url, attribution=\"Planet\", name=name)\n else:\n tile = folium.TileLayer(\n tiles=url,\n attr=\"Planet\",\n name=name,\n overlay=True,\n control=True,\n )\n tiles[name] = tile\n\n return tiles\n\n\ndef planet_tiles_tropical(\n api_key=None, token_name=\"PLANET_API_KEY\", tile_format=\"ipyleaflet\"\n):\n \"\"\"Generates Planet monthly imagery TileLayer based on an API key. See https://assets.planet.com/docs/NICFI_UserGuidesFAQ.pdf\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n tile_format (str, optional): The TileLayer format, can be either ipyleaflet or folium. Defaults to \"ipyleaflet\".\n\n Raises:\n ValueError: If the tile layer format is invalid.\n\n Returns:\n dict: A dictionary of TileLayer.\n \"\"\"\n\n catalog = {}\n biannul = planet_biannual_tiles_tropical(api_key, token_name, tile_format)\n monthly = planet_monthly_tiles_tropical(api_key, token_name, tile_format)\n\n for key in biannul:\n catalog[key] = biannul[key]\n\n for key in monthly:\n catalog[key] = monthly[key]\n\n return catalog\n\n\ndef planet_monthly(api_key=None, token_name=\"PLANET_API_KEY\"):\n \"\"\"Generates Planet monthly imagery URLs based on an API key. To get a Planet API key, see https://developers.planet.com/quickstart/apis/\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. 
Defaults to \"PLANET_API_KEY\".\n\n Raises:\n ValueError: If the API key could not be found.\n\n Returns:\n list: A list of tile URLs.\n \"\"\"\n from datetime import date\n\n if api_key is None:\n api_key = os.environ.get(token_name)\n if api_key is None:\n raise ValueError(\"The Planet API Key must be provided.\")\n\n today = date.today()\n year_now = int(today.strftime(\"%Y\"))\n month_now = int(today.strftime(\"%m\"))\n\n link = []\n prefix = \"https://tiles.planet.com/basemaps/v1/planet-tiles/global_monthly_\"\n subfix = \"_mosaic/gmap/{z}/{x}/{y}.png?api_key=\"\n\n for year in range(2016, year_now + 1):\n\n for month in range(1, 13):\n m_str = str(year) + \"_\" + str(month).zfill(2)\n\n if year == year_now and month >= month_now:\n break\n\n url = f\"{prefix}{m_str}{subfix}{api_key}\"\n link.append(url)\n\n return link\n\n\ndef planet_quarterly(api_key=None, token_name=\"PLANET_API_KEY\"):\n \"\"\"Generates Planet quarterly imagery URLs based on an API key. To get a Planet API key, see https://developers.planet.com/quickstart/apis/\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n\n Raises:\n ValueError: If the API key could not be found.\n\n Returns:\n list: A list of tile URLs.\n \"\"\"\n from datetime import date\n\n if api_key is None:\n api_key = os.environ.get(token_name)\n if api_key is None:\n raise ValueError(\"The Planet API Key must be provided.\")\n\n today = date.today()\n year_now = int(today.strftime(\"%Y\"))\n month_now = int(today.strftime(\"%m\"))\n quarter_now = (month_now - 1) // 3 + 1\n\n link = []\n prefix = \"https://tiles.planet.com/basemaps/v1/planet-tiles/global_quarterly_\"\n subfix = \"_mosaic/gmap/{z}/{x}/{y}.png?api_key=\"\n\n for year in range(2016, year_now + 1):\n\n for quarter in range(1, 5):\n m_str = str(year) + \"q\" + str(quarter)\n\n if year == year_now and quarter >= quarter_now:\n break\n\n url = f\"{prefix}{m_str}{subfix}{api_key}\"\n link.append(url)\n\n return link\n\n\ndef planet_catalog(api_key=None, token_name=\"PLANET_API_KEY\"):\n \"\"\"Generates Planet bi-annual and monthly imagery URLs based on an API key. See https://assets.planet.com/docs/NICFI_UserGuidesFAQ.pdf\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n\n Returns:\n list: A list of tile URLs.\n \"\"\"\n quarterly = planet_quarterly(api_key, token_name)\n monthly = planet_monthly(api_key, token_name)\n return quarterly + monthly\n\n\ndef planet_monthly_tiles(\n api_key=None, token_name=\"PLANET_API_KEY\", tile_format=\"ipyleaflet\"\n):\n \"\"\"Generates Planet monthly imagery TileLayer based on an API key. To get a Planet API key, see https://developers.planet.com/quickstart/apis/\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n tile_format (str, optional): The TileLayer format, can be either ipyleaflet or folium. 
Defaults to \"ipyleaflet\".\n\n Raises:\n ValueError: If the tile layer format is invalid.\n\n Returns:\n dict: A dictionary of TileLayer.\n \"\"\"\n\n if tile_format not in [\"ipyleaflet\", \"folium\"]:\n raise ValueError(\"The tile format must be either ipyleaflet or folium.\")\n\n tiles = {}\n link = planet_monthly(api_key, token_name)\n\n for url in link:\n index = url.find(\"20\")\n name = \"Planet_\" + url[index : index + 7]\n\n if tile_format == \"ipyleaflet\":\n tile = ipyleaflet.TileLayer(url=url, attribution=\"Planet\", name=name)\n else:\n tile = folium.TileLayer(\n tiles=url,\n attr=\"Planet\",\n name=name,\n overlay=True,\n control=True,\n )\n\n tiles[name] = tile\n\n return tiles\n\n\ndef planet_quarterly_tiles(\n api_key=None, token_name=\"PLANET_API_KEY\", tile_format=\"ipyleaflet\"\n):\n \"\"\"Generates Planet quarterly imagery TileLayer based on an API key. To get a Planet API key, see https://developers.planet.com/quickstart/apis/\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n tile_format (str, optional): The TileLayer format, can be either ipyleaflet or folium. Defaults to \"ipyleaflet\".\n\n Raises:\n ValueError: If the tile layer format is invalid.\n\n Returns:\n dict: A dictionary of TileLayer.\n \"\"\"\n\n if tile_format not in [\"ipyleaflet\", \"folium\"]:\n raise ValueError(\"The tile format must be either ipyleaflet or folium.\")\n\n tiles = {}\n links = planet_quarterly(api_key, token_name)\n\n for url in links:\n index = url.find(\"20\")\n name = \"Planet_\" + url[index : index + 6]\n\n if tile_format == \"ipyleaflet\":\n tile = ipyleaflet.TileLayer(url=url, attribution=\"Planet\", name=name)\n else:\n tile = folium.TileLayer(\n tiles=url,\n attr=\"Planet\",\n name=name,\n overlay=True,\n control=True,\n )\n\n tiles[name] = tile\n\n return tiles\n\n\ndef planet_tiles(api_key=None, token_name=\"PLANET_API_KEY\", tile_format=\"ipyleaflet\"):\n \"\"\"Generates Planet imagery TileLayer based on an API key. To get a Planet API key, see https://developers.planet.com/quickstart/apis/\n\n Args:\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n tile_format (str, optional): The TileLayer format, can be either ipyleaflet or folium. Defaults to \"ipyleaflet\".\n\n Raises:\n ValueError: If the tile layer format is invalid.\n\n Returns:\n dict: A dictionary of TileLayer.\n \"\"\"\n\n catalog = {}\n quarterly = planet_quarterly_tiles(api_key, token_name, tile_format)\n monthly = planet_monthly_tiles(api_key, token_name, tile_format)\n\n for key in quarterly:\n catalog[key] = quarterly[key]\n\n for key in monthly:\n catalog[key] = monthly[key]\n\n return catalog\n\n\ndef planet_by_quarter(\n year=2016,\n quarter=1,\n api_key=None,\n token_name=\"PLANET_API_KEY\",\n):\n \"\"\"Gets Planet global mosaic tile url by quarter. To get a Planet API key, see https://developers.planet.com/quickstart/apis/\n\n Args:\n year (int, optional): The year of Planet global mosaic, must be >=2016. Defaults to 2016.\n quarter (int, optional): The quarter of Planet global mosaic, must be 1-4. Defaults to 1.\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. 
Defaults to \"PLANET_API_KEY\".\n\n Raises:\n ValueError: The Planet API key is not provided.\n ValueError: The year is invalid.\n ValueError: The quarter is invalid.\n ValueError: The quarter is invalid.\n\n Returns:\n str: A Planet global mosaic tile url.\n \"\"\"\n from datetime import date\n\n if api_key is None:\n api_key = os.environ.get(token_name)\n if api_key is None:\n raise ValueError(\"The Planet API Key must be provided.\")\n\n today = date.today()\n year_now = int(today.strftime(\"%Y\"))\n month_now = int(today.strftime(\"%m\"))\n quarter_now = (month_now - 1) // 3 + 1\n\n if year > year_now:\n raise ValueError(f\"Year must be between 2016 and {year_now}.\")\n elif year == year_now and quarter >= quarter_now:\n raise ValueError(f\"Quarter must be less than {quarter_now} for year {year_now}\")\n\n if quarter < 1 or quarter > 4:\n raise ValueError(\"Quarter must be between 1 and 4.\")\n\n prefix = \"https://tiles.planet.com/basemaps/v1/planet-tiles/global_quarterly_\"\n subfix = \"_mosaic/gmap/{z}/{x}/{y}.png?api_key=\"\n\n m_str = str(year) + \"q\" + str(quarter)\n url = f\"{prefix}{m_str}{subfix}{api_key}\"\n\n return url\n\n\ndef planet_by_month(\n year=2016,\n month=1,\n api_key=None,\n token_name=\"PLANET_API_KEY\",\n):\n \"\"\"Gets Planet global mosaic tile url by month. To get a Planet API key, see https://developers.planet.com/quickstart/apis/\n\n Args:\n year (int, optional): The year of Planet global mosaic, must be >=2016. Defaults to 2016.\n month (int, optional): The month of Planet global mosaic, must be 1-12. Defaults to 1.\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n\n Raises:\n ValueError: The Planet API key is not provided.\n ValueError: The year is invalid.\n ValueError: The month is invalid.\n ValueError: The month is invalid.\n\n Returns:\n str: A Planet global mosaic tile url.\n \"\"\"\n from datetime import date\n\n if api_key is None:\n api_key = os.environ.get(token_name)\n if api_key is None:\n raise ValueError(\"The Planet API Key must be provided.\")\n\n today = date.today()\n year_now = int(today.strftime(\"%Y\"))\n month_now = int(today.strftime(\"%m\"))\n # quarter_now = (month_now - 1) // 3 + 1\n\n if year > year_now:\n raise ValueError(f\"Year must be between 2016 and {year_now}.\")\n elif year == year_now and month >= month_now:\n raise ValueError(f\"Month must be less than {month_now} for year {year_now}\")\n\n if month < 1 or month > 12:\n raise ValueError(\"Month must be between 1 and 12.\")\n\n prefix = \"https://tiles.planet.com/basemaps/v1/planet-tiles/global_monthly_\"\n subfix = \"_mosaic/gmap/{z}/{x}/{y}.png?api_key=\"\n\n m_str = str(year) + \"_\" + str(month).zfill(2)\n url = f\"{prefix}{m_str}{subfix}{api_key}\"\n\n return url\n\n\ndef planet_tile_by_quarter(\n year=2016,\n quarter=1,\n name=None,\n api_key=None,\n token_name=\"PLANET_API_KEY\",\n tile_format=\"ipyleaflet\",\n):\n \"\"\"Generates Planet quarterly imagery TileLayer based on an API key. To get a Planet API key, see https://developers.planet.com/quickstart/apis\n\n Args:\n year (int, optional): The year of Planet global mosaic, must be >=2016. Defaults to 2016.\n quarter (int, optional): The quarter of Planet global mosaic, must be 1-4. Defaults to 1.\n name (str, optional): The layer name to use. Defaults to None.\n api_key (str, optional): The Planet API key. 
Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n tile_format (str, optional): The TileLayer format, can be either ipyleaflet or folium. Defaults to \"ipyleaflet\".\n\n Raises:\n ValueError: If the tile layer format is invalid.\n\n Returns:\n dict: A dictionary of TileLayer.\n \"\"\"\n\n if tile_format not in [\"ipyleaflet\", \"folium\"]:\n raise ValueError(\"The tile format must be either ipyleaflet or folium.\")\n\n url = planet_by_quarter(year, quarter, api_key, token_name)\n\n if name is None:\n name = \"Planet_\" + str(year) + \"_q\" + str(quarter)\n\n if tile_format == \"ipyleaflet\":\n tile = ipyleaflet.TileLayer(url=url, attribution=\"Planet\", name=name)\n else:\n tile = folium.TileLayer(\n tiles=url,\n attr=\"Planet\",\n name=name,\n overlay=True,\n control=True,\n )\n\n return tile\n\n\ndef planet_tile_by_month(\n year=2016,\n month=1,\n name=None,\n api_key=None,\n token_name=\"PLANET_API_KEY\",\n tile_format=\"ipyleaflet\",\n):\n \"\"\"Generates Planet monthly imagery TileLayer based on an API key. To get a Planet API key, see https://developers.planet.com/quickstart/apis\n\n Args:\n year (int, optional): The year of Planet global mosaic, must be >=2016. Defaults to 2016.\n month (int, optional): The month of Planet global mosaic, must be 1-12. Defaults to 1.\n name (str, optional): The layer name to use. Defaults to None.\n api_key (str, optional): The Planet API key. Defaults to None.\n token_name (str, optional): The environment variable name of the API key. Defaults to \"PLANET_API_KEY\".\n tile_format (str, optional): The TileLayer format, can be either ipyleaflet or folium. Defaults to \"ipyleaflet\".\n\n Raises:\n ValueError: If the tile layer format is invalid.\n\n Returns:\n dict: A dictionary of TileLayer.\n \"\"\"\n\n if tile_format not in [\"ipyleaflet\", \"folium\"]:\n raise ValueError(\"The tile format must be either ipyleaflet or folium.\")\n\n url = planet_by_month(year, month, api_key, token_name)\n\n if name is None:\n name = \"Planet_\" + str(year) + \"_\" + str(month).zfill(2)\n\n if tile_format == \"ipyleaflet\":\n tile = ipyleaflet.TileLayer(url=url, attribution=\"Planet\", name=name)\n else:\n tile = folium.TileLayer(\n tiles=url,\n attr=\"Planet\",\n name=name,\n overlay=True,\n control=True,\n )\n\n return tile\n\n\ndef basemap_xyz_tiles():\n \"\"\"Returns a dictionary containing a set of basemaps that are XYZ tile layers.\n\n Returns:\n dict: A dictionary of XYZ tile layers.\n \"\"\"\n from .leafmap import leafmap_basemaps\n\n layers_dict = {}\n keys = dict(leafmap_basemaps).keys()\n for key in keys:\n if isinstance(leafmap_basemaps[key], ipyleaflet.WMSLayer):\n pass\n else:\n layers_dict[key] = leafmap_basemaps[key]\n return layers_dict\n\n\ndef to_hex_colors(colors):\n \"\"\"Adds # to a list of hex color codes.\n\n Args:\n colors (list): A list of hex color codes.\n\n Returns:\n list: A list of hex color codes prefixed with #.\n \"\"\"\n result = all([len(color.strip()) == 6 for color in colors])\n if result:\n return [\"#\" + color.strip() for color in colors]\n else:\n return colors\n\n\ndef display_html(src, width=950, height=600):\n \"\"\"Display an HTML file in a Jupyter Notebook.\n\n Args\n src (str): File path to HTML file.\n width (int, optional): Width of the map. Defaults to 950.\n height (int, optional): Height of the map. 
Defaults to 600.\n \"\"\"\n if not os.path.isfile(src):\n raise ValueError(f\"{src} is not a valid file path.\")\n display(IFrame(src=src, width=width, height=height))\n\n\ndef get_census_dict(reset=False):\n \"\"\"Returns a dictionary of Census data.\n\n Args:\n reset (bool, optional): Reset the dictionary. Defaults to False.\n\n Returns:\n dict: A dictionary of Census data.\n \"\"\"\n import json\n import pkg_resources\n\n pkg_dir = os.path.dirname(pkg_resources.resource_filename(\"leafmap\", \"leafmap.py\"))\n census_data = os.path.join(pkg_dir, \"data/census_data.json\")\n\n if reset:\n\n from owslib.wms import WebMapService\n\n census_dict = {}\n\n names = [\n \"Current\",\n \"ACS 2021\",\n \"ACS 2019\",\n \"ACS 2018\",\n \"ACS 2017\",\n \"ACS 2016\",\n \"ACS 2015\",\n \"ACS 2014\",\n \"ACS 2013\",\n \"ACS 2012\",\n \"ECON 2012\",\n \"Census 2020\",\n \"Census 2010\",\n \"Physical Features\",\n \"Decennial Census 2020\",\n \"Decennial Census 2010\",\n \"Decennial Census 2000\",\n \"Decennial Physical Features\",\n ]\n\n links = {}\n\n print(\"Retrieving data. Please wait ...\")\n for name in names:\n if \"Decennial\" not in name:\n links[\n name\n ] = f\"https://tigerweb.geo.census.gov/arcgis/services/TIGERweb/tigerWMS_{name.replace(' ', '')}/MapServer/WMSServer\"\n else:\n links[\n name\n ] = f\"https://tigerweb.geo.census.gov/arcgis/services/Census2020/tigerWMS_{name.replace('Decennial', '').replace(' ', '')}/MapServer/WMSServer\"\n\n wms = WebMapService(links[name], timeout=300)\n layers = list(wms.contents)\n layers.sort()\n census_dict[name] = {\n \"url\": links[name],\n \"layers\": layers,\n # \"title\": wms.identification.title,\n # \"abstract\": wms.identification.abstract,\n }\n\n with open(census_data, \"w\") as f:\n json.dump(census_dict, f, indent=4)\n\n else:\n\n with open(census_data, \"r\") as f:\n census_dict = json.load(f)\n\n return census_dict\n\n\ndef search_xyz_services(keyword, name=None, list_only=True, add_prefix=True):\n \"\"\"Search for XYZ tile providers from xyzservices.\n\n Args:\n keyword (str): The keyword to search for.\n name (str, optional): The name of the xyz tile. Defaults to None.\n list_only (bool, optional): If True, only the list of services will be returned. Defaults to True.\n add_prefix (bool, optional): If True, the prefix \"xyz.\" will be added to the service name. Defaults to True.\n\n Returns:\n list: A list of XYZ tile providers.\n \"\"\"\n\n import xyzservices.providers as xyz\n\n if name is None:\n providers = xyz.filter(keyword=keyword).flatten()\n else:\n providers = xyz.filter(name=name).flatten()\n\n if list_only:\n if add_prefix:\n return [\"xyz.\" + provider for provider in providers]\n else:\n return [provider for provider in providers]\n else:\n return providers\n\n\ndef search_qms(keyword, limit=10, list_only=True, add_prefix=True):\n \"\"\"Search for QMS tile providers from Quick Map Services.\n\n Args:\n keyword (str): The keyword to search for.\n limit (int, optional): The maximum number of results to return. Defaults to 10.\n list_only (bool, optional): If True, only the list of services will be returned. Defaults to True.\n add_prefix (bool, optional): If True, the prefix \"qms.\" will be added to the service name. 
Defaults to True.\n\n Returns:\n list: A list of QMS tile providers.\n \"\"\"\n\n QMS_API = \"https://qms.nextgis.com/api/v1/geoservices\"\n services = requests.get(\n f\"{QMS_API}/?search={keyword}&type=tms&epsg=3857&limit={limit}\"\n )\n services = services.json()\n if services[\"results\"]:\n providers = services[\"results\"]\n if list_only:\n if add_prefix:\n return [\"qms.\" + provider[\"name\"] for provider in providers]\n else:\n return [provider[\"name\"] for provider in providers]\n else:\n return providers\n else:\n return None\n\n\ndef get_wms_layers(url):\n \"\"\"Returns a list of WMS layers from a WMS service.\n\n Args:\n url (str): The URL of the WMS service.\n\n Returns:\n list: A list of WMS layers.\n \"\"\"\n from owslib.wms import WebMapService\n\n wms = WebMapService(url)\n layers = list(wms.contents)\n layers.sort()\n return layers\n\n\ndef create_legend(\n legend_title=\"Legend\",\n legend_dict=None,\n legend_keys=None,\n legend_colors=None,\n builtin_legend=None,\n **kwargs,\n):\n \"\"\"Create a custom legend.\n\n Args:\n legend_title (str, optional): Title of the legend. Defaults to 'Legend'.\n legend_dict (dict, optional): A dictionary containing legend items as keys and color as values. If provided, legend_keys and legend_colors will be ignored. Defaults to None.\n legend_keys (list, optional): A list of legend keys. Defaults to None.\n legend_colors (list, optional): A list of legend colors. Defaults to None.\n builtin_legend (str, optional): Name of the builtin legend to add to the map. Defaults to None.\n\n \"\"\"\n import pkg_resources\n from .legends import builtin_legends\n\n pkg_dir = os.path.dirname(pkg_resources.resource_filename(\"leafmap\", \"leafmap.py\"))\n legend_template = os.path.join(pkg_dir, \"data/template/legend.html\")\n\n if \"min_width\" not in kwargs.keys():\n min_width = None\n if \"max_width\" not in kwargs.keys():\n max_width = None\n else:\n max_width = kwargs[\"max_width\"]\n if \"min_height\" not in kwargs.keys():\n min_height = None\n else:\n min_height = kwargs[\"min_height\"]\n if \"max_height\" not in kwargs.keys():\n max_height = None\n else:\n max_height = kwargs[\"max_height\"]\n if \"height\" not in kwargs.keys():\n height = None\n else:\n height = kwargs[\"height\"]\n if \"width\" not in kwargs.keys():\n width = None\n else:\n width = kwargs[\"width\"]\n\n if width is None:\n max_width = \"300px\"\n if height is None:\n max_height = \"400px\"\n\n if not os.path.exists(legend_template):\n print(\"The legend template does not exist.\")\n return\n\n if legend_keys is not None:\n if not isinstance(legend_keys, list):\n print(\"The legend keys must be a list.\")\n return\n else:\n legend_keys = [\"One\", \"Two\", \"Three\", \"Four\", \"etc\"]\n\n if legend_colors is not None:\n if not isinstance(legend_colors, list):\n print(\"The legend colors must be a list.\")\n return\n elif all(isinstance(item, tuple) for item in legend_colors):\n try:\n legend_colors = [rgb_to_hex(x) for x in legend_colors]\n except Exception as e:\n print(e)\n elif all((item.startswith(\"#\") and len(item) == 7) for item in legend_colors):\n pass\n elif all((len(item) == 6) for item in legend_colors):\n pass\n else:\n print(\"The legend colors must be a list of tuples.\")\n return\n else:\n legend_colors = [\n \"#8DD3C7\",\n \"#FFFFB3\",\n \"#BEBADA\",\n \"#FB8072\",\n \"#80B1D3\",\n ]\n\n if len(legend_keys) != len(legend_colors):\n print(\"The legend keys and values must be the same length.\")\n return\n\n allowed_builtin_legends = builtin_legends.keys()\n 
if builtin_legend is not None:\n if builtin_legend not in allowed_builtin_legends:\n print(\n \"The builtin legend must be one of the following: {}\".format(\n \", \".join(allowed_builtin_legends)\n )\n )\n return\n else:\n legend_dict = builtin_legends[builtin_legend]\n legend_keys = list(legend_dict.keys())\n legend_colors = list(legend_dict.values())\n\n if legend_dict is not None:\n if not isinstance(legend_dict, dict):\n print(\"The legend dict must be a dictionary.\")\n return\n else:\n legend_keys = list(legend_dict.keys())\n legend_colors = list(legend_dict.values())\n if all(isinstance(item, tuple) for item in legend_colors):\n try:\n legend_colors = [rgb_to_hex(x) for x in legend_colors]\n except Exception as e:\n print(e)\n\n header = []\n content = []\n footer = []\n\n with open(legend_template) as f:\n lines = f.readlines()\n lines[3] = lines[3].replace(\"Legend\", legend_title)\n header = lines[:6]\n footer = lines[11:]\n\n for index, key in enumerate(legend_keys):\n color = legend_colors[index]\n if not color.startswith(\"#\"):\n color = \"#\" + color\n item = \" <li><span style='background:{};'></span>{}</li>\\n\".format(\n color, key\n )\n content.append(item)\n\n legend_html = header + content + footer\n legend_text = \"\".join(legend_html)\n return legend_text\n\n\ndef streamlit_legend(html, width=None, height=None, scrolling=True):\n \"\"\"Streamlit function to display a legend.\n\n Args:\n html (str): The HTML string of the legend.\n width (str, optional): The width of the legend. Defaults to None.\n height (str, optional): The height of the legend. Defaults to None.\n scrolling (bool, optional): Whether to allow scrolling in the legend. Defaults to True.\n\n \"\"\"\n\n try:\n import streamlit.components.v1 as components\n\n components.html(html, width=width, height=height, scrolling=scrolling)\n\n except ImportError:\n print(\"Streamlit is not installed. Please run 'pip install streamlit'.\")\n return\n\n\ndef read_file_from_url(url, return_type=\"list\", encoding=\"utf-8\"):\n \"\"\"Reads a file from a URL.\n\n Args:\n url (str): The URL of the file.\n return_type (str, optional): The return type, can either be string or list. Defaults to \"list\".\n encoding (str, optional): The encoding of the file. Defaults to \"utf-8\".\n\n Raises:\n ValueError: The return type must be either list or string.\n\n Returns:\n str | list: The contents of the file.\n \"\"\"\n from urllib.request import urlopen\n\n if return_type == \"list\":\n return [line.decode(encoding).rstrip() for line in urlopen(url).readlines()]\n elif return_type == \"string\":\n return urlopen(url).read().decode(encoding)\n else:\n raise ValueError(\"The return type must be either list or string.\")\n\n\ndef st_download_button(\n label,\n data,\n file_name=None,\n mime=None,\n key=None,\n help=None,\n on_click=None,\n args=None,\n csv_sep=\",\",\n **kwargs,\n):\n \"\"\"Streamlit function to create a download button.\n\n Args:\n label (str): A short label explaining to the user what this button is for..\n data (str | list): The contents of the file to be downloaded. See example below for caching techniques to avoid recomputing this data unnecessarily.\n file_name (str, optional): An optional string to use as the name of the file to be downloaded, such as 'my_file.csv'. If not specified, the name will be automatically generated. Defaults to None.\n mime (str, optional): The MIME type of the data. 
If None, defaults to \"text/plain\" (if data is of type str or is a textual file) or \"application/octet-stream\" (if data is of type bytes or is a binary file). Defaults to None.\n key (str, optional): An optional string or integer to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Defaults to None.\n help (str, optional): An optional tooltip that gets displayed when the button is hovered over. Defaults to None.\n on_click (str, optional): An optional callback invoked when this button is clicked. Defaults to None.\n args (list, optional): An optional tuple of args to pass to the callback. Defaults to None.\n kwargs (dict, optional): An optional tuple of args to pass to the callback.\n\n \"\"\"\n try:\n import streamlit as st\n import pandas as pd\n\n if isinstance(data, str):\n\n if file_name is None:\n file_name = data.split(\"/\")[-1]\n\n if data.endswith(\".csv\"):\n data = pd.read_csv(data).to_csv(sep=csv_sep, index=False)\n if mime is None:\n mime = \"text/csv\"\n return st.download_button(\n label, data, file_name, mime, key, help, on_click, args, **kwargs\n )\n elif (\n data.endswith(\".gif\") or data.endswith(\".png\") or data.endswith(\".jpg\")\n ):\n if mime is None:\n mime = f\"image/{os.path.splitext(data)[1][1:]}\"\n\n with open(data, \"rb\") as file:\n return st.download_button(\n label,\n file,\n file_name,\n mime,\n key,\n help,\n on_click,\n args,\n **kwargs,\n )\n elif isinstance(data, pd.DataFrame):\n if file_name is None:\n file_name = \"data.csv\"\n\n data = data.to_csv(sep=csv_sep, index=False)\n if mime is None:\n mime = \"text/csv\"\n return st.download_button(\n label, data, file_name, mime, key, help, on_click, args, **kwargs\n )\n\n else:\n # if mime is None:\n # mime = \"application/pdf\"\n return st.download_button(\n label,\n data,\n file_name,\n mime,\n key,\n help,\n on_click,\n args,\n **kwargs,\n )\n\n except ImportError:\n print(\"Streamlit is not installed. Please run 'pip install streamlit'.\")\n return\n except Exception as e:\n raise Exception(e)\n\n\ndef save_data(data, file_ext=None, file_name=None):\n \"\"\"Save data in the memory to a file.\n\n Args:\n data (object): The data to be saved.\n file_ext (str): The file extension of the file.\n file_name (str, optional): The name of the file to be saved. 
Defaults to None.\n\n Returns:\n str: The path of the file.\n \"\"\"\n import tempfile\n import uuid\n\n try:\n\n if file_ext is None:\n if hasattr(data, \"name\"):\n _, file_ext = os.path.splitext(data.name)\n else:\n if not file_ext.startswith(\".\"):\n file_ext = \".\" + file_ext\n\n if file_name is not None:\n file_path = os.path.abspath(file_name)\n if not file_path.endswith(file_ext):\n file_path = file_path + file_ext\n else:\n file_id = str(uuid.uuid4())\n file_path = os.path.join(tempfile.gettempdir(), f\"{file_id}{file_ext}\")\n\n with open(file_path, \"wb\") as file:\n file.write(data.getbuffer())\n return file_path\n except Exception as e:\n print(e)\n return None\n\n\ndef temp_file_path(extension):\n \"\"\"Returns a temporary file path.\n\n Args:\n extension (str): The file extension.\n\n Returns:\n str: The temporary file path.\n \"\"\"\n\n import tempfile\n import os\n import uuid\n\n if not extension.startswith(\".\"):\n extension = \".\" + extension\n file_id = str(uuid.uuid4())\n file_path = os.path.join(tempfile.gettempdir(), f\"{file_id}{extension}\")\n\n return file_path\n\n\ndef get_local_tile_layer(\n source,\n port=\"default\",\n debug=False,\n projection=\"EPSG:3857\",\n band=None,\n palette=None,\n vmin=None,\n vmax=None,\n nodata=None,\n attribution=None,\n tile_format=\"ipyleaflet\",\n layer_name=None,\n get_center=False,\n get_bounds=False,\n **kwargs,\n):\n \"\"\"Generate an ipyleaflet/folium TileLayer from a local raster dataset or remote Cloud Optimized GeoTIFF (COG).\n\n Args:\n source (str): The path to the GeoTIFF file or the URL of the Cloud Optimized GeoTIFF.\n port (str, optional): The port to use for the server. Defaults to \"default\".\n debug (bool, optional): If True, the server will be started in debug mode. Defaults to False.\n projection (str, optional): The projection of the GeoTIFF. Defaults to \"EPSG:3857\".\n band (int, optional): The band to use. Band indexing starts at 1. Defaults to None.\n palette (str, optional): The name of the color palette from `palettable` to use when plotting a single band. See https://jiffyclub.github.io/palettable. Default is greyscale\n vmin (float, optional): The minimum value to use when colormapping the palette when plotting a single band. Defaults to None.\n vmax (float, optional): The maximum value to use when colormapping the palette when plotting a single band. Defaults to None.\n nodata (float, optional): The value from the band to use to interpret as not valid data. Defaults to None.\n attribution (str, optional): Attribution for the source raster. This defaults to a message about it being a local file.. Defaults to None.\n tile_format (str, optional): The tile layer format. Can be either ipyleaflet or folium. Defaults to \"ipyleaflet\".\n layer_name (str, optional): The layer name to use. Defaults to None.\n get_center (bool, optional): If True, the center of the layer will be returned. Defaults to False.\n get_bounds (bool, optional): If True, the bounds [minx, miny, maxx, maxy] of the layer will be returned. 
Defaults to False.\n\n Returns:\n ipyleaflet.TileLayer | folium.TileLayer: An ipyleaflet.TileLayer or folium.TileLayer.\n \"\"\"\n\n check_package(\n \"localtileserver\", URL=\"https://github.com/banesullivan/localtileserver\"\n )\n\n from localtileserver import (\n get_leaflet_tile_layer,\n get_folium_tile_layer,\n TileClient,\n )\n\n if isinstance(source, str):\n if not source.startswith(\"http\"):\n source = os.path.abspath(source)\n if not os.path.exists(source):\n raise ValueError(\"The source path does not exist.\")\n else:\n raise ValueError(\"The source must either be a string or TileClient\")\n\n if tile_format not in [\"ipyleaflet\", \"folium\"]:\n raise ValueError(\"The tile format must be either ipyleaflet or folium.\")\n\n if layer_name is None:\n if source.startswith(\"http\"):\n layer_name = \"RemoteTile_\" + random_string(3)\n else:\n layer_name = \"LocalTile_\" + random_string(3)\n\n tile_client = TileClient(source, port=port, debug=debug)\n\n if tile_format == \"ipyleaflet\":\n tile_layer = get_leaflet_tile_layer(\n tile_client,\n port=port,\n debug=debug,\n projection=projection,\n band=band,\n palette=palette,\n vmin=vmin,\n vmax=vmax,\n nodata=nodata,\n attribution=attribution,\n name=layer_name,\n **kwargs,\n )\n else:\n tile_layer = get_folium_tile_layer(\n tile_client,\n port=port,\n debug=debug,\n projection=projection,\n band=band,\n palette=palette,\n vmin=vmin,\n vmax=vmax,\n nodata=nodata,\n attr=attribution,\n overlay=True,\n name=layer_name,\n **kwargs,\n )\n\n center = tile_client.center()\n bounds = tile_client.bounds() # [ymin, ymax, xmin, xmax]\n bounds = (bounds[2], bounds[0], bounds[3], bounds[1]) # [minx, miny, maxx, maxy]\n\n if get_center and get_bounds:\n return tile_layer, center, bounds\n elif get_center:\n return tile_layer, center\n elif get_bounds:\n return tile_layer, bounds\n else:\n return tile_layer\n\n\ndef get_palettable(types=None):\n \"\"\"Get a list of palettable color palettes.\n\n Args:\n types (list, optional): A list of palettable types to return, e.g., types=['matplotlib', 'cartocolors']. 
Defaults to None.\n\n Returns:\n list: A list of palettable color palettes.\n \"\"\"\n import palettable\n\n if types is not None and (not isinstance(types, list)):\n raise ValueError(\"The types must be a list.\")\n\n allowed_palettes = [\n \"cartocolors\",\n \"cmocean\",\n \"colorbrewer\",\n \"cubehelix\",\n \"lightbartlein\",\n \"matplotlib\",\n \"mycarta\",\n \"scientific\",\n \"tableau\",\n \"wesanderson\",\n ]\n\n if types is None:\n types = allowed_palettes[:]\n\n if all(x in allowed_palettes for x in types):\n pass\n else:\n raise ValueError(\n \"The types must be one of the following: \" + \", \".join(allowed_palettes)\n )\n\n palettes = []\n\n if \"cartocolors\" in types:\n\n cartocolors_diverging = [\n f\"cartocolors.diverging.{c}\"\n for c in dir(palettable.cartocolors.diverging)[:-19]\n ]\n cartocolors_qualitative = [\n f\"cartocolors.qualitative.{c}\"\n for c in dir(palettable.cartocolors.qualitative)[:-19]\n ]\n cartocolors_sequential = [\n f\"cartocolors.sequential.{c}\"\n for c in dir(palettable.cartocolors.sequential)[:-41]\n ]\n\n palettes = (\n palettes\n + cartocolors_diverging\n + cartocolors_qualitative\n + cartocolors_sequential\n )\n\n if \"cmocean\" in types:\n\n cmocean_diverging = [\n f\"cmocean.diverging.{c}\" for c in dir(palettable.cmocean.diverging)[:-19]\n ]\n cmocean_sequential = [\n f\"cmocean.sequential.{c}\" for c in dir(palettable.cmocean.sequential)[:-19]\n ]\n\n palettes = palettes + cmocean_diverging + cmocean_sequential\n\n if \"colorbrewer\" in types:\n\n colorbrewer_diverging = [\n f\"colorbrewer.diverging.{c}\"\n for c in dir(palettable.colorbrewer.diverging)[:-19]\n ]\n colorbrewer_qualitative = [\n f\"colorbrewer.qualitative.{c}\"\n for c in dir(palettable.colorbrewer.qualitative)[:-19]\n ]\n colorbrewer_sequential = [\n f\"colorbrewer.sequential.{c}\"\n for c in dir(palettable.colorbrewer.sequential)[:-41]\n ]\n\n palettes = (\n palettes\n + colorbrewer_diverging\n + colorbrewer_qualitative\n + colorbrewer_sequential\n )\n\n if \"cubehelix\" in types:\n cubehelix = [\n \"classic_16\",\n \"cubehelix1_16\",\n \"cubehelix2_16\",\n \"cubehelix3_16\",\n \"jim_special_16\",\n \"perceptual_rainbow_16\",\n \"purple_16\",\n \"red_16\",\n ]\n cubehelix = [f\"cubehelix.{c}\" for c in cubehelix]\n palettes = palettes + cubehelix\n\n if \"lightbartlein\" in types:\n lightbartlein_diverging = [\n f\"lightbartlein.diverging.{c}\"\n for c in dir(palettable.lightbartlein.diverging)[:-19]\n ]\n lightbartlein_sequential = [\n f\"lightbartlein.sequential.{c}\"\n for c in dir(palettable.lightbartlein.sequential)[:-19]\n ]\n\n palettes = palettes + lightbartlein_diverging + lightbartlein_sequential\n\n if \"matplotlib\" in types:\n matplotlib_colors = [\n f\"matplotlib.{c}\" for c in dir(palettable.matplotlib)[:-16]\n ]\n palettes = palettes + matplotlib_colors\n\n if \"mycarta\" in types:\n mycarta = [f\"mycarta.{c}\" for c in dir(palettable.mycarta)[:-16]]\n palettes = palettes + mycarta\n\n if \"scientific\" in types:\n scientific_diverging = [\n f\"scientific.diverging.{c}\"\n for c in dir(palettable.scientific.diverging)[:-19]\n ]\n scientific_sequential = [\n f\"scientific.sequential.{c}\"\n for c in dir(palettable.scientific.sequential)[:-19]\n ]\n\n palettes = palettes + scientific_diverging + scientific_sequential\n\n if \"tableau\" in types:\n tableau = [f\"tableau.{c}\" for c in dir(palettable.tableau)[:-14]]\n palettes = palettes + tableau\n\n return palettes\n\n\ndef points_from_xy(data, x=\"longitude\", y=\"latitude\", z=None, crs=None, **kwargs):\n 
\"\"\"Create a GeoPandas GeoDataFrame from a csv or Pandas DataFrame containing x, y, z values.\n\n Args:\n data (str | pd.DataFrame): A csv or Pandas DataFrame containing x, y, z values.\n x (str, optional): The column name for the x values. Defaults to \"longitude\".\n y (str, optional): The column name for the y values. Defaults to \"latitude\".\n z (str, optional): The column name for the z values. Defaults to None.\n crs (str | int, optional): The coordinate reference system for the GeoDataFrame. Defaults to None.\n\n Returns:\n geopandas.GeoDataFrame: A GeoPandas GeoDataFrame containing x, y, z values.\n \"\"\"\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n import geopandas as gpd\n import pandas as pd\n\n if crs is None:\n crs = \"epsg:4326\"\n\n if isinstance(data, pd.DataFrame):\n df = data\n elif isinstance(data, str):\n if not data.startswith(\"http\") and (not os.path.exists(data)):\n raise FileNotFoundError(\"The specified input csv does not exist.\")\n else:\n df = pd.read_csv(data, **kwargs)\n else:\n raise TypeError(\"The data must be a pandas DataFrame or a csv file path.\")\n\n gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df[x], df[y], z=z, crs=crs))\n\n return gdf\n\n\ndef html_to_streamlit(\n html,\n width=800,\n height=600,\n responsive=True,\n scrolling=False,\n token_name=None,\n token_value=None,\n **kwargs,\n):\n \"\"\"Renders an HTML file in a Streamlit app. This method is a static Streamlit Component, meaning, no information is passed back from Leaflet on browser interaction.\n\n Args:\n html (str): The HTML file to render. It can a local file path or a URL.\n width (int, optional): Width of the map. Defaults to 800.\n height (int, optional): Height of the map. Defaults to 600.\n responsive (bool, optional): Whether to make the map responsive. Defaults to True.\n scrolling (bool, optional): Whether to allow the map to scroll. Defaults to False.\n token_name (str, optional): The name of the token in the HTML file to be replaced. Defaults to None.\n token_value (str, optional): The value of the token to pass to the HTML file. Defaults to None.\n\n Returns:\n streamlit.components: components.html object.\n \"\"\"\n\n try:\n import streamlit as st\n import streamlit.components.v1 as components\n\n if isinstance(html, str):\n\n temp_path = None\n if html.startswith(\"http\") and html.endswith(\".html\"):\n temp_path = temp_file_path(\".html\")\n out_file = os.path.basename(temp_path)\n out_dir = os.path.dirname(temp_path)\n download_from_url(html, out_file, out_dir)\n html = temp_path\n\n elif not os.path.exists(html):\n raise FileNotFoundError(\"The specified input html does not exist.\")\n\n with open(html) as f:\n lines = f.readlines()\n if (token_name is not None) and (token_value is not None):\n lines = [line.replace(token_name, token_value) for line in lines]\n html_str = \"\".join(lines)\n\n if temp_path is not None:\n os.remove(temp_path)\n\n if responsive:\n make_map_responsive = \"\"\"\n <style>\n [title~=\"st.iframe\"] { width: 100%}\n </style>\n \"\"\"\n st.markdown(make_map_responsive, unsafe_allow_html=True)\n return components.html(\n html_str, width=width, height=height, scrolling=scrolling\n )\n else:\n raise TypeError(\"The html must be a string.\")\n\n except Exception as e:\n raise Exception(e)\n\n\ndef cesium_to_streamlit(\n html,\n width=800,\n height=600,\n responsive=True,\n scrolling=False,\n token_name=None,\n token_value=None,\n **kwargs,\n):\n \"\"\"Renders an cesium HTML file in a Streamlit app. 
This method is a static Streamlit Component, meaning, no information is passed back from Leaflet on browser interaction.\n\n Args:\n html (str): The HTML file to render. It can a local file path or a URL.\n width (int, optional): Width of the map. Defaults to 800.\n height (int, optional): Height of the map. Defaults to 600.\n responsive (bool, optional): Whether to make the map responsive. Defaults to True.\n scrolling (bool, optional): Whether to allow the map to scroll. Defaults to False.\n token_name (str, optional): The name of the token in the HTML file to be replaced. Defaults to None.\n token_value (str, optional): The value of the token to pass to the HTML file. Defaults to None.\n\n Returns:\n streamlit.components: components.html object.\n \"\"\"\n if token_name is None:\n token_name = \"your_access_token\"\n\n if token_value is None:\n token_value = os.environ.get(\"CESIUM_TOKEN\")\n\n html_to_streamlit(\n html, width, height, responsive, scrolling, token_name, token_value\n )\n\n\ndef geom_type(in_geojson, encoding=\"utf-8\"):\n \"\"\"Returns the geometry type of a GeoJSON object.\n\n Args:\n in_geojson (dict): A GeoJSON object.\n encoding (str, optional): The encoding of the GeoJSON object. Defaults to \"utf-8\".\n\n Returns:\n str: The geometry type of the GeoJSON object.\n \"\"\"\n import json\n\n try:\n\n if isinstance(in_geojson, str):\n\n if in_geojson.startswith(\"http\"):\n data = requests.get(in_geojson).json()\n else:\n in_geojson = os.path.abspath(in_geojson)\n if not os.path.exists(in_geojson):\n raise FileNotFoundError(\n \"The provided GeoJSON file could not be found.\"\n )\n\n with open(in_geojson, encoding=encoding) as f:\n data = json.load(f)\n elif isinstance(in_geojson, dict):\n data = in_geojson\n else:\n raise TypeError(\"The input geojson must be a type of str or dict.\")\n\n return data[\"features\"][0][\"geometry\"][\"type\"]\n\n except Exception as e:\n raise Exception(e)\n\n\ndef geojson_to_df(in_geojson, encoding=\"utf-8\"):\n \"\"\"Converts a GeoJSON object to a pandas DataFrame.\n\n Args:\n in_geojson (str | dict): The input GeoJSON file or dict.\n encoding (str, optional): The encoding of the GeoJSON object. Defaults to \"utf-8\".\n\n Raises:\n FileNotFoundError: If the input GeoJSON file could not be found.\n\n Returns:\n pd.DataFrame: A pandas DataFrame containing the GeoJSON object.\n \"\"\"\n\n import json\n import pandas as pd\n from urllib.request import urlopen\n\n if isinstance(in_geojson, str):\n\n if in_geojson.startswith(\"http\"):\n with urlopen(in_geojson) as f:\n data = json.load(f)\n else:\n in_geojson = os.path.abspath(in_geojson)\n if not os.path.exists(in_geojson):\n raise FileNotFoundError(\"The provided GeoJSON file could not be found.\")\n\n with open(in_geojson, encoding=encoding) as f:\n data = json.load(f)\n\n elif isinstance(in_geojson, dict):\n data = in_geojson\n\n df = pd.json_normalize(data[\"features\"])\n df.columns = [col.replace(\"properties.\", \"\") for col in df.columns]\n return df\n\n\ndef bbox_to_gdf(bbox, crs=\"EPSG:4326\"):\n \"\"\"Converts a bounding box to a GeoDataFrame.\n\n Args:\n bbox (tuple): A bounding box in the form of a tuple (minx, miny, maxx, maxy).\n crs (str, optional): The coordinate reference system of the bounding box to convert to. 
Defaults to \"EPSG:4326\".\n\n Returns:\n geopandas.GeoDataFrame: A GeoDataFrame containing the bounding box.\n \"\"\"\n check_package(name=\"geopandas\", URL=\"https://geopandas.org\")\n from shapely.geometry import box\n from geopandas import GeoDataFrame\n\n minx, miny, maxx, maxy = bbox\n geometry = box(minx, miny, maxx, maxy)\n d = {\"geometry\": [geometry]}\n gdf = GeoDataFrame(d, crs=\"EPSG:4326\")\n gdf.to_crs(crs=crs, inplace=True)\n return gdf\n"
] |
[
[
"pandas.json_normalize",
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0"
],
"scipy": [],
"tensorflow": []
}
] |
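The code cell in the record above builds Planet basemap tile URLs by concatenating a fixed prefix, a date token, and an API-key suffix, with the key read from the PLANET_API_KEY environment variable when none is passed. The sketch below is a standalone illustration of that URL pattern only; the function name `monthly_mosaic_urls` and the `start_year` argument are invented here and are not part of the record.

```python
import os
from datetime import date

def monthly_mosaic_urls(api_key=None, start_year=2016):
    """Return global_monthly mosaic tile URLs up to, but not including, the current month."""
    if api_key is None:
        api_key = os.environ.get("PLANET_API_KEY")
    if api_key is None:
        raise ValueError("The Planet API Key must be provided.")

    today = date.today()
    prefix = "https://tiles.planet.com/basemaps/v1/planet-tiles/global_monthly_"
    suffix = "_mosaic/gmap/{z}/{x}/{y}.png?api_key="

    urls = []
    for year in range(start_year, today.year + 1):
        for month in range(1, 13):
            if year == today.year and month >= today.month:
                break
            urls.append(f"{prefix}{year}_{month:02d}{suffix}{api_key}")
    return urls

# urls = monthly_mosaic_urls(api_key="my-key")   # "my-key" is a placeholder
```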
kreimanlab/AugMem
|
[
"cb0e8d39eb0c469da46c7c550c19229927a2bec5"
] |
[
"other_models/COPE/model/reservoir.py"
] |
[
"\"\"\"This is reservoir sampling, each sample has storage-probability 'buffer samples M / seen samples'\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport random\n\n\nclass Net(nn.Module):\n def __init__(self,\n n_inputs,\n n_outputs,\n n_tasks,\n args):\n super(Net, self).__init__()\n self.net = args.net\n\n self.ce = nn.CrossEntropyLoss()\n self.n_outputs = n_outputs\n\n self.opt = optim.SGD(self.parameters(), args.lr)\n\n self.n_memories = args.n_memories\n self.n_sampled_memories = args.n_sampled_memories\n self.n_constraints = args.n_constraints\n self.gpu = args.cuda\n\n self.batch_size = args.batch_size\n self.n_iter = args.n_iter\n\n # allocate ring buffer\n self.x_buffer = []\n self.y_buffer = []\n\n # allocate counters\n self.observed_tasks = []\n self.old_task = -1\n self.seen_cnt = 0\n\n def forward(self, x, t=0):\n output = self.net(x)\n return output\n\n def observe(self, x, t, y):\n \"\"\" Train. \"\"\"\n # Update ring buffer storing examples from current task\n bsz = y.data.size(0)\n\n # Step over new batch and batch from memory\n for iter_i in range(self.n_iter):\n self.zero_grad()\n x_init = x.clone()\n y_init = y.clone()\n if self.gpu:\n x_init = x_init.cuda()\n y_init = y_init.cuda()\n\n sample_bs = bsz\n if self.n_memories > 0 and len(self.x_buffer) > 0: # Sample\n perm = torch.randperm(len(self.x_buffer))\n idx = perm[:sample_bs]\n x_s = torch.stack(self.x_buffer)[idx]\n y_s = torch.stack(self.y_buffer)[idx]\n x_s, y_s = (x_s.cuda(), y_s.cuda()) if self.gpu else (x_s.cpu(), y_s.cpu())\n x_ext = torch.cat([x_init, x_s])\n y_ext = torch.cat([y_init, y_s])\n else:\n x_ext = x_init\n y_ext = y_init\n\n loss = self.ce(self.forward(x_ext), y_ext)\n loss.backward()\n self.opt.step()\n\n # Update buffers\n for i in range(bsz):\n if self.seen_cnt < self.n_memories:\n self.x_buffer.append(x[i])\n self.y_buffer.append(y[i])\n else:\n j = random.randrange(self.seen_cnt)\n if j < self.n_memories:\n self.x_buffer[j] = x[i]\n self.y_buffer[j] = y[i]\n self.seen_cnt += 1\n\n assert len(self.x_buffer) <= self.n_memories\n assert len(self.x_buffer) == len(self.y_buffer)\n\n def get_hyperparam_list(self, args):\n return []\n"
] |
[
[
"torch.stack",
"torch.nn.CrossEntropyLoss",
"torch.cat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
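The reservoir.py record above keeps a fixed-size replay buffer with reservoir sampling: the first `n_memories` samples are stored unconditionally, and every later sample overwrites a uniformly chosen slot with probability `n_memories / seen_cnt`. Below is a minimal standalone sketch of just that update rule; the class and method names are invented for illustration.

```python
import random

class Reservoir:
    """Fixed-capacity buffer updated with the same rule as observe() in the record."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.items = []
        self.seen = 0

    def add(self, item):
        if self.seen < self.capacity:
            # Buffer not full yet: always store.
            self.items.append(item)
        else:
            # Keep the new item with probability capacity / seen,
            # overwriting a uniformly chosen stored item.
            j = random.randrange(self.seen)
            if j < self.capacity:
                self.items[j] = item
        self.seen += 1

# buffer = Reservoir(capacity=1000)
# for x in data_stream:   # data_stream is hypothetical
#     buffer.add(x)
```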
levincoolxyz/PPO-PyTorch
|
[
"d3e08663d7feda2cc27158734a37cc2e992e1471"
] |
[
"test_clonedAll_ants.py"
] |
[
"#!/usr/bin/python3\n\nimport gym\nfrom gym import wrappers\nimport time\nfrom PPO_clonedAll_ants import PPO, Memory\nfrom PIL import Image\nimport torch\nimport numpy as np\nimport ants\n\ndeviceName = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\ndeviceName = \"cpu\"\ndevice = torch.device(deviceName)\n\ndef test():\n ############## Hyperparameters ##############\n env_name = \"AntsEnv-v0\"\n # Nants = 20\n Nants = 12\n # Nants = 6\n goalDir = 0\n # env = gym.make(env_name,Nmax=Nants,goalDir=goalDir) \n env = ants.AntsEnv(Nmax=Nants,goalDir=goalDir)\n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.shape[0]\n \n n_episodes = 3 # num of episodes to run\n max_timesteps = 500 # max timesteps in one episode\n render = True # render the environment\n save_gif = False # png images are saved in gif folder\n \n # filename and directory to load model from\n deviceName = \"cpu\"\n # filename = \"PPO_clonedAll_solved_{}.pth\".format(env_name)\n # filename = \"PPO_clonedAll_{}.pth\".format(env_name)\n filename = \"PPO_clonedAll_{}_{}.pth\".format(env_name,deviceName)\n # directory = \"./preTrained/\"\n directory = \"./\"\n\n action_std = 0.01 # constant std for action distribution (Multivariate Normal)\n K_epochs = 80 # update policy for K epochs\n eps_clip = 0.2 # clip parameter for PPO\n gamma = 0.99 # discount factor\n \n lr = 0.0003 # parameters for Adam optimizer\n betas = (0.9, 0.999)\n #############################################\n \n memory = Memory()\n ppo = PPO(state_dim, action_dim, action_std, lr, betas, gamma, K_epochs, eps_clip)\n\n preTrainedParam = torch.load(directory+filename, map_location=device)\n preTrainedParam.pop('critic.0.weight',None)\n preTrainedParam.pop('critic.2.weight',None)\n preTrainedParam.pop('critic.4.weight',None)\n preTrainedParam.pop('critic.0.bias',None)\n preTrainedParam.pop('critic.2.bias',None)\n preTrainedParam.pop('critic.4.bias',None)\n load_existing_param(ppo.policy_old,preTrainedParam)\n \n for ep in range(1, n_episodes+1):\n ep_reward = 0\n env = wrappers.Monitor(env, './results/cloned/' + str(time.time()) + '/')\n # observation = env.reset()\n observation = env.reset(rand=True)\n for t in range(max_timesteps):\n action = ppo.select_action(observation, memory)\n observation, reward, done, _ = env.step(action)\n ep_reward += reward\n if render:\n env.render()\n if save_gif:\n img = env.render(mode = 'rgb_array')\n img = Image.fromarray(img)\n img.save('./gif/{}.jpg'.format(t)) \n if done:\n break\n \n print('Episode: {}\\tReward: {}'.format(ep, ep_reward))\n ep_reward = 0\n env.close()\n \ndef load_existing_param(network, state_dict):\n\n own_state = network.state_dict()\n for name, param in state_dict.items():\n if name not in own_state:\n continue\n else:\n own_state[name].copy_(param)\n return network\n \nif __name__ == '__main__':\n test()\n \n "
] |
[
[
"torch.device",
"torch.cuda.is_available",
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
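In test_clonedAll_ants.py above, the critic weights are popped from the checkpoint and `load_existing_param` copies only the remaining entries whose names exist in the target network. The snippet below sketches that partial-loading pattern on its own; the function name `load_matching_params` and the extra shape check are additions for this example, not part of the record.

```python
import torch
import torch.nn as nn

def load_matching_params(network: nn.Module, state_dict: dict) -> nn.Module:
    """Copy checkpoint tensors into `network` only for names it actually has."""
    own_state = network.state_dict()
    for name, param in state_dict.items():
        if name in own_state and own_state[name].shape == param.shape:
            own_state[name].copy_(param)  # in-place copy into the live tensor
    return network

# Usage sketch (file name as in the record; the `actor` module is hypothetical):
# ckpt = torch.load("PPO_clonedAll_AntsEnv-v0_cpu.pth", map_location="cpu")
# actor = load_matching_params(actor, ckpt)
```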
waleedgondal/disentanglement_lib
|
[
"949d737a283079d26c5f7f59a43c06d699aa3531"
] |
[
"disentanglement_lib/data/ground_truth/mpi3d.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The DisentanglementLib Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"MPI3D data set.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom disentanglement_lib.data.ground_truth import ground_truth_data\nfrom disentanglement_lib.data.ground_truth import util\nimport numpy as np\nimport tensorflow as tf\n\n\nclass MPI3D(ground_truth_data.GroundTruthData):\n \"\"\"MPI3D dataset.\n\n MPI3D datasets have been introduced as a part of NEURIPS 2019 Disentanglement\n Competition.(http://www.disentanglement-challenge.com).\n There are three different datasets:\n 1. Simplistic rendered images (mpi3d_toy).\n 2. Realistic rendered images (mpi3d_realistic).\n 3. Real world images (mpi3d_real).\n\n Currently, mpi3d_toy and mpi3d_realistic are publicly available. More details about this\n dataset can be found in \"On the Transfer of Inductive Bias from Simulation to\n the Real World: a New Disentanglement Dataset\"\n (https://arxiv.org/abs/1906.03292).\n\n The ground-truth factors of variation in the dataset are:\n 0 - Object color (4 different values)\n 1 - Object shape (4 different values)\n 2 - Object size (2 different values)\n 3 - Camera height (3 different values)\n 4 - Background colors (3 different values)\n 5 - First DOF (40 different values)\n 6 - Second DOF (40 different values)\n \"\"\"\n\n def __init__(self, mode=\"mpi3d_toy\"):\n if mode == \"mpi3d_toy\":\n mpi3d_path = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"mpi3d_toy\",\n \"mpi3d_toy.npz\")\n if not tf.io.gfile.exists(mpi3d_path):\n raise ValueError(\n \"Dataset '{}' not found. Make sure the dataset is publicly available and downloaded correctly.\"\n .format(mode))\n else:\n with tf.io.gfile.GFile(mpi3d_path, \"rb\") as f:\n data = np.load(f)\n elif mode == \"mpi3d_realistic\":\n mpi3d_path = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"mpi3d_realistic\",\n \"mpi3d_realistic.npz\")\n if not tf.io.gfile.exists(mpi3d_path):\n raise ValueError(\n \"Dataset '{}' not found. Make sure the dataset is publicly available and downloaded correctly.\"\n .format(mode))\n else:\n with tf.io.gfile.GFile(mpi3d_path, \"rb\") as f:\n data = np.load(f)\n elif mode == \"mpi3d_real\":\n mpi3d_path = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"mpi3d_real\",\n \"mpi3d_real.npz\")\n if not tf.io.gfile.exists(mpi3d_path):\n raise ValueError(\n \"Dataset '{}' not found. 
Make sure the dataset is publicly available and downloaded correctly.\"\n .format(mode))\n else:\n with tf.io.gfile.GFile(mpi3d_path, \"rb\") as f:\n data = np.load(f)\n else:\n raise ValueError(\"Unknown mode provided.\")\n\n self.images = data[\"images\"]\n self.factor_sizes = [4, 4, 2, 3, 3, 40, 40]\n self.latent_factor_indices = [0, 1, 2, 3, 4, 5, 6]\n self.num_total_factors = 7\n self.state_space = util.SplitDiscreteStateSpace(self.factor_sizes,\n self.latent_factor_indices)\n self.factor_bases = np.prod(self.factor_sizes) / np.cumprod(\n self.factor_sizes)\n\n @property\n def num_factors(self):\n return self.state_space.num_latent_factors\n\n @property\n def factors_num_values(self):\n return self.factor_sizes\n\n @property\n def observation_shape(self):\n return [64, 64, 3]\n\n def sample_factors(self, num, random_state):\n \"\"\"Sample a batch of factors Y.\"\"\"\n return self.state_space.sample_latent_factors(num, random_state)\n\n def sample_observations_from_factors(self, factors, random_state):\n all_factors = self.state_space.sample_all_factors(factors, random_state)\n indices = np.array(np.dot(all_factors, self.factor_bases), dtype=np.int64)\n return self.images[indices] / 255.\n"
] |
[
[
"numpy.dot",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile",
"numpy.cumprod",
"numpy.prod",
"numpy.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
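mpi3d.py above converts a vector of ground-truth factors into a flat image index via `factor_bases`, which are simply the row-major strides of the factor grid. A self-contained sketch of that mapping, using the factor sizes listed in the record's docstring (the helper name `flat_index` is invented here):

```python
import numpy as np

factor_sizes = [4, 4, 2, 3, 3, 40, 40]   # color, shape, size, height, background, DOF1, DOF2
factor_bases = np.prod(factor_sizes) / np.cumprod(factor_sizes)  # row-major strides

def flat_index(factors):
    """Map one value per factor (in the order above) to the flat image index."""
    return int(np.dot(factors, factor_bases))

# The last factor varies fastest, the first slowest:
assert flat_index([0, 0, 0, 0, 0, 0, 1]) == 1
assert flat_index([1, 0, 0, 0, 0, 0, 0]) == np.prod(factor_sizes) // factor_sizes[0]
```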
dkirel/TwitterSentiment
|
[
"ee2ac58f52e687a2cea1f5113dfee3769fb9f693"
] |
[
"vote_classifier.py"
] |
[
"import nltk\nimport pickle\n\nfrom nltk.classify import ClassifierI\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom nltk.metrics.scores import precision, recall\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom statistics import mode\n\n\nclass VoteClassifier(ClassifierI):\n\n def __init__(self, *classifier_classes):\n self.classifier_classes = classifier_classes\n self.classifiers = []\n\n def classify(self, features):\n if self.classifiers:\n votes = [c.classify(features) for c in self.classifiers]\n return mode(votes)\n else:\n return 'Classifier has not been trained or loaded'\n\n def classify_text(self, text):\n features = self.find_features(text)\n return self.classify(features), self.confidence(features)\n\n def confidence(self, features):\n votes = [c.classify(features) for c in self.classifiers]\n return votes.count(mode(votes))/len(votes)\n\n def load_pickle(self):\n # Word Features\n open_file = open('pickled_files/word_features.pickle', 'rb')\n self.word_features = pickle.load(open_file)\n open_file.close()\n \n # Normal Naive Bayes\n open_file = open('pickled_files/NaiveBayesClassifier.pickle', 'rb')\n classifier = pickle.load(open_file)\n open_file.close()\n\n # Other classifiers\n classifiers = {'NaiveBayesClassifier': classifier}\n for classifier_class in self.classifier_classes:\n open_file = open('pickled_files/' + classifier_class.__name__ + '.pickle', 'rb')\n classifier = pickle.load(open_file)\n open_file.close()\n\n self.classifiers = list(classifiers.values())\n\n def train(self, documents, all_words, num_features=5000):\n # Save documents\n save_file = open('pickled_files/documents.pickle', 'wb')\n pickle.dump(documents, save_file)\n save_file.close()\n\n # Get word features\n all_words = [w.lower() for w in word_tokenize(pos_file) + word_tokenize(neg_file)]\n word_dist = nltk.FreqDist(all_words)\n self.word_features = list(word_dist.keys())[:num_features]\n\n # Save word features\n save_file = open('pickled_files/word_features.pickle', 'wb')\n pickle.dump(word_features, save_file)\n save_file.close()\n\n featuresets = [(self.find_features(rev), category) for (rev, category) in reviews]\n train_set, test_set = train_test_split(featuresets, test_size=0.3)\n\n # Normal Naive Bayes\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n print (\"Normal Naive Bayes Accuracy:\", nltk.classify.accuracy(classifier, test_set))\n # classifier.show_most_informative_features(10)\n\n # Save Naive Bayes algorithm\n save_file = open('pickled_files/NaiveBayesClassifier.pickle', 'wb')\n pickle.dump(classifier, save_file)\n save_file.close() \n\n # Various types of classifiers\n classifiers = {'NaiveBayesClassifier': classifier}\n for classifier_class in self.classifier_classes:\n # Train and store classifier\n classifier = SklearnClassifier(classifier_class())\n classifier.train(train_set)\n classifiers[classifier_class.__name__] = classifier\n print (classifier_class.__name__, ' Accuracy: ', nltk.classify.accuracy(classifier, test_set))\n\n # Save Naive Bayes algorithm\n save_file = open('pickled_files/' + classifier_class.__name__ + '.pickle', 'wb')\n pickle.dump(classifier, save_file)\n save_file.close()\n\n self.classifiers = list(classifiers.values())\n print('Voting Classifier Accuracy: ', nltk.classify.accuracy(self, 
test_set))\n\n # Precision and Recall\n predicted = {'pos': set(), 'neg': set()}\n actual = {'pos': set(), 'neg': set()}\n for i, (features, label) in enumerate(test_set):\n actual[self.classify(features)].add(i)\n predicted[label].add(i)\n\n print('\\nPrecision & Recall')\n print('Positive Precision: ', precision(predicted['pos'], actual['pos']))\n print('Positive Recall: ', recall(predicted['pos'], actual['pos']))\n print('Negative Precision: ', precision(predicted['neg'], actual['neg']))\n print('Negative Recall: ', recall(predicted['neg'], actual['neg']))\n\n def find_features(self, document):\n words = word_tokenize(document)\n return {w: w in words for w in self.word_features}\n\n\ndef train_vote_classifier():\n pos_file = open('short_reviews/positive.txt', 'r', encoding='latin-1').read()\n neg_file = open('short_reviews/negative.txt', 'r', encoding='latin-1').read()\n\n reviews = [(r, 'pos') for r in pos_file.split('\\n')] + [(r, 'neg') for r in neg_file.split('\\n')]\n all_words = [w.lower() for w in word_tokenize(pos_file) + word_tokenize(neg_file)]\n\n vote_classifier = VoteClassifier(MultinomialNB,\n BernoulliNB,\n LogisticRegression,\n SGDClassifier,\n SVC,\n LinearSVC,\n NuSVC)\n\n return vote_classifier.train(reviews, all_words)\n\n \n"
] |
[
[
"sklearn.model_selection.train_test_split"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
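For reference, a minimal standalone sketch of the API recorded for the row above (sklearn.model_selection.train_test_split), wrapping a scikit-learn estimator for NLTK-style featuresets the same way the cell's train() method does. It assumes nltk and scikit-learn are installed; the toy featuresets, labels and random_state value are invented for illustration and are not taken from the repository.

import nltk
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB

# Toy labelled featuresets in the same {word: bool} form that find_features() builds.
featuresets = [
    ({"good": True, "bad": False}, "pos"),
    ({"good": False, "bad": True}, "neg"),
] * 50  # repeated so both splits contain every class

# Same 70/30 split used in the cell above.
train_set, test_set = train_test_split(featuresets, test_size=0.3, random_state=0)

# Wrap a scikit-learn estimator so it accepts NLTK featuresets directly.
clf = SklearnClassifier(MultinomialNB())
clf.train(train_set)
print("accuracy:", nltk.classify.accuracy(clf, test_set))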
stjordanis/pyquil
|
[
"36987ecb78d5dc85d299dd62395b7669a1cedd5a",
"36987ecb78d5dc85d299dd62395b7669a1cedd5a"
] |
[
"test/unit/test_noise.py",
"pyquil/simulation/_reference.py"
] |
[
"from collections import OrderedDict\n\nimport numpy as np\nimport pytest\nfrom pytest_mock import MockerFixture\n\nfrom pyquil.api._qam import QAMExecutionResult\nfrom pyquil.gates import RZ, RX, I, CZ\nfrom pyquil.noise import (\n pauli_kraus_map,\n damping_kraus_map,\n dephasing_kraus_map,\n tensor_kraus_maps,\n _get_program_gates,\n _decoherence_noise_model,\n add_decoherence_noise,\n combine_kraus_maps,\n damping_after_dephasing,\n INFINITY,\n apply_noise_model,\n _noise_model_program_header,\n KrausModel,\n NoiseModel,\n corrupt_bitstring_probs,\n correct_bitstring_probs,\n estimate_bitstring_probs,\n bitstring_probs_to_z_moments,\n estimate_assignment_probs,\n NO_NOISE,\n)\nfrom pyquil.quil import Pragma, Program\nfrom pyquil.quilbase import DefGate, Gate\n\n\ndef test_pauli_kraus_map():\n probabilities = [0.1, 0.2, 0.3, 0.4]\n k1, k2, k3, k4 = pauli_kraus_map(probabilities)\n assert np.allclose(k1, np.sqrt(0.1) * np.eye(2), atol=1 * 10 ** -8)\n assert np.allclose(k2, np.sqrt(0.2) * np.array([[0, 1.0], [1.0, 0]]), atol=1 * 10 ** -8)\n assert np.allclose(k3, np.sqrt(0.3) * np.array([[0, -1.0j], [1.0j, 0]]), atol=1 * 10 ** -8)\n assert np.allclose(k4, np.sqrt(0.4) * np.array([[1, 0], [0, -1]]), atol=1 * 10 ** -8)\n\n two_q_pauli_kmaps = pauli_kraus_map(np.kron(probabilities, list(reversed(probabilities))))\n q1_pauli_kmaps = [k1, k2, k3, k4]\n q2_pauli_kmaps = pauli_kraus_map(list(reversed(probabilities)))\n tensor_kmaps = tensor_kraus_maps(q1_pauli_kmaps, q2_pauli_kmaps)\n assert np.allclose(two_q_pauli_kmaps, tensor_kmaps)\n\n\ndef test_damping_kraus_map():\n p = 0.05\n k1, k2 = damping_kraus_map(p=p)\n assert k1[1, 1] == np.sqrt(1 - p)\n assert k2[0, 1] == np.sqrt(p)\n\n\ndef test_dephasing_kraus_map():\n p = 0.05\n k1, k2 = dephasing_kraus_map(p=p)\n np.testing.assert_allclose(np.diag(k1), [np.sqrt(1 - p)] * 2)\n np.testing.assert_allclose(np.abs(np.diag(k2)), [np.sqrt(p)] * 2)\n\n\ndef test_tensor_kraus_maps():\n damping = damping_kraus_map()\n k1, k2, k3, k4 = tensor_kraus_maps(damping, damping)\n assert k1.shape == (4, 4)\n assert k2.shape == (4, 4)\n assert k3.shape == (4, 4)\n assert k4.shape == (4, 4)\n np.testing.assert_allclose(k1[-1, -1], 1 - 0.1)\n\n\ndef test_combine_kraus_maps():\n damping = damping_kraus_map()\n dephasing = dephasing_kraus_map()\n k1, k2, k3, k4 = combine_kraus_maps(damping, dephasing)\n assert k1.shape == (2, 2)\n assert k2.shape == (2, 2)\n assert k3.shape == (2, 2)\n assert k4.shape == (2, 2)\n\n\ndef test_damping_after_dephasing():\n gate_time = 1\n T1 = 10\n T2 = 3\n kraus_map = damping_after_dephasing(T1, T2, gate_time)\n\n # Density matrix for the |+> state\n rho = 0.5 * np.ones((2, 2))\n\n # See Eq. 
7.144 of Nielsen and Chuang\n target_rho = [\n [1 - 0.5 * np.exp(-gate_time / T1), 0.5 * np.exp(-gate_time / T2)],\n [0.5 * np.exp(-gate_time / T2), 0.5 * np.exp(-gate_time / T1)],\n ]\n\n noisy_rho = np.zeros((2, 2))\n for op in kraus_map:\n noisy_rho += op @ rho @ (op.T.conj())\n\n np.testing.assert_allclose(noisy_rho, target_rho)\n\n\ndef test_noise_helpers():\n gates = RX(np.pi / 2, 0), RX(-np.pi / 2, 1), I(1), CZ(0, 1)\n prog = Program(*gates)\n inferred_gates = _get_program_gates(prog)\n assert set(inferred_gates) == set(gates)\n\n\ndef test_decoherence_noise():\n prog = Program(RX(np.pi / 2, 0), CZ(0, 1), RZ(np.pi, 0))\n gates = _get_program_gates(prog)\n m1 = _decoherence_noise_model(gates, T1=INFINITY, T2=INFINITY, ro_fidelity=1.0)\n\n # with no readout error, assignment_probs = identity matrix\n assert np.allclose(m1.assignment_probs[0], np.eye(2))\n assert np.allclose(m1.assignment_probs[1], np.eye(2))\n for g in m1.gates:\n # with infinite coherence time all kraus maps should only have a single, unitary kraus op\n assert len(g.kraus_ops) == 1\n (k0,) = g.kraus_ops\n # check unitarity\n k0dk0 = k0.dot(k0.conjugate().transpose())\n assert np.allclose(k0dk0, np.eye(k0dk0.shape[0]))\n\n # verify that selective (by qubit) dephasing and readout infidelity is working\n m2 = _decoherence_noise_model(gates, T1=INFINITY, T2={0: 30e-6}, ro_fidelity={0: 0.95, 1: 1.0})\n assert np.allclose(m2.assignment_probs[0], [[0.95, 0.05], [0.05, 0.95]])\n assert np.allclose(m2.assignment_probs[1], np.eye(2))\n for g in m2.gates:\n if 0 in g.targets:\n # single dephasing (no damping) channel on qc 0, no noise on qc1 -> 2 Kraus ops\n assert len(g.kraus_ops) == 2\n else:\n assert len(g.kraus_ops) == 1\n\n # verify that combined T1 and T2 will lead to 4 outcome Kraus map.\n m3 = _decoherence_noise_model(gates, T1={0: 30e-6}, T2={0: 30e-6})\n for g in m3.gates:\n if 0 in g.targets:\n # damping (implies dephasing) channel on qc 0, no noise on qc1 -> 4 Kraus ops\n assert len(g.kraus_ops) == 4\n else:\n assert len(g.kraus_ops) == 1\n\n # verify that gate names are translated\n new_prog = apply_noise_model(prog, m3)\n\n # check that headers have been embedded\n headers = _noise_model_program_header(m3)\n assert all(\n (isinstance(i, Pragma) and i.command in [\"ADD-KRAUS\", \"READOUT-POVM\"]) or isinstance(i, DefGate)\n for i in headers\n )\n assert headers.out() in new_prog.out()\n\n # verify that high-level add_decoherence_noise reproduces new_prog\n new_prog2 = add_decoherence_noise(prog, T1={0: 30e-6}, T2={0: 30e-6})\n assert new_prog == new_prog2\n\n\ndef test_kraus_model_1():\n km = KrausModel(\"I\", (5.0,), (0, 1), [np.array([[1 + 1j]])], 1.0)\n d = km.to_dict()\n assert d == OrderedDict(\n [\n (\"gate\", km.gate),\n (\"params\", km.params),\n (\"targets\", (0, 1)),\n (\"kraus_ops\", [[[[1.0]], [[1.0]]]]),\n (\"fidelity\", 1.0),\n ]\n )\n assert KrausModel.from_dict(d) == km\n\n\[email protected]\ndef kraus_model_I_dict():\n return {\n \"gate\": \"I\",\n \"fidelity\": 1.0,\n \"kraus_ops\": [[[[1.0]], [[1.0]]]],\n \"targets\": (0, 1),\n \"params\": (5.0,),\n }\n\n\ndef test_kraus_model_2(kraus_model_I_dict):\n km = KrausModel.from_dict(kraus_model_I_dict)\n assert km == KrausModel(\n gate=kraus_model_I_dict[\"gate\"],\n params=kraus_model_I_dict[\"params\"],\n targets=kraus_model_I_dict[\"targets\"],\n kraus_ops=[KrausModel.unpack_kraus_matrix(kraus_op) for kraus_op in kraus_model_I_dict[\"kraus_ops\"]],\n fidelity=kraus_model_I_dict[\"fidelity\"],\n )\n d = km.to_dict()\n assert d == OrderedDict(\n [\n 
(\"gate\", km.gate),\n (\"params\", km.params),\n (\"targets\", (0, 1)),\n (\"kraus_ops\", [[[[1.0]], [[1.0]]]]),\n (\"fidelity\", 1.0),\n ]\n )\n\n\ndef test_noise_model_1():\n km1 = KrausModel(\"I\", (5.0,), (0, 1), [np.array([[1 + 1j]])], 1.0)\n km2 = KrausModel(\"RX\", (np.pi / 2,), (0,), [np.array([[1 + 1j]])], 1.0)\n nm = NoiseModel([km1, km2], {0: np.eye(2), 1: np.eye(2)})\n\n assert nm == NoiseModel.from_dict(nm.to_dict())\n assert nm.gates_by_name(\"I\") == [km1]\n assert nm.gates_by_name(\"RX\") == [km2]\n\n\[email protected]\ndef kraus_model_RX90_dict():\n return {\n \"gate\": \"RX\",\n \"fidelity\": 1.0,\n \"kraus_ops\": [[[[1.0]], [[1.0]]]],\n \"targets\": (0,),\n \"params\": (np.pi / 2.0,),\n }\n\n\ndef test_noise_model_2(kraus_model_I_dict, kraus_model_RX90_dict):\n noise_model_dict = {\n \"gates\": [kraus_model_I_dict, kraus_model_RX90_dict],\n \"assignment_probs\": {\"1\": [[1.0, 0.0], [0.0, 1.0]], \"0\": [[1.0, 0.0], [0.0, 1.0]]},\n }\n\n nm = NoiseModel.from_dict(noise_model_dict)\n km1 = KrausModel.from_dict(kraus_model_I_dict)\n km2 = KrausModel.from_dict(kraus_model_RX90_dict)\n assert nm == NoiseModel(gates=[km1, km2], assignment_probs={0: np.eye(2), 1: np.eye(2)})\n assert nm.gates_by_name(\"I\") == [km1]\n assert nm.gates_by_name(\"RX\") == [km2]\n assert nm.to_dict() == noise_model_dict\n\n\ndef test_readout_compensation():\n np.random.seed(1234124)\n p = np.random.rand(2, 2, 2, 2, 2, 2)\n p /= p.sum()\n\n aps = [np.eye(2) + 0.2 * (np.random.rand(2, 2) - 1) for _ in range(p.ndim)]\n for ap in aps:\n ap.flat[ap.flat < 0] = 0.0\n ap /= ap.sum()\n assert np.alltrue(ap >= 0)\n\n assert np.alltrue(p >= 0)\n\n p_corrupted = corrupt_bitstring_probs(p, aps)\n p_restored = correct_bitstring_probs(p_corrupted, aps)\n assert np.allclose(p, p_restored)\n\n results = [[0, 0, 0]] * 100 + [[0, 1, 1]] * 200\n p1 = estimate_bitstring_probs(results)\n assert np.isclose(p1[0, 0, 0], 1.0 / 3.0)\n assert np.isclose(p1[0, 1, 1], 2.0 / 3.0)\n assert np.isclose(p1.sum(), 1.0)\n\n zm = bitstring_probs_to_z_moments(p1)\n assert np.isclose(zm[0, 0, 0], 1)\n assert np.isclose(zm[1, 0, 0], 1)\n assert np.isclose(zm[0, 1, 0], -1.0 / 3)\n assert np.isclose(zm[0, 0, 1], -1.0 / 3)\n assert np.isclose(zm[0, 1, 1], 1.0)\n assert np.isclose(zm[1, 1, 0], -1.0 / 3)\n assert np.isclose(zm[1, 0, 1], -1.0 / 3)\n assert np.isclose(zm[1, 1, 1], 1.0)\n\n\ndef test_estimate_assignment_probs(mocker: MockerFixture):\n mock_qc = mocker.patch(\"pyquil.api.QuantumComputer\").return_value\n mock_compiler = mocker.patch(\"pyquil.api._abstract_compiler.AbstractCompiler\").return_value\n\n trials = 100\n p00 = 0.8\n p11 = 0.75\n\n mock_compiler.native_quil_to_executable.return_value = Program()\n mock_qc.compiler = mock_compiler\n mock_qc\n mock_qc.run.side_effect = [\n QAMExecutionResult(executable=None, readout_data={'ro': np.array([[0]]) * int(round(p00 * trials)) + np.array([[1]]) * int(round((1 - p00) * trials))}), # I gate results\n QAMExecutionResult(executable=None, readout_data={'ro': np.array([[1]]) * int(round(p11 * trials)) + np.array([[0]]) * int(round((1 - p11) * trials))}), # X gate results\n ]\n ap_target = np.array([[p00, 1 - p11], [1 - p00, p11]])\n\n povm_pragma = Pragma(\"READOUT-POVM\", (0, \"({} {} {} {})\".format(*ap_target.flatten())))\n ap = estimate_assignment_probs(0, trials, mock_qc, Program(povm_pragma))\n\n assert mock_compiler.native_quil_to_executable.call_count == 2\n assert mock_qc.run.call_count == 2\n\n for call in mock_compiler.native_quil_to_executable.call_args_list:\n args, 
kwargs = call\n prog = args[0]\n assert prog._instructions[0] == povm_pragma\n\n assert np.allclose(ap, ap_target)\n\n\ndef test_apply_noise_model():\n p = Program(RX(np.pi / 2, 0), RX(np.pi / 2, 1), CZ(0, 1), RX(np.pi / 2, 1))\n noise_model = _decoherence_noise_model(_get_program_gates(p))\n pnoisy = apply_noise_model(p, noise_model)\n for i in pnoisy:\n if isinstance(i, DefGate):\n pass\n elif isinstance(i, Pragma):\n assert i.command in [\"ADD-KRAUS\", \"READOUT-POVM\"]\n elif isinstance(i, Gate):\n assert i.name in NO_NOISE or not i.params\n\n\ndef test_apply_noise_model_perturbed_angles():\n eps = 1e-15\n p = Program(RX(np.pi / 2 + eps, 0), RX(np.pi / 2 - eps, 1), CZ(0, 1), RX(np.pi / 2 + eps, 1))\n noise_model = _decoherence_noise_model(_get_program_gates(p))\n pnoisy = apply_noise_model(p, noise_model)\n for i in pnoisy:\n if isinstance(i, DefGate):\n pass\n elif isinstance(i, Pragma):\n assert i.command in [\"ADD-KRAUS\", \"READOUT-POVM\"]\n elif isinstance(i, Gate):\n assert i.name in NO_NOISE or not i.params\n",
"##############################################################################\n# Copyright 2016-2019 Rigetti Computing\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\nimport warnings\nfrom typing import Any, List, Optional, Sequence, Tuple, Union, cast\n\nimport numpy as np\nfrom numpy.random.mtrand import RandomState\n\nfrom pyquil.paulis import PauliTerm, PauliSum\nfrom pyquil.pyqvm import AbstractQuantumSimulator\nfrom pyquil.quilbase import Gate\nfrom pyquil.simulation.matrices import P0, P1, KRAUS_OPS, QUANTUM_GATES\nfrom pyquil.simulation.tools import lifted_gate_matrix, lifted_gate, all_bitstrings\n\n\ndef _term_expectation(wf: np.ndarray, term: PauliTerm, n_qubits: int) -> Any:\n # Computes <psi|XYZ..XXZ|psi>\n wf2 = wf\n for qubit_i, op_str in term._ops.items():\n assert isinstance(qubit_i, int)\n # Re-use QUANTUM_GATES since it has X, Y, Z\n op_mat = QUANTUM_GATES[op_str]\n op_mat = lifted_gate_matrix(matrix=op_mat, qubit_inds=[qubit_i], n_qubits=n_qubits)\n wf2 = op_mat @ wf2\n\n # `wf2` is XYZ..XXZ|psi>\n # hit it with a <psi| i.e. `wf.dag`\n return term.coefficient * (wf.conj().T @ wf2)\n\n\ndef _is_valid_quantum_state(state_matrix: np.ndarray, rtol: float = 1e-05, atol: float = 1e-08) -> bool:\n \"\"\"\n Checks if a quantum state is valid, i.e. the matrix is Hermitian; trace one, and that the\n eigenvalues are non-negative.\n\n :param state_matrix: a D by D np.ndarray representing a quantum state\n :param rtol: The relative tolerance parameter in np.allclose and np.isclose\n :param atol: The absolute tolerance parameter in np.allclose and np.isclose\n :return: bool\n \"\"\"\n hermitian = np.allclose(state_matrix, np.conjugate(state_matrix.transpose()), rtol, atol)\n if not hermitian:\n raise ValueError(\"The state matrix is not Hermitian.\")\n trace_one = np.isclose(np.trace(state_matrix), 1, rtol, atol)\n if not trace_one:\n raise ValueError(\"The state matrix is not trace one.\")\n evals = np.linalg.eigvals(state_matrix)\n non_neg_eigs = all([False if val < -atol else True for val in evals])\n if not non_neg_eigs:\n raise ValueError(\"The state matrix has negative Eigenvalues of order -\" + str(atol) + \".\")\n return hermitian and trace_one and non_neg_eigs\n\n\nclass ReferenceWavefunctionSimulator(AbstractQuantumSimulator):\n def __init__(self, n_qubits: int, rs: Optional[RandomState] = None):\n \"\"\"\n A wavefunction simulator that prioritizes readability over performance.\n\n Please consider using\n :py:class:`PyQVM(..., wf_simulator_type=ReferenceWavefunctionSimulator)` rather\n than using this class directly.\n\n This class uses a flat state-vector of length 2^n_qubits to store wavefunction\n amplitudes. The basis is taken to be bitstrings ordered lexicographically with\n qubit 0 as the rightmost bit. 
This is the same as the Rigetti Lisp QVM.\n\n :param n_qubits: Number of qubits to simulate.\n :param rs: a RandomState (should be shared with the owning :py:class:`PyQVM`) for\n doing anything stochastic. A value of ``None`` disallows doing anything stochastic.\n \"\"\"\n super().__init__(n_qubits=n_qubits, rs=rs)\n\n self.n_qubits = n_qubits\n self.rs = rs\n\n self.wf = np.zeros(2 ** n_qubits, dtype=np.complex128)\n self.wf[0] = complex(1.0, 0)\n\n def sample_bitstrings(self, n_samples: int) -> np.ndarray:\n \"\"\"\n Sample bitstrings from the distribution defined by the wavefunction.\n\n Qubit 0 is at ``out[:, 0]``.\n\n :param n_samples: The number of bitstrings to sample\n :return: An array of shape (n_samples, n_qubits)\n \"\"\"\n if self.rs is None:\n raise ValueError(\n \"You have tried to perform a stochastic operation without setting the \"\n \"random state of the simulator. Might I suggest using a PyQVM object?\"\n )\n probabilities = np.abs(self.wf) ** 2\n possible_bitstrings = all_bitstrings(self.n_qubits)\n inds = self.rs.choice(2 ** self.n_qubits, n_samples, p=probabilities)\n bitstrings = possible_bitstrings[inds, :]\n bitstrings = np.flip(bitstrings, axis=1) # qubit ordering: 0 on the left.\n return bitstrings # type: ignore\n\n def do_gate(self, gate: Gate) -> \"ReferenceWavefunctionSimulator\":\n \"\"\"\n Perform a gate.\n\n :return: ``self`` to support method chaining.\n \"\"\"\n unitary = lifted_gate(gate=gate, n_qubits=self.n_qubits)\n self.wf = unitary.dot(self.wf)\n return self\n\n def do_gate_matrix(self, matrix: np.ndarray, qubits: Sequence[int]) -> \"ReferenceWavefunctionSimulator\":\n \"\"\"\n Apply an arbitrary unitary; not necessarily a named gate.\n\n :param matrix: The unitary matrix to apply. No checks are done.\n :param qubits: The qubits to apply the unitary to.\n :return: ``self`` to support method chaining.\n \"\"\"\n unitary = lifted_gate_matrix(matrix, list(qubits), n_qubits=self.n_qubits)\n self.wf = unitary.dot(self.wf)\n return self\n\n def do_measurement(self, qubit: int) -> int:\n \"\"\"\n Measure a qubit, collapse the wavefunction, and return the measurement result.\n\n :param qubit: Index of the qubit to measure.\n :return: measured bit\n \"\"\"\n if self.rs is None:\n raise ValueError(\n \"You have tried to perform a stochastic operation without setting the \"\n \"random state of the simulator. Might I suggest using a PyQVM object?\"\n )\n # lift projective measure operator to Hilbert space\n # prob(0) = <psi P0 | P0 psi> = psi* . P0* . P0 . 
psi\n measure_0 = lifted_gate_matrix(matrix=P0, qubit_inds=[qubit], n_qubits=self.n_qubits)\n proj_psi = measure_0 @ self.wf\n prob_zero = np.conj(proj_psi).T @ proj_psi\n\n # generate random number to 'roll' for measure\n if self.rs.uniform() < prob_zero:\n # decohere state using the measure_0 operator\n unitary = measure_0 @ (np.eye(2 ** self.n_qubits) / np.sqrt(prob_zero))\n self.wf = unitary.dot(self.wf)\n return 0\n else: # measure one\n measure_1 = lifted_gate_matrix(matrix=P1, qubit_inds=[qubit], n_qubits=self.n_qubits)\n unitary = measure_1 @ (np.eye(2 ** self.n_qubits) / np.sqrt(1 - prob_zero))\n self.wf = unitary.dot(self.wf)\n return 1\n\n def expectation(self, operator: Union[PauliTerm, PauliSum]) -> float:\n \"\"\"\n Compute the expectation of an operator.\n\n :param operator: The operator\n :return: The operator's expectation value\n \"\"\"\n if not isinstance(operator, PauliSum):\n operator = PauliSum([operator])\n\n return sum(_term_expectation(self.wf, term, n_qubits=self.n_qubits) for term in operator)\n\n def reset(self) -> \"ReferenceWavefunctionSimulator\":\n \"\"\"\n Reset the wavefunction to the ``|000...00>`` state.\n\n :return: ``self`` to support method chaining.\n \"\"\"\n self.wf.fill(0)\n self.wf[0] = complex(1.0, 0)\n return self\n\n def do_post_gate_noise(self, noise_type: str, noise_prob: float, qubits: List[int]) -> \"AbstractQuantumSimulator\":\n raise NotImplementedError(\"The reference wavefunction simulator cannot handle noise\")\n\n\ndef zero_state_matrix(n_qubits: int) -> np.ndarray:\n \"\"\"\n Construct a matrix corresponding to the tensor product of `n` ground states ``|0><0|``.\n\n :param n_qubits: The number of qubits.\n :return: The state matrix ``|000...0><000...0|`` for `n_qubits`.\n \"\"\"\n state_matrix = np.zeros((2 ** n_qubits, 2 ** n_qubits), dtype=np.complex128)\n state_matrix[0, 0] = complex(1.0, 0)\n return state_matrix\n\n\nclass ReferenceDensitySimulator(AbstractQuantumSimulator):\n \"\"\"\n A density matrix simulator that prioritizes readability over performance.\n\n Please consider using\n :py:class:`PyQVM(..., wf_simulator_type=ReferenceDensitySimulator)` rather\n than using this class directly.\n\n This class uses a dense matrix of shape ``(2^n_qubits, 2^n_qubits)`` to store the\n density matrix.\n\n :param n_qubits: Number of qubits to simulate.\n :param rs: a RandomState (should be shared with the owning :py:class:`PyQVM`) for\n doing anything stochastic. A value of ``None`` disallows doing anything stochastic.\n \"\"\"\n\n def __init__(self, n_qubits: int, rs: Optional[RandomState] = None):\n super().__init__(n_qubits=n_qubits, rs=rs)\n\n self.n_qubits = n_qubits\n self.rs = rs\n self.density: Optional[np.ndarray] = None\n self.set_initial_state(zero_state_matrix(n_qubits)).reset()\n\n def set_initial_state(self, state_matrix: np.ndarray) -> \"ReferenceDensitySimulator\":\n \"\"\"\n This method is the correct way (TM) to update the initial state matrix that is\n initialized every time reset() is called. The default initial state of\n ReferenceDensitySimulator is ``|000...00>``.\n\n Note that the current state matrix, i.e. ``self.density`` is not affected by this\n method; you must change it directly or else call reset() after calling this method.\n\n To restore default state initialization behavior of ReferenceDensitySimulator pass in\n a ``state_matrix`` equal to the default initial state on `n_qubits` (i.e. ``|000...00>``)\n and then call ``reset()``. 
We have provided a helper function ``n_qubit_zero_state``\n in the ``_reference.py`` module to simplify this step.\n\n :param state_matrix: numpy.ndarray or None.\n :return: ``self`` to support method chaining.\n \"\"\"\n rows, cols = state_matrix.shape\n if rows != cols:\n raise ValueError(\"The state matrix is not square.\")\n if self.n_qubits != int(np.log2(rows)):\n raise ValueError(\"The state matrix is not defined on the same numbers of qubits as the QVM.\")\n if _is_valid_quantum_state(state_matrix):\n self.initial_density = state_matrix\n else:\n raise ValueError(\n \"The state matrix is not valid. It must be Hermitian, trace one, \" \"and have non-negative eigenvalues.\"\n )\n return self\n\n def sample_bitstrings(self, n_samples: int, tol_factor: float = 1e8) -> np.ndarray:\n \"\"\"\n Sample bitstrings from the distribution defined by the wavefunction.\n\n Qubit 0 is at ``out[:, 0]``.\n\n :param n_samples: The number of bitstrings to sample\n :param tol_factor: Tolerance to set imaginary probabilities to zero, relative to\n machine epsilon.\n :return: An array of shape (n_samples, n_qubits)\n \"\"\"\n if self.rs is None:\n raise ValueError(\n \"You have tried to perform a stochastic operation without setting the \"\n \"random state of the simulator. Might I suggest using a PyQVM object?\"\n )\n\n # for np.real_if_close the actual tolerance is (machine_eps * tol_factor),\n # where `machine_epsilon = np.finfo(float).eps`. If we use tol_factor = 1e8, then the\n # overall tolerance is \\approx 2.2e-8.\n probabilities = np.real_if_close(np.diagonal(self.density), tol=tol_factor) # type: ignore\n # Next set negative probabilities to zero\n probabilities = np.array([0 if p < 0.0 else p for p in probabilities])\n # Ensure they sum to one\n probabilities = probabilities / np.sum(probabilities)\n possible_bitstrings = all_bitstrings(self.n_qubits)\n inds = self.rs.choice(2 ** self.n_qubits, n_samples, p=probabilities)\n bitstrings = possible_bitstrings[inds, :]\n bitstrings = np.flip(bitstrings, axis=1) # qubit ordering: 0 on the left.\n return bitstrings # type: ignore\n\n def do_gate(self, gate: Gate) -> \"AbstractQuantumSimulator\":\n \"\"\"\n Perform a gate.\n\n :return: ``self`` to support method chaining.\n \"\"\"\n unitary = lifted_gate(gate=gate, n_qubits=self.n_qubits)\n self.density = unitary.dot(self.density).dot(np.conj(unitary).T) # type: ignore\n return self\n\n def do_gate_matrix(self, matrix: np.ndarray, qubits: Sequence[int]) -> \"AbstractQuantumSimulator\":\n \"\"\"\n Apply an arbitrary unitary; not necessarily a named gate.\n\n :param matrix: The unitary matrix to apply. No checks are done\n :param qubits: A list of qubits to apply the unitary to.\n :return: ``self`` to support method chaining.\n \"\"\"\n unitary = lifted_gate_matrix(matrix=matrix, qubit_inds=qubits, n_qubits=self.n_qubits)\n self.density = unitary.dot(self.density).dot(np.conj(unitary).T) # type: ignore\n return self\n\n def do_measurement(self, qubit: int) -> int:\n \"\"\"\n Measure a qubit and collapse the wavefunction\n\n :return: The measurement result. A 1 or a 0.\n \"\"\"\n if self.rs is None:\n raise ValueError(\n \"You have tried to perform a stochastic operation without setting the \"\n \"random state of the simulator. 
Might I suggest using a PyQVM object?\"\n )\n measure_0 = lifted_gate_matrix(matrix=P0, qubit_inds=[qubit], n_qubits=self.n_qubits)\n prob_zero = np.trace(measure_0 @ self.density) # type: ignore\n\n # generate random number to 'roll' for measurement\n if self.rs.uniform() < prob_zero:\n # decohere state using the measure_0 operator\n unitary = measure_0 @ (np.eye(2 ** self.n_qubits) / np.sqrt(prob_zero))\n self.density = unitary.dot(self.density).dot(np.conj(unitary.T))\n return 0\n else: # measure one\n measure_1 = lifted_gate_matrix(matrix=P1, qubit_inds=[qubit], n_qubits=self.n_qubits)\n unitary = measure_1 @ (np.eye(2 ** self.n_qubits) / np.sqrt(1 - prob_zero))\n self.density = unitary.dot(self.density).dot(np.conj(unitary.T))\n return 1\n\n def expectation(self, operator: Union[PauliTerm, PauliSum]) -> complex:\n raise NotImplementedError(\"To implement\")\n\n def reset(self) -> \"AbstractQuantumSimulator\":\n \"\"\"\n Resets the current state of ReferenceDensitySimulator ``self.density`` to\n ``self.initial_density``.\n\n :return: ``self`` to support method chaining.\n \"\"\"\n self.density = self.initial_density\n return self\n\n def do_post_gate_noise(self, noise_type: str, noise_prob: float, qubits: List[int]) -> \"ReferenceDensitySimulator\":\n kraus_ops = cast(Tuple[np.ndarray, ...], KRAUS_OPS[noise_type](p=noise_prob))\n if np.isclose(noise_prob, 0.0):\n warnings.warn(f\"Skipping {noise_type} post-gate noise because noise_prob is close to 0\")\n return self\n\n for q in qubits:\n new_density = np.zeros_like(self.density) # type: ignore\n for kraus_op in kraus_ops:\n lifted_kraus_op = lifted_gate_matrix(matrix=kraus_op, qubit_inds=[q], n_qubits=self.n_qubits)\n new_density += lifted_kraus_op.dot(self.density).dot(np.conj(lifted_kraus_op.T)) # type: ignore\n self.density = new_density\n return self\n"
] |
[
[
"numpy.diag",
"numpy.allclose",
"numpy.random.seed",
"numpy.sqrt",
"numpy.eye",
"numpy.ones",
"numpy.alltrue",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.isclose"
],
[
"numpy.linalg.eigvals",
"numpy.sum",
"numpy.abs",
"numpy.conj",
"numpy.log2",
"numpy.sqrt",
"numpy.eye",
"numpy.diagonal",
"numpy.zeros_like",
"numpy.flip",
"numpy.array",
"numpy.zeros",
"numpy.trace",
"numpy.isclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
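As a plain-numpy companion to the pyquil cell above, this sketch reruns the three density-matrix checks performed by _is_valid_quantum_state (Hermitian, unit trace, non-negative eigenvalues) without importing pyquil; the example matrix (the |+> state) and the tolerance are chosen for illustration only.

import numpy as np

rho = 0.5 * np.ones((2, 2))  # density matrix of the |+> state, as in test_damping_after_dephasing

hermitian = np.allclose(rho, rho.conj().T)      # rho equals its conjugate transpose
trace_one = np.isclose(np.trace(rho), 1.0)      # Tr(rho) == 1
eigs = np.linalg.eigvals(rho).real              # Hermitian, so imaginary parts vanish
non_negative = bool(np.all(eigs >= -1e-8))      # eigenvalues >= 0 up to tolerance

print(hermitian, trace_one, non_negative)  # expected: True True True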
krokodilj/flair-with-tfidf
|
[
"d1c0e5a1527d595d3b5a25e4f7a34ba47f633920"
] |
[
"tfidf/model.py"
] |
[
"from gensim.corpora import Dictionary\nfrom gensim.models.tfidfmodel import TfidfModel\nfrom gensim.matutils import sparse2full\n\nimport spacy\nnlp = spacy.load('en_core_web_sm')\n\nimport numpy as np\nimport itertools\n\nclass TfIdfEncoder:\n \n def __init__(self, config):\n self.docs = config['docs']\n self.normalize = True\n \n self._build_model()\n \n \n def _build_model(self): \n \n print(\"Parsing documents . . .\")\n parsed_docs = [self.lemmatize_doc(nlp(str(doc).lower())) for doc in self.docs]\n\n print(\"Creating dictionary . . .\")\n # Create dictionary \n self.docs_dict = Dictionary(parsed_docs)\n print(len(self.docs_dict))\n \n print(\"Creating BOW . . .\")\n # bow corpus\n docs_corpus = [self.docs_dict.doc2bow(doc) for doc in parsed_docs]\n\n print(\"Initializing model . . .\")\n # Init tf-idf model\n self.model_tfidf = TfidfModel(docs_corpus, id2word=self.docs_dict)\n \n print(\"Setting up word vectors (GLOVE) . . .\")\n # Init vector for every word in dictionary\n self.tfidf_emb_vecs = np.vstack([nlp(self.docs_dict[i]).vector for i in range(len(self.docs_dict))])\n\n \n def run(self, sentences, batch_size = None, **kwargs):\n \"\"\"\n :param sentences: [ batch_size ]\n \"\"\"\n if not batch_size:\n parsed_docs = [self.lemmatize_doc(nlp(str(doc).lower())) for doc in sentences]\n corpus = [self.docs_dict.doc2bow(doc) for doc in parsed_docs]\n vecs = np.vstack([sparse2full(c, len(self.docs_dict)) for c in self.model_tfidf[corpus]])\n vecs = np.dot(vecs, self.tfidf_emb_vecs)\n else:\n encoded_list = list()\n\n for batch in self._batch(sentences, batch_size):\n parsed_docs = [self.lemmatize_doc(nlp(str(doc).lower())) for doc in batch]\n corpus = [self.docs_dict.doc2bow(doc) for doc in parsed_docs]\n enc = np.vstack([sparse2full(c, len(self.docs_dict)) for c in self.model_tfidf[corpus]])\n encoded_list.append( np.dot(enc, self.tfidf_emb_vecs))\n\n vecs = np.array(list(itertools.chain(*encoded_list)))\n \n \n return self._normalize(vecs) if self.normalize else vecs\n \n def _normalize(self, x):\n return x / np.linalg.norm(x, ord=2, axis=1, keepdims=True)\n \n def keep_token(self, t):\n return (t.is_alpha and not (t.is_space or t.is_punct or t.is_stop))\n\n def lemmatize_doc(self, doc):\n return [ t.lemma_ for t in doc if self.keep_token(t)]\n \n def _batch(self, iterable, n):\n \"\"\"\n :param iterable: a list if things to be splitted into batches\n :param n: number of things per batch\n \"\"\"\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]"
] |
[
[
"numpy.dot",
"numpy.linalg.norm"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
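The two numpy calls recorded for the row above (numpy.dot and numpy.linalg.norm) correspond to projecting tf-idf weights onto word vectors and then L2-normalising each document vector, as the cell's run() and _normalize() methods do. A minimal sketch with invented shapes (2 documents, 3 vocabulary terms, 4-dimensional embeddings):

import numpy as np

tfidf_weights = np.array([[0.0, 2.0, 1.0],   # 2 documents x 3 vocabulary terms
                          [3.0, 0.0, 0.0]])
word_vectors = np.ones((3, 4))               # 3 vocabulary terms x 4-dim embeddings (stand-in for GloVe)

doc_vectors = np.dot(tfidf_weights, word_vectors)  # (2, 4) document embeddings
normalised = doc_vectors / np.linalg.norm(doc_vectors, ord=2, axis=1, keepdims=True)

print(np.linalg.norm(normalised, axis=1))  # every row now has unit length: [1. 1.]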
krbjila/labrad_tools
|
[
"5c510cb35090807807bfe6bd910b9c35edce6fce"
] |
[
"sequencer/devices/lib/analog_ramps.py"
] |
[
"import numpy as np\n\ndef H(x):\n \"\"\"\n step function\n \"\"\"\n return 0.5*(np.sign(x-1e-9)+1)\n\ndef G(t1, t2):\n \"\"\"\n pulse\n \"\"\"\n return lambda t: H(t2-t) - H(t1-t) \n\ndef round_dt(dt):\n return float('{0:.7f}'.format(dt))\n# return dt\n\ndef round_dv(dv):\n return float('{0:.7f}'.format(dv))\n# return dv\n\ndef lin_ramp(p):\n \"\"\"\n returns continuous finction defined over ['ti', 'tf'].\n values are determined by connecting 'vi' to 'vf' with a line.\n \"\"\"\n return lambda t: G(p['ti'], p['tf'])(t)*(p['vi'] + (p['vf']-p['vi'])/(p['tf']-p['ti'])*(t-p['ti']))\n\ndef exp_ramp(p, ret_seq=False):\n \"\"\"\n returns continuous finction defined over ['ti', 'tf'].\n values are determined by connecting 'vi' to 'vf' with an exponential function.\n v = a*e^{-t/'tau'} + c\n \"\"\"\n p['a'] = (p['vf']-p['vi'])/(np.exp(p['dt']/p['tau'])-1)\n p['c'] = p['vi'] - p['a']\n v_ideal = lambda t: G(p['ti'], p['tf'])(t)*(p['a']*np.exp((t-p['ti'])/p['tau']) + p['c'])\n t_pts = np.linspace(p['ti'], p['tf']-2e-9, p['pts']+1)\n v_pts = v_ideal(t_pts)\n sseq = [{'type': 'lin', 'ti': ti, 'tf': tf, 'vi': vi, 'vf': vf} \n for ti, tf, vi, vf in zip(t_pts[:-1], t_pts[1:], v_pts[:-1], v_pts[1:])]\n\n sseq[0]['vi'] = p['vi']\n sseq[-1]['vf'] = p['vf'] \n\n if ret_seq:\n return sseq\n else:\n return lambda t: sum([lin_ramp(ss)(t) for ss in sseq])\n\n\n# scurve added 4/23/19\ndef scurve_ramp(p, ret_seq=False):\n \"\"\"\n returns continuous function defined over ['ti', 'tf'].\n values are determined by connecting 'vi' to 'vf' with a logistic function.\n v = c + a / (1 + e^-x), with x = (t - (ti + tf) / 2 ) * (12 * 'k' / (tf - ti)).\n the weird-looking formula is chosen so that the shape of the curve is\n determined only by 'k', and not by the time interval tf - ti\n \"\"\"\n\n p['a'] = p['vf'] - p['vi']\n p['c'] = p['vi']\n \n t0 = (p['ti'] + p['tf']) / 2.0\n dt = p['tf'] - p['ti']\n\n # normalize away the dependence of the shape on dt\n # multiply by 12 so that \"nice-looking\" s-curves have steepness O(1)\n steep = 12.0 * p['k'] / dt\n\n v_ideal = lambda t: G(p['ti'], p['tf'])(t) * (p['c'] + p['a'] / (1 + np.exp(-(t-t0)*steep)) )\n t_pts = np.linspace(p['ti'], p['tf']-2e-9, p['pts']+1)\n v_pts = v_ideal(t_pts)\n sseq = [{'type': 'lin', 'ti': ti, 'tf': tf, 'vi': vi, 'vf': vf} \n for ti, tf, vi, vf in zip(t_pts[:-1], t_pts[1:], v_pts[:-1], v_pts[1:])]\n\n sseq[0]['vi'] = p['vi']\n sseq[-1]['vf'] = p['vf'] \n\n if ret_seq:\n return sseq\n else:\n return lambda t: sum([lin_ramp(ss)(t) for ss in sseq])\n\n\nclass SRamp(object):\n required_parameters = [\n ('vf', ([-10, 10], [(0, 'V'), (-3, 'mV')], 3)),\n ('dt', ([1e-6, 50], [(0, 's'), (-3, 'ms'), (-6, 'us')], 1)), \n ]\n default_values = {}\n def __init__(self, p=None):\n self.p = p\n if p is not None:\n p['vi'] = p['vf']\n self.v = lin_ramp(p)\n\n def to_lin(self):\n \"\"\"\n to list of linear ramps [{dt, dv}]\n \"\"\"\n p = self.p\n return [{'dt': 1e-6, 'dv': p['vf'] - p['_vi']}, {'dt': p['dt']-1e-6, 'dv': 0}]\n\nclass LinRamp(object):\n required_parameters = [\n ('vf', ([-10, 10], [(0, 'V'), (-3, 'mV')], 3)),\n ('dt', ([1e-6, 50], [(0, 's'), (-3, 'ms'), (-6, 'us')], 1)), \n ]\n default_values = {}\n def __init__(self, p=None):\n self.p = p\n if p is not None:\n self.v = lin_ramp(p)\n\n def to_lin(self):\n \"\"\"\n to list of linear ramps [{dt, dv}]\n \"\"\"\n p = self.p\n return [{'dt': p['dt'], 'dv': p['vf']-p['_vi']}]\n\nclass SLinRamp(object):\n required_parameters = [\n ('vi', ([-10, 10], [(0, 'V'), (-3, 'mV')], 3)),\n ('vf', ([-10, 10], [(0, 'V'), 
(-3, 'mV')], 3)),\n ('dt', ([1e-6, 50], [(0, 's'), (-3, 'ms'), (-6, 'us')], 1)), \n ]\n default_values = {\n 'vi': 0,\n }\n def __init__(self, p=None):\n\n if p is not None:\n self.p = p\n self.v = lin_ramp(p)\n\n def to_lin(self):\n \"\"\"\n to list of linear ramps [{dt, dv}]\n \"\"\"\n p = self.p\n return [{'dt': 1e-6, 'dv': p['vi'] - p['_vi']}, {'dt': p['dt']-1e-6, 'dv': p['vf']-p['vi']}]\n\nclass ExpRamp(object):\n required_parameters = [\n ('vf', ([-10, 10], [(0, 'V')], 3)),\n ('dt', ([1e-6, 50], [(0, 's'), (-3, 'ms'), (-6, 'us')], 1)), \n ('tau', ([-1e2, 1e2], [(0, 's'), (-3, 'ms'), (-6, 'us'), (-9, 'ns')], 1)),\n ('pts', ([1, 10], [(0, 'na')], 0)),\n ]\n default_values = {\n 'pts': 20,\n 'tau': 1\n }\n def __init__(self, p=None):\n self.p = p\n if p is not None:\n self.v = exp_ramp(p)\n\n def to_lin(self):\n \"\"\"\n to list of linear ramps [{dt, dv}]\n \"\"\"\n p = self.p\n seq = exp_ramp(p, ret_seq=True)\n return [{'dt': round_dt(s['tf']-s['ti']), 'dv': round_dv(s['vf']-s['vi'])} for s in seq]\n\nclass SExpRamp(object):\n required_parameters = [\n ('vi', ([-10, 10], [(0, 'V')], 3)),\n ('vf', ([-10, 10], [(0, 'V')], 3)),\n ('dt', ([1e-6, 50], [(0, 's'), (-3, 'ms'), (-6, 'us')], 1)), \n ('tau', ([-1e2, 1e2], [(0, 's'), (-3, 'ms'), (-6, 'us')], 1)),\n ('pts', ([1, 20], [(0, 'na')], 0)),\n ]\n default_values = {\n 'vi': 0,\n 'pts': 10,\n 'tau': 1\n }\n def __init__(self, p=None):\n self.p = p\n if p is not None:\n self.v = exp_ramp(p)\n \n def to_lin(self):\n \"\"\"\n to list of linear ramps [{dt, dv}]\n \"\"\"\n p = self.p\n seq = exp_ramp(p, ret_seq=True)\n return [{'dt': 1e-6, 'dv': p['vi']-p['_vi']}] + [{'dt': s['tf']-s['ti'], 'dv': s['vf']-s['vi']} for s in seq]\n\n\nclass SCurveRamp(object):\n required_parameters = [\n ('vi', ([-10, 10], [(0, 'V')], 3)),\n ('vf', ([-10, 10], [(0, 'V')], 3)),\n ('dt', ([1e-6, 50], [(0, 's'), (-3, 'ms'), (-6, 'us')], 1)), \n ('k', ([0, 10], [(0, '')], 1)),\n ('pts', ([1, 20], [(0, 'na')], 0)),\n ]\n default_values = {\n 'vi': 0,\n 'pts': 20,\n 'k': 1\n }\n def __init__(self, p=None):\n self.p = p\n if p is not None:\n self.v = scurve_ramp(p)\n \n def to_lin(self):\n \"\"\"\n to list of linear ramps [{dt, dv}]\n \"\"\"\n p = self.p\n seq = scurve_ramp(p, ret_seq=True)\n return [{'dt': 1e-6, 'dv': p['vi']-p['_vi']}] + [{'dt': s['tf']-s['ti'], 'dv': s['vf']-s['vi']} for s in seq]\n\n\nclass RampMaker(object):\n available_ramps = {\n 's': SRamp,\n 'lin': LinRamp,\n 'slin': SLinRamp,\n 'exp': ExpRamp,\n 'sexp': SExpRamp,\n 'scurve': SCurveRamp\n }\n def __init__(self, sequence):\n j=0\n for i, s in enumerate(sequence):\n if s['type'] is 'sub':\n seq = sequence.pop(i+j)['seq']\n for ss in s['seq']:\n sequence.insert(i+j, ss)\n j += 1\n \n sequence[0]['_vi'] = 0\n for i in range(len(sequence)-1):\n sequence[i+1]['_vi'] = sequence[i]['vf']\n for i in range(len(sequence)):\n if not sequence[i].has_key('vi'):\n sequence[i]['vi'] = sequence[i]['_vi']\n \n for i, s in enumerate(sequence):\n s['ti'] = sum([ss['dt'] for ss in sequence[:i]])\n s['tf'] = s['ti'] + s['dt']\n \n self.v = lambda t: sum([self.available_ramps[s['type']](s).v(t) for s in sequence])\n self.sequence = sequence\n\n def get_plottable(self, scale='real', pts=100):\n T = np.concatenate([np.linspace(s['ti'], s['tf'], pts)[:-1] for s in self.sequence])\n V = self.v(T)\n if scale=='real':\n return T, V\n elif scale=='step':\n T = range(len(V))\n return T, V\n\n def get_continuous(self):\n return self.v\n\n def get_programmable(self):\n \"\"\"\n to list of linear ramps [{dt, dv}]\n \"\"\"\n lins 
= np.concatenate([self.available_ramps[s['type']](s).to_lin() for s in self.sequence]).tolist()\n return lins #combine_flat_ramps([], lins)\n\ndef combine_flat_ramps(l, s):\n if not l:\n l = [s.pop(0)]\n if s:\n nxt = s.pop(0)\n if nxt['dv'] == 0 and l[-1]['dv'] == 0:\n l[-1]['dt'] += nxt['dt']\n return combine_flat_ramps(l, s)\n else:\n return l + combine_flat_ramps([nxt], s)\n else:\n return l\n"
] |
[
[
"numpy.sign",
"numpy.exp",
"numpy.linspace"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
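To make the ramp helpers in the row above easy to try in isolation, here is a small numpy-only sketch of the step function built from np.sign and a linear ramp sampled on an np.linspace grid; the ramp endpoints (0 V to 5 V over 1 s) and the number of sample points are invented for illustration.

import numpy as np

def H(x):
    # step function: ~0 for x <= 0 and 1 for x > 1e-9, same form as the cell's helper
    return 0.5 * (np.sign(x - 1e-9) + 1)

def lin_ramp(p):
    # connect (ti, vi) to (tf, vf) with a line, gated to zero outside [ti, tf)
    gate = lambda t: H(p['tf'] - t) - H(p['ti'] - t)
    return lambda t: gate(t) * (p['vi'] + (p['vf'] - p['vi']) / (p['tf'] - p['ti']) * (t - p['ti']))

ramp = lin_ramp({'ti': 0.0, 'tf': 1.0, 'vi': 0.0, 'vf': 5.0})
t = np.linspace(0.0, 1.0 - 2e-9, 5)   # stop just short of tf, as exp_ramp does
print(ramp(t))                        # ~[0.  1.25 2.5  3.75 5. ]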
llv22/MatchZoo
|
[
"a4b7f60a7583c7dcf02288a0a60fac8973a4c917"
] |
[
"matchzoo/layers/DynamicMaxPooling.py"
] |
[
"from keras.layers import Input\nfrom keras import backend as K\nfrom keras.engine.topology import Layer\nimport numpy as np\n\nclass DynamicMaxPooling(Layer):\n\n def __init__(self, psize1, psize2, **kwargs):\n # for psize1, psize2 [3, 10]\n self.psize1 = psize1\n self.psize2 = psize2\n super(DynamicMaxPooling, self).__init__(**kwargs)\n\n def build(self, input_shape):\n # for input data dimension layer : (None, 60, 60, 32)\n input_shape_one = input_shape[0]\n self.msize1 = input_shape_one[1]\n self.msize2 = input_shape_one[2]\n super(DynamicMaxPooling, self).build(input_shape) \n\n def call(self, data):\n x, self.dpool_index = data\n x_expand = K.tf.gather_nd(x, self.dpool_index)\n stride1 = self.msize1 / self.psize1\n stride2 = self.msize2 / self.psize2\n \n suggestion1 = self.msize1 / stride1\n suggestion2 = self.msize2 / stride2\n\n # should be consistent with kernel pooling size\n if suggestion1 != self.psize1 or suggestion2 != self.psize2:\n print(\"DynamicMaxPooling Layer can not \"\n \"generate ({} x {}) output feature map,\"\n \"please use ({} x {} instead.)\".format(self.psize1, self.psize2, \n suggestion1, suggestion2))\n exit()\n\n x_pool = K.tf.nn.max_pool(x_expand, \n [1, self.msize1 / self.psize1, self.msize2 / self.psize2, 1], \n [1, self.msize1 / self.psize1, self.msize2 / self.psize2, 1], \n \"VALID\")\n return x_pool\n\n def compute_output_shape(self, input_shape):\n input_shape_one = input_shape[0]\n return (None, self.psize1, self.psize2, input_shape_one[3])\n\n @staticmethod\n def dynamic_pooling_index(len1, len2, max_len1, max_len2, \n compress_ratio1 = 1, compress_ratio2 = 1):\n def dpool_index_(batch_idx, len1_one, len2_one, max_len1, max_len2):\n '''\n TODO: Here is the check of sentences length to be positive.\n To make sure that the lenght of the input sentences are positive. \n if len1_one == 0:\n print(\"[Error:DynamicPooling] len1 = 0 at batch_idx = {}\".format(batch_idx))\n exit()\n if len2_one == 0:\n print(\"[Error:DynamicPooling] len2 = 0 at batch_idx = {}\".format(batch_idx))\n exit()\n '''\n # decide stride for len1 and len2 based on current len1 and len1 of sentence\n if len1_one == 0:\n stride1 = max_len1\n else:\n stride1 = 1.0 * max_len1 / len1_one\n\n if len2_one == 0:\n stride2 = max_len2\n else:\n stride2 = 1.0 * max_len2 / len2_one\n\n # generate <len1 index for stride>, <len2 index for stride>\n idx1_one = [int(i / stride1) for i in range(max_len1)]\n idx2_one = [int(i / stride2) for i in range(max_len2)]\n mesh1, mesh2 = np.meshgrid(idx1_one, idx2_one)\n assert mesh1.shape == mesh2.shape\n index_one = np.transpose(np.stack([np.ones(mesh1.shape) * batch_idx,\n mesh1, mesh2]), (2,1,0))\n return index_one\n index = []\n dpool_bias1 = dpool_bias2 = 0\n # shifting up for dpool_bias1 and dpool_bias2\n if max_len1 % compress_ratio1 != 0:\n dpool_bias1 = 1\n if max_len2 % compress_ratio2 != 0:\n dpool_bias2 = 1\n cur_max_len1 = max_len1 // compress_ratio1 + dpool_bias1\n cur_max_len2 = max_len2 // compress_ratio2 + dpool_bias2\n assert len(len1) == len(len2)\n for i in range(len(len1)):\n # enumerate all batch size from [0, len1-1], generate convd parameter (0..len1-1, <len1 index for stride>, <len2 index for stride>)\n index.append(dpool_index_(i, len1[i] // compress_ratio1, \n len2[i] // compress_ratio2, cur_max_len1, cur_max_len2))\n return np.array(index)\n"
] |
[
[
"numpy.array",
"numpy.meshgrid",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
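The numpy calls listed for the row above (numpy.meshgrid, numpy.ones, numpy.array) all come from building the dynamic-pooling index tensor; this standalone sketch reproduces that construction without Keras. The sentence lengths (2 and 3, padded to a 4 x 6 grid) and the single-element batch are invented for illustration.

import numpy as np

def dpool_index(batch_idx, len1, len2, max_len1, max_len2):
    # stretch the index range [0, len) over the padded length, as dpool_index_ does
    stride1 = 1.0 * max_len1 / len1
    stride2 = 1.0 * max_len2 / len2
    idx1 = [int(i / stride1) for i in range(max_len1)]
    idx2 = [int(i / stride2) for i in range(max_len2)]
    mesh1, mesh2 = np.meshgrid(idx1, idx2)
    # one (batch, row, col) triple per cell of the max_len1 x max_len2 grid
    return np.transpose(np.stack([np.ones(mesh1.shape) * batch_idx, mesh1, mesh2]), (2, 1, 0))

index = np.array([dpool_index(0, 2, 3, 4, 6)])
print(index.shape)  # (1, 4, 6, 3): batch of one, plus a (batch, row, col) triple per grid cell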
AakankshaAshok/pandas
|
[
"6498bc1e8a12003640139db4794bd5cd2462c116",
"6498bc1e8a12003640139db4794bd5cd2462c116"
] |
[
"pandas/tests/frame/test_dtypes.py",
"pandas/core/indexes/frozen.py"
] |
[
"from collections import OrderedDict\nfrom datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Series,\n Timedelta,\n Timestamp,\n _np_version_under1p14,\n concat,\n date_range,\n option_context,\n)\nfrom pandas.core.arrays import integer_array\nimport pandas.util.testing as tm\n\n\ndef _check_cast(df, v):\n \"\"\"\n Check if all dtypes of df are equal to v\n \"\"\"\n assert all(s.dtype.name == v for _, s in df.items())\n\n\nclass TestDataFrameDataTypes:\n def test_concat_empty_dataframe_dtypes(self):\n df = DataFrame(columns=list(\"abc\"))\n df[\"a\"] = df[\"a\"].astype(np.bool_)\n df[\"b\"] = df[\"b\"].astype(np.int32)\n df[\"c\"] = df[\"c\"].astype(np.float64)\n\n result = pd.concat([df, df])\n assert result[\"a\"].dtype == np.bool_\n assert result[\"b\"].dtype == np.int32\n assert result[\"c\"].dtype == np.float64\n\n result = pd.concat([df, df.astype(np.float64)])\n assert result[\"a\"].dtype == np.object_\n assert result[\"b\"].dtype == np.float64\n assert result[\"c\"].dtype == np.float64\n\n def test_empty_frame_dtypes_ftypes(self):\n empty_df = pd.DataFrame()\n tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))\n\n # GH 26705 - Assert .ftypes is deprecated\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))\n\n nocols_df = pd.DataFrame(index=[1, 2, 3])\n tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))\n\n # GH 26705 - Assert .ftypes is deprecated\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))\n\n norows_df = pd.DataFrame(columns=list(\"abc\"))\n tm.assert_series_equal(\n norows_df.dtypes, pd.Series(np.object, index=list(\"abc\"))\n )\n\n # GH 26705 - Assert .ftypes is deprecated\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(\n norows_df.ftypes, pd.Series(\"object:dense\", index=list(\"abc\"))\n )\n\n norows_int_df = pd.DataFrame(columns=list(\"abc\")).astype(np.int32)\n tm.assert_series_equal(\n norows_int_df.dtypes, pd.Series(np.dtype(\"int32\"), index=list(\"abc\"))\n )\n # GH 26705 - Assert .ftypes is deprecated\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(\n norows_int_df.ftypes, pd.Series(\"int32:dense\", index=list(\"abc\"))\n )\n\n odict = OrderedDict\n df = pd.DataFrame(odict([(\"a\", 1), (\"b\", True), (\"c\", 1.0)]), index=[1, 2, 3])\n ex_dtypes = pd.Series(\n odict([(\"a\", np.int64), (\"b\", np.bool), (\"c\", np.float64)])\n )\n ex_ftypes = pd.Series(\n odict([(\"a\", \"int64:dense\"), (\"b\", \"bool:dense\"), (\"c\", \"float64:dense\")])\n )\n tm.assert_series_equal(df.dtypes, ex_dtypes)\n\n # GH 26705 - Assert .ftypes is deprecated\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(df.ftypes, ex_ftypes)\n\n # same but for empty slice of df\n tm.assert_series_equal(df[:0].dtypes, ex_dtypes)\n\n # GH 26705 - Assert .ftypes is deprecated\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(df[:0].ftypes, ex_ftypes)\n\n def test_datetime_with_tz_dtypes(self):\n tzframe = DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3),\n \"B\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"C\": date_range(\"20130101\", periods=3, tz=\"CET\"),\n }\n )\n tzframe.iloc[1, 1] = pd.NaT\n tzframe.iloc[1, 2] = pd.NaT\n result = 
tzframe.dtypes.sort_index()\n expected = Series(\n [\n np.dtype(\"datetime64[ns]\"),\n DatetimeTZDtype(\"ns\", \"US/Eastern\"),\n DatetimeTZDtype(\"ns\", \"CET\"),\n ],\n [\"A\", \"B\", \"C\"],\n )\n\n tm.assert_series_equal(result, expected)\n\n def test_dtypes_are_correct_after_column_slice(self):\n # GH6525\n df = pd.DataFrame(index=range(5), columns=list(\"abc\"), dtype=np.float_)\n odict = OrderedDict\n tm.assert_series_equal(\n df.dtypes,\n pd.Series(odict([(\"a\", np.float_), (\"b\", np.float_), (\"c\", np.float_)])),\n )\n tm.assert_series_equal(\n df.iloc[:, 2:].dtypes, pd.Series(odict([(\"c\", np.float_)]))\n )\n tm.assert_series_equal(\n df.dtypes,\n pd.Series(odict([(\"a\", np.float_), (\"b\", np.float_), (\"c\", np.float_)])),\n )\n\n def test_select_dtypes_include_using_list_like(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.Categorical(list(\"abc\")),\n \"g\": pd.date_range(\"20130101\", periods=3),\n \"h\": pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"i\": pd.date_range(\"20130101\", periods=3, tz=\"CET\"),\n \"j\": pd.period_range(\"2013-01\", periods=3, freq=\"M\"),\n \"k\": pd.timedelta_range(\"1 day\", periods=3),\n }\n )\n\n ri = df.select_dtypes(include=[np.number])\n ei = df[[\"b\", \"c\", \"d\", \"k\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=[np.number], exclude=[\"timedelta\"])\n ei = df[[\"b\", \"c\", \"d\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=[np.number, \"category\"], exclude=[\"timedelta\"])\n ei = df[[\"b\", \"c\", \"d\", \"f\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=[\"datetime\"])\n ei = df[[\"g\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=[\"datetime64\"])\n ei = df[[\"g\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=[\"datetimetz\"])\n ei = df[[\"h\", \"i\"]]\n tm.assert_frame_equal(ri, ei)\n\n with pytest.raises(NotImplementedError, match=r\"^$\"):\n df.select_dtypes(include=[\"period\"])\n\n def test_select_dtypes_exclude_using_list_like(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n }\n )\n re = df.select_dtypes(exclude=[np.number])\n ee = df[[\"a\", \"e\"]]\n tm.assert_frame_equal(re, ee)\n\n def test_select_dtypes_exclude_include_using_list_like(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.date_range(\"now\", periods=3).values,\n }\n )\n exclude = (np.datetime64,)\n include = np.bool_, \"integer\"\n r = df.select_dtypes(include=include, exclude=exclude)\n e = df[[\"b\", \"c\", \"e\"]]\n tm.assert_frame_equal(r, e)\n\n exclude = (\"datetime\",)\n include = \"bool\", \"int64\", \"int32\"\n r = df.select_dtypes(include=include, exclude=exclude)\n e = df[[\"b\", \"e\"]]\n tm.assert_frame_equal(r, e)\n\n def test_select_dtypes_include_using_scalars(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.Categorical(list(\"abc\")),\n \"g\": pd.date_range(\"20130101\", 
periods=3),\n \"h\": pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"i\": pd.date_range(\"20130101\", periods=3, tz=\"CET\"),\n \"j\": pd.period_range(\"2013-01\", periods=3, freq=\"M\"),\n \"k\": pd.timedelta_range(\"1 day\", periods=3),\n }\n )\n\n ri = df.select_dtypes(include=np.number)\n ei = df[[\"b\", \"c\", \"d\", \"k\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=\"datetime\")\n ei = df[[\"g\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=\"datetime64\")\n ei = df[[\"g\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=\"category\")\n ei = df[[\"f\"]]\n tm.assert_frame_equal(ri, ei)\n\n with pytest.raises(NotImplementedError, match=r\"^$\"):\n df.select_dtypes(include=\"period\")\n\n def test_select_dtypes_exclude_using_scalars(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.Categorical(list(\"abc\")),\n \"g\": pd.date_range(\"20130101\", periods=3),\n \"h\": pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"i\": pd.date_range(\"20130101\", periods=3, tz=\"CET\"),\n \"j\": pd.period_range(\"2013-01\", periods=3, freq=\"M\"),\n \"k\": pd.timedelta_range(\"1 day\", periods=3),\n }\n )\n\n ri = df.select_dtypes(exclude=np.number)\n ei = df[[\"a\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(exclude=\"category\")\n ei = df[[\"a\", \"b\", \"c\", \"d\", \"e\", \"g\", \"h\", \"i\", \"j\", \"k\"]]\n tm.assert_frame_equal(ri, ei)\n\n with pytest.raises(NotImplementedError, match=r\"^$\"):\n df.select_dtypes(exclude=\"period\")\n\n def test_select_dtypes_include_exclude_using_scalars(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.Categorical(list(\"abc\")),\n \"g\": pd.date_range(\"20130101\", periods=3),\n \"h\": pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"i\": pd.date_range(\"20130101\", periods=3, tz=\"CET\"),\n \"j\": pd.period_range(\"2013-01\", periods=3, freq=\"M\"),\n \"k\": pd.timedelta_range(\"1 day\", periods=3),\n }\n )\n\n ri = df.select_dtypes(include=np.number, exclude=\"floating\")\n ei = df[[\"b\", \"c\", \"k\"]]\n tm.assert_frame_equal(ri, ei)\n\n def test_select_dtypes_include_exclude_mixed_scalars_lists(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.Categorical(list(\"abc\")),\n \"g\": pd.date_range(\"20130101\", periods=3),\n \"h\": pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"i\": pd.date_range(\"20130101\", periods=3, tz=\"CET\"),\n \"j\": pd.period_range(\"2013-01\", periods=3, freq=\"M\"),\n \"k\": pd.timedelta_range(\"1 day\", periods=3),\n }\n )\n\n ri = df.select_dtypes(include=np.number, exclude=[\"floating\", \"timedelta\"])\n ei = df[[\"b\", \"c\"]]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=[np.number, \"category\"], exclude=\"floating\")\n ei = df[[\"b\", \"c\", \"f\", \"k\"]]\n tm.assert_frame_equal(ri, ei)\n\n def test_select_dtypes_duplicate_columns(self):\n # GH20839\n odict = OrderedDict\n df = DataFrame(\n odict(\n [\n (\"a\", list(\"abc\")),\n (\"b\", 
list(range(1, 4))),\n (\"c\", np.arange(3, 6).astype(\"u1\")),\n (\"d\", np.arange(4.0, 7.0, dtype=\"float64\")),\n (\"e\", [True, False, True]),\n (\"f\", pd.date_range(\"now\", periods=3).values),\n ]\n )\n )\n df.columns = [\"a\", \"a\", \"b\", \"b\", \"b\", \"c\"]\n\n expected = DataFrame(\n {\"a\": list(range(1, 4)), \"b\": np.arange(3, 6).astype(\"u1\")}\n )\n\n result = df.select_dtypes(include=[np.number], exclude=[\"floating\"])\n tm.assert_frame_equal(result, expected)\n\n def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.date_range(\"now\", periods=3).values,\n }\n )\n df[\"g\"] = df.f.diff()\n assert not hasattr(np, \"u8\")\n r = df.select_dtypes(include=[\"i8\", \"O\"], exclude=[\"timedelta\"])\n e = df[[\"a\", \"b\"]]\n tm.assert_frame_equal(r, e)\n\n r = df.select_dtypes(include=[\"i8\", \"O\", \"timedelta64[ns]\"])\n e = df[[\"a\", \"b\", \"g\"]]\n tm.assert_frame_equal(r, e)\n\n def test_select_dtypes_empty(self):\n df = DataFrame({\"a\": list(\"abc\"), \"b\": list(range(1, 4))})\n msg = \"at least one of include or exclude must be nonempty\"\n with pytest.raises(ValueError, match=msg):\n df.select_dtypes()\n\n def test_select_dtypes_bad_datetime64(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.date_range(\"now\", periods=3).values,\n }\n )\n with pytest.raises(ValueError, match=\".+ is too specific\"):\n df.select_dtypes(include=[\"datetime64[D]\"])\n\n with pytest.raises(ValueError, match=\".+ is too specific\"):\n df.select_dtypes(exclude=[\"datetime64[as]\"])\n\n def test_select_dtypes_datetime_with_tz(self):\n\n df2 = DataFrame(\n dict(\n A=Timestamp(\"20130102\", tz=\"US/Eastern\"),\n B=Timestamp(\"20130603\", tz=\"CET\"),\n ),\n index=range(5),\n )\n df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)\n result = df3.select_dtypes(include=[\"datetime64[ns]\"])\n expected = df3.reindex(columns=[])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"dtype\", [str, \"str\", np.string_, \"S1\", \"unicode\", np.unicode_, \"U1\"]\n )\n @pytest.mark.parametrize(\"arg\", [\"include\", \"exclude\"])\n def test_select_dtypes_str_raises(self, dtype, arg):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"g\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.date_range(\"now\", periods=3).values,\n }\n )\n msg = \"string dtypes are not allowed\"\n kwargs = {arg: [dtype]}\n\n with pytest.raises(TypeError, match=msg):\n df.select_dtypes(**kwargs)\n\n def test_select_dtypes_bad_arg_raises(self):\n df = DataFrame(\n {\n \"a\": list(\"abc\"),\n \"g\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.date_range(\"now\", periods=3).values,\n }\n )\n\n msg = \"data type.*not understood\"\n with pytest.raises(TypeError, match=msg):\n df.select_dtypes([\"blargy, blarg, blarg\"])\n\n def test_select_dtypes_typecodes(self):\n # GH 11990\n df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())\n expected = 
df\n FLOAT_TYPES = list(np.typecodes[\"AllFloat\"])\n tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)\n\n def test_dtypes_gh8722(self, float_string_frame):\n float_string_frame[\"bool\"] = float_string_frame[\"A\"] > 0\n result = float_string_frame.dtypes\n expected = Series(\n {k: v.dtype for k, v in float_string_frame.items()}, index=result.index\n )\n tm.assert_series_equal(result, expected)\n\n # compat, GH 8722\n with option_context(\"use_inf_as_na\", True):\n df = DataFrame([[1]])\n result = df.dtypes\n tm.assert_series_equal(result, Series({0: np.dtype(\"int64\")}))\n\n def test_ftypes(self, mixed_float_frame):\n frame = mixed_float_frame\n expected = Series(\n dict(\n A=\"float32:dense\",\n B=\"float32:dense\",\n C=\"float16:dense\",\n D=\"float64:dense\",\n )\n ).sort_values()\n\n # GH 26705 - Assert .ftypes is deprecated\n with tm.assert_produces_warning(FutureWarning):\n result = frame.ftypes.sort_values()\n tm.assert_series_equal(result, expected)\n\n def test_astype_float(self, float_frame):\n casted = float_frame.astype(int)\n expected = DataFrame(\n float_frame.values.astype(int),\n index=float_frame.index,\n columns=float_frame.columns,\n )\n tm.assert_frame_equal(casted, expected)\n\n casted = float_frame.astype(np.int32)\n expected = DataFrame(\n float_frame.values.astype(np.int32),\n index=float_frame.index,\n columns=float_frame.columns,\n )\n tm.assert_frame_equal(casted, expected)\n\n float_frame[\"foo\"] = \"5\"\n casted = float_frame.astype(int)\n expected = DataFrame(\n float_frame.values.astype(int),\n index=float_frame.index,\n columns=float_frame.columns,\n )\n tm.assert_frame_equal(casted, expected)\n\n def test_astype_mixed_float(self, mixed_float_frame):\n # mixed casting\n casted = mixed_float_frame.reindex(columns=[\"A\", \"B\"]).astype(\"float32\")\n _check_cast(casted, \"float32\")\n\n casted = mixed_float_frame.reindex(columns=[\"A\", \"B\"]).astype(\"float16\")\n _check_cast(casted, \"float16\")\n\n def test_astype_mixed_type(self, mixed_type_frame):\n # mixed casting\n mn = mixed_type_frame._get_numeric_data().copy()\n mn[\"little_float\"] = np.array(12345.0, dtype=\"float16\")\n mn[\"big_float\"] = np.array(123456789101112.0, dtype=\"float64\")\n\n casted = mn.astype(\"float64\")\n _check_cast(casted, \"float64\")\n\n casted = mn.astype(\"int64\")\n _check_cast(casted, \"int64\")\n\n casted = mn.reindex(columns=[\"little_float\"]).astype(\"float16\")\n _check_cast(casted, \"float16\")\n\n casted = mn.astype(\"float32\")\n _check_cast(casted, \"float32\")\n\n casted = mn.astype(\"int32\")\n _check_cast(casted, \"int32\")\n\n # to object\n casted = mn.astype(\"O\")\n _check_cast(casted, \"object\")\n\n def test_astype_with_exclude_string(self, float_frame):\n df = float_frame.copy()\n expected = float_frame.astype(int)\n df[\"string\"] = \"foo\"\n casted = df.astype(int, errors=\"ignore\")\n\n expected[\"string\"] = \"foo\"\n tm.assert_frame_equal(casted, expected)\n\n df = float_frame.copy()\n expected = float_frame.astype(np.int32)\n df[\"string\"] = \"foo\"\n casted = df.astype(np.int32, errors=\"ignore\")\n\n expected[\"string\"] = \"foo\"\n tm.assert_frame_equal(casted, expected)\n\n def test_astype_with_view_float(self, float_frame):\n\n # this is the only real reason to do it this way\n tf = np.round(float_frame).astype(np.int32)\n casted = tf.astype(np.float32, copy=False)\n\n # TODO(wesm): verification?\n tf = float_frame.astype(np.float64)\n casted = tf.astype(np.int64, copy=False) # noqa\n\n def 
test_astype_with_view_mixed_float(self, mixed_float_frame):\n\n tf = mixed_float_frame.reindex(columns=[\"A\", \"B\", \"C\"])\n\n casted = tf.astype(np.int64)\n casted = tf.astype(np.float32) # noqa\n\n @pytest.mark.parametrize(\"dtype\", [np.int32, np.int64])\n @pytest.mark.parametrize(\"val\", [np.nan, np.inf])\n def test_astype_cast_nan_inf_int(self, val, dtype):\n # see gh-14265\n #\n # Check NaN and inf --> raise error when converting to int.\n msg = \"Cannot convert non-finite values \\\\(NA or inf\\\\) to integer\"\n df = DataFrame([val])\n\n with pytest.raises(ValueError, match=msg):\n df.astype(dtype)\n\n def test_astype_str(self):\n # see gh-9757\n a = Series(date_range(\"2010-01-04\", periods=5))\n b = Series(date_range(\"3/6/2012 00:00\", periods=5, tz=\"US/Eastern\"))\n c = Series([Timedelta(x, unit=\"d\") for x in range(5)])\n d = Series(range(5))\n e = Series([0.0, 0.2, 0.4, 0.6, 0.8])\n\n df = DataFrame({\"a\": a, \"b\": b, \"c\": c, \"d\": d, \"e\": e})\n\n # Datetime-like\n result = df.astype(str)\n\n expected = DataFrame(\n {\n \"a\": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))),\n \"b\": list(map(str, map(Timestamp, b._values))),\n \"c\": list(\n map(\n str,\n map(lambda x: Timedelta(x)._repr_base(format=\"all\"), c._values),\n )\n ),\n \"d\": list(map(str, d._values)),\n \"e\": list(map(str, e._values)),\n }\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_astype_str_float(self):\n # see gh-11302\n result = DataFrame([np.NaN]).astype(str)\n expected = DataFrame([\"nan\"])\n\n tm.assert_frame_equal(result, expected)\n result = DataFrame([1.12345678901234567890]).astype(str)\n\n # < 1.14 truncates\n # >= 1.14 preserves the full repr\n val = \"1.12345678901\" if _np_version_under1p14 else \"1.1234567890123457\"\n expected = DataFrame([val])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"dtype_class\", [dict, Series])\n def test_astype_dict_like(self, dtype_class):\n # GH7271 & GH16717\n a = Series(date_range(\"2010-01-04\", periods=5))\n b = Series(range(5))\n c = Series([0.0, 0.2, 0.4, 0.6, 0.8])\n d = Series([\"1.0\", \"2\", \"3.14\", \"4\", \"5.4\"])\n df = DataFrame({\"a\": a, \"b\": b, \"c\": c, \"d\": d})\n original = df.copy(deep=True)\n\n # change type of a subset of columns\n dt1 = dtype_class({\"b\": \"str\", \"d\": \"float32\"})\n result = df.astype(dt1)\n expected = DataFrame(\n {\n \"a\": a,\n \"b\": Series([\"0\", \"1\", \"2\", \"3\", \"4\"]),\n \"c\": c,\n \"d\": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype=\"float32\"),\n }\n )\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(df, original)\n\n dt2 = dtype_class({\"b\": np.float32, \"c\": \"float32\", \"d\": np.float64})\n result = df.astype(dt2)\n expected = DataFrame(\n {\n \"a\": a,\n \"b\": Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype=\"float32\"),\n \"c\": Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype=\"float32\"),\n \"d\": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype=\"float64\"),\n }\n )\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(df, original)\n\n # change all columns\n dt3 = dtype_class({\"a\": str, \"b\": str, \"c\": str, \"d\": str})\n tm.assert_frame_equal(df.astype(dt3), df.astype(str))\n tm.assert_frame_equal(df, original)\n\n # error should be raised when using something other than column labels\n # in the keys of the dtype dict\n dt4 = dtype_class({\"b\": str, 2: str})\n dt5 = dtype_class({\"e\": str})\n msg = \"Only a column name can be used for the key in a dtype mappings argument\"\n with 
pytest.raises(KeyError, match=msg):\n df.astype(dt4)\n with pytest.raises(KeyError, match=msg):\n df.astype(dt5)\n tm.assert_frame_equal(df, original)\n\n # if the dtypes provided are the same as the original dtypes, the\n # resulting DataFrame should be the same as the original DataFrame\n dt6 = dtype_class({col: df[col].dtype for col in df.columns})\n equiv = df.astype(dt6)\n tm.assert_frame_equal(df, equiv)\n tm.assert_frame_equal(df, original)\n\n # GH 16717\n # if dtypes provided is empty, the resulting DataFrame\n # should be the same as the original DataFrame\n dt7 = dtype_class({})\n result = df.astype(dt7)\n tm.assert_frame_equal(df, equiv)\n tm.assert_frame_equal(df, original)\n\n def test_astype_duplicate_col(self):\n a1 = Series([1, 2, 3, 4, 5], name=\"a\")\n b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name=\"b\")\n a2 = Series([0, 1, 2, 3, 4], name=\"a\")\n df = concat([a1, b, a2], axis=1)\n\n result = df.astype(str)\n a1_str = Series([\"1\", \"2\", \"3\", \"4\", \"5\"], dtype=\"str\", name=\"a\")\n b_str = Series([\"0.1\", \"0.2\", \"0.4\", \"0.6\", \"0.8\"], dtype=str, name=\"b\")\n a2_str = Series([\"0\", \"1\", \"2\", \"3\", \"4\"], dtype=\"str\", name=\"a\")\n expected = concat([a1_str, b_str, a2_str], axis=1)\n tm.assert_frame_equal(result, expected)\n\n result = df.astype({\"a\": \"str\"})\n expected = concat([a1_str, b, a2_str], axis=1)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"dtype\",\n [\n \"category\",\n CategoricalDtype(),\n CategoricalDtype(ordered=True),\n CategoricalDtype(ordered=False),\n CategoricalDtype(categories=list(\"abcdef\")),\n CategoricalDtype(categories=list(\"edba\"), ordered=False),\n CategoricalDtype(categories=list(\"edcb\"), ordered=True),\n ],\n ids=repr,\n )\n def test_astype_categorical(self, dtype):\n # GH 18099\n d = {\"A\": list(\"abbc\"), \"B\": list(\"bccd\"), \"C\": list(\"cdde\")}\n df = DataFrame(d)\n result = df.astype(dtype)\n expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"cls\",\n [\n pd.api.types.CategoricalDtype,\n pd.api.types.DatetimeTZDtype,\n pd.api.types.IntervalDtype,\n ],\n )\n def test_astype_categoricaldtype_class_raises(self, cls):\n df = DataFrame({\"A\": [\"a\", \"a\", \"b\", \"c\"]})\n xpr = \"Expected an instance of {}\".format(cls.__name__)\n with pytest.raises(TypeError, match=xpr):\n df.astype({\"A\": cls})\n\n with pytest.raises(TypeError, match=xpr):\n df[\"A\"].astype(cls)\n\n @pytest.mark.parametrize(\"dtype\", [\"Int64\", \"Int32\", \"Int16\"])\n def test_astype_extension_dtypes(self, dtype):\n # GH 22578\n df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=[\"a\", \"b\"])\n\n expected1 = pd.DataFrame(\n {\n \"a\": integer_array([1, 3, 5], dtype=dtype),\n \"b\": integer_array([2, 4, 6], dtype=dtype),\n }\n )\n tm.assert_frame_equal(df.astype(dtype), expected1)\n tm.assert_frame_equal(df.astype(\"int64\").astype(dtype), expected1)\n tm.assert_frame_equal(df.astype(dtype).astype(\"float64\"), df)\n\n df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=[\"a\", \"b\"])\n df[\"b\"] = df[\"b\"].astype(dtype)\n expected2 = pd.DataFrame(\n {\"a\": [1.0, 3.0, 5.0], \"b\": integer_array([2, 4, 6], dtype=dtype)}\n )\n tm.assert_frame_equal(df, expected2)\n\n tm.assert_frame_equal(df.astype(dtype), expected1)\n tm.assert_frame_equal(df.astype(\"int64\").astype(dtype), expected1)\n\n @pytest.mark.parametrize(\"dtype\", [\"Int64\", \"Int32\", \"Int16\"])\n def 
test_astype_extension_dtypes_1d(self, dtype):\n # GH 22578\n df = pd.DataFrame({\"a\": [1.0, 2.0, 3.0]})\n\n expected1 = pd.DataFrame({\"a\": integer_array([1, 2, 3], dtype=dtype)})\n tm.assert_frame_equal(df.astype(dtype), expected1)\n tm.assert_frame_equal(df.astype(\"int64\").astype(dtype), expected1)\n\n df = pd.DataFrame({\"a\": [1.0, 2.0, 3.0]})\n df[\"a\"] = df[\"a\"].astype(dtype)\n expected2 = pd.DataFrame({\"a\": integer_array([1, 2, 3], dtype=dtype)})\n tm.assert_frame_equal(df, expected2)\n\n tm.assert_frame_equal(df.astype(dtype), expected1)\n tm.assert_frame_equal(df.astype(\"int64\").astype(dtype), expected1)\n\n @pytest.mark.parametrize(\"dtype\", [\"category\", \"Int64\"])\n def test_astype_extension_dtypes_duplicate_col(self, dtype):\n # GH 24704\n a1 = Series([0, np.nan, 4], name=\"a\")\n a2 = Series([np.nan, 3, 5], name=\"a\")\n df = concat([a1, a2], axis=1)\n\n result = df.astype(dtype)\n expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"kwargs\", [dict(), dict(other=None)])\n def test_df_where_with_category(self, kwargs):\n # GH 16979\n df = DataFrame(np.arange(2 * 3).reshape(2, 3), columns=list(\"ABC\"))\n mask = np.array([[True, False, True], [False, True, True]])\n\n # change type to category\n df.A = df.A.astype(\"category\")\n df.B = df.B.astype(\"category\")\n df.C = df.C.astype(\"category\")\n\n result = df.A.where(mask[:, 0], **kwargs)\n expected = Series(pd.Categorical([0, np.nan], categories=[0, 3]), name=\"A\")\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"dtype\", [{100: \"float64\", 200: \"uint64\"}, \"category\", \"float64\"]\n )\n def test_astype_column_metadata(self, dtype):\n # GH 19920\n columns = pd.UInt64Index([100, 200, 300], name=\"foo\")\n df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)\n df = df.astype(dtype)\n tm.assert_index_equal(df.columns, columns)\n\n @pytest.mark.parametrize(\"dtype\", [\"M8\", \"m8\"])\n @pytest.mark.parametrize(\"unit\", [\"ns\", \"us\", \"ms\", \"s\", \"h\", \"m\", \"D\"])\n def test_astype_from_datetimelike_to_objectt(self, dtype, unit):\n # tests astype to object dtype\n # gh-19223 / gh-12425\n dtype = \"{}[{}]\".format(dtype, unit)\n arr = np.array([[1, 2, 3]], dtype=dtype)\n df = DataFrame(arr)\n result = df.astype(object)\n assert (result.dtypes == object).all()\n\n if dtype.startswith(\"M8\"):\n assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit)\n else:\n assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit)\n\n @pytest.mark.parametrize(\"arr_dtype\", [np.int64, np.float64])\n @pytest.mark.parametrize(\"dtype\", [\"M8\", \"m8\"])\n @pytest.mark.parametrize(\"unit\", [\"ns\", \"us\", \"ms\", \"s\", \"h\", \"m\", \"D\"])\n def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):\n # tests all units from numeric origination\n # gh-19223 / gh-12425\n dtype = \"{}[{}]\".format(dtype, unit)\n arr = np.array([[1, 2, 3]], dtype=arr_dtype)\n df = DataFrame(arr)\n result = df.astype(dtype)\n expected = DataFrame(arr.astype(dtype))\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"unit\", [\"ns\", \"us\", \"ms\", \"s\", \"h\", \"m\", \"D\"])\n def test_astype_to_datetime_unit(self, unit):\n # tests all units from datetime origination\n # gh-19223\n dtype = \"M8[{}]\".format(unit)\n arr = np.array([[1, 2, 3]], dtype=dtype)\n df = DataFrame(arr)\n result = df.astype(dtype)\n expected = DataFrame(arr.astype(dtype))\n\n tm.assert_frame_equal(result, 
expected)\n\n @pytest.mark.parametrize(\"unit\", [\"ns\"])\n def test_astype_to_timedelta_unit_ns(self, unit):\n # preserver the timedelta conversion\n # gh-19223\n dtype = \"m8[{}]\".format(unit)\n arr = np.array([[1, 2, 3]], dtype=dtype)\n df = DataFrame(arr)\n result = df.astype(dtype)\n expected = DataFrame(arr.astype(dtype))\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"unit\", [\"us\", \"ms\", \"s\", \"h\", \"m\", \"D\"])\n def test_astype_to_timedelta_unit(self, unit):\n # coerce to float\n # gh-19223\n dtype = \"m8[{}]\".format(unit)\n arr = np.array([[1, 2, 3]], dtype=dtype)\n df = DataFrame(arr)\n result = df.astype(dtype)\n expected = DataFrame(df.values.astype(dtype).astype(float))\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"unit\", [\"ns\", \"us\", \"ms\", \"s\", \"h\", \"m\", \"D\"])\n def test_astype_to_incorrect_datetimelike(self, unit):\n # trying to astype a m to a M, or vice-versa\n # gh-19224\n dtype = \"M8[{}]\".format(unit)\n other = \"m8[{}]\".format(unit)\n\n df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))\n msg = (\n r\"cannot astype a datetimelike from \\[datetime64\\[ns\\]\\] to\"\n r\" \\[timedelta64\\[{}\\]\\]\"\n ).format(unit)\n with pytest.raises(TypeError, match=msg):\n df.astype(other)\n\n msg = (\n r\"cannot astype a timedelta from \\[timedelta64\\[ns\\]\\] to\"\n r\" \\[datetime64\\[{}\\]\\]\"\n ).format(unit)\n df = DataFrame(np.array([[1, 2, 3]], dtype=other))\n with pytest.raises(TypeError, match=msg):\n df.astype(dtype)\n\n def test_timedeltas(self):\n df = DataFrame(\n dict(\n A=Series(date_range(\"2012-1-1\", periods=3, freq=\"D\")),\n B=Series([timedelta(days=i) for i in range(3)]),\n )\n )\n result = df.dtypes\n expected = Series(\n [np.dtype(\"datetime64[ns]\"), np.dtype(\"timedelta64[ns]\")], index=list(\"AB\")\n )\n tm.assert_series_equal(result, expected)\n\n df[\"C\"] = df[\"A\"] + df[\"B\"]\n result = df.dtypes\n expected = Series(\n [\n np.dtype(\"datetime64[ns]\"),\n np.dtype(\"timedelta64[ns]\"),\n np.dtype(\"datetime64[ns]\"),\n ],\n index=list(\"ABC\"),\n )\n tm.assert_series_equal(result, expected)\n\n # mixed int types\n df[\"D\"] = 1\n result = df.dtypes\n expected = Series(\n [\n np.dtype(\"datetime64[ns]\"),\n np.dtype(\"timedelta64[ns]\"),\n np.dtype(\"datetime64[ns]\"),\n np.dtype(\"int64\"),\n ],\n index=list(\"ABCD\"),\n )\n tm.assert_series_equal(result, expected)\n\n def test_arg_for_errors_in_astype(self):\n # issue #14878\n\n df = DataFrame([1, 2, 3])\n\n with pytest.raises(ValueError):\n df.astype(np.float64, errors=True)\n\n df.astype(np.int8, errors=\"ignore\")\n\n def test_arg_for_errors_in_astype_dictlist(self):\n # GH-25905\n df = pd.DataFrame(\n [\n {\"a\": \"1\", \"b\": \"16.5%\", \"c\": \"test\"},\n {\"a\": \"2.2\", \"b\": \"15.3\", \"c\": \"another_test\"},\n ]\n )\n expected = pd.DataFrame(\n [\n {\"a\": 1.0, \"b\": \"16.5%\", \"c\": \"test\"},\n {\"a\": 2.2, \"b\": \"15.3\", \"c\": \"another_test\"},\n ]\n )\n type_dict = {\"a\": \"float64\", \"b\": \"float64\", \"c\": \"object\"}\n\n result = df.astype(dtype=type_dict, errors=\"ignore\")\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"input_vals\",\n [\n ([1, 2]),\n ([\"1\", \"2\"]),\n (list(pd.date_range(\"1/1/2011\", periods=2, freq=\"H\"))),\n (list(pd.date_range(\"1/1/2011\", periods=2, freq=\"H\", tz=\"US/Eastern\"))),\n ([pd.Interval(left=0, right=5)]),\n ],\n )\n def test_constructor_list_str(self, input_vals, string_dtype):\n # GH 16605\n # Ensure that data 
elements are converted to strings when\n # dtype is str, 'str', or 'U'\n\n result = DataFrame({\"A\": input_vals}, dtype=string_dtype)\n expected = DataFrame({\"A\": input_vals}).astype({\"A\": string_dtype})\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_list_str_na(self, string_dtype):\n\n result = DataFrame({\"A\": [1.0, 2.0, None]}, dtype=string_dtype)\n expected = DataFrame({\"A\": [\"1.0\", \"2.0\", None]}, dtype=object)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"data, expected\",\n [\n # empty\n (DataFrame(), True),\n # multi-same\n (DataFrame({\"A\": [1, 2], \"B\": [1, 2]}), True),\n # multi-object\n (\n DataFrame(\n {\n \"A\": np.array([1, 2], dtype=object),\n \"B\": np.array([\"a\", \"b\"], dtype=object),\n }\n ),\n True,\n ),\n # multi-extension\n (\n DataFrame(\n {\"A\": pd.Categorical([\"a\", \"b\"]), \"B\": pd.Categorical([\"a\", \"b\"])}\n ),\n True,\n ),\n # differ types\n (DataFrame({\"A\": [1, 2], \"B\": [1.0, 2.0]}), False),\n # differ sizes\n (\n DataFrame(\n {\n \"A\": np.array([1, 2], dtype=np.int32),\n \"B\": np.array([1, 2], dtype=np.int64),\n }\n ),\n False,\n ),\n # multi-extension differ\n (\n DataFrame(\n {\"A\": pd.Categorical([\"a\", \"b\"]), \"B\": pd.Categorical([\"b\", \"c\"])}\n ),\n False,\n ),\n ],\n )\n def test_is_homogeneous_type(self, data, expected):\n assert data._is_homogeneous_type is expected\n\n def test_asarray_homogenous(self):\n df = pd.DataFrame({\"A\": pd.Categorical([1, 2]), \"B\": pd.Categorical([1, 2])})\n result = np.asarray(df)\n # may change from object in the future\n expected = np.array([[1, 1], [2, 2]], dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_str_to_small_float_conversion_type(self):\n # GH 20388\n np.random.seed(13)\n col_data = [str(np.random.random() * 1e-12) for _ in range(5)]\n result = pd.DataFrame(col_data, columns=[\"A\"])\n expected = pd.DataFrame(col_data, columns=[\"A\"], dtype=object)\n tm.assert_frame_equal(result, expected)\n # change the dtype of the elements from object to float one by one\n result.loc[result.index, \"A\"] = [float(x) for x in col_data]\n expected = pd.DataFrame(col_data, columns=[\"A\"], dtype=float)\n tm.assert_frame_equal(result, expected)\n\n\nclass TestDataFrameDatetimeWithTZ:\n def test_interleave(self, timezone_frame):\n\n # interleave with object\n result = timezone_frame.assign(D=\"foo\").values\n expected = np.array(\n [\n [\n Timestamp(\"2013-01-01 00:00:00\"),\n Timestamp(\"2013-01-02 00:00:00\"),\n Timestamp(\"2013-01-03 00:00:00\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00-0500\", tz=\"US/Eastern\"),\n pd.NaT,\n Timestamp(\"2013-01-03 00:00:00-0500\", tz=\"US/Eastern\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00+0100\", tz=\"CET\"),\n pd.NaT,\n Timestamp(\"2013-01-03 00:00:00+0100\", tz=\"CET\"),\n ],\n [\"foo\", \"foo\", \"foo\"],\n ],\n dtype=object,\n ).T\n tm.assert_numpy_array_equal(result, expected)\n\n # interleave with only datetime64[ns]\n result = timezone_frame.values\n expected = np.array(\n [\n [\n Timestamp(\"2013-01-01 00:00:00\"),\n Timestamp(\"2013-01-02 00:00:00\"),\n Timestamp(\"2013-01-03 00:00:00\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00-0500\", tz=\"US/Eastern\"),\n pd.NaT,\n Timestamp(\"2013-01-03 00:00:00-0500\", tz=\"US/Eastern\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00+0100\", tz=\"CET\"),\n pd.NaT,\n Timestamp(\"2013-01-03 00:00:00+0100\", tz=\"CET\"),\n ],\n ],\n dtype=object,\n ).T\n tm.assert_numpy_array_equal(result, expected)\n\n def test_astype(self, 
timezone_frame):\n # astype\n expected = np.array(\n [\n [\n Timestamp(\"2013-01-01 00:00:00\"),\n Timestamp(\"2013-01-02 00:00:00\"),\n Timestamp(\"2013-01-03 00:00:00\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00-0500\", tz=\"US/Eastern\"),\n pd.NaT,\n Timestamp(\"2013-01-03 00:00:00-0500\", tz=\"US/Eastern\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00+0100\", tz=\"CET\"),\n pd.NaT,\n Timestamp(\"2013-01-03 00:00:00+0100\", tz=\"CET\"),\n ],\n ],\n dtype=object,\n ).T\n expected = DataFrame(\n expected,\n index=timezone_frame.index,\n columns=timezone_frame.columns,\n dtype=object,\n )\n result = timezone_frame.astype(object)\n tm.assert_frame_equal(result, expected)\n\n result = timezone_frame.astype(\"datetime64[ns]\")\n expected = DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3),\n \"B\": (\n date_range(\"20130101\", periods=3, tz=\"US/Eastern\")\n .tz_convert(\"UTC\")\n .tz_localize(None)\n ),\n \"C\": (\n date_range(\"20130101\", periods=3, tz=\"CET\")\n .tz_convert(\"UTC\")\n .tz_localize(None)\n ),\n }\n )\n expected.iloc[1, 1] = pd.NaT\n expected.iloc[1, 2] = pd.NaT\n tm.assert_frame_equal(result, expected)\n\n def test_astype_str(self, timezone_frame):\n # str formatting\n result = timezone_frame.astype(str)\n expected = DataFrame(\n [\n [\n \"2013-01-01\",\n \"2013-01-01 00:00:00-05:00\",\n \"2013-01-01 00:00:00+01:00\",\n ],\n [\"2013-01-02\", \"NaT\", \"NaT\"],\n [\n \"2013-01-03\",\n \"2013-01-03 00:00:00-05:00\",\n \"2013-01-03 00:00:00+01:00\",\n ],\n ],\n columns=timezone_frame.columns,\n )\n tm.assert_frame_equal(result, expected)\n\n with option_context(\"display.max_columns\", 20):\n result = str(timezone_frame)\n assert (\n \"0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00\"\n ) in result\n assert (\n \"1 2013-01-02 NaT NaT\"\n ) in result\n assert (\n \"2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00\"\n ) in result\n",
"\"\"\"\nfrozen (immutable) data structures to support MultiIndexing\n\nThese are used for:\n\n- .names (FrozenList)\n- .levels & .codes (FrozenNDArray)\n\n\"\"\"\nimport warnings\n\nimport numpy as np\n\nfrom pandas.util._decorators import deprecate_kwarg\n\nfrom pandas.core.dtypes.cast import coerce_indexer_dtype\n\nfrom pandas.core.base import PandasObject\n\nfrom pandas.io.formats.printing import pprint_thing\n\n\nclass FrozenList(PandasObject, list):\n \"\"\"\n Container that doesn't allow setting item *but*\n because it's technically non-hashable, will be used\n for lookups, appropriately, etc.\n \"\"\"\n\n # Side note: This has to be of type list. Otherwise,\n # it messes up PyTables type checks.\n\n def union(self, other) -> \"FrozenList\":\n \"\"\"\n Returns a FrozenList with other concatenated to the end of self.\n\n Parameters\n ----------\n other : array-like\n The array-like whose elements we are concatenating.\n\n Returns\n -------\n diff : FrozenList\n The collection difference between self and other.\n \"\"\"\n if isinstance(other, tuple):\n other = list(other)\n return type(self)(super().__add__(other))\n\n def difference(self, other) -> \"FrozenList\":\n \"\"\"\n Returns a FrozenList with elements from other removed from self.\n\n Parameters\n ----------\n other : array-like\n The array-like whose elements we are removing self.\n\n Returns\n -------\n diff : FrozenList\n The collection difference between self and other.\n \"\"\"\n other = set(other)\n temp = [x for x in self if x not in other]\n return type(self)(temp)\n\n # TODO: Consider deprecating these in favor of `union` (xref gh-15506)\n __add__ = __iadd__ = union\n\n def __getitem__(self, n):\n if isinstance(n, slice):\n return self.__class__(super().__getitem__(n))\n return super().__getitem__(n)\n\n def __radd__(self, other):\n if isinstance(other, tuple):\n other = list(other)\n return self.__class__(other + list(self))\n\n def __eq__(self, other):\n if isinstance(other, (tuple, FrozenList)):\n other = list(other)\n return super().__eq__(other)\n\n __req__ = __eq__\n\n def __mul__(self, other):\n return self.__class__(super().__mul__(other))\n\n __imul__ = __mul__\n\n def __reduce__(self):\n return self.__class__, (list(self),)\n\n def __hash__(self):\n return hash(tuple(self))\n\n def _disabled(self, *args, **kwargs):\n \"\"\"This method will not function because object is immutable.\"\"\"\n raise TypeError(\n \"'{cls}' does not support mutable operations.\".format(\n cls=self.__class__.__name__\n )\n )\n\n def __str__(self) -> str:\n return pprint_thing(self, quote_strings=True, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n\n def __repr__(self) -> str:\n return \"%s(%s)\" % (self.__class__.__name__, str(self))\n\n __setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled\n pop = append = extend = remove = sort = insert = _disabled\n\n\nclass FrozenNDArray(PandasObject, np.ndarray):\n\n # no __array_finalize__ for now because no metadata\n def __new__(cls, data, dtype=None, copy=False):\n warnings.warn(\n \"\\nFrozenNDArray is deprecated and will be removed in a \"\n \"future version.\\nPlease use `numpy.ndarray` instead.\\n\",\n FutureWarning,\n stacklevel=2,\n )\n\n if copy is None:\n copy = not isinstance(data, FrozenNDArray)\n res = np.array(data, dtype=dtype, copy=copy).view(cls)\n return res\n\n def _disabled(self, *args, **kwargs):\n \"\"\"This method will not function because object is immutable.\"\"\"\n raise TypeError(\n \"'{cls}' does not support mutable 
operations.\".format(cls=self.__class__)\n )\n\n __setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled\n put = itemset = fill = _disabled\n\n def _shallow_copy(self):\n return self.view()\n\n def values(self):\n \"\"\"returns *copy* of underlying array\"\"\"\n arr = self.view(np.ndarray).copy()\n return arr\n\n def __repr__(self) -> str:\n \"\"\"\n Return a string representation for this object.\n \"\"\"\n prepr = pprint_thing(self, escape_chars=(\"\\t\", \"\\r\", \"\\n\"), quote_strings=True)\n return \"%s(%s, dtype='%s')\" % (type(self).__name__, prepr, self.dtype)\n\n @deprecate_kwarg(old_arg_name=\"v\", new_arg_name=\"value\")\n def searchsorted(self, value, side=\"left\", sorter=None):\n \"\"\"\n Find indices to insert `value` so as to maintain order.\n\n For full documentation, see `numpy.searchsorted`\n\n See Also\n --------\n numpy.searchsorted : Equivalent function.\n \"\"\"\n\n # We are much more performant if the searched\n # indexer is the same type as the array.\n #\n # This doesn't matter for int64, but DOES\n # matter for smaller int dtypes.\n #\n # xref: https://github.com/numpy/numpy/issues/5370\n try:\n value = self.dtype.type(value)\n except ValueError:\n pass\n\n return super().searchsorted(value, side=side, sorter=sorter)\n\n\ndef _ensure_frozen(array_like, categories, copy=False):\n array_like = coerce_indexer_dtype(array_like, categories)\n array_like = array_like.view(FrozenNDArray)\n if copy:\n array_like = array_like.copy()\n return array_like\n"
] |
[
[
"pandas.to_datetime",
"pandas.Series",
"numpy.asarray",
"pandas.util.testing.assert_produces_warning",
"pandas.core.dtypes.dtypes.DatetimeTZDtype",
"pandas.DataFrame",
"numpy.dtype",
"pandas.util.testing.assert_frame_equal",
"numpy.round",
"pandas.util.testing.assert_index_equal",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.core.arrays.integer_array",
"pandas.concat",
"pandas.Categorical",
"pandas.UInt64Index",
"pandas.option_context",
"pandas.Timedelta",
"pandas.Interval",
"pandas.date_range",
"pandas.core.dtypes.dtypes.CategoricalDtype",
"numpy.array",
"pandas.timedelta_range",
"numpy.random.random",
"numpy.random.seed",
"pandas.period_range",
"pandas.to_timedelta",
"pandas.Timestamp"
],
[
"pandas.io.formats.printing.pprint_thing",
"numpy.array",
"pandas.core.dtypes.cast.coerce_indexer_dtype",
"pandas.util._decorators.deprecate_kwarg"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
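The record above pairs pandas' dtype/astype test module (plus the MultiIndex `FrozenList`/`FrozenNDArray` helpers) with the pandas and numpy calls they make. As a minimal, self-contained sketch of the two DataFrame operations that test file exercises most, `select_dtypes` and `astype` — the column names and values below are illustrative placeholders, not taken from the record:

```python
# Minimal pandas.DataFrame.select_dtypes / astype usage
# (column names and data are hypothetical placeholders).
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "a": list("abc"),
        "b": list(range(1, 4)),
        "c": np.arange(3, 6).astype("u1"),
        "d": np.arange(4.0, 7.0, dtype="float64"),
    }
)

# Keep numeric columns but drop the floating-point ones.
ints_only = df.select_dtypes(include=[np.number], exclude=["floating"])

# Cast a subset of columns with a {column: dtype} mapping.
casted = df.astype({"b": "float32", "d": "float64"})

print(ints_only.dtypes)
print(casted.dtypes)
```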
saw-mill/HawkEyeSquash
|
[
"2259923cf4407e259aa684995a642e8145efafe3"
] |
[
"5FpsKalman.py"
] |
[
"import time\nimport cv2\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom Modules.foregroundExtraction import readyFrame, frameDifferencing, morphologicalOperations, natural_sort, convert480p\nfrom Modules.ballDetectionRes import findContours, sizeDetection, playerProximityDetection, regionDetection, courtBoundaryDetection\n\n\n# Initializing\ndatasetName = \"Dataset1\"\nif (datasetName == \"Dataset1\"):\n startFrameDataset = 65\n endFrameDataset = 560\nelif (datasetName == \"Dataset2\"):\n startFrameDataset = 35\n endFrameDataset = 215\nelif (datasetName == \"Dataset3\"):\n startFrameDataset = 10\n endFrameDataset = 140\nelif (datasetName == \"Dataset4\"):\n startFrameDataset = 1\n endFrameDataset = 330\nelif (datasetName == \"Dataset5\"):\n startFrameDataset = 1\n endFrameDataset = 200\nelif (datasetName == \"Dataset6\"):\n startFrameDataset = 0\n endFrameDataset = 180\nelif (datasetName == \"Dataset7\"):\n startFrameDataset = 0\n endFrameDataset = 220\nelif (datasetName == \"Dataset8\"):\n startFrameDataset = 0\n endFrameDataset = 240\nelif (datasetName == \"Dataset9\"):\n startFrameDataset = 0\n endFrameDataset = 200\nelif (datasetName == \"Dataset10\"):\n startFrameDataset = 0\n endFrameDataset = 230\ndictFrameNumberscX = {}\ndictFrameNumberscY = {}\nballCandidatesPreviousFrame = list()\n#Profiling Structures\ntrackingTime = list()\ndetectionTime = list()\nfeTime = list()\nprocessTime = list()\n\n#Reading frames\nstartTimeReadingFrames = time.time()\n# Creating Video Object\ncap = cv2.VideoCapture('DatasetVideos/' + datasetName + '.mp4')\nprint(\"Total Frames: {}\".format(cap.get(cv2.CAP_PROP_FRAME_COUNT)))\ncap.set(cv2.CAP_PROP_POS_FRAMES, startFrameDataset)\nendTimeReadingFrames = time.time()\nprint(\"Reading Frames--- %s seconds ---\" %\n (endTimeReadingFrames - startTimeReadingFrames))\n\nwidth = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\nheight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\nprint(\"size:\", height, width)\n\nfps = cap.get(cv2.CAP_PROP_FPS)\nprint(\"FPS: \",fps)\n\n# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n# print(\"size:\", cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n#Kalman Initialization\nstartKalmanInitTime = time.time()\n\nmp = np.array((2, 1), np.float32) # measurement\ntp = np.zeros((2, 1), np.float32) # tracked / prediction\nkalman = cv2.KalmanFilter(4, 2)\nkalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)\nkalman.transitionMatrix = np.array(\n [[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)\nkalman.processNoiseCov = np.array(\n [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.009\nkalman.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 0.00003\n\nendKalmanInitTime = time.time()\ni = 0 #Keeping track of the frame number\nwhile (cap.isOpened()):\n print(\"######Start of Frame {}#####\".format(i + 1))\n startTimeProcess = time.time()\n if(i % 5 == 0): # If first frame read 3 frames\n ret1, previousFrame = cap.read()\n ret2, currFrame = cap.read()\n ret3, nextFrame = cap.read()\n print(previousFrame.shape)\n # print(\"Frame Number {}\".format(i + 1))\n\n # Changing from 720p to 480p\n previousFrame = convert480p(previousFrame)\n currFrame = convert480p(currFrame)\n nextFrame = convert480p(nextFrame)\n #\n #\n # FOREGROUND EXTRACTION\n #\n #\n\n startTimeForeGroundExtraction = time.time()\n\n # Readying the frames\n previousFrameGray, currFrameGray, nextFrameGray = readyFrame(\n previousFrame, 
currFrame, nextFrame)\n\n # Performing frame differencing\n threshFrameDifferencing = frameDifferencing(\n previousFrameGray, currFrameGray, nextFrameGray)\n\n # Performing morphological operations\n final_image = morphologicalOperations(threshFrameDifferencing, 4, 4)\n\n # final_image = cv2.medianBlur(final_image, 7)\n\n # cv2.imshow('final image', final_image)\n endTimeForegroundExtraction = time.time()\n print(\"Foreground Extraction--- %s seconds ---\" %\n (endTimeForegroundExtraction - startTimeForeGroundExtraction))\n feTime.append(endTimeForegroundExtraction - startTimeForeGroundExtraction) #Profiling\n\n #\n #\n # BALL DETECTION\n #\n #\n\n startTimeBallDetection = time.time()\n\n # Finding contours in the frame\n contours, hier = findContours(final_image)\n\n # Separating candidates based on size\n ballCandidates, playerCadidates, incompletePlayerCandidates = sizeDetection(\n contours, currFrame, i)\n\n # Removing candidates outside the Court Boundary in Dataset2\n # if (datasetName == 'Dataset2' or datasetName == 'Dataset3'):\n ballCandidates, playerCadidates, incompletePlayerCandidates = courtBoundaryDetection(datasetName,\n ballCandidates, playerCadidates, incompletePlayerCandidates, currFrame)\n\n # Removing Candidates that are close to the Players\n ballCandidatesFiltered = playerProximityDetection(\n ballCandidates, playerCadidates, incompletePlayerCandidates, currFrame)\n\n # Removing candidates that are not in their expected region after motion\n ballCandidatesFilteredProximity, ballCandidatesPreviousFrame = regionDetection(\n ballCandidatesFiltered, ballCandidatesPreviousFrame, currFrame)\n\n endTimeBallDetection = time.time()\n print(\"Ball Detection--- %s seconds ---\" %\n (endTimeBallDetection - startTimeBallDetection))\n detectionTime.append(endTimeBallDetection - startTimeBallDetection) #Profiling\n #\n #\n # BALL TRACKING\n #\n #\n\n startKalmanPredTime = time.time()\n\n # Calculating the centre of the image frame for initstate\n height, width, channels = currFrame.shape\n imageCenter = [width/2, height/2]\n\n # First Frame\n if (i + 1 == 1):\n # If no candidate detected, use image centre as initial state\n if not ballCandidatesFilteredProximity:\n initstate = imageCenter\n # If Candidates detected\n else:\n # If a single candidate detected, use it for the initial state\n if (len(ballCandidatesFilteredProximity) == 1):\n x = ballCandidatesFilteredProximity[0][0]\n y = ballCandidatesFilteredProximity[0][1]\n mp = np.array([[np.float32(x)], [np.float32(y)]])\n initstate = [mp[0], mp[1]]\n # If multiple candidates, calculate candidate closest to the image centre for initial state\n else:\n minDistInitCand = 10000\n for cand in ballCandidatesFilteredProximity:\n distCenter = math.sqrt(math.pow(\n (cand[0] - imageCenter[0]), 2) + math.pow((cand[1] - imageCenter[1]), 2))\n if (distCenter < minDistInitCand):\n initstate = [cand[0], cand[1]]\n minDistInitCand = distCenter\n # Using Initstate for First Prediction\n tp[0] = initstate[0]\n tp[1] = initstate[1]\n cv2.circle(currFrame, (tp[0], tp[1]), 10, (0, 0, 255), -1)\n dictFrameNumberscX[i + 1] = tp[0]\n dictFrameNumberscY[i + 1] = tp[1]\n if(__debug__):\n cv2.imshow('Candidate image', currFrame)\n # If not the first frame\n else:\n # Do Prediction\n tp = kalman.predict()\n tp[0] = tp[0] + initstate[0]\n tp[1] = tp[1] + initstate[1]\n\n # If one candidate, measure and correct\n if (len(ballCandidatesFilteredProximity) == 1):\n for cand in ballCandidatesFilteredProximity:\n # distncePredAct = math.sqrt(\n # math.pow((cand[0] 
- tp[0]), 2) + math.pow((cand[1] - tp[1]), 2))\n x = cand[0]\n y = cand[1]\n x = x - initstate[0]\n y = y - initstate[1]\n mp = np.array([[np.float32(x)], [np.float32(y)]])\n corrected = kalman.correct(mp)\n corrected[0] = corrected[0] + initstate[0]\n corrected[1] = corrected[1] + initstate[1]\n cv2.circle(\n currFrame, (corrected[0], corrected[1]), 10, (0, 255, 0), -1)\n dictFrameNumberscX[i + 1] = corrected[0]\n dictFrameNumberscY[i + 1] = corrected[1]\n # cv2.circle(currFrame, (tp[0], tp[1]),\n # 10, (0, 0, 255), -1) # pred\n\n # #drawing a line\n # cv2.line(currFrame, (int(cand[0]), int(cand[1])), (int(\n # tp[0]), int(tp[1])), (255, 0, 0), 2)\n # xmidPointPlayer = (cand[0]+tp[0])*0.5\n # ymidPointPlayer = (cand[1]+tp[1])*0.5\n # cv2.putText(currFrame, str(round(distncePredAct,2)), (int(xmidPointPlayer), int(\n # ymidPointPlayer)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n # print(\"Distance predact {}\".format(distncePredAct))\n\n cv2.drawContours(currFrame, [cand[3]], -1, (255, 0,), 2)\n cv2.putText(currFrame, \"A: \"+str(\n cand[2])+\" MD:\"+str(cand[5]), (cand[0] + 1, cand[1] + 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n if(__debug__):\n cv2.imshow('Candidate image', currFrame)\n\n # If multiple candidates,\n elif(len(ballCandidatesFilteredProximity) > 1):\n minDistObject = 1000\n minDistXcoord = 0\n minDistYcoord = 0\n # Calculate candidate closest to the prediction\n for cand in ballCandidatesFilteredProximity:\n distncePredAct = math.sqrt(\n math.pow((cand[0] - tp[0]), 2) + math.pow((cand[1] - tp[1]), 2))\n # #drawing a line\n # cv2.line(currFrame, (int(cand[0]), int(cand[1])), (int(\n # tp[0]), int(tp[1])), (255, 0, 0), 2)\n # xmidPointPlayer = (cand[0]+tp[0])*0.5\n # ymidPointPlayer = (cand[1]+tp[1])*0.5\n # cv2.putText(currFrame, str(round(distncePredAct,2)), (int(xmidPointPlayer), int(\n # ymidPointPlayer)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n # print(\"Distance predact {}\".format(distncePredAct))\n\n if (distncePredAct < 50):\n if (distncePredAct < minDistObject):\n minDistObject = distncePredAct\n minDistXcoord = cand[0]\n minDistYcoord = cand[1]\n # If no candidate is close to the prediction, predict only\n if (minDistObject == 1000):\n cv2.circle(currFrame, (tp[0], tp[1]), 10, (0, 0, 255), -1)\n dictFrameNumberscX[i + 1] = tp[0]\n dictFrameNumberscY[i + 1] = tp[1]\n # If a candidate close to the prediction, use it for measurement and correction\n else:\n x = minDistXcoord\n y = minDistYcoord\n x = x - initstate[0]\n y = y - initstate[1]\n mp = np.array([[np.float32(x)], [np.float32(y)]])\n corrected = kalman.correct(mp)\n corrected[0] = corrected[0] + initstate[0]\n corrected[1] = corrected[1] + initstate[1]\n cv2.circle(\n currFrame, (corrected[0], corrected[1]), 10, (0, 255, 0), -1)\n dictFrameNumberscX[i + 1] = corrected[0]\n dictFrameNumberscY[i+1] = corrected[1]\n\n cv2.drawContours(currFrame, [cand[3]], -1, (255, 0,), 2)\n cv2.putText(currFrame, \"A:\"+str(\n cand[2])+\" MD:\"+str(cand[5]), (cand[0] + 1, cand[1] + 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n if(__debug__):\n cv2.imshow('Candidate image', currFrame)\n # If no candidate detected, predict only\n else:\n cv2.circle(currFrame, (tp[0], tp[1]), 10, (0, 0, 255), -1)\n dictFrameNumberscX[i + 1] = tp[0]\n dictFrameNumberscY[i+1] = tp[1]\n if(__debug__):\n cv2.imshow('Candidate image', currFrame)\n\n endKalmanPredTime = time.time()\n\n trackingTime.append(endKalmanPredTime -\n startKalmanPredTime)\n\n print(\"Ball Tracking in --- %s seconds ---\" % 
((endKalmanPredTime -\n startKalmanPredTime)))\n\n endTimeProcess = time.time()\n processTime.append(endTimeProcess - startTimeProcess) #Profiling\n # Print Ball Trajectory 2D Feature Image\n if (((i + 1) % endFrameDataset) == 0):\n print(\"Average FE Time: {}\".format(\n sum(feTime)/(endFrameDataset-startFrameDataset)))\n print(\"Average Detection Time: {}\".format(\n sum(detectionTime)/(endFrameDataset-startFrameDataset)))\n print(\"Average Tracking Time: {}\".format(\n (sum(trackingTime) / (endFrameDataset - startFrameDataset))+(endKalmanInitTime-startKalmanInitTime)))\n print(\"Average Total Process Time: {}\".format(\n sum(processTime) / (endFrameDataset - startFrameDataset)))\n keys = list(dictFrameNumberscX.keys())\n xvalues = list(dictFrameNumberscX.values())\n yvalues = list(dictFrameNumberscY.values())\n plt.xlabel('Frame Number')\n plt.ylabel('Candidate Kalman X-Coordinate')\n plt.title('CFI with Kalman X Prediction')\n plt.plot(keys, xvalues, 'r--', linewidth=2)\n # plt.axis([-20, 600, 0, 1300])\n # plt.axis([-20,210,100,1200])\n plt.show()\n\n plt.xlabel('Frame Number')\n plt.ylabel('Candidate Kalman Y-Coordinate')\n plt.title('CFI with Kalman Y Prediction')\n plt.plot(keys, yvalues, 'g--', linewidth=2)\n # plt.axis([-10,250,20,650])\n plt.show()\n\n break\n # scatter plot\n\n # print(dictFrameNumberscY)\n # for data_dict in dictFrameNumberscX.items():\n # print(data_dict)\n # x = data_dict[0]\n # values = data_dict[1]\n # for value in values:\n # # plt.subplot(1, 2, 1)\n # plt.scatter(x,value)\n # plt.xlabel('Frame Number')\n # plt.ylabel('Candidate X-Coordinate')\n # plt.title(\"Candidate Feature Image X-coordinate\")\n # dictFrameNumberscX.clear()\n # plt.show()\n\n # plt.xlabel('Frame Number')\n # plt.ylabel('Candidate Kalman Y-Coordinate')\n # plt.title('CFI with Kalman Y Prediction')\n # plt.plot(keys, yvalues, 'g--', linewidth=2)\n # plt.show()\n # cv2.imwrite(datasetName+\".png\",currFrame)\n print(\"######End of Frame##### {}\".format(i+1))\n i += 1 # increments the loop\n\n # Exits the loop when Esc is pressed, goes to previous frame when space pressed and goes to next frame when any other key is pressed\n if(__debug__):\n k = cv2.waitKey(30)\n if k == 27:\n break\n elif k == 32:\n i -= 2\n else:\n continue\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"numpy.float32",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
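The tracking script above builds a constant-velocity `cv2.KalmanFilter(4, 2)` and alternates `predict()` with `correct()` on detected ball candidates. A minimal sketch of that predict/correct loop, assuming `opencv-python` is installed; the (x, y) measurements fed in below are made-up values, not detections from the dataset:

```python
# Constant-velocity Kalman predict/correct loop in the style of 5FpsKalman.py
# (the measurements below are hypothetical, not real detections).
import numpy as np
import cv2

kalman = cv2.KalmanFilter(4, 2)  # state [x, y, vx, vy], measurement [x, y]
kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.009
kalman.measurementNoiseCov = np.eye(2, dtype=np.float32) * 0.00003

for x, y in [(100.0, 200.0), (102.0, 203.0), (105.0, 207.0)]:
    predicted = kalman.predict()                      # a-priori state estimate
    measurement = np.array([[np.float32(x)], [np.float32(y)]])
    corrected = kalman.correct(measurement)           # a-posteriori estimate
    print(predicted[:2].ravel(), corrected[:2].ravel())
```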
thuiar/cmcnn
|
[
"a18f09fa63baf74bb083779fa0a8881d55226e1a",
"a18f09fa63baf74bb083779fa0a8881d55226e1a"
] |
[
"getFeatures.py",
"utils/island_loss.py"
] |
[
"import os\nimport gc\nimport time\nimport torch\nimport random\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\nfrom tqdm import tqdm\n\nfrom trains import *\nfrom config import *\nfrom utils.log import *\nfrom utils.metricsTop import *\nfrom utils.functions import *\nfrom models.AMIO import AMIO\nfrom trains.ATIO import ATIO\nfrom data.load_data import FERDataLoader \nfrom sklearn.model_selection import KFold\n\ndef do_test(args, model, dataloader, mode='test'):\n model.eval()\n y_true, y_pred = [], []\n metrics = MetricsTop(args).getMetrics(args.metricsName)\n features = []\n with torch.no_grad():\n for batch_data in tqdm(dataloader[mode]):\n data = batch_data['data'].to(args.device)\n labels = batch_data['labels'].to(args.device)\n emotions = batch_data['emotions']\n # model\n output = model(data)\n features.append(output['fer_feature'].cpu().numpy())\n y_true.append(labels.cpu().numpy())\n\n features = np.concatenate(features, axis=0)\n labels = np.concatenate(y_true, axis=0)\n return features, labels\n\ndef run(args):\n if not os.path.exists(args.res_save_path):\n os.mkdir(args.res_save_path)\n # get dst dataset params\n config = Config(args)\n args = config.get_config()\n # train_df0 = pd.read_csv(os.path.join(args.label_dir,'train.csv'))\n # kf = KFold(10,shuffle = False)\n # for knum, indexs in enumerate(kf.split(train_df0)):\n # # print(indexs)\n # args.train_df = train_df0.iloc[indexs[0],:]\n # args.test_df = train_df0.iloc[indexs[1],:]\n # break\n args.train_df = pd.read_csv(os.path.join(args.label_dir,'train.csv'))\n args.test_df = pd.read_csv(os.path.join(args.label_dir,'test.csv'))\n # get dataloader\n dataloader = FERDataLoader(args)\n # gpu\n using_cuda = len(args.gpu_ids) > 0 and torch.cuda.is_available()\n device = torch.device('cuda:%d' % args.gpu_ids[0] if using_cuda else 'cpu')\n args.device = device\n # build model\n model = AMIO(args).to(device)\n atio = ATIO().getTrain(args)\n # load best model\n model_save_pathes = glob(os.path.join(args.model_save_path,\\\n f'{args.modelName}-{args.datasetName}.pth'))\n assert len(model_save_pathes) == 1\n model.load_state_dict(torch.load(model_save_pathes[0]))\n # do test\n mode = 'test'\n features, labels = do_test(args, model, dataloader, mode=mode)\n save_path = os.path.join(args.res_save_path, f'{args.modelName}-{args.datasetName}-{mode}.npz')\n np.savez(save_path, features=features, labels=labels)\n\n mode = 'train'\n features, labels = do_test(args, model, dataloader, mode=mode)\n save_path = os.path.join(args.res_save_path, f'{args.modelName}-{args.datasetName}-{mode}.npz')\n np.savez(save_path, features=features, labels=labels)\n \n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--tf_mode', type=bool, default=False,\n help='is transfer test ?')\n parser.add_argument('--val_mode', type=bool, default=False,\n help='10 folds cross validation ?')\n parser.add_argument('--modelName', type=str, default='FER_DCNN',\n help='support FER_DCNN/Our')\n parser.add_argument('--datasetName', type=str, default='RAF',\n help='support RAF/SFEW2/CK+/OULU_CASIA')\n parser.add_argument('--num_workers', type=int, default=8,\n help='num workers of loading data')\n parser.add_argument('--model_save_path', type=str, default='results/bestModels',\n help='path to save model.')\n parser.add_argument('--res_save_path', type=str, default='results/Features',\n help='path to save results.')\n parser.add_argument('--gpu_ids', type=list, default=[0],\n help='indicates the gpus will be 
used.')\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = parse_args()\n # args.seeds = [1, 12, 123, 1234, 12345]\n run(args)",
"import torch\nimport torch.nn as nn\nfrom torch.autograd.function import Function\n\nclass IsLandLoss(nn.Module):\n \"\"\"\n ref: https://github.com/FER-China/islandloss/blob/master/islandloss.py\n \"\"\"\n def __init__(self, num_classes, feat_dim, lamda=0.5, size_average=True, device=None):\n super(IsLandLoss, self).__init__()\n # self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))\n self.centers = nn.Parameter(torch.randn(num_classes, feat_dim, device=device))\n self.islandlossfunc = IslandlossFunc.apply\n self.feat_dim = feat_dim\n self.lamda = lamda\n self.size_average = size_average\n\n def forward(self, label, feat):\n batch_size = feat.size(0)\n feat = feat.view(batch_size, -1)\n if feat.size(1) != self.feat_dim:\n raise ValueError(\"Center's dim: {0} should be equal to input feature's \\\n dim: {1}\".format(self.feat_dim, feat.size(1)))\n batch_size_tensor = feat.new_empty(1).fill_(batch_size if self.size_average else 1)\n lamda_tensor = feat.new_empty(1).fill_(self.lamda)\n loss = self.islandlossfunc(feat, lamda_tensor, label, self.centers, batch_size_tensor)\n return loss\n\nclass IslandlossFunc(Function):\n\n @staticmethod\n def forward(ctx, feature, lamda, label, centers, batch_size):\n ctx.save_for_backward(feature, lamda, label, centers, batch_size)\n centers_batch = centers.index_select(0, label.long())\n center_loss = (feature - centers_batch).pow(2).sum() / 2.0 / batch_size\n N = centers.size(0)\n island_loss = centers.new_zeros(1)\n for j in range(N):\n for k in range(N):\n if k != j:\n cj = centers.index_select(0, centers.new_empty(1, dtype=torch.long).fill_(j)).squeeze()\n ck = centers.index_select(0, centers.new_empty(1, dtype=torch.long).fill_(k)).squeeze()\n cos_distance = torch.cosine_similarity(cj, ck, dim=0) + centers.new_ones(1)\n # cos_distance = cos_distance.index_select(0)\n island_loss.add_(cos_distance)\n return center_loss + lamda * island_loss\n\n @staticmethod\n def backward(ctx, grad_output):\n feature, lamda, label, centers, batch_size = ctx.saved_tensors\n centers_batch = centers.index_select(0, label.long())\n diff = centers_batch - feature\n counts = centers.new_ones(centers.size(0))\n ones = centers.new_ones(label.size(0))\n grad_centers = centers.new_zeros(centers.size())\n counts = counts.scatter_add_(0, label.long(), ones)\n grad_centers.scatter_add_(0, label.unsqueeze(1).expand(feature.size()).long(), diff)\n grad_centers = grad_centers/counts.view(-1, 1)\n N = centers.size(0)\n l2_centers = torch.norm(centers, 2, 1).view(N, -1)\n grad_centers_il = torch.zeros_like(centers)\n for j in range(N):\n for k in range(N):\n if k != j:\n ck = centers.index_select(0, centers.new_empty(1, dtype=torch.long).fill_(k)).squeeze()\n cj = centers.index_select(0, centers.new_empty(1, dtype=torch.long).fill_(j)).squeeze()\n l2ck = l2_centers.index_select(0, centers.new_empty(1, dtype=torch.long).fill_(k)).squeeze()\n l2cj = l2_centers.index_select(0, centers.new_empty(1, dtype=torch.long).fill_(j)).squeeze()\n val = ck / (l2ck * l2cj) - (ck.mul(cj) / (l2ck * l2cj.pow(3))).mul(cj)\n grad_centers_il[j, :].add_(val)\n return - grad_output * diff / batch_size, None, None, grad_centers / batch_size + grad_centers_il * lamda / (N -1), None"
] |
[
[
"numpy.savez",
"torch.load",
"numpy.concatenate",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
],
[
"torch.randn",
"torch.cosine_similarity",
"torch.norm",
"torch.zeros_like"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
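`IslandlossFunc.forward` above adds, on top of the usual center loss, a pairwise `cos(c_j, c_k) + 1` penalty over all distinct class centers. A minimal sketch of just that pairwise term, using random placeholder centers rather than learned parameters (the class count and feature size are arbitrary):

```python
# Pairwise (cosine + 1) penalty over class centers, as in IslandlossFunc.forward
# (centers are random placeholders, not trained parameters).
import torch

num_classes, feat_dim = 7, 512
centers = torch.randn(num_classes, feat_dim)

island_term = centers.new_zeros(1)
for j in range(num_classes):
    for k in range(num_classes):
        if k != j:
            island_term = island_term + torch.cosine_similarity(centers[j], centers[k], dim=0) + 1.0

print(island_term.item())
```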
shanmukhananda/tfrecords
|
[
"0b29f2c0d84fc76c758f7b73b61f5730a8e82cef",
"0b29f2c0d84fc76c758f7b73b61f5730a8e82cef"
] |
[
"create_tfrecords.py",
"stat_tfrecords.py"
] |
[
"\"\"\"\nCreate the tfrecord files for a dataset.\n\nA lot of this code comes from the tensorflow inception example, so here is their license:\n\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport hashlib\nimport json\nimport os\nfrom Queue import Queue\nimport random\nimport sys\nimport threading\n\nimport numpy as np\nimport tensorflow as tf\n\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting int64 features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef _float_feature(value):\n \"\"\"Wrapper for inserting float features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting bytes features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\ndef _validate_text(text):\n \"\"\"If text is not str or unicode, then try to convert it to str.\"\"\"\n\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode('utf8', 'ignore')\n else:\n return str(text)\n\ndef _convert_to_example(image_example, image_buffer, height, width, colorspace='RGB',\n channels=3, image_format='JPEG'):\n \"\"\"Build an Example proto for an example.\n Args:\n image_example: dict, an image example\n image_buffer: string, JPEG encoding of RGB image\n height: integer, image height in pixels\n width: integer, image width in pixels\n Returns:\n Example proto\n \"\"\"\n\n # Required\n filename = str(image_example['filename'])\n image_id = str(image_example['id'])\n\n # Class label for the whole image\n image_class = image_example.get('class', {})\n class_label = image_class.get('label', 0)\n class_text = _validate_text(image_class.get('text', ''))\n class_conf = image_class.get('conf', 1.)\n\n # Objects\n image_objects = image_example.get('object', {})\n object_count = image_objects.get('count', 0)\n\n # Bounding Boxes\n image_bboxes = image_objects.get('bbox', {})\n xmin = image_bboxes.get('xmin', [])\n xmax = image_bboxes.get('xmax', [])\n ymin = image_bboxes.get('ymin', [])\n ymax = image_bboxes.get('ymax', [])\n bbox_scores = image_bboxes.get('score', [])\n bbox_labels = image_bboxes.get('label', [])\n bbox_text = map(_validate_text, image_bboxes.get('text', []))\n bbox_label_confs = image_bboxes.get('conf', [])\n\n # Parts\n image_parts = image_objects.get('parts', {})\n parts_x = image_parts.get('x', [])\n parts_y = image_parts.get('y', [])\n parts_v = image_parts.get('v', [])\n parts_s = image_parts.get('score', 
[])\n\n # Areas\n object_areas = image_objects.get('area', [])\n\n # Ids\n object_ids = map(str, image_objects.get('id', []))\n\n # Any extra data (e.g. stringified json)\n extra_info = str(image_class.get('extra', ''))\n\n # Additional fields for the format needed by the Object Detection repository\n key = hashlib.sha256(image_buffer).hexdigest()\n is_crowd = image_objects.get('is_crowd', [])\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n #'image/colorspace': _bytes_feature(colorspace),\n #'image/channels': _int64_feature(channels),\n 'image/format': _bytes_feature(image_format),\n 'image/filename': _bytes_feature(filename),\n #'image/id': _bytes_feature(image_id),\n 'image/encoded': _bytes_feature(image_buffer),\n #'image/extra': _bytes_feature(extra_info),\n #'image/class/label': _int64_feature(class_label),\n #'image/class/text': _bytes_feature(class_text),\n #'image/class/conf': _float_feature(class_conf),\n 'image/object/bbox/xmin': _float_feature(xmin),\n 'image/object/bbox/xmax': _float_feature(xmax),\n 'image/object/bbox/ymin': _float_feature(ymin),\n 'image/object/bbox/ymax': _float_feature(ymax),\n #'image/object/bbox/label': _int64_feature(bbox_labels),\n #'image/object/bbox/text': _bytes_feature(bbox_text),\n #'image/object/bbox/conf': _float_feature(bbox_label_confs),\n #'image/object/bbox/score' : _float_feature(bbox_scores),\n #'image/object/parts/x' : _float_feature(parts_x),\n #'image/object/parts/y' : _float_feature(parts_y),\n #'image/object/parts/v' : _int64_feature(parts_v),\n #'image/object/parts/score' : _float_feature(parts_s),\n #'image/object/count' : _int64_feature(object_count),\n #'image/object/area' : _float_feature(object_areas),\n #'image/object/id' : _bytes_feature(object_ids),\n\n # Additional fields for the format needed by the Object Detection repository\n 'image/source_id': _bytes_feature(image_id),\n #'image/key/sha256': _bytes_feature(key),\n 'image/object/class/label': _int64_feature(bbox_labels),\n 'image/object/class/text': _bytes_feature(bbox_text),\n #'image/object/is_crowd': _int64_feature(is_crowd)\n }))\n return example\n \nclass ImageCoder(object):\n \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"\n\n def __init__(self):\n # Create a single Session to run all image coding calls.\n self._sess = tf.Session()\n\n # Initializes function that converts PNG to JPEG data.\n self._png_data = tf.placeholder(dtype=tf.string)\n image = tf.image.decode_png(self._png_data, channels=3)\n self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)\n\n # Initializes function that decodes RGB JPEG data.\n self._decode_jpeg_data = tf.placeholder(dtype=tf.string)\n self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)\n\n def png_to_jpeg(self, image_data):\n # Convert the image data from png to jpg\n return self._sess.run(self._png_to_jpeg,\n feed_dict={self._png_data: image_data})\n\n def decode_jpeg(self, image_data):\n # Decode the image data as a jpeg image\n image = self._sess.run(self._decode_jpeg,\n feed_dict={self._decode_jpeg_data: image_data})\n assert len(image.shape) == 3, \"JPEG needs to have height x width x channels\"\n assert image.shape[2] == 3, \"JPEG needs to have 3 channels (RGB)\"\n return image\n\ndef _is_png(filename):\n \"\"\"Determine if a file contains a PNG format image.\n Args:\n filename: string, path of the image file.\n Returns:\n boolean indicating if the image is 
a PNG.\n \"\"\"\n _, file_extension = os.path.splitext(filename)\n return file_extension.lower() == '.png'\n\ndef _process_image(filename, coder):\n \"\"\"Process a single image file.\n Args:\n filename: string, path to an image file e.g., '/path/to/example.JPG'.\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n Returns:\n image_buffer: string, JPEG encoding of RGB image.\n height: integer, image height in pixels.\n width: integer, image width in pixels.\n \"\"\"\n # Read the image file.\n image_data = tf.gfile.FastGFile(filename, 'r').read()\n\n # Clean the dirty data.\n if _is_png(filename):\n image_data = coder.png_to_jpeg(image_data)\n\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width\n\n\ndef _process_image_files_batch(coder, thread_index, ranges, name, output_directory,\n dataset, num_shards, store_images, error_queue):\n \"\"\"Processes and saves list of images as TFRecord in 1 thread.\n Args:\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n thread_index: integer, unique batch to run index is within [0, len(ranges)).\n ranges: list of pairs of integers specifying ranges of each batches to\n analyze in parallel.\n name: string, unique identifier specifying the data set (e.g. `train` or `test`)\n output_directory: string, file path to store the tfrecord files.\n dataset: list, a list of image example dicts\n num_shards: integer number of shards for this data set.\n store_images: bool, should the image be stored in the tfrecord\n error_queue: Queue, a queue to place image examples that failed.\n \"\"\"\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n error_counter = 0\n for s in xrange(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n output_file = os.path.join(output_directory, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n\n image_example = dataset[i]\n\n filename = str(image_example['filename'])\n\n try:\n if store_images:\n if 'encoded' in image_example:\n image_buffer = image_example['encoded']\n height = image_example['height']\n width = image_example['width']\n colorspace = image_example['colorspace']\n image_format = image_example['format']\n num_channels = image_example['channels']\n example = _convert_to_example(image_example, image_buffer, height,\n width, colorspace, num_channels,\n image_format)\n\n else:\n image_buffer, height, width = _process_image(filename, coder)\n example = _convert_to_example(image_example, image_buffer, height,\n width)\n else:\n image_buffer=''\n height = int(image_example['height'])\n width = int(image_example['width'])\n example = _convert_to_example(image_example, image_buffer, height,\n width)\n\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n except Exception as e:\n raise\n error_counter += 1\n error_msg = repr(e)\n image_example['error_msg'] = error_msg\n error_queue.put(image_example)\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch, with %d errors.' %\n (datetime.now(), thread_index, counter, num_files_in_thread, error_counter))\n sys.stdout.flush()\n\n print('%s [thread %d]: Wrote %d images to %s, with %d errors.' %\n (datetime.now(), thread_index, shard_counter, output_file, error_counter))\n sys.stdout.flush()\n shard_counter = 0\n\n print('%s [thread %d]: Wrote %d images to %d shards, with %d errors.' 
%\n (datetime.now(), thread_index, counter, num_files_in_thread, error_counter))\n sys.stdout.flush()\n\n\ndef create(dataset, dataset_name, output_directory, num_shards, num_threads, shuffle=True, store_images=True):\n \"\"\"Create the tfrecord files to be used to train or test a model.\n\n Args:\n dataset : [{\n \"filename\" : <REQUIRED: path to the image file>,\n \"id\" : <REQUIRED: id of the image>,\n \"class\" : {\n \"label\" : <[0, num_classes)>,\n \"text\" : <text description of class>\n },\n \"object\" : {\n \"bbox\" : {\n \"xmin\" : [],\n \"xmax\" : [],\n \"ymin\" : [],\n \"ymax\" : [],\n \"label\" : []\n }\n }\n }]\n\n dataset_name: a name for the dataset\n\n output_directory: path to a directory to write the tfrecord files\n\n num_shards: the number of tfrecord files to create\n\n num_threads: the number of threads to use\n\n shuffle : bool, should the image examples be shuffled or not prior to creating the tfrecords.\n\n Returns:\n list : a list of image examples that failed to process.\n \"\"\"\n\n # Images in the tfrecords set must be shuffled properly\n if shuffle:\n random.shuffle(dataset)\n\n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, len(dataset), num_threads + 1).astype(np.int)\n ranges = []\n threads = []\n for i in xrange(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i+1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a generic TensorFlow-based utility for converting all image codings.\n coder = ImageCoder()\n\n # A Queue to hold the image examples that fail to process.\n error_queue = Queue()\n\n threads = []\n for thread_index in xrange(len(ranges)):\n args = (coder, thread_index, ranges, dataset_name, output_directory, dataset,\n num_shards, store_images, error_queue)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' %\n (datetime.now(), len(dataset)))\n\n # Collect the errors\n errors = []\n while not error_queue.empty():\n errors.append(error_queue.get())\n print ('%d examples failed.' % (len(errors),))\n\n return errors\n\n\ndef parse_args():\n\n parser = argparse.ArgumentParser(description='Basic statistics on tfrecord files')\n\n parser.add_argument('--dataset_path', dest='dataset_path',\n help='Path to the dataset json file.', type=str,\n required=True)\n\n parser.add_argument('--prefix', dest='dataset_name',\n help='Prefix for the tfrecords (e.g. 
`train`, `test`, `val`).', type=str,\n required=True)\n\n parser.add_argument('--output_dir', dest='output_dir',\n help='Directory for the tfrecords.', type=str,\n required=True)\n\n parser.add_argument('--shards', dest='num_shards',\n help='Number of shards to make.', type=int,\n required=True)\n\n parser.add_argument('--threads', dest='num_threads',\n help='Number of threads to make.', type=int,\n required=True)\n\n parser.add_argument('--shuffle', dest='shuffle',\n help='Shuffle the records before saving them.',\n required=False, action='store_true', default=False)\n\n parser.add_argument('--store_images', dest='store_images',\n help='Store the images in the tfrecords.',\n required=False, action='store_true', default=False)\n\n parsed_args = parser.parse_args()\n\n return parsed_args\n\ndef main():\n\n args = parse_args()\n\n with open(args.dataset_path) as f:\n dataset = json.load(f)\n\n errors = create(\n dataset=dataset,\n dataset_name=args.dataset_name,\n output_directory=args.output_dir,\n num_shards=args.num_shards,\n num_threads=args.num_threads,\n shuffle=args.shuffle,\n store_images=args.store_images\n )\n\n return errors\n\nif __name__ == '__main__':\n main()\n",
"\"\"\"\nThese utility functions are meant for computing basic statistics in a set of tfrecord\nfiles. They can be used to sanity check the training and testing files.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\ndef class_stats(tfrecords):\n \"\"\"\n Sum the number of images and compute the number of images available for each class.\n \"\"\"\n\n filename_queue = tf.train.string_input_producer(\n tfrecords,\n num_epochs=1\n )\n\n # Construct a Reader to read examples from the .tfrecords file\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/class/label' : tf.FixedLenFeature([], tf.int64)\n }\n )\n\n label = features['image/class/label']\n\n image_count = 0\n class_image_count = {}\n\n coord = tf.train.Coordinator()\n with tf.Session() as sess:\n\n tf.global_variables_initializer().run()\n tf.local_variables_initializer().run()\n tf.train.start_queue_runners(sess=sess, coord=coord)\n\n try:\n\n while not coord.should_stop():\n\n outputs = sess.run([label])\n\n class_label = outputs[0]\n if class_label not in class_image_count:\n class_image_count[class_label] = 0\n class_image_count[class_label] += 1\n image_count += 1\n\n\n except tf.errors.OutOfRangeError as e:\n pass\n\n # Basic info\n print(\"Found %d images\" % (image_count,))\n print(\"Found %d classes\" % (len(class_image_count),))\n\n class_labels = class_image_count.keys()\n class_labels.sort()\n\n # Print out the per class image counts\n print(\"Class Index | Image Count\")\n for class_label in class_labels:\n print(\"{0:11d} | {1:6d} \".format(class_label, class_image_count[class_label]))\n\n if len(class_labels) == 0:\n return\n\n # Can we detect if there any missing classes?\n max_class_index = max(class_labels)\n\n # We expect class id for each value in the range [0, max_class_id]\n # So lets see if we are missing any of these values\n missing_values = list(set(range(max_class_index+1)).difference(class_labels))\n if len(missing_values) > 0:\n print(\"WARNING: expected %d classes but only found %d classes.\" %\n (max_class_index, len(class_labels)))\n missing_values.sort()\n for index in missing_values:\n print(\"Missing class %d\" % (index,))\n\ndef verify_bboxes(tfrecords):\n\n filename_queue = tf.train.string_input_producer(\n tfrecords,\n num_epochs=1\n )\n\n # Construct a Reader to read examples from the .tfrecords file\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/id' : tf.FixedLenFeature([], tf.string),\n 'image/height' : tf.FixedLenFeature([], tf.int64),\n 'image/width' : tf.FixedLenFeature([], tf.int64),\n 'image/object/bbox/xmin' : tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin' : tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax' : tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax' : tf.VarLenFeature(dtype=tf.float32),\n 'image/object/count' : tf.FixedLenFeature([], tf.int64)\n }\n )\n\n image_height = tf.cast(features['image/height'], tf.float32)\n image_width = tf.cast(features['image/width'], tf.float32)\n\n image_id = features['image/id']\n\n xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)\n ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)\n xmax = 
tf.expand_dims(features['image/object/bbox/xmax'].values, 0)\n ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)\n\n num_bboxes = tf.cast(features['image/object/count'], tf.int32)\n\n bboxes = tf.concat(axis=0, values=[xmin, ymin, xmax, ymax])\n bboxes = tf.transpose(bboxes, [1, 0])\n\n fetches = [image_id, image_height, image_width, bboxes, num_bboxes]\n\n image_count = 0\n bbox_widths = []\n bbox_heights = []\n images_with_small_bboxes = set()\n images_with_reversed_coords = set()\n images_with_bbox_count_mismatch = set()\n\n coord = tf.train.Coordinator()\n with tf.Session() as sess:\n\n tf.global_variables_initializer().run()\n tf.local_variables_initializer().run()\n tf.train.start_queue_runners(sess=sess, coord=coord)\n\n try:\n\n while not coord.should_stop():\n\n outputs = sess.run(fetches)\n\n img_id = outputs[0]\n img_h = outputs[1]\n img_w = outputs[2]\n img_bboxes = outputs[3]\n img_num_bboxes = outputs[4]\n\n if img_bboxes.shape[0] != img_num_bboxes:\n images_with_bbox_count_mismatch.add(img_id)\n\n for img_bbox in img_bboxes:\n x1, y1, x2, y2 = img_bbox\n\n # Reversed coordinates?\n if x1 > x2:\n images_with_reversed_coords.add(img_id)\n t = x1\n x1 = x2\n x2 = t\n if y1 > y2:\n images_with_reversed_coords.add(img_id)\n t = y1\n y1 = y2\n y2 = t\n\n w = (x2 - x1) * img_w\n h = (y2 - y1) * img_h\n\n # Too small of an area?\n if w * h < 10:\n images_with_small_bboxes.add(img_id)\n\n bbox_widths.append(w)\n bbox_heights.append(h)\n\n image_count += 1\n\n\n except tf.errors.OutOfRangeError as e:\n pass\n\n # Basic info\n print(\"Found %d images\" % (image_count,))\n print()\n print(\"Found %d images with small bboxes\" % (len(images_with_small_bboxes),))\n #print(\"Images with areas < 10:\")\n #for img_id in images_with_small_bboxes:\n # print(img_id)\n print()\n print(\"Found %d images with reversed coordinates\" %\n (len(images_with_reversed_coords),))\n #print(\"Images with reversed coordinates:\")\n #for img_id in images_with_reversed_coords:\n # print(img_id)\n print()\n print(\"Found %d images with bbox count mismatches\" %\n (len(images_with_bbox_count_mismatch),))\n #for img_id in images_with_bbox_count_mismatch:\n # print(img_id)\n print()\n\n bbox_widths = np.round(np.array(bbox_widths)).astype(int)\n bbox_heights = np.round(np.array(bbox_heights)).astype(int)\n\n print(\"Mean width: %0.4f\" % (np.mean(bbox_widths),))\n print(\"Median width: %d\" % (np.median(bbox_widths),))\n print(\"Max width: %d\" % (np.max(bbox_widths),))\n print(\"Min width: %d\" % (np.min(bbox_widths),))\n print()\n print(\"Mean height: %0.4f\" % (np.mean(bbox_heights),))\n print(\"Median height: %d\" % (np.median(bbox_heights),))\n print(\"Max height: %d\" % (np.max(bbox_heights),))\n print(\"Min height: %d\" % (np.min(bbox_heights),))\n\n\ndef parse_args():\n\n parser = argparse.ArgumentParser(description='Basic statistics on tfrecord files')\n\n parser.add_argument('--stat', dest='stat_type',\n choices=['class_stats', 'verify_bboxes'],\n required=True)\n\n parser.add_argument('--tfrecords', dest='tfrecords',\n help='paths to tfrecords files', type=str,\n nargs='+', required=True)\n\n\n parsed_args = parser.parse_args()\n\n return parsed_args\n\ndef main():\n parsed_args = parse_args()\n\n if parsed_args.stat_type == 'class_stats':\n class_stats(parsed_args.tfrecords)\n elif parsed_args.stat_type == 'verify_bboxes':\n verify_bboxes(parsed_args.tfrecords)\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"tensorflow.train.Int64List",
"numpy.linspace",
"numpy.arange",
"tensorflow.image.decode_png",
"tensorflow.train.Coordinator",
"tensorflow.placeholder",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.Session",
"tensorflow.train.FloatList",
"tensorflow.image.encode_jpeg",
"tensorflow.train.BytesList",
"tensorflow.gfile.FastGFile",
"tensorflow.image.decode_jpeg"
],
[
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.FixedLenFeature",
"tensorflow.local_variables_initializer",
"numpy.min",
"tensorflow.train.start_queue_runners",
"numpy.median",
"tensorflow.cast",
"tensorflow.train.Coordinator",
"tensorflow.expand_dims",
"tensorflow.global_variables_initializer",
"numpy.max",
"tensorflow.train.string_input_producer",
"numpy.mean",
"tensorflow.Session",
"tensorflow.TFRecordReader",
"numpy.array",
"tensorflow.VarLenFeature"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
enhatem/quadrotor_mpc_acados
|
[
"9ca50ecc0a852ba5f9464df0ccd5d40e3ebfc295",
"9ca50ecc0a852ba5f9464df0ccd5d40e3ebfc295"
] |
[
"hovering_mpc/acados_settings.py",
"planar_mpc/tut_live_plot.py"
] |
[
"from acados_template import AcadosModel, AcadosOcp, AcadosOcpSolver\nfrom drone_model import drone_model\nimport scipy.linalg\nimport numpy as np\n\ndef acados_settings(Tf, N):\n\n # create OCP object to formulate the optimization\n ocp = AcadosOcp()\n\n # export model\n model = drone_model()\n\n # define acados ODE \n model_ac = AcadosModel()\n model_ac.f_impl_expr = model.f_impl_expr\n model_ac.f_expl_expr = model.f_expl_expr\n model_ac.x = model.x\n model_ac.xdot = model.xdot\n model_ac.u = model.u\n model_ac.z = model.z\n model_ac.p = model.p\n model_ac.name = model.name\n ocp.model = model_ac\n\n # dimensions \n nx = model.x.size()[0]\n nu = model.u.size()[0]\n ny = nx + nu\n ny_e = nx \n\n # discretization \n ocp.dims.N = N\n\n # set cost \n Q = np.diag([5, 1e-1])\n R = np.eye(nu)\n\n Qe = Q\n\n ocp.cost.cost_type = \"LINEAR_LS\"\n ocp.cost.cost_type_e = \"LINEAR_LS\"\n\n # unscale = N / Tf\n # ocp.cost.W = unscale * scipy.linalg.block_diag(Q,R)\n # ocp.cost.W_e = Qe / unscale\n \n ocp.cost.W = scipy.linalg.block_diag(Q,R)\n ocp.cost.W_e = Qe\n\n Vx = np.zeros((ny,nx))\n Vx[:nx, :nx] = np.eye(nx)\n ocp.cost.Vx = Vx\n\n Vu = np.zeros((ny, nu))\n Vu[2,0] = 1.0\n ocp.cost.Vu = Vu\n\n Vx_e = np.zeros((ny_e, nx))\n Vx_e[:nx, :nx] = np.eye(nx)\n ocp.cost.Vx_e = Vx_e\n \n # Initial reference trajectory (can be overwritten during the simulation if required)\n x_ref = np.array([1, 0])\n ocp.cost.yref = np.concatenate((x_ref, np.array([9.81])))\n ocp.cost.yref_e = x_ref\n\n # set constraints\n ocp.constraints.lbu = np.array([model.throttle_min])\n ocp.constraints.ubu = np.array([model.throttle_max])\n ocp.constraints.idxbu = np.array([0])\n\n # set initial condition\n ocp.constraints.x0 = model.x0\n\n # set QP solver and integration\n ocp.solver_options.tf = Tf\n ocp.solver_options.qp_solver = \"PARTIAL_CONDENSING_HPIPM\"\n ocp.solver_options.nlp_solver_type = \"SQP_RTI\"\n ocp.solver_options.hessian_approx = \"GAUSS_NEWTON\"\n ocp.solver_options.integrator_type = \"ERK\"\n ocp.solver_options.sim_method_num_stages = 4\n ocp.solver_options.sim_method_num_steps = 3\n ocp.solver_options.nlp_solver_max_iter = 200\n ocp.solver_options.tol = 1e-4\n\n # create ocp solver \n acados_solver = AcadosOcpSolver(ocp, json_file=(model_ac.name + \"_\" + \"acados_ocp.json\"))\n\n\n\n return model, acados_solver",
"import random\nimport numpy as np\nfrom itertools import count\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\nplt.style.use('fivethirtyeight')\n'''\nx_vals = [0, 1, 2, 3, 4, 5]\ny_vals = [0, 1, 3, 2, 3, 5]\n\n# plt.plot(x_vals, y_vals)\n\nindex = count()\n\ndef animate(i):\n x_vals.append(next(index))\n y_vals.append(random.randint(0,5))\n plt.cla() # to clear the axis\n plt.plot(x_vals, y_vals)\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\n\nplt.tight_layout()\nplt.show()\n'''\n\nt = np.linspace(0,2*np.pi) \na = 30 ; b = 15 \nx = (a*np.cos(t) * np.cos(np.pi /4)) + 5\ny = (b*np.sin(t) * np.sin(np.pi /4)) + 4\nplt.plot(x,y)\nplt.plot(5,4,'r.')\nplt.show()"
] |
[
[
"numpy.diag",
"numpy.eye",
"numpy.array",
"numpy.zeros"
],
[
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
human-analysis/3dfacefill
|
[
"1f85d72731690730eef03871d3afecf9d4d307b4"
] |
[
"utils.py"
] |
[
"# utils.py\n\nimport os\nimport csv\nimport math\nimport numpy as np\nimport argparse\nfrom inspect import getframeinfo, stack\nimport json\nimport sys\nimport psutil\nimport signal\nimport torch\nimport torchvision\nfrom PIL import Image\nfrom scipy.io import loadmat\nfrom plyfile import PlyData, PlyElement\nimport pdb\n\n\n\ndef setup_graceful_exit():\n # handle Ctrl-C signal\n signal.signal(signal.SIGINT, ctrl_c_handler)\n\n\ndef cleanup():\n # signal.signal(signal.SIGINT, signal.SIG_DFL)\n current_process = psutil.Process()\n children = current_process.children(recursive=True)\n for child in children:\n try:\n os.kill(int(child.pid), signal.SIGKILL)\n except OSError as ex:\n raise Exception(\"wasn't able to kill the child process (pid:{}).\".format(child.pid))\n # # os.waitpid(child.pid, os.P_ALL)\n print('\\x1b[?25h', end='', flush=True) # show cursor\n sys.exit(0)\n\n\ndef ctrl_c_handler(*kargs):\n # try to gracefully terminate the program\n # signal.signal(signal.SIGINT, signal.SIG_DFL)\n cleanup()\n\n\ndef isnan(x):\n return x != x\n\n\ndef _debuginfo(self, *message):\n \"\"\"Prints the current filename and line number in addition to debugging\n messages.\"\"\"\n caller = getframeinfo(stack()[1][0])\n print('\\033[92m', caller.filename, '\\033[0m', caller.lineno,\n '\\033[95m', self.__class__.__name__, '\\033[94m', message, '\\033[0m')\n\n\ndef readcsvfile(filename, delimiter=','):\n with open(filename, 'r') as f:\n content = []\n reader = csv.reader(f, delimiter=delimiter)\n for row in reader:\n content.append(row)\n f.close()\n return content\n\n\ndef readtextfile(filename):\n with open(filename) as f:\n content = f.readlines()\n f.close()\n return content\n\n\ndef writetextfile(data, filename, path=None):\n \"\"\"If path is provided, it will make sure the path exists before writing\n the file.\"\"\"\n if path:\n if not os.path.isdir(path):\n os.makedirs(path)\n filename = os.path.join(path, filename)\n with open(filename, 'w') as f:\n f.writelines(data)\n f.close()\n\n\ndef delete_file(filename):\n if os.path.isfile(filename) is True:\n os.remove(filename)\n\n\ndef eformat(f, prec, exp_digits):\n s = \"%.*e\" % (prec, f)\n mantissa, exp = s.split('e')\n # add 1 to digits as 1 is taken by sign +/-\n return \"%se%+0*d\" % (mantissa, exp_digits + 1, int(exp))\n\n\ndef saveargs(args: object) -> object:\n path = args.logs_dir\n varargs = '[Arguments]\\n\\n'\n # TODO: organize the parameters into groups\n for par in vars(args):\n if getattr(args, par) is None or par in ['save_dir', 'logs_dir',\n 'save_results', 'result_path',\n 'config_file']:\n continue\n elif par in ('model_options', 'loss_options', 'evaluation_options',\n 'dataset_options'):\n varargs += '%s = %s\\n' % (par, json.dumps(getattr(args, par)))\n else:\n varargs += '%s = %s\\n' % (par, getattr(args, par))\n writetextfile(varargs, 'args.txt', path)\n\n\ndef file_exists(filename):\n return os.path.isfile(filename)\n\n\ndef str2bool(v):\n \"\"\"A Parser for boolean values with argparse\"\"\"\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef gaussian(size, center, sigma=1):\n if np.isnan(center[0]) or np.isnan(center[1]):\n return np.zeros(size)\n\n x, y = np.meshgrid(np.arange(size[0]), np.arange(size[1]))\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n den = 2 * pow(sigma, 2)\n num = np.power(x - x0, 2) + np.power(y - y0, 
2)\n return np.exp(-(num / den)) / math.sqrt(2 * np.pi * sigma * sigma)\n\n\ndef plotlify(fig, env='main', win='mywin'):\n fig = {key: fig[key] for key in fig.keys()}\n fig['win'] = win\n fig['eid'] = env\n\n return fig\n\ndef load_image(filename):\n to_tensor = torchvision.transforms.ToTensor()\n im = Image.load(filename)\n im = to_tensor(im)\n return im\n\ndef load_occ_mask(filename):\n mat = loadmat(filaname)\n mask = mat['mask']\n mask = to_tensor(mask)\n return mask\n\ndef write_to_ply(vertices, faces, filename, color=None):\n if type(vertices) is torch.Tensor:\n vertices = vertices.detach().cpu().numpy()\n if type(faces) is torch.Tensor:\n faces = faces.cpu().numpy()\n faces = faces.reshape(-1).astype(np.int32)\n faces.dtype = [('vertex_indices', 'i4', (3,))]\n if color is not None:\n if type(color) is torch.Tensor:\n color = color.detach().cpu().numpy()\n color = (255*color).astype('int32')\n verts = np.concatenate((vertices, color), 1).astype('float32').reshape(-1)\n verts.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'f4'), ('green', 'f4'), ('blue', 'f4')]\n else:\n verts = vertices.astype('float32').reshape(-1)\n verts.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]\n\n vertex_el = PlyElement.describe(verts, 'vertex')\n face_el = PlyElement.describe(faces, 'face')\n PlyData([vertex_el, face_el], text=True).write(filename)\n\nclass ForkedPdb(pdb.Pdb):\n \"\"\"A Pdb subclass that may be used\n from a forked multiprocessing child\n\n \"\"\"\n def interaction(self, *args, **kwargs):\n _stdin = sys.stdin\n try:\n sys.stdin = open('/dev/stdin')\n pdb.Pdb.interaction(self, *args, **kwargs)\n finally:\n sys.stdin = _stdin\n"
] |
[
[
"numpy.power",
"numpy.isnan",
"numpy.arange",
"scipy.io.loadmat",
"numpy.concatenate",
"numpy.exp",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
Benjamin-Etheredge/pytorch-lightning
|
[
"fe572c5911abfa2cc0b806b1c2cfe977d483c7c1",
"fe572c5911abfa2cc0b806b1c2cfe977d483c7c1"
] |
[
"tests/trainer/test_supporters.py",
"pytorch_lightning/plugins/precision/precision_plugin.py"
] |
[
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom collections import Sequence\nfrom unittest import mock\n\nimport pytest\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom torch.utils.data.dataset import Dataset, IterableDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import Sampler, SequentialSampler\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.trainer.supporters import (\n _nested_calc_num_data,\n CombinedDataset,\n CombinedLoader,\n CombinedLoaderIterator,\n CycleIterator,\n TensorRunningAccum,\n)\nfrom pytorch_lightning.utilities.apply_func import apply_to_collection\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\ndef test_tensor_running_accum_reset():\n \"\"\"Test that reset would set all attributes to the initialization state.\"\"\"\n\n window_length = 10\n\n accum = TensorRunningAccum(window_length=window_length)\n assert accum.last() is None\n assert accum.mean() is None\n\n accum.append(torch.tensor(1.5))\n assert accum.last() == torch.tensor(1.5)\n assert accum.mean() == torch.tensor(1.5)\n\n accum.reset()\n assert accum.window_length == window_length\n assert accum.memory is None\n assert accum.current_idx == 0\n assert accum.last_idx is None\n assert not accum.rotated\n\n\ndef test_cycle_iterator():\n \"\"\"Test the cycling function of `CycleIterator`\"\"\"\n\n iterator = CycleIterator(range(100), 1000)\n assert len(iterator) == 1000\n for idx, item in enumerate(iterator):\n assert item < 100\n\n assert idx == len(iterator) - 1\n\n\ndef test_none_length_cycle_iterator():\n \"\"\"Test the infinite cycling function of `CycleIterator`\"\"\"\n iterator = CycleIterator(range(100))\n assert iterator.__len__() == float(\"inf\")\n\n # test infinite loop\n for idx, item in enumerate(iterator):\n if idx == 1000:\n break\n assert item == 0\n\n\[email protected](\n [\"dataset_1\", \"dataset_2\"],\n [\n ([list(range(10)), list(range(20))]),\n ([range(10), range(20)]),\n ([torch.randn(10, 3, 2), torch.randn(20, 5, 6)]),\n ([TensorDataset(torch.randn(10, 3, 2)), TensorDataset(torch.randn(20, 5, 6))]),\n ],\n)\ndef test_combined_dataset(dataset_1, dataset_2):\n \"\"\"Verify the length of the CombinedDataset.\"\"\"\n datasets = [dataset_1, dataset_2]\n combined_dataset = CombinedDataset(datasets)\n\n assert combined_dataset.max_len == 20\n assert combined_dataset.min_len == len(combined_dataset) == 10\n\n\ndef test_combined_dataset_length_mode_error():\n dset = CombinedDataset([range(10)])\n with pytest.raises(MisconfigurationException, match=\"Invalid Mode\"):\n dset._calc_num_data([range(10)], \"test\")\n\n\ndef test_combined_loader_iterator_dict_min_size():\n \"\"\"Test `CombinedLoaderIterator` given mapping loaders.\"\"\"\n loaders = {\n \"a\": torch.utils.data.DataLoader(range(10), batch_size=4),\n \"b\": torch.utils.data.DataLoader(range(20), batch_size=5),\n }\n\n combined_iter = 
CombinedLoaderIterator(loaders)\n\n for idx, item in enumerate(combined_iter):\n assert isinstance(item, dict)\n assert len(item) == 2\n assert \"a\" in item and \"b\" in item\n\n assert idx == min(len(loaders[\"a\"]), len(loaders[\"b\"])) - 1\n\n\ndef test_combined_loader_init_mode_error():\n \"\"\"Test the ValueError when constructing `CombinedLoader`\"\"\"\n with pytest.raises(MisconfigurationException, match=\"Invalid Mode\"):\n CombinedLoader([range(10)], \"testtt\")\n\n\ndef test_combined_loader_loader_type_error():\n \"\"\"Test the ValueError when wrapping the loaders.\"\"\"\n with pytest.raises(TypeError, match=\"Expected data to be int, Sequence or Mapping, but got NoneType\"):\n CombinedLoader(None, \"max_size_cycle\")\n\n\ndef test_combined_loader_calc_length_mode_error():\n \"\"\"Test the ValueError when calculating the number of batches.\"\"\"\n with pytest.raises(TypeError, match=\"Expected data to be int, Sequence or Mapping, but got NoneType\"):\n CombinedLoader._calc_num_batches(None)\n\n\ndef test_combined_loader_dict_min_size():\n \"\"\"Test `CombinedLoader` of mode 'min_size' given mapping loaders.\"\"\"\n loaders = {\n \"a\": torch.utils.data.DataLoader(range(10), batch_size=4),\n \"b\": torch.utils.data.DataLoader(range(20), batch_size=5),\n }\n\n combined_loader = CombinedLoader(loaders, \"min_size\")\n\n assert len(combined_loader) == min(len(v) for v in loaders.values())\n\n for idx, item in enumerate(combined_loader):\n assert isinstance(item, dict)\n assert len(item) == 2\n assert \"a\" in item and \"b\" in item\n\n assert idx == len(combined_loader) - 1\n\n\ndef test_combined_loader_dict_max_size_cycle():\n \"\"\"Test `CombinedLoader` of mode 'max_size_cycle' given mapping loaders.\"\"\"\n loaders = {\n \"a\": torch.utils.data.DataLoader(range(10), batch_size=4),\n \"b\": torch.utils.data.DataLoader(range(20), batch_size=5),\n }\n\n combined_loader = CombinedLoader(loaders, \"max_size_cycle\")\n\n assert len(combined_loader) == max(len(v) for v in loaders.values())\n\n for idx, item in enumerate(combined_loader):\n assert isinstance(item, dict)\n assert len(item) == 2\n assert \"a\" in item and \"b\" in item\n\n assert idx == len(combined_loader) - 1\n\n\ndef test_combined_loader_sequence_min_size():\n \"\"\"Test `CombinedLoader` of mode 'min_size' given sequence loaders.\"\"\"\n loaders = [\n torch.utils.data.DataLoader(range(10), batch_size=4),\n torch.utils.data.DataLoader(range(20), batch_size=5),\n ]\n\n combined_loader = CombinedLoader(loaders, \"min_size\")\n\n assert len(combined_loader) == min(len(v) for v in loaders)\n\n for idx, item in enumerate(combined_loader):\n assert isinstance(item, Sequence)\n assert len(item) == 2\n\n assert idx == len(combined_loader) - 1\n\n\nclass TestIterableDataset(IterableDataset):\n def __init__(self, size: int = 10):\n self.size = size\n\n def __iter__(self):\n self.sampler = SequentialSampler(range(self.size))\n self.sampler_iter = iter(self.sampler)\n return self\n\n def __next__(self):\n return next(self.sampler_iter)\n\n\[email protected](\"mode\", [\"min_size\", \"max_size_cycle\"])\[email protected](\"use_multiple_dataloaders\", [False, True])\ndef test_combined_loader_sequence_iterable_dataset(mode, use_multiple_dataloaders):\n \"\"\"Test `CombinedLoader` of mode 'min_size' given sequence loaders.\"\"\"\n if use_multiple_dataloaders:\n loaders = [\n torch.utils.data.DataLoader(TestIterableDataset(10), batch_size=2),\n torch.utils.data.DataLoader(TestIterableDataset(20), batch_size=2),\n ]\n else:\n loaders = 
[\n torch.utils.data.DataLoader(TestIterableDataset(10), batch_size=2),\n ]\n\n combined_loader = CombinedLoader(loaders, mode)\n\n has_break = False\n\n for idx, item in enumerate(combined_loader):\n assert isinstance(item, Sequence)\n assert len(item) == 2 if use_multiple_dataloaders else 1\n if not use_multiple_dataloaders and idx == 4:\n has_break = True\n break\n\n if mode == \"max_size_cycle\":\n assert combined_loader.loaders[0].state.done == (not has_break)\n expected = (10 if mode == \"max_size_cycle\" else 5) if use_multiple_dataloaders else 5\n assert (expected - 1) == idx, (mode, use_multiple_dataloaders)\n\n\[email protected](\"lengths\", [[4, 6], [5, 5], [6, 4]])\ndef test_combined_loader_sequence_with_map_and_iterable(lengths):\n class MyIterableDataset(IterableDataset):\n def __init__(self, size: int = 10):\n self.size = size\n\n def __iter__(self):\n self.sampler = SequentialSampler(range(self.size))\n self.iter_sampler = iter(self.sampler)\n return self\n\n def __next__(self):\n return next(self.iter_sampler)\n\n class MyMapDataset(Dataset):\n def __init__(self, size: int = 10):\n self.size = size\n\n def __getitem__(self, index):\n return index\n\n def __len__(self):\n return self.size\n\n x, y = lengths\n loaders = [DataLoader(MyIterableDataset(x)), DataLoader(MyMapDataset(y))]\n dataloader = CombinedLoader(loaders, mode=\"max_size_cycle\")\n counter = 0\n for _ in dataloader:\n counter += 1\n assert counter == max(x, y)\n\n\ndef test_combined_loader_sequence_max_size_cycle():\n \"\"\"Test `CombinedLoader` of mode 'max_size_cycle' given sequence loaders.\"\"\"\n loaders = [\n torch.utils.data.DataLoader(range(10), batch_size=4),\n torch.utils.data.DataLoader(range(20), batch_size=5),\n ]\n\n combined_loader = CombinedLoader(loaders, \"max_size_cycle\")\n\n assert len(combined_loader) == max(len(v) for v in loaders)\n\n for idx, item in enumerate(combined_loader):\n assert isinstance(item, Sequence)\n assert len(item) == 2\n\n assert idx == len(combined_loader) - 1\n\n\[email protected](\n [\"input_data\", \"compute_func\", \"expected_length\"],\n [\n ([*range(10), list(range(1, 20))], min, 0),\n ([*range(10), list(range(1, 20))], max, 19),\n ([*range(10), {str(i): i for i in range(1, 20)}], min, 0),\n ([*range(10), {str(i): i for i in range(1, 20)}], max, 19),\n ({**{str(i): i for i in range(10)}, \"nested\": {str(i): i for i in range(1, 20)}}, min, 0),\n ({**{str(i): i for i in range(10)}, \"nested\": {str(i): i for i in range(1, 20)}}, max, 19),\n ({**{str(i): i for i in range(10)}, \"nested\": list(range(20))}, min, 0),\n ({**{str(i): i for i in range(10)}, \"nested\": list(range(20))}, max, 19),\n ],\n)\ndef test_nested_calc_num_data(input_data, compute_func, expected_length):\n calculated_length = _nested_calc_num_data(input_data, compute_func)\n\n assert calculated_length == expected_length\n\n\[email protected](os.environ, {\"CUDA_VISIBLE_DEVICES\": \"0,1\", \"PL_TRAINER_GPUS\": \"2\"})\[email protected](\"torch.cuda.device_count\", return_value=2)\[email protected](\"torch.cuda.is_available\", return_value=True)\ndef test_combined_data_loader_validation_test(cuda_available_mock, device_count_mock, tmpdir):\n \"\"\"This test makes sure distributed sampler has been properly injected in dataloaders when using\n CombinedLoader.\"\"\"\n\n class CustomDataset(Dataset):\n def __init__(self, data):\n self.data = data\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n return self.data[index]\n\n dataloader = CombinedLoader(\n {\n \"a\": 
DataLoader(CustomDataset(range(10))),\n \"b\": {\"c\": DataLoader(CustomDataset(range(10))), \"d\": DataLoader(CustomDataset(range(10)))},\n \"e\": [DataLoader(CustomDataset(range(10))), DataLoader(CustomDataset(range(10)))],\n }\n )\n\n trainer = Trainer(replace_sampler_ddp=True, accelerator=\"ddp\", gpus=2)\n dataloader = trainer.auto_add_sampler(dataloader, shuffle=True)\n _count = 0\n\n def _assert_distributed_sampler(v):\n nonlocal _count\n _count += 1\n assert isinstance(v, DistributedSampler)\n\n apply_to_collection(dataloader.sampler, Sampler, _assert_distributed_sampler)\n assert _count == 5\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nfrom typing import Any, Callable, Generator, List, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module\nfrom torch.optim import Optimizer\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.core.hooks import CheckpointHooks\nfrom pytorch_lightning.utilities import GradClipAlgorithmType\nfrom pytorch_lightning.utilities.types import _PARAMETERS\n\n\nclass PrecisionPlugin(CheckpointHooks):\n \"\"\"Base class for all plugins handling the precision-specific parts of the training.\n\n The class attribute precision must be overwritten in child classes. The default value reflects fp32 training.\n \"\"\"\n\n precision: Union[str, int] = 32\n\n def master_params(self, optimizer: Optimizer) -> _PARAMETERS:\n \"\"\"The master params of the model.\n\n Returns the plain model params here. Maybe different in other precision plugins.\n \"\"\"\n for group in optimizer.param_groups:\n yield from group[\"params\"]\n\n def connect(\n self, model: Module, optimizers: List[Optimizer], lr_schedulers: List[Any]\n ) -> Tuple[Module, List[Optimizer], List[Any]]:\n \"\"\"Connects this plugin to the accelerator and the training process.\"\"\"\n return model, optimizers, lr_schedulers\n\n def pre_backward(self, model: \"pl.LightningModule\", closure_loss: Tensor) -> Tensor:\n \"\"\"Run before precision plugin executes backward.\n\n Args:\n model: the model to be optimized\n closure_loss: the loss value obtained from the closure\n \"\"\"\n model.trainer.call_hook(\"on_before_backward\", closure_loss)\n return closure_loss\n\n def backward(\n self,\n model: \"pl.LightningModule\",\n closure_loss: Tensor,\n optimizer: Optional[Optimizer],\n *args: Any,\n **kwargs: Any,\n ) -> None:\n \"\"\"Performs the actual backpropagation.\n\n Args:\n model: the model to be optimized\n closure_loss: the loss value obtained from the closure\n optimizer: current optimizer being used. 
``None`` if using manual optimization\n \"\"\"\n # do backward pass\n if model is not None and isinstance(model, pl.LightningModule):\n model.backward(closure_loss, optimizer, *args, **kwargs)\n else:\n closure_loss.backward(*args, **kwargs)\n\n def post_backward(self, model: \"pl.LightningModule\", closure_loss: Tensor) -> Tensor:\n \"\"\"Run after precision plugin executes backward.\n\n Args:\n model: the model to be optimized\n closure_loss: the loss value obtained from the closure\n \"\"\"\n # once backward has been applied, release graph\n closure_loss = closure_loss.detach()\n model.trainer.call_hook(\"on_after_backward\")\n return closure_loss\n\n def pre_optimizer_step(\n self,\n model: \"pl.LightningModule\",\n optimizer: Optimizer,\n optimizer_idx: int,\n lambda_closure: Callable,\n **kwargs: Any,\n ) -> bool:\n \"\"\"Hook to do something before each optimizer step.\"\"\"\n model.trainer.call_hook(\"on_before_optimizer_step\", optimizer, optimizer_idx)\n return True\n\n def post_optimizer_step(self, optimizer: Optimizer, optimizer_idx: int) -> None:\n \"\"\"Hook to do something after each optimizer step.\"\"\"\n\n def clip_gradients(\n self,\n optimizer: Optimizer,\n clip_val: Union[int, float],\n gradient_clip_algorithm: GradClipAlgorithmType = GradClipAlgorithmType.NORM,\n model: Optional[Module] = None,\n ) -> None:\n \"\"\"Clips the gradients.\"\"\"\n if clip_val is None:\n return\n\n clip_val = float(clip_val)\n if clip_val <= 0:\n return\n\n if gradient_clip_algorithm == GradClipAlgorithmType.VALUE:\n self.clip_grad_by_value(optimizer, clip_val)\n elif gradient_clip_algorithm == GradClipAlgorithmType.NORM:\n # TODO: there should be a mechanism to set `norm_type`\n self.clip_grad_by_norm(optimizer, clip_val)\n\n def clip_grad_by_value(self, optimizer: Optimizer, clip_val: Union[int, float]) -> None:\n \"\"\"Clip gradients by value.\"\"\"\n parameters = self.master_params(optimizer)\n torch.nn.utils.clip_grad_value_(parameters, clip_value=clip_val)\n\n def clip_grad_by_norm(self, optimizer: Optimizer, clip_val: Union[int, float]) -> None:\n \"\"\"Clip gradients by norm.\"\"\"\n parameters = self.master_params(optimizer)\n torch.nn.utils.clip_grad_norm_(parameters, clip_val)\n\n def pre_dispatch(self) -> None:\n \"\"\"Hook to do something before the training/evaluation/prediction starts.\"\"\"\n\n def dispatch(self, trainer: \"pl.Trainer\") -> None:\n \"\"\"Hook to do something when ``Accelerator.dispatch()`` gets called.\"\"\"\n\n def post_dispatch(self) -> None:\n \"\"\"Hook to do something after the training/evaluation/prediction finishes.\"\"\"\n\n @contextlib.contextmanager\n def train_step_context(self) -> Generator:\n \"\"\"A contextmanager for the training step.\"\"\"\n yield\n\n @contextlib.contextmanager\n def val_step_context(self) -> Generator:\n \"\"\"A contextmanager for the validation step.\"\"\"\n yield\n\n @contextlib.contextmanager\n def test_step_context(self) -> Generator:\n \"\"\"A contextmanager for the test step.\"\"\"\n yield\n\n @contextlib.contextmanager\n def predict_step_context(self) -> Generator:\n \"\"\"A contextmanager for the predict step.\"\"\"\n yield\n"
] |
[
[
"torch.randn",
"torch.tensor"
],
[
"torch.nn.utils.clip_grad_norm_",
"torch.nn.utils.clip_grad_value_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jarethholt/teospy
|
[
"3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f"
] |
[
"teospy/iceair4b.py"
] |
[
"\"\"\"Icy air Gibbs energy and related properties.\n\nThis module provides the Gibbs function for ice-saturated (icy) air and\nrelated thermodynamic properties. The primary variables are the total\ndry air fraction, temperature, and pressure. The 'total' fraction here\nis the mass fraction of dry air in the total parcel (including ice), and\nuses the variable ``wair``. The dry air mass fraction in humid air uses\nthe variable ``airf``.\n\n:Examples:\n\n>>> iceair_g(0,0,0,.5,270.,1e5)\n-2595.57166634\n>>> iceair_g(1,0,0,.5,270.,1e5)\n2382.35592988\n>>> iceair_g(0,0,1,.5,270.,1e5)\n0.389645501224\n>>> iceair_g(1,1,0,.5,270.,1e5)\n-1269.41767949\n>>> cp(0.5,270.,1e5)\n1893.230554\n>>> enthalpy(0.5,270.,1e5)\n-167366.990802\n>>> lapserate(0.5,270.,1e5)\n2.283832444e-04\n>>> solidfraction(0.5,270.,1e5)\n0.498525089434\n\n:Functions:\n\n* :func:`iceair_g`: Icy air Gibbs free energy with derivatives.\n* :func:`cp`: Icy air isobaric heat capacity.\n* :func:`density`: Icy air density.\n* :func:`enthalpy`: Icy air enthalpy.\n* :func:`entropy`: Icy air entropy.\n* :func:`expansion`: Icy air thermal expansion coefficient.\n* :func:`kappa_t`: Icy air isothermal compressibility.\n* :func:`lapserate`: Icy air adiabatic lapse rate.\n* :func:`solidfraction`: Total mass fraction of ice in icy air.\n* :func:`vapourfraction`: Total mass fraction of water vapour in icy\n air.\n\n\"\"\"\n\n__all__ = ['iceair_g','cp','density','enthalpy','entropy','expansion','kappa_t',\n 'lapserate','solidfraction','vapourfraction']\n\nimport numpy\nfrom teospy import constants0\nfrom teospy import ice1\nfrom teospy import air2\nfrom teospy import ice2\nfrom teospy import maths3\nfrom teospy import air3a\nfrom teospy import iceair4a\n\n_CHKTOL = constants0.CHKTOL\n_chkhumbnds = constants0.chkhumbnds\n_chkicebnds = constants0.chkicebnds\n_ice_g = ice1.ice_g\n_air_f = air2.air_f\n_eq_pressure = air2.eq_pressure\n_eq_vappot = air2.eq_vappot\n_newton = maths3.newton\n_eq_atpe = iceair4a.eq_atpe\n\n\n## Gibbs function\ndef iceair_g(drvw,drvt,drvp,wair,temp,pres,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,dhum0=None,chkbnd=False,\n mathargs=None):\n \"\"\"Calculate icy air Gibbs free energy with derivatives.\n \n Calculate the specific Gibbs free energy of icy air or its\n derivatives with respect to total dry air fraction, temperature,\n and pressure.\n \n :arg int drvw: Number of dry fraction derivatives.\n :arg int drvt: Number of temperature derivatives.\n :arg int drvp: Number of pressure derivatives.\n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. 
If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Gibbs free energy in units of\n (J/kg) / (kg/kg)^drvw / K^drvt / Pa^drvp.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> iceair_g(0,0,0,.5,270.,1e5)\n -2595.57166634\n >>> iceair_g(1,0,0,.5,270.,1e5)\n 2382.35592988\n >>> iceair_g(0,1,0,.5,270.,1e5)\n 610.264515318\n >>> iceair_g(0,0,1,.5,270.,1e5)\n 0.389645501224\n >>> iceair_g(2,0,0,.5,270.,1e5)\n 0.0\n >>> iceair_g(1,1,0,.5,270.,1e5)\n -1269.41767949\n >>> iceair_g(1,0,1,.5,270.,1e5)\n 0.777110408175\n >>> iceair_g(0,2,0,.5,270.,1e5)\n -7.011965016\n >>> iceair_g(0,1,1,.5,270.,1e5)\n 1.601415320e-03\n >>> iceair_g(0,0,2,.5,270.,1e5)\n -3.911839890e-06\n \"\"\"\n airf, __, __, dhum = _eq_atpe(temp=temp,pres=pres,airf=airf,dhum=dhum,\n chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,\n mathargs=mathargs)\n if airf <= wair:\n warnmsg = 'Air with the given parameters is unsaturated'\n warnings.warn(warnmsg,RuntimeWarning)\n g = air3a.air_g(drvw,drvt,drvp,wair,temp,pres,dhum=dhum)\n return g\n w = wair / airf\n \n # Simple derivative cases\n if (drvw,drvt,drvp) == (0,0,0):\n fh = _air_f(0,0,0,airf,temp,dhum)\n fh_d = _air_f(0,0,1,airf,temp,dhum)\n gi = _ice_g(0,0,temp,pres)\n g = w*(fh + dhum*fh_d) + (1-w)*gi\n return g\n elif (drvw,drvt,drvp) == (1,0,0):\n fh_a = _air_f(1,0,0,airf,temp,dhum)\n g_w = fh_a\n return g_w\n elif (drvw,drvt,drvp) == (0,1,0):\n gh_t = _air_f(0,1,0,airf,temp,dhum)\n gi_t = _ice_g(1,0,temp,pres)\n g_t = w*gh_t + (1-w)*gi_t\n return g_t\n elif (drvw,drvt,drvp) == (0,0,1):\n gh_p = dhum**(-1)\n gi_p = _ice_g(0,1,temp,pres,chkbnd=chkbnd)\n g_p = w*gh_p + (1-w)*gi_p\n return g_p\n elif (drvw,drvt,drvp) == (2,0,0):\n g_ww = 0.\n return g_ww\n elif (drvw,drvt,drvp) == (1,1,0):\n gh_t = _air_f(0,1,0,airf,temp,dhum)\n gi_t = _ice_g(1,0,temp,pres)\n g_wt = (gh_t - gi_t) / airf\n return g_wt\n elif (drvw,drvt,drvp) == (1,0,1):\n gh_p = dhum**(-1)\n gi_p = _ice_g(0,1,temp,pres)\n g_wp = (gh_p - gi_p) / airf\n return g_wp\n \n # Derivative cases requiring inversion\n __, __, __, pg_ad = iceair4a._diff_tp(airf,dhum,temp,pres)\n if (drvw,drvt,drvp) == (0,2,0):\n ph_t = _eq_pressure(0,1,0,airf,temp,dhum)\n gv_t = _eq_vappot(0,1,0,airf,temp,dhum)\n gi_t = _ice_g(1,0,temp,pres)\n pg_t = numpy.array([ph_t,gv_t-gi_t])\n ad_t = numpy.linalg.solve(pg_ad,-pg_t)\n \n fh_t = _air_f(0,1,0,airf,temp,dhum)\n fh_at = _air_f(1,1,0,airf,temp,dhum)\n fh_tt = _air_f(0,2,0,airf,temp,dhum)\n fh_td = _air_f(0,1,1,airf,temp,dhum)\n gi_t = _ice_g(1,0,temp,pres)\n gi_tt = _ice_g(2,0,temp,pres)\n g_ta = w/airf*(airf*fh_at - fh_t + gi_t)\n g_td = w*fh_td\n g_tx = numpy.array([g_ta,g_td])\n g_tt = w*fh_tt + (1-w)*gi_tt + g_tx.dot(ad_t)\n return g_tt\n elif (drvw,drvt,drvp) == (0,1,1):\n ph_t = _eq_pressure(0,1,0,airf,temp,dhum)\n gv_t = _eq_vappot(0,1,0,airf,temp,dhum)\n gi_t = _ice_g(1,0,temp,pres)\n pg_t = numpy.array([ph_t,gv_t-gi_t])\n ad_t = numpy.linalg.solve(pg_ad,-pg_t)\n \n gi_p = _ice_g(0,1,temp,pres)\n gi_tp = 
_ice_g(1,1,temp,pres)\n g_pa = -w/airf * (dhum**(-1) - gi_p)\n g_pd = -w/dhum**2\n g_px = numpy.array([g_pa,g_pd])\n g_tp = (1-w)*gi_tp + g_px.dot(ad_t)\n return g_tp\n elif (drvw,drvt,drvp) == (0,0,2):\n gi_p = _ice_g(0,1,temp,pres)\n pg_p = numpy.array([1.,gi_p])\n ad_p = numpy.linalg.solve(pg_ad,pg_p)\n \n gi_pp = _ice_g(0,2,temp,pres)\n g_pa = -w/airf * (dhum**(-1) - gi_p)\n g_pd = -w/dhum**2\n g_px = numpy.array([g_pa,g_pd])\n g_pp = (1-w)*gi_pp + g_px.dot(ad_p)\n return g_pp\n \n # Should not have made it this far!\n errmsg = 'Derivatives {0} not recognized'.format((drvw,drvt,drvp))\n raise ValueError(errmsg)\n\n\n## Thermodynamic properties\ndef cp(wair,temp,pres,airf=None,dhum=None,chkvals=False,chktol=_CHKTOL,\n airf0=None,dhum0=None,chkbnd=False,mathargs=None):\n \"\"\"Calculate icy air isobaric heat capacity.\n \n Calculate the isobaric (constant pressure) heat capacity of icy air.\n \n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Heat capacity in J/kg/K.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> cp(0.5,270.,1e5)\n 1893.230554\n \"\"\"\n g_tt = iceair_g(0,2,0,wair,temp,pres,airf=airf,dhum=dhum,chkvals=chkvals,\n chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n cp = -temp * g_tt\n return cp\n\ndef density(wair,temp,pres,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,dhum0=None,chkbnd=False,mathargs=None):\n \"\"\"Calculate icy air density.\n \n Calculate the density of icy air, the total density of the parcel\n including ice mass.\n \n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. 
If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Density in kg/m3.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> density(0.5,270.,1e5)\n 2.56643538000\n \"\"\"\n g_p = iceair_g(0,0,1,wair,temp,pres,airf=airf,dhum=dhum,chkvals=chkvals,\n chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n rho = g_p**(-1)\n return rho\n\ndef enthalpy(wair,temp,pres,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,dhum0=None,chkbnd=False,mathargs=None):\n \"\"\"Calculate icy air enthalpy.\n \n Calculate the specific enthalpy of icy air.\n \n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). 
If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Enthalpy in J/kg.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> enthalpy(0.5,270.,1e5)\n -167366.990802\n \"\"\"\n airf, __, __, dhum = _eq_atpe(temp=temp,pres=pres,airf=airf,dhum=dhum,\n chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,\n mathargs=mathargs)\n if airf <= wair:\n warnmsg = 'Air with the given parameters is unsaturated'\n warnings.warn(warnmsg,RuntimeWarning)\n h = air3b.enthalpy(wair,temp,pres,dhum0=dhum0,mathargs=mathargs)\n return h\n g = iceair_g(0,0,0,wair,temp,pres,airf=airf,dhum=dhum)\n g_t = iceair_g(0,1,0,wair,temp,pres,airf=airf,dhum=dhum)\n h = g - temp*g_t\n return h\n\ndef entropy(wair,temp,pres,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,dhum0=None,chkbnd=False,mathargs=None):\n \"\"\"Calculate icy air entropy.\n \n Calculate the specific entropy of icy air.\n \n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Entropy in J/kg/K.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> entropy(0.5,270.,1e5)\n -610.264515318\n \"\"\"\n g_t = iceair_g(0,1,0,wair,temp,pres,airf=airf,dhum=dhum,chkvals=chkvals,\n chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n s = -g_t\n return s\n\ndef expansion(wair,temp,pres,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,dhum0=None,chkbnd=False,mathargs=None):\n \"\"\"Calculate icy air expansion coefficient.\n \n Calculate the thermal expansion coefficient of icy air.\n \n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. 
If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Expansion coefficient in J/kg/K.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> expansion(0.5,270.,1e5)\n 4.109928935e-03\n \"\"\"\n airf, __, __, dhum = _eq_atpe(temp=temp,pres=pres,airf=airf,dhum=dhum,\n chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,\n mathargs=mathargs)\n if airf <= wair:\n warnmsg = 'Air with the given parameters is unsaturated'\n warnings.warn(warnmsg,RuntimeWarning)\n alpha = air3b.expansion(wair,temp,pres,dhum0=dhum0,mathargs=mathargs)\n return alpha\n g_p = iceair_g(0,0,1,wair,temp,pres,airf=airf,dhum=dhum)\n g_tp = iceair_g(0,1,1,wair,temp,pres,airf=airf,dhum=dhum)\n alpha = g_tp / g_p\n return alpha\n\ndef kappa_t(wair,temp,pres,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,dhum0=None,chkbnd=False,mathargs=None):\n \"\"\"Calculate icy air isothermal compressibility.\n \n Calculate the isothermal compressibility of icy air.\n \n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). 
If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Compressibility in 1/Pa.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> kappa_t(0.5,270.,1e5)\n 1.003948429e-05\n \"\"\"\n airf, __, __, dhum = _eq_atpe(temp=temp,pres=pres,airf=airf,dhum=dhum,\n chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,\n mathargs=mathargs)\n if airf <= wair:\n warnmsg = 'Air with the given parameters is unsaturated'\n warnings.warn(warnmsg,RuntimeWarning)\n kappa = air3b.kappa_t(wair,temp,pres,dhum0=dhum0,mathargs=mathargs)\n return kappa\n g_p = iceair_g(0,0,1,wair,temp,pres,airf=airf,dhum=dhum)\n g_pp = iceair_g(0,0,2,wair,temp,pres,airf=airf,dhum=dhum)\n kappa = -g_pp / g_p\n return kappa\n\ndef lapserate(wair,temp,pres,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,dhum0=None,chkbnd=False,mathargs=None):\n \"\"\"Calculate icy air adiabatic lapse rate.\n \n Calculate the adiabatic lapse rate of icy air.\n \n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). 
If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Lapse rate in K/Pa.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> lapserate(0.5,270.,1e5)\n 2.283832444e-04\n \"\"\"\n airf, __, __, dhum = _eq_atpe(temp=temp,pres=pres,airf=airf,dhum=dhum,\n chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,\n mathargs=mathargs)\n if airf <= wair:\n warnmsg = 'Air with the given parameters is unsaturated'\n warnings.warn(warnmsg,RuntimeWarning)\n gamma = air3b.lapserate(wair,temp,pres,dhum0=dhum0,mathargs=mathargs)\n return gamma\n g_tt = iceair_g(0,2,0,wair,temp,pres,airf=airf,dhum=dhum)\n g_tp = iceair_g(0,1,1,wair,temp,pres,airf=airf,dhum=dhum)\n gamma = -g_tp / g_tt\n return gamma\n\ndef solidfraction(wair,temp,pres,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,dhum0=None,chkbnd=False,mathargs=None):\n \"\"\"Calculate icy air ice fraction.\n \n Calculate the mass fraction of ice in icy air.\n \n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Mass fraction in kg/kg.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> solidfraction(0.5,270.,1e5)\n 0.498525089434\n \"\"\"\n airf, __, __, dhum = _eq_atpe(temp=temp,pres=pres,airf=airf,dhum=dhum,\n chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,\n mathargs=mathargs)\n if airf <= wair:\n warnmsg = 'Air with the given parameters is unsaturated'\n warnings.warn(warnmsg,RuntimeWarning)\n wice = max(1 - wair/airf, 0.)\n return wice\n\ndef vapourfraction(wair,temp,pres,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,dhum0=None,chkbnd=False,mathargs=None):\n \"\"\"Calculate icy air vapour fraction.\n \n Calculate the mass fraction of water vapour in icy air.\n \n :arg float wair: Total dry air fraction in kg/kg.\n :arg float temp: Temperature in K.\n :arg float pres: Pressure in Pa.\n :arg airf: Dry air fraction in humid air in kg/kg.\n :type airf: float or None\n :arg dhum: Humid air density in kg/m3. 
If unknown, pass None\n (default) and it will be calculated.\n :type dhum: float or None\n :arg bool chkvals: If True (default False) and all values are given,\n this function will calculate the disequilibrium and raise a\n warning if the results are not within a given tolerance.\n :arg float chktol: Tolerance to use when checking values (default\n _CHKTOL).\n :arg airf0: Initial guess for the dry fraction in kg/kg. If None\n (default) then `iceair4a._approx_tp` is used.\n :type airf0: float or None\n :arg dhum0: Initial guess for the humid air density in kg/m3. If\n None (default) then `iceair4a._approx_tp` is used.\n :type dhum0: float or None\n :arg bool chkbnd: If True then warnings are raised when the given\n values are valid but outside the recommended bounds (default\n False).\n :arg mathargs: Keyword arguments to the root-finder\n :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None\n (default) then no arguments are passed and default parameters\n will be used.\n :returns: Mass fraction in kg/kg.\n :raises RuntimeWarning: If the relative disequilibrium is more than\n chktol, if chkvals is True and all values are given.\n :raises RuntimeWarning: If air with the given parameters would be\n unsaturated.\n \n :Examples:\n \n >>> vapourfraction(0.5,270.,1e5)\n 1.47491056602e-3\n \"\"\"\n airf, __, __, dhum = _eq_atpe(temp=temp,pres=pres,airf=airf,dhum=dhum,\n chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,chkbnd=chkbnd,\n mathargs=mathargs)\n if airf <= wair:\n warnmsg = 'Air with the given parameters is unsaturated'\n warnings.warn(warnmsg,RuntimeWarning)\n wvap = min(wair * (1-airf)/airf, 1-wair)\n return wvap\n\n"
] |
[
[
"numpy.array",
"numpy.linalg.solve"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
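Every property routine in the icy-air code field above reduces to partial derivatives of one Gibbs function (iceair_g): enthalpy, entropy, expansion, isothermal compressibility, and lapse rate are all simple combinations of g and its temperature/pressure derivatives. The short sketch below restates those identities; the `gibbs(dt, dp, ...)` callable is a hypothetical stand-in for `iceair_g(0, dt, dp, ...)` and is not part of the dataset row.

def properties_from_gibbs(gibbs, wair, temp, pres):
    # gibbs(dt, dp, wair, temp, pres) is assumed to return the (dt, dp)-th
    # temperature/pressure derivative of the Gibbs energy, mirroring
    # iceair_g(0, dt, dp, wair, temp, pres) in the code field above.
    g    = gibbs(0, 0, wair, temp, pres)
    g_t  = gibbs(1, 0, wair, temp, pres)
    g_p  = gibbs(0, 1, wair, temp, pres)
    g_tt = gibbs(2, 0, wair, temp, pres)
    g_tp = gibbs(1, 1, wair, temp, pres)
    g_pp = gibbs(0, 2, wair, temp, pres)
    return {
        "enthalpy": g - temp * g_t,   # h = g - T*g_T
        "entropy": -g_t,              # s = -g_T
        "expansion": g_tp / g_p,      # alpha = g_Tp / g_p
        "kappa_t": -g_pp / g_p,       # isothermal compressibility
        "lapserate": -g_tp / g_tt,    # adiabatic lapse rate
    }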
liyc7711/seq2seq
|
[
"1592b842b652ae648b96c164bead38eb089ce08e"
] |
[
"seq2seq/tasks/decode_text.py"
] |
[
"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTask where both the input and output sequence are plain text.\n\"\"\"\n\nimport functools\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow import gfile\n\nfrom seq2seq.tasks.inference_task import InferenceTask, unbatch_dict\n\n\ndef _get_prediction_length(predictions_dict):\n \"\"\"Returns the length of the prediction based on the index\n of the first SEQUENCE_END token.\n \"\"\"\n tokens_iter = enumerate(predictions_dict[\"predicted_tokens\"])\n return next(((i + 1) for i, _ in tokens_iter if _ == \"SEQUENCE_END\"),\n len(predictions_dict[\"predicted_tokens\"]))\n\n\ndef _get_unk_mapping(filename):\n \"\"\"Reads a file that specifies a mapping from source to target tokens.\n The file must contain lines of the form <source>\\t<target>\"\n\n Args:\n filename: path to the mapping file\n\n Returns:\n A dictionary that maps from source -> target tokens.\n \"\"\"\n with gfile.GFile(filename, \"r\") as mapping_file:\n lines = mapping_file.readlines()\n mapping = dict([_.split(\"\\t\")[0:2] for _ in lines])\n mapping = {k.strip(): v.strip() for k, v in mapping.items()}\n return mapping\n\n\ndef _unk_replace(source_tokens,\n predicted_tokens,\n attention_scores,\n mapping=None):\n \"\"\"Replaces UNK tokens with tokens from the source or a\n provided mapping based on the attention scores.\n\n Args:\n source_tokens: A numpy array of strings.\n predicted_tokens: A numpy array of strings.\n attention_scores: A numeric numpy array\n of shape `[prediction_length, source_length]` that contains\n the attention scores.\n mapping: If not provided, an UNK token is replaced with the\n source token that has the highest attention score. If provided\n the token is insead replaced with `mapping[chosen_source_token]`.\n\n Returns:\n A new `predicted_tokens` array.\n \"\"\"\n result = []\n for token, scores in zip(predicted_tokens, attention_scores):\n if token == \"UNK\":\n max_score_index = np.argmax(scores)\n chosen_source_token = source_tokens[max_score_index]\n new_target = chosen_source_token\n if mapping is not None and chosen_source_token in mapping:\n new_target = mapping[chosen_source_token]\n result.append(new_target)\n else:\n result.append(token)\n return np.array(result)\n\n\nclass DecodeText(InferenceTask):\n \"\"\"Defines inference for tasks where both the input and output sequences\n are plain text.\n\n Params:\n delimiter: Character by which tokens are delimited. Defaults to space.\n unk_replace: If true, enable unknown token replacement based on attention\n scores.\n unk_mapping: If `unk_replace` is true, this can be the path to a file\n defining a dictionary to improve UNK token replacement. 
Refer to the\n documentation for more details.\n dump_attention_dir: Save attention scores and plots to this directory.\n dump_attention_no_plot: If true, only save attention scores, not\n attention plots.\n dump_beams: Write beam search debugging information to this file.\n \"\"\"\n\n def __init__(self, params):\n super(DecodeText, self).__init__(params)\n self._unk_mapping = None\n self._unk_replace_fn = None\n\n if self.params[\"unk_mapping\"] is not None:\n self._unk_mapping = _get_unk_mapping(self.params[\"unk_mapping\"])\n if self.params[\"unk_replace\"]:\n self._unk_replace_fn = functools.partial(\n _unk_replace, mapping=self._unk_mapping)\n\n @staticmethod\n def default_params():\n params = {}\n params.update({\n \"delimiter\": \" \",\n \"unk_replace\": False,\n \"unk_mapping\": None,\n })\n return params\n\n def before_run(self, _run_context):\n fetches = {}\n fetches[\"predicted_tokens\"] = self._predictions[\"predicted_tokens\"]\n fetches[\"features.source_len\"] = self._predictions[\"features.source_len\"]\n fetches[\"features.source_tokens\"] = self._predictions[\n \"features.source_tokens\"]\n\n if \"attention_scores\" in self._predictions:\n fetches[\"attention_scores\"] = self._predictions[\"attention_scores\"]\n\n return tf.train.SessionRunArgs(fetches)\n\n def after_run(self, _run_context, run_values):\n fetches_batch = run_values.results\n for fetches in unbatch_dict(fetches_batch):\n # Convert to unicode\n fetches[\"predicted_tokens\"] = np.char.decode(\n fetches[\"predicted_tokens\"].astype(\"S\"), \"utf-8\")\n predicted_tokens = fetches[\"predicted_tokens\"]\n\n # If we're using beam search we take the first beam\n if np.ndim(predicted_tokens) > 1:\n predicted_tokens = predicted_tokens[:, 0]\n\n fetches[\"features.source_tokens\"] = np.char.decode(\n fetches[\"features.source_tokens\"].astype(\"S\"), \"utf-8\")\n source_tokens = fetches[\"features.source_tokens\"]\n source_len = fetches[\"features.source_len\"]\n\n if self._unk_replace_fn is not None:\n # We slice the attention scores so that we do not\n # accidentially replace UNK with a SEQUENCE_END token\n attention_scores = fetches[\"attention_scores\"]\n attention_scores = attention_scores[:, :source_len - 1]\n predicted_tokens = self._unk_replace_fn(\n source_tokens=source_tokens,\n predicted_tokens=predicted_tokens,\n attention_scores=attention_scores)\n\n sent = self.params[\"delimiter\"].join(predicted_tokens).split(\n \"SEQUENCE_END\")[0]\n # Replace special BPE tokens\n sent = sent.replace(\"@@ \", \"\")\n sent = sent.strip()\n\n print(sent)\n"
] |
[
[
"tensorflow.gfile.GFile",
"tensorflow.train.SessionRunArgs",
"numpy.ndim",
"numpy.argmax",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
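The decode_text.py code above replaces UNK outputs with the source token that received the highest attention weight (_unk_replace). A toy, self-contained illustration of that rule follows; the tokens and scores are made up for demonstration and simplified to plain Python lists plus a NumPy score matrix.

import numpy as np

source_tokens = ["ich", "mag", "Katzen"]
predicted_tokens = ["i", "like", "UNK"]
# one row of attention scores per predicted token, one column per source token
attention_scores = np.array([
    [0.8, 0.1, 0.1],
    [0.1, 0.7, 0.2],
    [0.1, 0.2, 0.7],
])

replaced = []
for token, scores in zip(predicted_tokens, attention_scores):
    if token == "UNK":
        # copy the source token the decoder attended to most strongly
        replaced.append(source_tokens[np.argmax(scores)])
    else:
        replaced.append(token)
print(replaced)  # ['i', 'like', 'Katzen']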
Sciguymjm/AlphaVantageAPI
|
[
"54feb3c6566bfd561c5e366b1a129a4b44311a45"
] |
[
"tests/test_api.py"
] |
[
"from alphaVantageAPI.alphavantage import AlphaVantage\n\nfrom unittest import TestCase\nfrom unittest.mock import patch\nfrom pandas import DataFrame, read_csv\n\nfrom .utils import Path\nfrom .utils import Constant as C\nfrom .utils import load_json, _mock_response\n\n## Python 3.7 + Pandas DeprecationWarning\n# /alphaVantageAPI/env/lib/python3.7/site-packages/pandas/core/frame.py:7476:\n# DeprecationWarning: Using or importing the ABCs from \"collections\" instead of from \"collections.abc\" is deprecated, and in 3.8 it will stop working elif isinstance(data[0], collections.Mapping):\n\nclass TestAlphaVantageAPI(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.test_data_path = C.TEST_DATA_PATH\n # Set premium to True to avoid API throttling for testing\n av = AlphaVantage(api_key=C.API_KEY_TEST, premium=True)\n\n # Minimum parameters\n cls.fx_parameters = {\"function\":\"CURRENCY_EXCHANGE_RATE\", \"from_currency\":\"USD\", \"to_currency\":\"JPY\"}\n cls.fx_daily_parameters = {\"function\":\"FX_DAILY\", \"from_currency\":\"EUR\", \"to_currency\":\"USD\"}\n cls.fx_intraday_parameters = {\"function\":\"FX_INTRADAY\", \"from_currency\":\"EUR\", \"to_currency\":\"USD\"}\n cls.fx_monthly_parameters = {\"function\":\"FX_MONTHLY\", \"from_currency\":\"EUR\", \"to_currency\":\"USD\"}\n cls.fx_weekly_parameters = {\"function\":\"FX_WEEKLY\", \"from_currency\":\"EUR\", \"to_currency\":\"USD\"}\n cls.data_parameters = {\"function\":\"TIME_SERIES_DAILY_ADJUSTED\", \"symbol\":C.API_DATA_TEST}\n cls.intraday_parameters = {\"function\":\"TIME_SERIES_INTRADAY\", \"symbol\":C.API_DATA_TEST}\n cls.indicator_parameters = {\"function\":\"RSI\", \"symbol\":C.API_DATA_TEST, \"interval\":\"weekly\", \"series_type\":\"open\", \"time_period\":10}\n cls.digital_parameters = {\"function\":\"DIGITAL_CURRENCY_DAILY\", \"symbol\":C.API_DIGITAL_TEST, \"market\":\"CNY\"}\n cls.digital_rating_parameters = {\"function\":\"CRYPTO_RATING\", \"symbols\":C.API_DIGITAL_TEST}\n cls.global_quote_parameters = {\"function\":\"GLOBAL_QUOTE\", \"symbols\":C.API_DIGITAL_TEST}\n cls.overview_parameters = {\"function\":\"OVERVIEW\", \"symbols\":C.API_FUNDA_TEST}\n cls.balance_parameters = {\"function\":\"BALANCE_SHEET\", \"symbols\":C.API_FUNDA_TEST}\n cls.income_parameters = {\"function\":\"INCOME_STATEMENT\", \"symbols\":C.API_FUNDA_TEST}\n cls.cashflow_parameters = {\"function\":\"CASH_FLOW\", \"symbols\":C.API_FUNDA_TEST}\n\n cls.earnings_parameters = {\"function\": \"EARNINGS_CALENDAR\"}\n cls.ipos_parameters = {\"function\": \"IPO_CALENDAR\"}\n cls.listing_parameters = {\"function\": \"LISTING_STATUS\"}\n\n # json files of sample data\n cls.json_fx = load_json(cls.test_data_path / \"mock_fx.json\")\n cls.json_fx_daily = load_json(cls.test_data_path / \"mock_fx_daily.json\")\n cls.json_fx_intraday = load_json(cls.test_data_path / \"mock_fx_intraday.json\")\n cls.json_fx_monthly = load_json(cls.test_data_path / \"mock_fx_monthly.json\")\n cls.json_fx_weekly = load_json(cls.test_data_path / \"mock_fx_weekly.json\")\n cls.json_data = load_json(cls.test_data_path / \"mock_data.json\")\n cls.json_indicator = load_json(cls.test_data_path / \"mock_indicator.json\")\n cls.json_digital = load_json(cls.test_data_path / \"mock_digital.json\")\n cls.json_digital_rating = load_json(cls.test_data_path / \"mock_digital_rating.json\")\n cls.json_global_quote = load_json(cls.test_data_path / \"mock_global_quote.json\")\n cls.json_overview = load_json(cls.test_data_path / \"mock_overview.json\")\n cls.json_balance = 
load_json(cls.test_data_path / \"mock_balance_sheet.json\")\n cls.json_income = load_json(cls.test_data_path / \"mock_income_statement.json\")\n cls.json_cashflow = load_json(cls.test_data_path / \"mock_cash_flow.json\")\n\n # csv files of sample data\n cls.csv_earnings_cal = read_csv(cls.test_data_path / \"mock_earnings_cal.csv\")\n cls.csv_ipos_cal = read_csv(cls.test_data_path / \"mock_ipos_cal.csv\")\n cls.csv_delisted = read_csv(cls.test_data_path / \"mock_delisted_status.csv\")\n cls.csv_listed = read_csv(cls.test_data_path / \"mock_listed_status.csv\")\n\n # Pandas DataFrames of sample data\n cls.df_fx = av._to_dataframe(\"CURRENCY_EXCHANGE_RATE\", cls.json_fx)\n cls.df_fx_daily = av._to_dataframe(\"FX_DAILY\", cls.json_fx_daily)\n cls.df_fx_intraday = av._to_dataframe(\"FX_INTRADAY\", cls.json_fx_intraday)\n cls.df_fx_monthly = av._to_dataframe(\"FX_MONTHLY\", cls.json_fx_monthly)\n cls.df_fx_weekly = av._to_dataframe(\"FX_WEEKLY\", cls.json_fx_weekly)\n cls.df_data = av._to_dataframe(\"TIME_SERIES_DAILY_ADJUSTED\", cls.json_data)\n cls.df_indicator = av._to_dataframe(\"RSI\", cls.json_indicator)\n cls.df_digital = av._to_dataframe(\"DIGITAL_CURRENCY_DAILY\", cls.json_digital)\n cls.df_digital_rating = av._to_dataframe(\"CRYPTO_RATING\", cls.json_digital_rating)\n cls.df_global_quote = av._to_dataframe(\"GLOBAL_QUOTE\", cls.json_global_quote)\n cls.df_overview = av._to_dataframe(\"OVERVIEW\", cls.json_overview)\n cls.df_balance = av._to_dataframe(\"BALANCE_SHEET\", cls.json_balance)\n cls.df_income = av._to_dataframe(\"INCOME_STATEMENT\", cls.json_income)\n cls.df_cashflow = av._to_dataframe(\"CASH_FLOW\", cls.json_cashflow)\n\n cls.df_earnings = DataFrame(cls.csv_earnings_cal)\n cls.df_ipos = DataFrame(cls.csv_ipos_cal)\n cls.df_delisted = DataFrame(cls.csv_delisted)\n cls.df_listed = DataFrame(cls.csv_listed)\n\n\n @classmethod\n def tearDownClass(cls):\n del cls.test_data_path\n\n del cls.fx_parameters\n del cls.fx_daily_parameters\n del cls.fx_intraday_parameters\n del cls.fx_monthly_parameters\n del cls.fx_weekly_parameters\n # del cls.sector_parameters\n del cls.data_parameters\n del cls.intraday_parameters\n del cls.indicator_parameters\n del cls.digital_parameters\n del cls.digital_rating_parameters\n del cls.global_quote_parameters\n del cls.overview_parameters\n del cls.balance_parameters\n del cls.income_parameters\n del cls.cashflow_parameters\n\n del cls.earnings_parameters\n del cls.ipos_parameters\n del cls.listing_parameters\n\n del cls.json_fx\n del cls.json_fx_daily\n del cls.json_fx_intraday\n del cls.json_fx_monthly\n del cls.json_fx_weekly\n del cls.json_data\n del cls.json_indicator\n del cls.json_digital\n del cls.json_digital_rating\n del cls.json_global_quote\n del cls.json_overview\n del cls.json_balance\n del cls.json_income\n del cls.json_cashflow\n\n del cls.df_fx\n del cls.df_fx_daily\n del cls.df_fx_intraday\n del cls.df_fx_monthly\n del cls.df_fx_weekly\n del cls.df_data\n del cls.df_indicator\n del cls.df_digital\n del cls.df_digital_rating\n del cls.df_global_quote\n del cls.df_overview\n del cls.df_balance\n del cls.df_income\n del cls.df_cashflow\n\n del cls.csv_earnings_cal\n del cls.csv_ipos_cal\n del cls.csv_delisted\n del cls.csv_listed\n\n def setUp(self):\n self.av = AlphaVantage(api_key=C.API_KEY_TEST)\n\n def tearDown(self):\n del self.av\n\n\n # tests of: fx, sectors, data, intraday, and digital\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._av_api_call\")\n def test_fx(self, mock_av_api_call):\n mock_av_api_call.side_effect = 
[None, self.df_fx, self.json_fx]\n self.assertIsNone(self.av.fx(C.API_FX_TEST))\n # 4/7/2019 Stopped passing!?\n # self.assertIsInstance(self.av.fx(C.API_FX_TEST), DataFrame)\n # self.assertIsInstance(self.av.fx(C.API_FX_TEST), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._av_api_call\")\n def test_digital(self, mock_av_api_call):\n mock_av_api_call.side_effect = [None, self.df_digital, self.json_digital]\n self.assertIsNone(self.av.digital(C.API_DIGITAL_TEST))\n self.assertIsInstance(self.av.digital(C.API_DIGITAL_TEST), DataFrame)\n self.assertIsInstance(self.av.digital(C.API_DIGITAL_TEST), dict)\n \n # @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._av_api_call\")\n # def test_intraday(self, mock_av_api_call):\n # mock_av_api_call.side_effect = [None, self.df_sectors, self.json_sectors]\n # self.assertIsNone(self.av.intraday(C.API_DATA_TEST))\n # self.assertIsInstance(self.av.intraday(C.API_DATA_TEST), DataFrame)\n # self.assertIsInstance(self.av.intraday(C.API_DATA_TEST), dict)\n\n # @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._av_api_call\")\n # def test_data(self, mock_av_api_call):\n # mock_av_api_call.side_effect = [None, self.df_data, self.json_data]\n # self.assertIsNone(self.av.data(\"D\", C.API_DATA_TEST))\n # self.assertIsInstance(self.av.data(\"D\", C.API_DATA_TEST), DataFrame)\n # self.assertIsInstance(self.av.data(\"D\", C.API_DATA_TEST), dict)\n\n\n # av_api_call tests\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_fx)\n mock_to_dataframe.return_value = self.df_fx\n\n av_api_call = self.av._av_api_call(self.fx_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_fx)\n\n av_api_call = self.av._av_api_call(self.fx_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx_daily(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_fx_daily)\n mock_to_dataframe.return_value = self.df_fx_daily\n\n av_api_call = self.av._av_api_call(self.fx_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx_daily_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_fx_daily)\n\n av_api_call = self.av._av_api_call(self.fx_daily_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n 
self.assertIsInstance(av_api_call(), dict)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx_intraday(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_fx_intraday)\n mock_to_dataframe.return_value = self.df_fx\n\n av_api_call = self.av._av_api_call(self.fx_intraday_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx_intraday_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_fx_intraday)\n\n av_api_call = self.av._av_api_call(self.fx_intraday_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx_monthly(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_fx_monthly)\n mock_to_dataframe.return_value = self.df_fx_monthly\n\n av_api_call = self.av._av_api_call(self.fx_monthly_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx_monthly_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_fx_monthly)\n\n av_api_call = self.av._av_api_call(self.fx_monthly_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx_weekly(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_fx_weekly)\n mock_to_dataframe.return_value = self.df_fx_weekly\n\n av_api_call = self.av._av_api_call(self.fx_weekly_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_fx_weekly_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_fx_weekly)\n\n av_api_call = self.av._av_api_call(self.fx_weekly_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_data(self, 
mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_data)\n mock_to_dataframe.return_value = self.df_data\n\n av_api_call = self.av._av_api_call(self.data_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_data_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n mock_requests_get.return_value = _mock_response(text_data=self.json_data)\n\n av_api_call = self.av._av_api_call(self.data_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_intraday(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_data)\n mock_to_dataframe.return_value = self.df_data\n\n av_api_call = self.av._av_api_call(self.intraday_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_intraday_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_data)\n\n av_api_call = self.av._av_api_call(self.intraday_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_indicator(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_indicator)\n mock_to_dataframe.return_value = self.df_indicator\n\n av_api_call = self.av._av_api_call(self.indicator_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_indicator_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_indicator)\n\n av_api_call = self.av._av_api_call(self.indicator_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_digital(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_digital)\n mock_to_dataframe.return_value = self.df_digital\n\n av_api_call = self.av._av_api_call(self.digital_parameters)\n\n 
self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_digital_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_digital)\n\n av_api_call = self.av._av_api_call(self.digital_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n# \n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_digital_rating(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_digital_rating)\n mock_to_dataframe.return_value = self.df_digital_rating\n\n av_api_call = self.av._av_api_call(self.digital_rating_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_digital_rating_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_digital_rating)\n\n av_api_call = self.av._av_api_call(self.digital_rating_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_global_quote(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_global_quote)\n mock_to_dataframe.return_value = self.df_global_quote\n\n av_api_call = self.av._av_api_call(self.global_quote_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_global_quote_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_global_quote)\n\n av_api_call = self.av._av_api_call(self.global_quote_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_overview(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_overview)\n mock_to_dataframe.return_value = self.df_overview\n\n av_api_call = self.av._av_api_call(self.overview_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n 
@patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_overview_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_overview)\n\n av_api_call = self.av._av_api_call(self.overview_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_overview(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_overview)\n mock_to_dataframe.return_value = self.df_overview\n\n av_api_call = self.av._av_api_call(self.overview_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n self.assertIsInstance(mock_to_dataframe(), DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_overview_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_overview)\n\n av_api_call = self.av._av_api_call(self.overview_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_balance_sheet(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_balance)\n mock_to_dataframe.return_value = self.df_balance\n\n av_api_call = self.av._av_api_call(self.balance_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n\n self.assertIsInstance(mock_to_dataframe(), list)\n self.assertIsInstance(mock_to_dataframe()[0], DataFrame)\n self.assertIsInstance(mock_to_dataframe()[1], DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_balance_sheet_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_balance)\n\n av_api_call = self.av._av_api_call(self.balance_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_income_statement(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_income)\n mock_to_dataframe.return_value = self.df_income\n\n av_api_call = self.av._av_api_call(self.income_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n\n self.assertIsInstance(mock_to_dataframe(), list)\n self.assertIsInstance(mock_to_dataframe()[0], DataFrame)\n self.assertIsInstance(mock_to_dataframe()[1], DataFrame)\n\n 
@patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_income_statement_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.json_income)\n\n av_api_call = self.av._av_api_call(self.income_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_cashflow(self, mock_requests_get, mock_to_dataframe):\n mock_requests_get.return_value = _mock_response(json_data=self.json_cashflow)\n mock_to_dataframe.return_value = self.df_cashflow\n\n av_api_call = self.av._av_api_call(self.cashflow_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 1)\n\n self.assertIsInstance(mock_to_dataframe(), list)\n self.assertIsInstance(mock_to_dataframe()[0], DataFrame)\n self.assertIsInstance(mock_to_dataframe()[1], DataFrame)\n\n @patch(\"alphaVantageAPI.alphavantage.AlphaVantage._to_dataframe\")\n @patch(\"alphaVantageAPI.alphavantage.requests.get\")\n def test_av_api_call_cashflow_csv(self, mock_requests_get, mock_to_dataframe):\n self.av.datatype = \"csv\"\n\n mock_requests_get.return_value = _mock_response(text_data=self.cashflow_parameters)\n\n av_api_call = self.av._av_api_call(self.cashflow_parameters)\n\n self.assertEqual(mock_requests_get.call_count, 1)\n self.assertEqual(mock_to_dataframe.call_count, 0)\n self.assertIsInstance(av_api_call(), dict)\n\n\n # save_df tests\n # @patch(\"alphaVantageAPI.alphavantage.AlphaVantage.last\")\n # @patch(\"alphaVantageAPI.alphavantage.DataFrame.to_csv\")\n # def test_save_df_to_csv(self, mock_to_csv, mock_last):\n # self.av.output = \"csv\"\n # mock_last.return_value = self.sector_parameters\n # mock_to_csv.return_result = True\n\n # self.av._save_df(self.sector_parameters[\"function\"], self.df_sectors)\n\n # self.assertEqual(mock_last.call_count, 1)\n # self.assertEqual(mock_to_csv.call_count, 1)\n\n\n # @patch(\"alphaVantageAPI.alphavantage.AlphaVantage.last\")\n # @patch(\"alphaVantageAPI.alphavantage.DataFrame.to_json\")\n # def test_save_df_to_json(self, mock_to_json, mock_last):\n # self.av.output = \"json\"\n # mock_last.return_value = self.sector_parameters\n # mock_to_json.return_result = True\n\n # self.av._save_df(self.sector_parameters[\"function\"], self.df_sectors)\n\n # self.assertEqual(mock_last.call_count, 1)\n # self.assertEqual(mock_to_json.call_count, 1)\n\n\n # @patch(\"alphaVantageAPI.alphavantage.AlphaVantage.last\")\n # @patch(\"alphaVantageAPI.alphavantage.DataFrame.to_pickle\")\n # def test_save_df_to_pickle(self, mock_to_pickle, mock_last):\n # self.av.output = \"pkl\"\n # mock_last.return_value = self.sector_parameters\n # mock_to_pickle.return_result = True\n\n # self.av._save_df(self.sector_parameters[\"function\"], self.df_sectors)\n\n # self.assertEqual(mock_last.call_count, 1)\n # self.assertEqual(mock_to_pickle.call_count, 1)\n\n\n # @patch(\"alphaVantageAPI.alphavantage.AlphaVantage.last\")\n # @patch(\"alphaVantageAPI.alphavantage.DataFrame.to_html\")\n # def test_save_df_to_html(self, mock_to_html, mock_last):\n # self.av.output = \"html\"\n # mock_last.return_value = self.sector_parameters\n # mock_to_html.return_result = True\n\n # 
self.av._save_df(self.sector_parameters[\"function\"], self.df_sectors)\n\n # self.assertEqual(mock_last.call_count, 1)\n # self.assertEqual(mock_to_html.call_count, 1)\n\n\n # @patch(\"alphaVantageAPI.alphavantage.AlphaVantage.last\")\n # @patch(\"alphaVantageAPI.alphavantage.Path.write_text\")\n # def test_save_df_to_txt(self, mock_write_text, mock_last):\n # self.av.output = \"txt\"\n # mock_last.return_value = self.sector_parameters\n # mock_write_text.return_result = True\n\n # self.av._save_df(self.sector_parameters[\"function\"], self.df_sectors)\n\n # self.assertEqual(mock_last.call_count, 1)\n # self.assertEqual(mock_write_text.call_count, 1)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
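The AlphaVantage test suite above repeatedly patches requests.get (and _to_dataframe) so that _av_api_call can be exercised without touching the network. A minimal, self-contained sketch of that mocking pattern is below; get_quote, the URL, and the returned values are hypothetical stand-ins, not AlphaVantage code, and the requests package is assumed to be installed.

from unittest.mock import MagicMock, patch
import requests

def get_quote(symbol):
    # hypothetical wrapper around an HTTP API, standing in for _av_api_call
    response = requests.get("https://example.invalid/quote", params={"symbol": symbol})
    return response.json()

def test_get_quote_uses_mocked_http():
    fake_response = MagicMock()
    fake_response.json.return_value = {"symbol": "MSFT", "price": 123.45}
    # patch the HTTP layer so no real request is made
    with patch("requests.get", return_value=fake_response) as mock_get:
        data = get_quote("MSFT")
    assert mock_get.call_count == 1
    assert data["price"] == 123.45

test_get_quote_uses_mocked_http()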
zplab/zplib
|
[
"dc789acd481df86e0bc99e137ceb05c196425d03",
"dc789acd481df86e0bc99e137ceb05c196425d03"
] |
[
"zplib/scalar_stats/smoothing.py",
"zplib/image/active_contour.py"
] |
[
"import numpy\n\ndef weighted_mean_and_std(x, w):\n \"\"\"Return the mean and standard deviation of the data points x, weighted by\n the weights in w (which do not need to sum to 1).\"\"\"\n w = numpy.array(w, dtype=float)\n w /= w.sum()\n x = numpy.asarray(x)\n weighted_mean = (w*x).sum()\n squared_diff = (x - weighted_mean)**2\n weighted_var = (w * squared_diff).sum()\n return weighted_mean, numpy.sqrt(weighted_var)\n\ndef weighted_mean(x, w):\n \"\"\"Return the mean of the data points x, weighted by the weights in w\n (which do not need to sum to 1).\"\"\"\n w = numpy.array(w, dtype=float)\n w /= w.sum()\n x = numpy.asarray(x)\n return (w*x).sum()\n\ndef _gaussian(x, mu=0, sigma=1):\n return (1/numpy.sqrt(2 * numpy.pi * sigma**2) * numpy.exp(-0.5 * ((numpy.asarray(x)-mu)/sigma)**2))\n\ndef gaussian_mean(x, y, p, std=1):\n \"\"\"Given a set of positions x where values y were observed, calculate\n the gaussian-weighted mean of those values at a set of new positions p,\n where the gaussian has a specied standard deviation.\n \"\"\"\n return numpy.array([weighted_mean(y, _gaussian(x, mu=pp, sigma=std)) for pp in p])\n\n\ndef savitzky_golay(data, kernel=11, order=4):\n \"\"\"Apply Savitzky-Golay smoothing to the input data.\n\n http://en.wikipedia.org/wiki/Savitzky-Golay_filter\n \"\"\"\n kernel = abs(int(kernel))\n order = abs(int(order))\n if kernel % 2 != 1 or kernel < 1:\n raise TypeError(\"kernel size must be a positive odd number, was: %d\" % kernel)\n if kernel < order + 2:\n raise TypeError(\"kernel is to small for the polynomals\\nshould be > order + 2\")\n order_range = range(order+1)\n half_window = (kernel-1) // 2\n m = numpy.linalg.pinv([[k**i for i in order_range] for k in range(-half_window, half_window+1)])[0]\n window_size = len(m)\n half_window = (window_size-1) // 2\n offsets = range(-half_window, half_window+1)\n offset_data = list(zip(offsets, m))\n smooth_data = list()\n data = numpy.concatenate((numpy.ones(half_window)*data[0], data, numpy.ones(half_window)*data[-1]))\n for i in range(half_window, len(data) - half_window):\n value = 0.0\n for offset, weight in offset_data:\n value += weight * data[i + offset]\n smooth_data.append(value)\n return numpy.array(smooth_data)\n\ndef lowess(x, y, f=2/3., iters=3, outlier_threshold=6, weights=None, degree=1):\n \"\"\"Apply LOWESS to fit a nonparametric regression curve to a scatterplot.\n\n http://en.wikipedia.org/wiki/Local_regression\n\n Parameters:\n x, y: 1-d arrays containing data points in x and y.\n f: smoothing parameter in range [0, 1]. Lower values = less smoothing.\n iter: number of robustifying iterations (after each of which outliers\n are detected and excluded). Larger numbers = more robustness, but\n slower run-time.\n outlier_threshold: data points that are further from the lowess estimate\n than outlier_threshold * numpy.median(numpy.abs(residuals)) are\n declared outliers.\n degree: degree of locally weighted fit. 
Generally 1 is fine, though for\n data with local minima/maxima, degree=2 may fit better.\n\n Returns: array of smoothed y-values for the input x-values.\n \"\"\"\n x = numpy.asarray(x)\n y = numpy.asarray(y)\n r = max(4, int(numpy.ceil(f*(len(x)-1))))\n # below hogs RAM for large input, without much speed gain.\n # h = [numpy.sort(numpy.abs(x - xv))[r] for xv in x]\n # w = numpy.clip(numpy.abs(numpy.subtract.outer(x, x) / h), 0, 1)\n # w = (1 - w**3)**3\n delta = 1\n max_dists = numpy.empty_like(x)\n if weights is None:\n weights = 1\n for it in range(iters):\n y_est = []\n for i, xv in enumerate(x): # for xv, wv in zip(x, w.T):\n x_dists = numpy.abs(x - xv)\n if it == 0:\n max_dist = numpy.partition(x_dists, r)[r]\n max_dists[i] = max_dist\n else:\n max_dist = max_dists[i]\n wv = numpy.clip(x_dists/max_dist, 0, 1)\n wv = (1 - wv**3)**3\n final_weights = delta * wv * weights\n if degree > 1:\n poly = numpy.poly1d(numpy.polyfit(x, y, degree, w=final_weights))\n y_est.append(poly(xv))\n else: # faster to hard-code weighted linear regression formula\n weighted_x = final_weights * x\n b1 = numpy.dot(final_weights, y)\n b2 = numpy.dot(weighted_x, y)\n A11 = numpy.sum(final_weights)\n A12 = A21 = numpy.sum(weighted_x)\n A22 = numpy.dot(weighted_x, x)\n # solve linear system A*beta = b where A = [[A11, A12], [A21, A22]] and b = [b1, b2]\n determinant = A11 * A22 - A12 * A21\n beta1 = (A22*b1 - A12*b2) / determinant\n beta2 = (A11*b2 - A21*b1) / determinant\n y_est.append(beta1 + beta2 * xv)\n y_est = numpy.array(y_est)\n residuals = y - y_est\n s = numpy.median(numpy.abs(residuals))\n if s > 0:\n delta = numpy.clip(residuals / (outlier_threshold * s), -1, 1)\n delta = (1 - delta**2)**2\n return numpy.array(y_est)\n\n\ndef robust_polyfit(x, y, degree=2, iters=3):\n \"\"\"Fit a polynomial to scattered data, robust to outliers.\n\n Parameters:\n x, y: 1-d arrays containing data points in x and y.\n degree: degree of the polynomial to fit.\n iter: number of robustifying iterations (after each of which outliers\n are detected and excluded). Larger numbers = more robustness, but\n slower run-time.\n\n Returns: polynomial coefficients, array of smoothed y-values for the input x-values.\n \"\"\"\n x, y = numpy.asarray(x), numpy.asarray(y)\n weights = numpy.ones(len(x), float) # delta in original formulation\n for _ in range(iters):\n cs = numpy.polynomial.polynomial.polyfit(x, y, degree, w=weights)\n y_est = numpy.polynomial.polynomial.polyval(x, cs)\n residuals = y - y_est\n s = numpy.median(numpy.abs(residuals))\n if s > 0:\n weights = (residuals / (6 * s)).clip(-1, 1)\n weights = (1 - weights**2)**2\n return cs, y_est\n",
"\"\"\"Geodesic Active Contours and Chan-Vese \"Active Contours Without Edges\"\n\nImplementation based on morphological variant of these algorithms per:\nMarquez-Neila P, Baumela L, & Alvarez L. (2014).\nA morphological approach to curvature-based evolution of curves and surfaces.\nIEEE Transactions on Pattern Analysis and Machine Intelligence, 36(1).\n\nThis is a much-optimized version of the demo code available here:\nhttps://github.com/pmneila/morphsnakes\nIn particular, a \"narrow-band\" approach is used whereby only pixels at the\nedges of the mask are examined / updated. This speeds the code up by at least\nseveral orders of magnitude compared to the naive approach.\n\nExample GAC usage:\n image # image containing a structure to segment\n bounds # boolean mask with False for any image regions from which the\n # segmented structure must be excluded. (Optional.)\n initial # boolean mask with initial segmentation\n\n edges = ndimage.gaussian_gradient_magnitude(image, sigma)\n strong_edges = edges > threshold\n edge_gradient = numpy.gradient(edges)\n balloon_direction = -1\n # negative balloon_direction values means \"deflate\" the initial mask toward\n # edges except where strong_edges == True. (NB: balloon_direction can also\n # be a mask of -1, 0, and 1 values to allow spatially-varying balloon forces.)\n gac = GACMorphology(mask=initial, advection_direction=edge_gradient,\n advection_mask=strong_edges, balloon_direction=balloon_direction,\n max_region_mask=bounds)\n\n stopper = StoppingCondition(gac, max_iterations=100)\n while stopper.should_continue():\n gac.balloon_force() # apply balloon force\n gac.advect() # move region edges in advection_direction\n gac.smooth() # smooth region boundaries.\n\nObviously, different schedules of the balloon force, advection, and smoothing\ncan be applied. To aid in this, each of the methods above take an 'iters'\nparameter to apply that many iterations of that step in a row. Also, initial\nwarm-up rounds with just advection or advection and smoothing may be helpful.\n\nThe smooth() method takes a 'depth' parameter that controls the spatial\nsmoothness of the curve. With depth=1, only very small jaggies are smoothed, but\nwith larger values, the curve is smoothed along larger spatial scales (rule of\nthumb: min radius of curvature after smoothing is on the order of depth*3 or so).\n\nAnother useful way to calculate the advection_direction parameter, instead of\nfrom the edge gradient (as above) is to use the edge_direction() function, which\ntakes a thresholded edge mask (Canny edges work great), and returns the distance\nfrom each pixel to the nearest edge, as well as the direction from each pixel\nto the nearest edge. This latter can be passed directly as the advection_direction\nparameter, which will dramatically increase the capture radius for distant edges,\nso a balloon_force parameter may not be needed.\n\nLast, for Canny edge images especially, consider the EdgeClaimingAdvection\nactive contour class. (See the documentation for that class.)\n\nExample ACWE usage:\n acwe = ACWEMorphology(mask=initial, image=image, max_region_mask=bounds)\n\n for i in range(iterations):\n acwe.acwe_step() # make inside and outside pixel means different.\n acwe.smooth() # smooth region boundaries\n\nThere is also a BinnedACWE class that uses a histogram of the image for the\ninside and outside regions, rather than just means. 
This can be useful if\nthe region to segment has very distinct brightness values from the background,\nbut is not overall brighter or darker on average. Note that it is best to\nreduce the input image to a small number of brightness values (16-64) before\nusing this class. The discretize_image() function is useful for this.\n\nLast: the ActiveContour class allows for both GAC and ACWE steps, if that's\nuseful. BinnedActiveContour, ActiveClaimingContour, and BinnedActiveClaimingContour\nsimilarly combine [Binned]ACWE and [EdgeClaiming]Advection functionality.\n\"\"\"\n\nimport numpy\nimport itertools\nimport collections\nfrom scipy import ndimage\n\nfrom . import neighborhood\n\nclass MaskNarrowBand:\n \"\"\"Track the inside and outside edge of a masked region, while allowing\n pixels from the inside edge to be moved outside and vice-versa.\n\n Base-class for fast morphological operations for region growing, shrinking,\n and reshaping.\n \"\"\"\n S = ndimage.generate_binary_structure(2, 2)\n\n def __init__(self, mask, max_region_mask=None):\n mask = mask.astype(bool)\n if max_region_mask is not None:\n mask = mask & max_region_mask\n self.max_region_mask = max_region_mask\n self.mask_neighborhood = neighborhood.make_neighborhood_view(mask > 0,\n pad_mode='constant', constant_values=0) # shape = image.shape + (3, 3)\n # make self.mask identical to mask, but be a view on the center pixels of mask_neighborhood\n self.mask = self.mask_neighborhood[:,:,1,1]\n self.indices = numpy.dstack(numpy.indices(mask.shape)).astype(numpy.int32) # shape = mask.shape + (2,)\n self.index_neighborhood = neighborhood.make_neighborhood_view(self.indices,\n pad_mode='constant', constant_values=-1) # shape = mask.shape + (3, 3, 2)\n inside_border_mask = mask ^ ndimage.binary_erosion(mask, self.S) # all True pixels with a False neighbor\n outside_border_mask = mask ^ ndimage.binary_dilation(mask, self.S) # all False pixels with a True neighbor\n self.inside_border_indices = self.indices[inside_border_mask] # shape = (inside_border_mask.sum(), 2)\n self.outside_border_indices = self.indices[outside_border_mask] # shape = (outside_border_mask.sum(), 2)\n # NB: to index a numpy array with these indices, must turn shape(num_indices, 2) array\n # into tuple of two num_indices-length arrays, a la:\n # self.mask[tuple(self.outside_border_indices.T)]\n self.changed = 0\n\n def _assert_invariants(self):\n \"\"\"Test whether the border masks and indices are in sync, and whether the\n borders are correct for the current mask.\"\"\"\n inside_border_mask = self.mask ^ ndimage.binary_erosion(self.mask, self.S)\n outside_border_mask = self.mask ^ ndimage.binary_dilation(self.mask, self.S)\n assert len(self.inside_border_indices) == inside_border_mask.sum()\n assert inside_border_mask[tuple(self.inside_border_indices.T)].sum() == len(self.inside_border_indices)\n assert len(self.outside_border_indices) == outside_border_mask.sum()\n assert outside_border_mask[tuple(self.outside_border_indices.T)].sum() == len(self.outside_border_indices)\n\n def _move_to_outside(self, to_outside):\n \"\"\"to_outside must be a boolean mask on the inside border pixels\n (in the order defined by inside_border_indices).\"\"\"\n self.inside_border_indices, self.outside_border_indices, added_idx, removed_indices = self._change_pixels(\n to_change=to_outside,\n old_border_indices=self.inside_border_indices,\n new_value=False,\n new_border_indices=self.outside_border_indices,\n some_match_old_value=numpy.any\n )\n return added_idx, removed_indices\n\n def 
_move_to_inside(self, to_inside):\n \"\"\"to_inside must be a boolean mask on the outside border pixels\n (in the order defined by outside_border_indices).\"\"\"\n self.outside_border_indices, self.inside_border_indices, added_idx, removed_indices = self._change_pixels(\n to_change=to_inside,\n old_border_indices=self.outside_border_indices,\n new_value=True,\n new_border_indices=self.inside_border_indices,\n some_match_old_value=_not_all\n )\n return added_idx, removed_indices\n\n def _change_pixels(self, to_change, old_border_indices, new_value, new_border_indices, some_match_old_value):\n change_indices = old_border_indices[to_change]\n # prevent changes outside of the max_region_mask\n if new_value is True and self.max_region_mask is not None:\n in_region_mask = self.max_region_mask[tuple(change_indices.T)]\n change_indices = change_indices[in_region_mask]\n to_change[to_change] = in_region_mask\n change_idx = tuple(change_indices.T)\n if len(change_indices) == 0:\n return old_border_indices, new_border_indices, change_idx, change_indices\n self.changed += len(change_indices)\n # Find out which neighbors of changing pixels have the new value.\n # If we did the below after changing the mask, we would also pick up the\n # center pixels, which have the new value.\n if new_value is True:\n new_valued_neighbors = self.mask_neighborhood[change_idx]\n else:\n new_valued_neighbors = ~self.mask_neighborhood[change_idx]\n # Now, update the mask and border indices\n # (1) Update changed pixels in the mask, and remove them from old_border_indices.\n self.mask[change_idx] = new_value\n old_border_indices = old_border_indices[~to_change]\n\n # (2) add old-valued neighbors of newly-changed pixels to old_border_indices,\n # and then make sure the indices don't contain duplicates.\n # (Duplicates appear both because the indices might already be in the list,\n # or because the neighborhoods overlap, so old-valued neighbors might be\n # mulitply identified from several changed pixels.)\n changed_neighborhood = self.mask_neighborhood[change_idx]\n changed_neighborhood_indices = self.index_neighborhood[change_idx]\n # Find out which neighbors of changing pixels have the old value.\n # If we did the below before changing the mask, we would also pick up the\n # center pixels, which had the old value\n if new_value is True:\n old_valued_neighbors = ~changed_neighborhood\n else:\n old_valued_neighbors = changed_neighborhood\n old_valued_neighbor_indices = changed_neighborhood_indices[old_valued_neighbors]\n if new_value is True:\n # Exclude neighbors that are actually out-of-bounds \"padding\" pixels.\n # Only relevant for changing values to True (i.e. moving pixels inside)\n # because the out-of-bounds mask area is \"False\" and will thus otherwise\n # get picked up.\n # Out of bounds indices are -1 in the index_neighborhood array.\n good_indices = ~(old_valued_neighbor_indices == -1).any(axis=1)\n old_valued_neighbor_indices = old_valued_neighbor_indices[good_indices]\n # NB: many of the old_valued_neighbors are already in the old_border_indices...\n # If we kept a mask of the old_border pixels, we could look these up and\n # exclude them, which would make _unique_indices() below a bit faster. 
However,\n # that requires a lot of bookkeeping, so it doesn't speed things up in every case.\n old_border_indices = numpy.concatenate([old_border_indices, old_valued_neighbor_indices])\n old_border_indices = _unique_indices(old_border_indices)\n\n # (3) Remove all pixels from new_border_indices that no longer have any\n # old-valued neighbors left.\n # Such pixels must be a new-valued neighbor of one of the pixels\n # that changed to the new value. We know that these pixels are necessarily\n # in the new_border already because they are next to a pixel that changed.\n new_valued_neighbors_indices = changed_neighborhood_indices[new_valued_neighbors]\n # need to unique-ify indices because neighborhoods overlap and may pick up the same pixels\n new_valued_neighbors_indices = _unique_indices(new_valued_neighbors_indices)\n neighbors_of_new_valued_neighbors = self.mask_neighborhood[tuple(new_valued_neighbors_indices.T)]\n no_old_valued_neighbors = ~some_match_old_value(neighbors_of_new_valued_neighbors, axis=(1,2))\n remove_from_new_border_indices = new_valued_neighbors_indices[no_old_valued_neighbors]\n new_border_indices = _diff_indices(new_border_indices, remove_from_new_border_indices)\n\n # (4) Add newly-changed pixels to new_border_indices if they have an old-valued neighbor.\n changed_with_old_neighbor = some_match_old_value(changed_neighborhood, axis=(1,2))\n add_to_new_border_indices = change_indices[changed_with_old_neighbor]\n new_border_indices = numpy.concatenate([new_border_indices, add_to_new_border_indices])\n return old_border_indices, new_border_indices, change_idx, remove_from_new_border_indices\n\nclass CurvatureMorphology(MaskNarrowBand):\n \"\"\"Implement basic erosion, dilation, and curvature-smoothing morphology\n steps (the latter from Marquez-Neila et al.) using a fast narrow-band approach.\n Base class for more sophisticated region-modifying steps: main function of interest\n is smooth().\n\n smooth_mask, if not None, is region where smoothing may be applied.\n \"\"\"\n def __init__(self, mask, smooth_mask=None, max_region_mask=None):\n super().__init__(mask, max_region_mask)\n self.smooth_mask = smooth_mask\n self._reset_smoothing()\n\n def _reset_smoothing(self):\n self._smooth_funcs = itertools.cycle([self._SIoIS, self._ISoSI])\n\n def dilate(self, iters=1):\n for _ in range(iters):\n border_mask = numpy.ones(len(self.outside_border_indices), dtype=bool)\n self._move_to_inside(border_mask)\n\n def erode(self, iters=1):\n for _ in range(iters):\n border_mask = numpy.ones(len(self.inside_border_indices), dtype=bool)\n self._move_to_outside(border_mask)\n\n def smooth(self, iters=1, depth=1):\n \"\"\"Apply 'iters' iterations of edge-curvature smoothing.\n 'depth' controls the spatial scale of the smoothing. With depth=1, only\n the highest-frequency edges get smoothed out. 
Larger depth values smooth\n lower-frequency structures.\"\"\"\n for _ in range(iters):\n smoother = next(self._smooth_funcs)\n smoother(depth)\n\n def _SI(self):\n idx, idx_mask = _masked_idx(self.smooth_mask, self.inside_border_indices)\n inside_border = self.mask_neighborhood[idx]\n on_a_line = ((inside_border[:,0,0] & inside_border[:,2,2]) |\n (inside_border[:,1,0] & inside_border[:,1,2]) |\n (inside_border[:,0,1] & inside_border[:,2,1]) |\n (inside_border[:,2,0] & inside_border[:,0,2]))\n self._move_to_outside(_unmask_idx(idx_mask, ~on_a_line))\n\n def _IS(self):\n idx, idx_mask = _masked_idx(self.smooth_mask, self.outside_border_indices)\n outside_border = ~self.mask_neighborhood[idx]\n on_a_line = ((outside_border[:,0,0] & outside_border[:,2,2]) |\n (outside_border[:,1,0] & outside_border[:,1,2]) |\n (outside_border[:,0,1] & outside_border[:,2,1]) |\n (outside_border[:,2,0] & outside_border[:,0,2]))\n self._move_to_inside(_unmask_idx(idx_mask, ~on_a_line))\n\n def _SIoIS(self, depth=1):\n for i in range(depth):\n self._IS()\n for i in range(depth):\n self._SI()\n\n def _ISoSI(self, depth=1):\n for i in range(depth):\n self._SI()\n for i in range(depth):\n self._IS()\n\nclass ACWE(CurvatureMorphology):\n def __init__(self, mask, image, acwe_mask=None, inside_bias=1,\n smooth_mask=None, max_region_mask=None):\n \"\"\"Class for Active Contours Without Edges region-growing.\n\n Relevant methods for region-growing are smooth() and acwe_step().\n\n Parameters:\n mask: mask containing the initial state of the region to evolve\n image: ndarray of same shape as mask containing image values. The\n difference in mean image value inside and outside the region will\n be maximized by acwe_step()\n acwe_mask: region where ACWE updates will be applied. (Inside /\n outside values will still be computed outside this region.)\n inside_bias: weight for comparing means of inside vs. outside pixels.\n Generally 1 works properly. Values < 1 make it \"easier\" to add\n pixels to the region, and values > 1 make it \"easier\" to remove\n pixels from the region. (Simplification of the \"lambda\"\n parameters from the ACWE literature.)\n smooth_mask: region in which smoothing may be applied.\n max_region_mask: mask beyond which the region may not grow. If\n provided, this mask will also represent the pixels over which\n the ACWE inside/outside means are computed.\n \"\"\"\n super().__init__(mask, smooth_mask, max_region_mask)\n # do work in _setup rather than __init__ to allow for complex multiple\n # inheritance from this class that super() alone can't handle. 
See\n # ActiveContour class.\n self._setup(image, acwe_mask, inside_bias)\n\n def _setup(self, image, acwe_mask, inside_bias):\n self.image = image\n assert self.image.shape == self.mask.shape\n self.acwe_mask = acwe_mask\n if acwe_mask is not None:\n assert self.image.shape == self.acwe_mask.shape\n self.inside_bias = inside_bias\n # note: self.mask is clipped to self.max_region_mask so the below works.\n self.inside_count = self.mask.sum()\n self.outside_count = numpy.product(self.mask[self.max_region_mask].shape) - self.inside_count\n self.inside_sum = self.image[self.mask].sum()\n self.outside_sum = self.image[self.max_region_mask].sum() - self.inside_sum\n\n def _assert_invariants(self):\n super()._assert_invariants()\n assert self.inside_count == self.mask.sum()\n assert self.outside_count == numpy.product(self.mask[self.max_region_mask].shape) - self.inside_count\n assert self.inside_sum == self.image[self.mask].sum()\n assert self.outside_sum == self.image[self.max_region_mask].sum() - self.inside_sum\n assert numpy.allclose(self.inside_sum/self.inside_count, self.image[self.mask].mean())\n if self.max_region_mask is None:\n assert numpy.allclose(self.outside_sum/self.outside_count, self.image[~self.mask].mean())\n else:\n assert numpy.allclose(self.outside_sum/self.outside_count, self.image[self.max_region_mask & ~self.mask].mean())\n\n def _image_sum_count(self, changed_idx):\n count = len(changed_idx[0])\n image_sum = self.image[changed_idx].sum()\n return count, image_sum\n\n def _move_to_outside(self, to_outside):\n \"\"\"to_outside must be a boolean mask on the inside border pixels\n (in the order defined by inside_border_indices).\"\"\"\n added_idx, removed_indices = super()._move_to_outside(to_outside)\n count, image_sum = self._image_sum_count(added_idx)\n self.inside_count -= count\n self.outside_count += count\n self.inside_sum -= image_sum\n self.outside_sum += image_sum\n return added_idx, removed_indices\n\n def _move_to_inside(self, to_inside):\n \"\"\"to_inside must be a boolean mask on the outside border pixels\n (in the order defined by outside_border_indices).\"\"\"\n added_idx, removed_indices = super()._move_to_inside(to_inside)\n count, image_sum = self._image_sum_count(added_idx)\n self.outside_count -= count\n self.inside_count += count\n self.outside_sum -= image_sum\n self.inside_sum += image_sum\n return added_idx, removed_indices\n\n def acwe_step(self, iters=1):\n \"\"\"Apply 'iters' iterations of the Active Contours Without Edges step,\n wherein the region inside the mask is made to have a mean value as different\n from the region outside the mask as possible.\"\"\"\n for _ in range(iters):\n if self.inside_count == 0 or self.outside_count == 0:\n return\n inside_mean = self.inside_sum / self.inside_count\n outside_mean = self.outside_sum / self.outside_count\n self._acwe_step(inside_mean, outside_mean, self.inside_bias,\n self.inside_border_indices, self._move_to_outside)\n self._acwe_step(outside_mean, inside_mean, 1/self.inside_bias,\n self.outside_border_indices, self._move_to_inside)\n\n def _acwe_step(self, mean_from, mean_to, from_bias, border_indices, move_operation):\n idx, idx_mask = _masked_idx(self.acwe_mask, border_indices)\n border_values = self.image[idx]\n to_move = from_bias*(border_values - mean_from)**2 > (border_values - mean_to)**2\n move_operation(_unmask_idx(idx_mask, to_move))\n\nclass BinnedACWE(CurvatureMorphology):\n def __init__(self, mask, image, acwe_mask=None, inside_bias=1,\n smooth_mask=None, max_region_mask=None):\n 
\"\"\"Class for Active Contours Without Edges region-growing, using image\n histograms rather than simply means.\n\n Relevant methods for region-growing are smooth(), and acwe_step().\n\n Note that it is best to reduce the input image to a small number of\n brightness values (e.g. 16-64) before using this class.\n The discretize_image() function is useful for this.\n\n Parameters:\n mask: mask containing the initial state of the region to evolve\n image: ndarray of same shape as mask containing image values. The\n difference in image histograms inside and outside the region will\n be maximized by acwe_step()\n acwe_mask: region where ACWE updates will be applied. (Inside /\n outside values will still be computed outside this region.)\n inside_bias: weight for comparing means of inside vs. outside pixels.\n Generally 1 works properly. Values < 1 make it \"easier\" to add\n pixels to the region, and values > 1 make it \"easier\" to remove\n pixels from the region. (Simplification of the \"lambda\"\n parameters from the ACWE literature.)\n smooth_mask: region in which smoothing may be applied.\n max_region_mask: mask beyond which the region may not grow. If\n provided, this mask will also represent the pixels over which\n the ACWE inside/outside means are computed.\n \"\"\"\n super().__init__(mask, smooth_mask, max_region_mask)\n # do work in _setup rather than __init__ to allow for complex multiple\n # inheritance from this class that super() alone can't handle. See\n # ActiveContour class.\n self._setup(image, acwe_mask, inside_bias)\n\n def _setup(self, image, acwe_mask, inside_bias):\n if image.dtype == bool:\n image = image.astype(numpy.uint8)\n self.image = image\n assert self.image.shape == self.mask.shape\n self.acwe_mask = acwe_mask\n if acwe_mask is not None:\n assert self.image.shape == self.acwe_mask.shape\n # note: self.mask is clipped to self.max_region_mask so the below works.\n self.inside_count = self.mask.sum()\n self.inside_bias = inside_bias\n self.outside_count = numpy.product(self.mask[self.max_region_mask].shape) - self.inside_count\n bincounts = numpy.bincount(self.image[self.max_region_mask].flat)\n self.bins = len(bincounts)\n self.inside_bincounts = self._bincount(self.mask)\n self.outside_bincounts = bincounts - self.inside_bincounts\n\n def _bincount(self, index_exp):\n return numpy.bincount(self.image[index_exp], minlength=self.bins)\n\n def _assert_invariants(self):\n super()._assert_invariants()\n assert self.inside_count == self.mask.sum()\n assert self.outside_count == numpy.product(self.mask[self.max_region_mask].shape) - self.inside_count\n assert (self.inside_bincounts == self._bincount(self.mask)).all()\n assert (self.outside_bincounts == self._bincount(self.max_region_mask) - self.inside_bincounts).all()\n if self.max_region_mask is not None:\n assert (self.outside_histogram == self._bincount(self.max_region_mask & ~self.mask)).all()\n\n def _image_bincount_count(self, changed_idx):\n count = len(changed_idx[0])\n bincounts = self._bincount(changed_idx)\n return count, bincounts\n\n def _move_to_outside(self, to_outside):\n \"\"\"to_outside must be a boolean mask on the inside border pixels\n (in the order defined by inside_border_indices).\"\"\"\n added_idx, removed_indices = super()._move_to_outside(to_outside)\n count, bincounts = self._image_bincount_count(added_idx)\n self.inside_count -= count\n self.outside_count += count\n self.inside_bincounts -= bincounts\n self.outside_bincounts += bincounts\n return added_idx, removed_indices\n\n def 
_move_to_inside(self, to_inside):\n \"\"\"to_inside must be a boolean mask on the outside border pixels\n (in the order defined by outside_border_indices).\"\"\"\n added_idx, removed_indices = super()._move_to_inside(to_inside)\n count, bincounts = self._image_bincount_count(added_idx)\n self.outside_count -= count\n self.inside_count += count\n self.outside_bincounts -= bincounts\n self.inside_bincounts += bincounts\n return added_idx, removed_indices\n\n def acwe_step(self, iters=1):\n \"\"\"Apply 'iters' iterations of the Active Contours Without Edges step,\n wherein the region inside the mask is made to have a mean value as different\n from the region outside the mask as possible.\"\"\"\n for _ in range(iters):\n if self.inside_count == 0 or self.outside_count == 0:\n return\n inside_rate = self.inside_bincounts / self.inside_count\n outside_rate = self.outside_bincounts / self.outside_count\n # small values of inside bias mean it is easier to move/stay inside\n self._acwe_step(self.inside_bias * outside_rate > inside_rate,\n self.inside_border_indices, self._move_to_outside)\n self._acwe_step(inside_rate > self.inside_bias * outside_rate,\n self.outside_border_indices, self._move_to_inside)\n\n def _acwe_step(self, move_bins, border_indices, move_operation):\n idx, idx_mask = _masked_idx(self.acwe_mask, border_indices)\n border_values = self.image[idx]\n to_move = move_bins[border_values]\n move_operation(_unmask_idx(idx_mask, to_move))\n\nclass BalloonForceMorphology(CurvatureMorphology):\n \"\"\"Basic morphology operations plus spatially-varying balloon-force operation.\n Base-class to add balloon forces to more complex region-growing steps;\n rarely useful directly.\n \"\"\"\n def __init__(self, mask, balloon_direction, smooth_mask=None, max_region_mask=None):\n \"\"\"balloon_direction: (-1, 0, 1), or ndarray with same shape as 'mask'\n containing those values.\"\"\"\n super().__init__(mask, smooth_mask, max_region_mask)\n # do work in _setup rather than __init__ to allow for complex multiple\n # inheritance from this class that super() alone can't handle. See\n # ActiveContour class.\n self._setup(balloon_direction)\n\n def _setup(self, balloon_direction):\n if numpy.isscalar(balloon_direction):\n if balloon_direction == 0:\n self.balloon_direction = None\n else:\n self.balloon_direction = numpy.zeros(self.mask.shape, dtype=numpy.int8)\n self.balloon_direction += balloon_direction\n else:\n self.balloon_direction = balloon_direction.copy() # may get changed internally by subclasses\n\n def balloon_force(self, iters=1):\n \"\"\"Apply 'iters' iterations of balloon force region expansion / shrinkage.\"\"\"\n if self.balloon_direction is None:\n return\n for _ in range(iters):\n to_erode = self.balloon_direction[tuple(self.inside_border_indices.T)] < 0\n self._move_to_outside(to_erode)\n to_dilate = self.balloon_direction[tuple(self.outside_border_indices.T)] > 0\n self._move_to_inside(to_dilate)\n\nclass GAC(BalloonForceMorphology):\n def __init__(self, mask, advection_direction, advection_mask=None,\n balloon_direction=0, smooth_mask=None, max_region_mask=None):\n \"\"\"Class for Geodesic Active Contours region-growing.\n\n Relevant methods for region-growing are smooth(), balloon_force(),\n and advect().\n\n Parameters:\n mask: mask containing the initial state of the region to evolve\n advection_direction: list of two arrays providing the x- and y-\n coordinates of the direction that the region edge should move\n in at any given point. 
(Only the sign of the direction matters.)\n The gradient of an edge-magnitude image works nicely here, as\n does the result of the edge_direction() function in this module\n when applied to a thresholded edge-magnitude image.\n advection_mask: boolean mask specifying where edge advection should\n be applied (versus balloon forces -- it makes no sense to try\n to apply both in the same location). If no advection_mask is\n provided, but a balloon_direction map is given, assume that\n advection is to be applied wherever the balloon_direction is\n 0. If a scalar, non-zero balloon_direction is given, and no\n advection_mask is provided, then there will be no edge\n advection.\n balloon_direction: scalar balloon force direction (-1, 0, 1) or\n image map of same values. If advection_mask is provided,\n balloon_direction will be zeroed out in regions of where\n advection is allowed.\n smooth_mask: region in which smoothing may be applied.\n max_region_mask: mask beyond which the region may not grow.\n \"\"\"\n CurvatureMorphology.__init__(self, mask, smooth_mask, max_region_mask)\n # do work in _setup rather than __init__ to allow for complex multiple\n # inheritance from this class that super() alone can't handle. See\n # ActiveContour class.\n self._setup(balloon_direction, advection_direction, advection_mask)\n\n def _setup(self, balloon_direction, advection_direction, advection_mask):\n BalloonForceMorphology._setup(self, balloon_direction)\n self.adv_dir_x, self.adv_dir_y = advection_direction\n # None balloon direction means no balloon force was asked for.\n if advection_mask is None:\n if self.balloon_direction is not None:\n self.advection_mask = self.balloon_direction == 0\n else:\n self.advection_mask = None\n else:\n self.advection_mask = advection_mask\n if self.balloon_direction is not None:\n self.balloon_direction[advection_mask] = 0\n\n def advect(self, iters=1):\n \"\"\"Apply 'iters' iterations of edge advection, whereby the region edges\n are moved in the direction specified by advection_direction.\"\"\"\n for _ in range(iters):\n # Move pixels on the inside border to the outside if advection*gradient sum > 0 (see _advect for interpretation of sum)\n self._advect(self.inside_border_indices, numpy.greater, self._move_to_outside)\n # Move pixels on the outside border to the inside if advection*gradient sum < 0\n self._advect(self.outside_border_indices, numpy.less, self._move_to_inside)\n\n def _advect(self, border_indices, criterion, move_operation):\n idx, idx_mask = _masked_idx(self.advection_mask, border_indices)\n neighbors = self.mask_neighborhood[idx].astype(numpy.int8)\n dx = neighbors[:,2,1] - neighbors[:,0,1]\n dy = neighbors[:,1,2] - neighbors[:,1,0]\n adv_dx = self.adv_dir_x[idx]\n adv_dy = self.adv_dir_y[idx]\n # positive gradient => outside-to-inside in that dimension\n # positive advection direction => edge should move in the positive direction\n # So: + gradient / + advection = move pixels outside\n # + gradient / - advection or - gradient / + advection = move pixels inside\n # Tricky case: x and y disagree = go in direction with largest abs advection direction\n # To find this, see if sum of advection and gradient in each direction is > 0\n # (move pixels outside) or > 0 (move pixels inside).\n to_move = criterion(dx * adv_dx + dy * adv_dy, 0)\n move_operation(_unmask_idx(idx_mask, to_move))\n\nclass EdgeClaimingAdvection(CurvatureMorphology):\n def __init__(self, mask, edge_mask, distance_exponent=3, force_min=0.01, smooth_mask=None, max_region_mask=None):\n 
\"\"\"Class for growing a region toward edges, such that only the edges\n that are not already at the region border influence further region\n growth.\n\n Given a set of image edges (ideally one pixel wide, as produced by\n Canny filtering), a region is grown toward those edges. The direction\n of growth at a region border pixel is determined by the sum of \"forces\",\n where each edge pixel exerts a force in inverse proportion to its\n distance from the region border pixel. This allows the region to move\n over large distances to find an edge.\n\n Only region border pixels that are not atop an edge pixel are free to\n have move. Only edge pixels not \"captured\" by a region border are free\n to exert forces on movable region border pixels. This capturing\n mechaimsm means that gaps in the edges will not cause region borders to\n \"flow in\" toward other edges in the image that already have a different\n part of the border atop them. Instead, gaps in the edges will be bridged\n smoothly.\n\n Parameters:\n mask: mask containing the initial state of the region to evolve.\n edge_mask: mask containing the positions of the edges to occupy.\n Thinned edges (such as from Canny edge detection) are best.\n distance_exponent: the \"force\" on any region border pixel exerted\n by any edge pixel is 1/d**distance_exponent, where d is the\n distance in pixels between the edge pixel and the region border\n pixel. A larger value increases the influence of nearby\n uncaptured edge pixels over more distant uncaptured edge pixels.\n force_min: forces less than this minimum value will cause region\n borders to move. Important to prevent a few distant, unclaimed\n pixels from influencing borders that are bridging gaps.\n smooth_mask: region in which smoothing may be applied.\n max_region_mask: mask beyond which the region may not grow.\n \"\"\"\n super().__init__(mask, smooth_mask, max_region_mask)\n # do work in _setup rather than __init__ to allow for complex multiple\n # inheritance from this class that super() alone can't handle. 
See\n # ActiveContour class.\n self._setup(edge_mask, distance_exponent, force_min)\n\n def _setup(self, edge_mask, distance_exponent, force_min):\n # Edges are \"captured\" iff there is an inside-border pixel on the edge.\n # Non-captured edges can generate advection forces.\n # Inside border pixels are free to advect iff they are not atop an edge.\n # Outside border pixels are free to advect iff they are (atop an edge\n # OR not adjacent a captured edge).\n self.edge_mask = edge_mask > 0\n # make an exponent to apply to the squared distance\n self.inverse_squared_distance_exponent = -distance_exponent/2\n self.force_min = force_min\n self.inside_free = ~self.edge_mask\n self.outside_free_neighborhood = neighborhood.make_neighborhood_view(numpy.zeros_like(self.edge_mask),\n pad_mode='constant', constant_values=0) # shape = edge_mask.shape + (3, 3)\n self.outside_free = self.outside_free_neighborhood[:,:,1,1]\n self.edge_indices = self.indices[self.edge_mask]\n self.noncaptured = numpy.zeros_like(self.edge_mask)\n self.update_captured()\n\n def update_captured(self):\n idx_mask = self.edge_mask[tuple(self.inside_border_indices.T)]\n captured_indices = self.inside_border_indices[idx_mask]\n captured_idx = tuple(captured_indices.T)\n self.captured_idx = captured_idx\n self.outside_free[:] = True\n self.outside_free[captured_idx] = False\n neighbor_indices = self.index_neighborhood[captured_idx] # shape = (m, 3, 3, 2)\n all_neighbors = neighbor_indices.reshape((numpy.product(neighbor_indices.shape[:-1]), 2)) # shape = (m*3*3, 2)\n all_idx = tuple(all_neighbors.T)\n # now erode to get pixels non-adjacent to captured edges\n self.outside_free[all_idx] = self.outside_free_neighborhood[all_idx].all()\n # Technically, we should set outside_free[captured_idx] to True,\n # but it doesn't matter because we will never look up those locations.\n # the outside_free array is only used to look at the outside_border_indices\n # positions, which are non-overlapping with inside_border_indices.\n # Now free outside pixels atop an edge:\n self.outside_free[tuple(self.edge_indices.T)] = True\n self.noncaptured_indices = _diff_indices(self.edge_indices, captured_indices)\n self.noncaptured[:] = False\n self.noncaptured[tuple(self.noncaptured_indices.T)] = True\n\n def advection_forces(self, border_indices, border_idx, mask_dx, mask_dy):\n \"\"\"border_indices: shape (n, 2) list of indices\"\"\"\n edge_indices = self.noncaptured_indices # shape (m, 2)\n on_edge = self.noncaptured[border_idx] # shape (n,)\n dx = numpy.subtract.outer(edge_indices[:,0], border_indices[:,0]) # shape (m, n)\n dy = numpy.subtract.outer(edge_indices[:,1], border_indices[:,1])\n square_dist = dx**2 + dy**2\n square_dist[:,on_edge] = 1\n weighting = square_dist**self.inverse_squared_distance_exponent\n fx = (dx * weighting).sum(axis=0) # shape (n,)\n fx[on_edge] = -mask_dx[on_edge]\n fx[numpy.absolute(fx) < self.force_min] = 0\n fy = (dy * weighting).sum(axis=0)\n fy[on_edge] = -mask_dy[on_edge]\n fy[numpy.absolute(fy) < self.force_min] = 0\n return fx, fy\n\n def advect(self, iters=1):\n \"\"\"Apply 'iters' iterations of edge advection, whereby uncaptured\n region edges are moved towrad uncaptured pixels in the edge_mask.\"\"\"\n for _ in range(iters):\n self.update_captured()\n # Move pixels on the inside border to the outside if advection*gradient sum > 0 (see _advect for interpretation of sum)\n # and if they are not on an edge\n self._advect(self.inside_free, self.inside_border_indices, numpy.greater, self._move_to_outside)\n # Move 
pixels on the outside border to the inside if advection*gradient sum < 0\n # and if they arte not adjacent to a captured edge\n self._advect(self.outside_free, self.outside_border_indices, numpy.less, self._move_to_inside)\n\n def _advect(self, mask, border_indices, criterion, move_operation):\n idx_mask = mask[tuple(border_indices.T)]\n border_indices = border_indices[idx_mask]\n border_idx = tuple(border_indices.T)\n neighbors = self.mask_neighborhood[border_idx].astype(numpy.int8)\n dx = neighbors[:,2,1] - neighbors[:,0,1]\n dy = neighbors[:,1,2] - neighbors[:,1,0]\n adv_dx, adv_dy = self.advection_forces(border_indices, border_idx, dx, dy)\n # positive gradient => outside-to-inside in that dimension\n # positive advection direction => edge should move in the positive direction\n # So: + gradient / + advection = move pixels outside\n # + gradient / - advection or - gradient / + advection = move pixels inside\n # Tricky case: x and y disagree = go in direction with largest abs advection direction\n # To find this, see if sum of advection and gradient in each direction is > 0\n # (move pixels outside) or > 0 (move pixels inside).\n to_move = criterion(dx * adv_dx + dy * adv_dy, 0)\n move_operation(_unmask_idx(idx_mask, to_move))\n\n\nclass ActiveContour(GAC, ACWE):\n def __init__(self, mask, image, advection_direction, acwe_mask=None,\n inside_bias=1, advection_mask=None, balloon_direction=0,\n smooth_mask=None, max_region_mask=None):\n \"\"\"See documentation for GAC and ACWE for parameters.\"\"\"\n CurvatureMorphology.__init__(self, mask, smooth_mask, max_region_mask)\n GAC._setup(self, balloon_direction, advection_direction, advection_mask)\n ACWE._setup(self, image, acwe_mask, inside_bias)\n\nclass BinnedActiveContour(GAC, BinnedACWE):\n def __init__(self, mask, image, advection_direction, acwe_mask=None,\n inside_bias=1, advection_mask=None, balloon_direction=0,\n smooth_mask=None, max_region_mask=None):\n \"\"\"See documentation for GAC and BinnedACWE for parameters.\"\"\"\n CurvatureMorphology.__init__(self, mask, smooth_mask, max_region_mask)\n GAC._setup(self, balloon_direction, advection_direction, advection_mask)\n BinnedACWE._setup(self, image, acwe_mask, inside_bias)\n\nclass ActiveClaimingContour(EdgeClaimingAdvection, ACWE):\n def __init__(self, mask, image, edge_mask, force_min=0.01, acwe_mask=None,\n inside_bias=1, smooth_mask=None, max_region_mask=None):\n \"\"\"See documentation for EdgeClaimingAdvection and ACWE for parameters.\"\"\"\n CurvatureMorphology.__init__(self, mask, smooth_mask, max_region_mask)\n EdgeClaimingAdvection._setup(self, edge_mask, force_min)\n ACWE._setup(self, image, acwe_mask, inside_bias)\n\nclass BinnedActiveClaimingContour(EdgeClaimingAdvection, BinnedACWE):\n def __init__(self, mask, image, edge_mask, distance_exponent=3, force_min=0.01,\n acwe_mask=None, inside_bias=1, smooth_mask=None, max_region_mask=None):\n \"\"\"See documentation for EdgeClaimingAdvection and BinnedACWE for parameters.\"\"\"\n CurvatureMorphology.__init__(self, mask, smooth_mask, max_region_mask)\n EdgeClaimingAdvection._setup(self, edge_mask, distance_exponent, force_min)\n BinnedACWE._setup(self, image, acwe_mask, inside_bias)\n\n\nclass StoppingCondition:\n def __init__(self, ac, max_iterations, sample_every=5, changed_min=5, cycle_max=6):\n \"\"\"Class to simplify deciding when to stop active contour iteration,\n based on total number of iterations and changes to the mask.\n\n Usage example:\n ac = ACWE(mask, image)\n stopper = StoppingCondition(ac, 
max_iterations=50)\n while stopper.should_continue():\n ac.acwe_step()\n ac.smooth()\n\n Parameters:\n ac: active contour object of any type\n max_iterations: total number of iterations permitted\n sample_every: period at which the mask is examined to determine if\n any pixels have changed. Doing this every iteration can be slow,\n so this parameter allows trading off between stopping sooner and\n iterating faster.\n changed_min: if fewer than this number of pixels have changed, stop\n iteration.\n cycle_max: if the number of changed pixels is constant this many\n times in a row, stop iteration.\n \"\"\"\n self.ac = ac\n self.max_iterations = max_iterations\n self.sample_every = sample_every\n self.changed_min = changed_min\n self.cycle_max = cycle_max\n self.reset()\n\n def reset(self):\n self.recent_changes = collections.deque(maxlen=self.cycle_max-1)\n self.i = 0\n\n def should_continue(self):\n if self.i == self.max_iterations:\n return False\n if self.i % self.sample_every == 0:\n if self.i > 0:\n changed = (self.ac.mask != self.old_mask).sum()\n if changed < self.changed_min:\n return False\n if (len(self.recent_changes) == self.recent_changes.maxlen and\n all(prev == changed for prev in self.recent_changes)):\n return False\n self.recent_changes.append(changed)\n self.old_mask = self.ac.mask.copy()\n self.i += 1\n return True\n\ndef edge_direction(edge_mask):\n \"\"\"Given an edge mask, return the distance from each pixel to the mask,\n and the vector from each pixel to the nearest pixel in the mask.\n\n Parameter:\n edge_mask: boolean array that is True where image edges are.\n\n Returns: distances, nearest_edge\n distances: array same shape as edge_mask, containing the distance from\n every non-edge-mask pixel to the nearest pixel in the edge mask.\n nearest_edge: arary of shape (2,)+edge_mask.shape, containing the x and\n y coordinates of the vector from each non-edge pixel to the nearest\n edge pixel.\n \"\"\"\n distances, nearest_edge = ndimage.distance_transform_cdt(~edge_mask,\n return_distances=True, return_indices=True)\n nearest_edge = nearest_edge - numpy.indices(edge_mask.shape)\n nearest_edge[:, edge_mask] = 0\n return distances, nearest_edge\n\ndef discretize_image(image, n_levels):\n \"\"\"Given an image, reduce the number of distinct intensity levels.\n\n Parameters:\n image: ndarray of any type\n n_levels: number of intensity levels in the output image. 
Must be < 2**16.\n \"\"\"\n assert n_levels < 2**16\n max = image.max()\n min = image.min()\n left_edges = numpy.linspace(min, max, n_levels)\n discretized = numpy.digitize(image, left_edges) - 1\n if n_levels > 256:\n return discretized.astype(numpy.uint8)\n else:\n return discretized.astype(numpy.uint16)\n\ndef _diff_indices(indices, to_remove):\n \"\"\"Given two arrays of shape (n,2) containing x,y indices, remove those in\n the second array from the first array.\n \"\"\"\n assert indices.flags.c_contiguous\n assert to_remove.flags.c_contiguous\n assert indices.dtype == to_remove.dtype\n dtype = numpy.dtype('S'+str(indices.itemsize*2)) # treat (x,y) indices as binary data instead of pairs of ints\n remaining = numpy.setdiff1d(indices.view(dtype), to_remove.view(dtype), assume_unique=True)\n return remaining.view(indices.dtype).reshape((-1, 2))\n\ndef _unique_indices(indices):\n \"\"\"Given an array of shape (n,2) containing x,y indices, return only the\n unique indices from that array.\n \"\"\"\n assert indices.flags.c_contiguous\n dtype = numpy.dtype('S'+str(indices.itemsize*2)) # treat (x,y) indices as binary data instead of pairs of ints\n unique = numpy.unique(indices.view(dtype))\n return unique.view(indices.dtype).reshape((-1, 2))\n\ndef _not_all(array, axis):\n return ~numpy.all(array, axis)\n\ndef _masked_idx(mask, indices):\n \"\"\"Return index expression for given indices, only if mask is also true there.\n\n Parameters:\n mask: True/False mask\n indices: indices into array, of shape (n, mask.ndim)\n Returns:\n idx: numpy index expression for selected indices\n idx_mask: mask of same length as indices, indicating which indices had true\n values in the mask\n \"\"\"\n if mask is None:\n idx_mask = None\n else:\n idx_mask = mask[tuple(indices.T)]\n indices = indices[idx_mask]\n idx = tuple(indices.T)\n return idx, idx_mask\n\ndef _unmask_idx(idx_mask, to_change):\n \"\"\"Return mask over all indices originally passed to _masked_idx, marked\n False where indices were originally masked out or where to_change parameter\n is False.\n\n Parameters:\n idx_mask: as returned by _masked_idx\n to_change: mask over True values in idx_mask\n \"\"\"\n if idx_mask is None:\n return to_change\n else:\n idx_mask[idx_mask] = to_change\n return idx_mask\n"
] |
[
[
"numpy.partition",
"numpy.dot",
"numpy.polyfit",
"numpy.sqrt",
"numpy.abs",
"numpy.clip",
"numpy.asarray",
"numpy.empty_like",
"numpy.polynomial.polynomial.polyfit",
"numpy.ones",
"numpy.polynomial.polynomial.polyval",
"numpy.array",
"numpy.sum"
],
[
"scipy.ndimage.binary_erosion",
"numpy.product",
"numpy.absolute",
"numpy.linspace",
"scipy.ndimage.generate_binary_structure",
"numpy.indices",
"numpy.concatenate",
"numpy.subtract.outer",
"numpy.all",
"numpy.zeros_like",
"numpy.bincount",
"numpy.isscalar",
"scipy.ndimage.binary_dilation",
"numpy.digitize",
"scipy.ndimage.distance_transform_cdt",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
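A minimal usage sketch for the active-contour code in the row above, following the loop given in the StoppingCondition docstring. The module name `active_contour` and the synthetic disk image are assumptions made for illustration; the row does not show the file name, and the module's `neighborhood` helper must be importable alongside it.

import numpy

import active_contour  # assumed module name for the code cell above

# Synthetic test image: a bright disk on a dark background, plus mild noise.
yy, xx = numpy.indices((128, 128))
disk = (xx - 64)**2 + (yy - 64)**2 < 30**2
image = disk.astype(float) + numpy.random.normal(0.0, 0.1, size=disk.shape)

# Undersized seed mask inside the disk; ACWE grows it until the inside and
# outside mean brightness values are as different as possible.
seed = (xx - 64)**2 + (yy - 64)**2 < 10**2

ac = active_contour.ACWE(seed, image)
stopper = active_contour.StoppingCondition(ac, max_iterations=100)
while stopper.should_continue():
    ac.acwe_step()  # move border pixels toward the better-matching mean
    ac.smooth()     # curvature smoothing keeps the border regular

segmented = ac.mask  # boolean mask of the final region

Because MaskNarrowBand only ever updates the inside/outside border indices, each acwe_step and smooth call costs roughly time proportional to the border length rather than the image size, which is why the loop above stays cheap even for large images.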
alipsgh/deep-mix-nets
|
[
"3c60897687046523d58a321ca0f7cd69dbcf78a1"
] |
[
"models/fasttext.py"
] |
[
"\nimport torch\n\nfrom models.deep_mix_net import DeepSeqNet\nfrom torch import nn\n\n\nclass FastText(DeepSeqNet):\n\n def __init__(self, vocab_size, embeddings, embedding_size, hidden_layer_size,\n tab_input_dim, linear_layers_dim, output_dim, dropout_rate, optimizer=\"adam\", learning_rate=0.01):\n\n super(FastText, self).__init__()\n\n self.output_dim = output_dim\n self.dropout_rate = dropout_rate\n\n # ==========\n # FastText\n # ==========\n\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.hidden_layer_size = hidden_layer_size\n\n self.embeddings = nn.Embedding(self.vocab_size, self.embedding_size)\n self.embeddings.weight = nn.Parameter(embeddings, requires_grad=False)\n self.embed_fc_layer = nn.Linear(self.embedding_size, self.hidden_layer_size)\n\n # ==============================\n # Feed Forward Neural Networks\n # ==============================\n self.linear_layers = nn.ModuleList()\n self.activation_layer = nn.ReLU()\n\n for i, hidden_dim in enumerate(linear_layers_dim):\n if i == 0:\n self.linear_layers.append(nn.Linear(tab_input_dim, hidden_dim))\n else:\n self.linear_layers.append(nn.Linear(self.linear_layers[-1].out_features, hidden_dim))\n\n self.dropout = nn.Dropout(self.dropout_rate)\n self.fc = nn.Linear(self.hidden_layer_size + self.linear_layers[-1].out_features, self.output_dim)\n self.softmax = nn.Softmax(dim=1)\n\n self.optimizer, self.scheduler, self.criterion = None, None, None\n self._compile(optimizer, learning_rate)\n\n if torch.cuda.is_available():\n self.cuda()\n\n def txt_net_forward(self, x_txt):\n embedded_sequence = self.embeddings(x_txt)\n feature_vector = self.embed_fc_layer(embedded_sequence.mean(1))\n return feature_vector\n\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.Parameter",
"torch.nn.ModuleList",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
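A standalone sketch of the text branch defined in txt_net_forward above: token ids are embedded, averaged over the sequence, and passed through one fully-connected layer. This re-implements only that idea, with freshly initialized weights and illustrative sizes; it does not construct the FastText class itself, since its DeepSeqNet base class and _compile method are not included in this row.

import torch
from torch import nn

vocab_size, embedding_size, hidden_layer_size = 1000, 50, 32  # illustrative sizes

embeddings = nn.Embedding(vocab_size, embedding_size)
embed_fc_layer = nn.Linear(embedding_size, hidden_layer_size)

x_txt = torch.randint(0, vocab_size, (4, 20))               # batch of 4 sequences, 20 token ids each
embedded_sequence = embeddings(x_txt)                        # shape (4, 20, embedding_size)
feature_vector = embed_fc_layer(embedded_sequence.mean(1))   # shape (4, hidden_layer_size)
print(feature_vector.shape)                                  # torch.Size([4, 32])

In the full model this text feature vector is presumably concatenated with the output of the tabular feed-forward layers (that step lives in the DeepSeqNet base class, which is not shown here), which is consistent with the final Linear layer taking hidden_layer_size + linear_layers[-1].out_features inputs.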
PedroLelis/tensorflow
|
[
"8852b0032ad49acbc59009776665c60f86c06f91"
] |
[
"tensorflow/contrib/tensor_forest/data/data_ops.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Ops for preprocessing data.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\n\nfrom tensorflow.contrib.tensor_forest.python import constants\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import load_library\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import tf_logging as logging\n\nDATA_OPS_FILE = '_data_ops.so'\n\n_data_ops = None\n_ops_lock = threading.Lock()\n\nops.NotDifferentiable('SparseValuesToIndices')\nops.NotDifferentiable('StringToFloat')\n\n\n# Workaround for the fact that importing tensorflow imports contrib\n# (even if a user isn't using this or any other contrib op), but\n# there's not yet any guarantee that the shared object exists.\n# In which case, \"import tensorflow\" will always crash, even for users that\n# never use contrib.\ndef Load():\n \"\"\"Load the data ops library and return the loaded module.\"\"\"\n with _ops_lock:\n global _data_ops\n if not _data_ops:\n ops_path = resource_loader.get_path_to_datafile(DATA_OPS_FILE)\n logging.info('data path: %s', ops_path)\n _data_ops = load_library.load_op_library(ops_path)\n\n assert _data_ops, 'Could not load _data_ops.so'\n return _data_ops\n\n\ndef _ParseSparse(data):\n \"\"\"Concat sparse tensors together.\n\n Args:\n data: A dict of name -> Tensor.\n\n Returns:\n A single sparse tensor and a 1-D input spec Tensor.\n\n Raises:\n NotImplementedError: Combining dense and sparse tensors is not\n supported.\n ValueError: If data contains non-string Tensors.\n \"\"\"\n for k in sorted(data.keys()):\n if not isinstance(data[k], sparse_tensor.SparseTensor):\n raise NotImplementedError(\n 'Features should be either all sparse or all dense. 
Use a '\n 'feature engineering function to convert some of them.')\n\n data_spec = [\n constants.DATA_CATEGORICAL if data[data.keys()[0]].dtype == dtypes.string\n else constants.DATA_FLOAT\n ]\n return sparse_ops.sparse_concat(1, data.values()), data_spec\n\n\ndef _ParseDense(data):\n \"\"\"Return a single flat tensor, keys, and a data spec list.\n\n Args:\n data: A dict mapping feature names to Tensors.\n\n Returns:\n A tuple of (single dense float Tensor, keys tensor (if exists), data spec).\n \"\"\"\n convert_ops = Load()\n data_spec = [constants.DATA_CATEGORICAL if (data[k].dtype == dtypes.string or\n data[k].dtype == dtypes.int32 or\n data[k].dtype == dtypes.int64)\n else constants.DATA_FLOAT for k in sorted(data.keys())]\n data_spec = [constants.DATA_FLOAT] + data_spec\n features = []\n for k in sorted(data.keys()):\n if data[k].dtype == dtypes.string:\n features.append(convert_ops.string_to_float(data[k]))\n elif data[k].dtype == dtypes.int64 or data[k].dtype == dtypes.int32:\n features.append(math_ops.to_float(data[k]))\n else:\n features.append(data[k])\n return array_ops.concat(1, features), data_spec\n\n\ndef ParseDataTensorOrDict(data):\n \"\"\"Return a tensor to use for input data.\n\n The incoming features can be a dict where keys are the string names of the\n columns, which we turn into a single 2-D tensor.\n\n Args:\n data: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A 2-D tensor for input to tensor_forest, a keys tensor for the\n tf.Examples if they exist, and a list of the type of each column\n (e.g. continuous float, categorical).\n \"\"\"\n if isinstance(data, dict):\n # If there's at least one sparse tensor, everything has to be sparse.\n is_sparse = False\n for v in data.values():\n if isinstance(v, sparse_tensor.SparseTensor):\n is_sparse = True\n break\n if is_sparse:\n return _ParseSparse(data)\n else:\n return _ParseDense(data)\n else:\n return (data, [constants.DATA_FLOAT] * data.get_shape().as_list()[1])\n\n\ndef ParseLabelTensorOrDict(labels):\n \"\"\"Return a tensor to use for input labels to tensor_forest.\n\n The incoming targets can be a dict where keys are the string names of the\n columns, which we turn into a single 1-D tensor for classification or\n 2-D tensor for regression.\n\n Converts sparse tensors to dense ones.\n\n Args:\n labels: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A 2-D tensor for labels/outputs.\n \"\"\"\n if isinstance(labels, dict):\n return math_ops.to_float(array_ops.concat(\n 1, [sparse_ops.sparse_tensor_to_dense(labels[k], default_value=-1)\n if isinstance(labels, sparse_tensor.SparseTensor)\n else labels[k] for k in sorted(labels.keys())]))\n else:\n if isinstance(labels, sparse_tensor.SparseTensor):\n return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(\n labels, default_value=-1))\n else:\n return math_ops.to_float(labels)\n"
] |
[
[
"tensorflow.python.platform.resource_loader.get_path_to_datafile",
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.framework.load_library.load_op_library",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense",
"tensorflow.python.ops.math_ops.to_float"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"1.4",
"2.7",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.6",
"1.2",
"2.10"
]
}
] |
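A pure-Python sketch of the column-typing rule that _ParseDense applies in the row above: string and integer columns are marked categorical, everything else float, in sorted key order, with one extra leading DATA_FLOAT entry. The constant values below are placeholders chosen for illustration; the real ones come from tensorflow.contrib.tensor_forest.python.constants, and no TensorFlow code is run here.

DATA_FLOAT = 0        # placeholder value, not the real constant
DATA_CATEGORICAL = 1  # placeholder value, not the real constant

def dense_data_spec(column_dtypes):
    """column_dtypes: dict mapping feature name -> dtype name string."""
    spec = [DATA_CATEGORICAL if column_dtypes[k] in ('string', 'int32', 'int64')
            else DATA_FLOAT
            for k in sorted(column_dtypes)]
    return [DATA_FLOAT] + spec  # _ParseDense prepends a single DATA_FLOAT

print(dense_data_spec({'age': 'int64', 'weight': 'float32', 'city': 'string'}))
# -> [0, 1, 1, 0]: leading float, then age and city categorical, weight float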
thodan/epos
|
[
"d67657bbb06da5a6adb8a035a2f58fc305e396f7",
"d67657bbb06da5a6adb8a035a2f58fc305e396f7",
"d67657bbb06da5a6adb8a035a2f58fc305e396f7",
"d67657bbb06da5a6adb8a035a2f58fc305e396f7"
] |
[
"external/slim/nets/s3dg.py",
"external/slim/nets/i3d.py",
"external/slim/nets/resnet_v1_test.py",
"epos_lib/common.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the definition for Gated Separable 3D nets (S3D-G).\n\nThe nets architecture is proposed by:\n Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy,\n Rethinking Spatiotemporal Feature Learning For Video Understanding.\n https://arxiv.org/abs/1712.04851.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nets import i3d_utils\n\ntrunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)\nconv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal\ninception_block_v1_3d = i3d_utils.inception_block_v1_3d\n\n# Orignaly, arg_scope = slim.arg_scope and layers = slim, now switch to more\n# update-to-date tf.contrib.* API.\narg_scope = tf.contrib.framework.arg_scope\nlayers = tf.contrib.layers\n\n\ndef s3dg_arg_scope(weight_decay=1e-7,\n batch_norm_decay=0.999,\n batch_norm_epsilon=0.001):\n \"\"\"Defines default arg_scope for S3D-G.\n\n Args:\n weight_decay: The weight decay to use for regularizing the nets.\n batch_norm_decay: Decay for batch norm moving average.\n batch_norm_epsilon: Small float added to variance to avoid dividing by zero\n in batch norm.\n\n Returns:\n sc: An arg_scope to use for the models.\n \"\"\"\n batch_norm_params = {\n # Decay for the moving averages.\n 'decay': batch_norm_decay,\n # epsilon to prevent 0s in variance.\n 'epsilon': batch_norm_epsilon,\n # Turns off fused batch norm.\n 'fused': False,\n # collection containing the moving mean and moving variance.\n 'variables_collections': {\n 'beta': None,\n 'gamma': None,\n 'moving_mean': ['moving_vars'],\n 'moving_variance': ['moving_vars'],\n }\n }\n\n with arg_scope(\n [layers.conv3d, conv3d_spatiotemporal],\n weights_regularizer=layers.l2_regularizer(weight_decay),\n activation_fn=tf.nn.relu,\n normalizer_fn=layers.batch_norm,\n normalizer_params=batch_norm_params):\n with arg_scope([conv3d_spatiotemporal], separable=True) as sc:\n return sc\n\n\ndef self_gating(input_tensor, scope, data_format='NDHWC'):\n \"\"\"Feature gating as used in S3D-G.\n\n Transforms the input features by aggregating features from all\n spatial and temporal locations, and applying gating conditioned\n on the aggregated features. More details can be found at:\n https://arxiv.org/abs/1712.04851\n\n Args:\n input_tensor: A 5-D float tensor of size [batch_size, num_frames,\n height, width, channels].\n scope: scope for `variable_scope`.\n data_format: An optional string from: \"NDHWC\", \"NCDHW\". Defaults to \"NDHWC\".\n The data format of the input and output data. With the default format\n \"NDHWC\", the data is stored in the order of: [batch, in_depth, in_height,\n in_width, in_channels]. 
Alternatively, the format could be \"NCDHW\", the\n data storage order is:\n [batch, in_channels, in_depth, in_height, in_width].\n\n Returns:\n A tensor with the same shape as input_tensor.\n \"\"\"\n\n index_c = data_format.index('C')\n index_d = data_format.index('D')\n index_h = data_format.index('H')\n index_w = data_format.index('W')\n input_shape = input_tensor.get_shape().as_list()\n t = input_shape[index_d]\n w = input_shape[index_w]\n h = input_shape[index_h]\n num_channels = input_shape[index_c]\n\n spatiotemporal_average = layers.avg_pool3d(\n input_tensor, [t, w, h],\n stride=1,\n data_format=data_format,\n scope=scope + '/self_gating/avg_pool3d')\n\n weights = layers.conv3d(\n spatiotemporal_average,\n num_channels, [1, 1, 1],\n activation_fn=None,\n normalizer_fn=None,\n biases_initializer=None,\n data_format=data_format,\n weights_initializer=trunc_normal(0.01),\n scope=scope + '/self_gating/transformer_W')\n\n tile_multiples = [1, t, w, h]\n tile_multiples.insert(index_c, 1)\n weights = tf.tile(weights, tile_multiples)\n weights = tf.nn.sigmoid(weights)\n\n return tf.multiply(weights, input_tensor)\n\n\ndef s3dg_base(inputs,\n first_temporal_kernel_size=3,\n temporal_conv_startat='Conv2d_2c_3x3',\n gating_startat='Conv2d_2c_3x3',\n final_endpoint='Mixed_5c',\n min_depth=16,\n depth_multiplier=1.0,\n data_format='NDHWC',\n scope='InceptionV1'):\n \"\"\"Defines the I3D/S3DG base architecture.\n\n Note that we use the names as defined in Inception V1 to facilitate checkpoint\n conversion from an image-trained Inception V1 checkpoint to I3D checkpoint.\n\n Args:\n inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,\n channels].\n first_temporal_kernel_size: Specifies the temporal kernel size for the first\n conv3d filter. A larger value slows down the nets but provides little\n accuracy improvement. The default is 7 in the original I3D and S3D-G but 3\n gives better performance. Must be set to one of 1, 3, 5 or 7.\n temporal_conv_startat: Specifies the first conv block to use 3D or separable\n 3D convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is\n used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the\n first valid block to use separable 3D convs. If provided block name is\n not present, all valid blocks will use separable 3D convs. Note that\n 'Conv2d_1a_7x7' cannot be made into a separable 3D conv, but can be made\n into a 2D or 3D conv using the `first_temporal_kernel_size` option.\n gating_startat: Specifies the first conv block to use self gating.\n 'Conv2d_2c_3x3' is the first valid block to use self gating. If provided\n block name is not present, all valid blocks will use separable 3D convs.\n final_endpoint: Specifies the endpoint to construct the nets up to. It\n can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',\n 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',\n 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',\n 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']\n min_depth: Minimum depth value (number of channels) for all convolution ops.\n Enforced when depth_multiplier < 1, and not an active constraint when\n depth_multiplier >= 1.\n depth_multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the nets.\n data_format: An optional string from: \"NDHWC\", \"NCDHW\". 
Defaults to \"NDHWC\".\n The data format of the input and output data. With the default format\n \"NDHWC\", the data is stored in the order of: [batch, in_depth, in_height,\n in_width, in_channels]. Alternatively, the format could be \"NCDHW\", the\n data storage order is:\n [batch, in_channels, in_depth, in_height, in_width].\n scope: Optional variable_scope.\n\n Returns:\n A dictionary from components of the nets to the corresponding activation.\n\n Raises:\n ValueError: if final_endpoint is not set to one of the predefined values, or\n if depth_multiplier <= 0.\n \"\"\"\n\n assert data_format in ['NDHWC', 'NCDHW']\n end_points = {}\n t = 1\n # For inverted pyramid models, we start with gating switched off.\n use_gating = False\n self_gating_fn = None\n def gating_fn(inputs, scope):\n return self_gating(inputs, scope, data_format=data_format)\n\n if depth_multiplier <= 0:\n raise ValueError('depth_multiplier is not greater than zero.')\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\n\n with tf.variable_scope(scope, 'InceptionV1', [inputs]):\n with arg_scope([layers.conv3d], weights_initializer=trunc_normal(0.01)):\n with arg_scope(\n [layers.conv3d, layers.max_pool3d, conv3d_spatiotemporal],\n stride=1,\n data_format=data_format,\n padding='SAME'):\n # batch_size x 32 x 112 x 112 x 64\n end_point = 'Conv2d_1a_7x7'\n if first_temporal_kernel_size not in [1, 3, 5, 7]:\n raise ValueError(\n 'first_temporal_kernel_size can only be 1, 3, 5 or 7.')\n # Separable conv is slow when used at first conv layer.\n net = conv3d_spatiotemporal(\n inputs,\n depth(64), [first_temporal_kernel_size, 7, 7],\n stride=2,\n separable=False,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n # batch_size x 32 x 56 x 56 x 64\n end_point = 'MaxPool_2a_3x3'\n net = layers.max_pool3d(\n net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n # batch_size x 32 x 56 x 56 x 64\n end_point = 'Conv2d_2b_1x1'\n net = layers.conv3d(net, depth(64), [1, 1, 1], scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n # batch_size x 32 x 56 x 56 x 192\n end_point = 'Conv2d_2c_3x3'\n if temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = conv3d_spatiotemporal(net, depth(192), [t, 3, 3], scope=end_point)\n if use_gating:\n net = self_gating(net, scope=end_point, data_format=data_format)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n # batch_size x 32 x 28 x 28 x 192\n end_point = 'MaxPool_3a_3x3'\n net = layers.max_pool3d(\n net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n # batch_size x 32 x 28 x 28 x 256\n end_point = 'Mixed_3b'\n if temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = inception_block_v1_3d(\n net,\n num_outputs_0_0a=depth(64),\n num_outputs_1_0a=depth(96),\n num_outputs_1_0b=depth(128),\n num_outputs_2_0a=depth(16),\n num_outputs_2_0b=depth(32),\n num_outputs_3_0b=depth(32),\n temporal_kernel_size=t,\n self_gating_fn=self_gating_fn,\n data_format=data_format,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n end_point = 'Mixed_3c'\n if 
temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = inception_block_v1_3d(\n net,\n num_outputs_0_0a=depth(128),\n num_outputs_1_0a=depth(128),\n num_outputs_1_0b=depth(192),\n num_outputs_2_0a=depth(32),\n num_outputs_2_0b=depth(96),\n num_outputs_3_0b=depth(64),\n temporal_kernel_size=t,\n self_gating_fn=self_gating_fn,\n data_format=data_format,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n end_point = 'MaxPool_4a_3x3'\n net = layers.max_pool3d(\n net, [3, 3, 3], stride=[2, 2, 2], scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n # batch_size x 16 x 14 x 14 x 512\n end_point = 'Mixed_4b'\n if temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = inception_block_v1_3d(\n net,\n num_outputs_0_0a=depth(192),\n num_outputs_1_0a=depth(96),\n num_outputs_1_0b=depth(208),\n num_outputs_2_0a=depth(16),\n num_outputs_2_0b=depth(48),\n num_outputs_3_0b=depth(64),\n temporal_kernel_size=t,\n self_gating_fn=self_gating_fn,\n data_format=data_format,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n # batch_size x 16 x 14 x 14 x 512\n end_point = 'Mixed_4c'\n if temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = inception_block_v1_3d(\n net,\n num_outputs_0_0a=depth(160),\n num_outputs_1_0a=depth(112),\n num_outputs_1_0b=depth(224),\n num_outputs_2_0a=depth(24),\n num_outputs_2_0b=depth(64),\n num_outputs_3_0b=depth(64),\n temporal_kernel_size=t,\n self_gating_fn=self_gating_fn,\n data_format=data_format,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n # batch_size x 16 x 14 x 14 x 512\n end_point = 'Mixed_4d'\n if temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = inception_block_v1_3d(\n net,\n num_outputs_0_0a=depth(128),\n num_outputs_1_0a=depth(128),\n num_outputs_1_0b=depth(256),\n num_outputs_2_0a=depth(24),\n num_outputs_2_0b=depth(64),\n num_outputs_3_0b=depth(64),\n temporal_kernel_size=t,\n self_gating_fn=self_gating_fn,\n data_format=data_format,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n # batch_size x 16 x 14 x 14 x 528\n end_point = 'Mixed_4e'\n if temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = inception_block_v1_3d(\n net,\n num_outputs_0_0a=depth(112),\n num_outputs_1_0a=depth(144),\n num_outputs_1_0b=depth(288),\n num_outputs_2_0a=depth(32),\n num_outputs_2_0b=depth(64),\n num_outputs_3_0b=depth(64),\n temporal_kernel_size=t,\n self_gating_fn=self_gating_fn,\n data_format=data_format,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n # batch_size x 16 x 14 x 14 x 832\n end_point = 'Mixed_4f'\n if temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = inception_block_v1_3d(\n net,\n num_outputs_0_0a=depth(256),\n num_outputs_1_0a=depth(160),\n num_outputs_1_0b=depth(320),\n num_outputs_2_0a=depth(32),\n 
num_outputs_2_0b=depth(128),\n num_outputs_3_0b=depth(128),\n temporal_kernel_size=t,\n self_gating_fn=self_gating_fn,\n data_format=data_format,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n end_point = 'MaxPool_5a_2x2'\n net = layers.max_pool3d(\n net, [2, 2, 2], stride=[2, 2, 2], scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n # batch_size x 8 x 7 x 7 x 832\n end_point = 'Mixed_5b'\n if temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = inception_block_v1_3d(\n net,\n num_outputs_0_0a=depth(256),\n num_outputs_1_0a=depth(160),\n num_outputs_1_0b=depth(320),\n num_outputs_2_0a=depth(32),\n num_outputs_2_0b=depth(128),\n num_outputs_3_0b=depth(128),\n temporal_kernel_size=t,\n self_gating_fn=self_gating_fn,\n data_format=data_format,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n\n # batch_size x 8 x 7 x 7 x 1024\n end_point = 'Mixed_5c'\n if temporal_conv_startat == end_point:\n t = 3\n if gating_startat == end_point:\n use_gating = True\n self_gating_fn = gating_fn\n net = inception_block_v1_3d(\n net,\n num_outputs_0_0a=depth(384),\n num_outputs_1_0a=depth(192),\n num_outputs_1_0b=depth(384),\n num_outputs_2_0a=depth(48),\n num_outputs_2_0b=depth(128),\n num_outputs_3_0b=depth(128),\n temporal_kernel_size=t,\n self_gating_fn=self_gating_fn,\n data_format=data_format,\n scope=end_point)\n end_points[end_point] = net\n if final_endpoint == end_point:\n return net, end_points\n raise ValueError('Unknown final endpoint %s' % final_endpoint)\n\n\ndef s3dg(inputs,\n num_classes=1000,\n first_temporal_kernel_size=3,\n temporal_conv_startat='Conv2d_2c_3x3',\n gating_startat='Conv2d_2c_3x3',\n final_endpoint='Mixed_5c',\n min_depth=16,\n depth_multiplier=1.0,\n dropout_keep_prob=0.8,\n is_training=True,\n prediction_fn=layers.softmax,\n spatial_squeeze=True,\n reuse=None,\n data_format='NDHWC',\n scope='InceptionV1'):\n \"\"\"Defines the S3D-G architecture.\n\n The default image size used to train this nets is 224x224.\n\n Args:\n inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,\n channels].\n num_classes: number of predicted classes.\n first_temporal_kernel_size: Specifies the temporal kernel size for the first\n conv3d filter. A larger value slows down the nets but provides little\n accuracy improvement. Must be set to one of 1, 3, 5 or 7.\n temporal_conv_startat: Specifies the first conv block to use separable 3D\n convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is\n used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the\n first valid block to use separable 3D convs. If provided block name is\n not present, all valid blocks will use separable 3D convs.\n gating_startat: Specifies the first conv block to use self gating.\n 'Conv2d_2c_3x3' is the first valid block to use self gating. If provided\n block name is not present, all valid blocks will use separable 3D convs.\n final_endpoint: Specifies the endpoint to construct the nets up to. 
It\n can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',\n 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',\n 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',\n 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']\n min_depth: Minimum depth value (number of channels) for all convolution ops.\n Enforced when depth_multiplier < 1, and not an active constraint when\n depth_multiplier >= 1.\n depth_multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the nets.\n dropout_keep_prob: the percentage of activation values that are retained.\n is_training: whether is training or not.\n prediction_fn: a function to get predictions out of logits.\n spatial_squeeze: if True, logits is of shape is [B, C], if false logits is\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n reuse: whether or not the nets and its variables should be reused. To be\n able to reuse 'scope' must be given.\n data_format: An optional string from: \"NDHWC\", \"NCDHW\". Defaults to \"NDHWC\".\n The data format of the input and output data. With the default format\n \"NDHWC\", the data is stored in the order of: [batch, in_depth, in_height,\n in_width, in_channels]. Alternatively, the format could be \"NCDHW\", the\n data storage order is:\n [batch, in_channels, in_depth, in_height, in_width].\n scope: Optional variable_scope.\n\n Returns:\n logits: the pre-softmax activations, a tensor of size\n [batch_size, num_classes]\n end_points: a dictionary from components of the nets to the corresponding\n activation.\n \"\"\"\n assert data_format in ['NDHWC', 'NCDHW']\n # Final pooling and prediction\n with tf.variable_scope(\n scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:\n with arg_scope(\n [layers.batch_norm, layers.dropout], is_training=is_training):\n net, end_points = s3dg_base(\n inputs,\n first_temporal_kernel_size=first_temporal_kernel_size,\n temporal_conv_startat=temporal_conv_startat,\n gating_startat=gating_startat,\n final_endpoint=final_endpoint,\n min_depth=min_depth,\n depth_multiplier=depth_multiplier,\n data_format=data_format,\n scope=scope)\n with tf.variable_scope('Logits'):\n if data_format.startswith('NC'):\n net = tf.transpose(net, [0, 2, 3, 4, 1])\n kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])\n net = layers.avg_pool3d(\n net,\n kernel_size,\n stride=1,\n data_format='NDHWC',\n scope='AvgPool_0a_7x7')\n net = layers.dropout(net, dropout_keep_prob, scope='Dropout_0b')\n logits = layers.conv3d(\n net,\n num_classes, [1, 1, 1],\n activation_fn=None,\n normalizer_fn=None,\n data_format='NDHWC',\n scope='Conv2d_0c_1x1')\n # Temporal average pooling.\n logits = tf.reduce_mean(logits, axis=1)\n if spatial_squeeze:\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\n\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n return logits, end_points\n\n\ns3dg.default_image_size = 224\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the definition for Inflated 3D Inception V1 (I3D).\n\nThe network architecture is proposed by:\n Joao Carreira and Andrew Zisserman,\n Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset.\n https://arxiv.org/abs/1705.07750\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nets import i3d_utils\nfrom nets import s3dg\n\nslim = tf.contrib.slim\ntrunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)\nconv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal\n\n\ndef i3d_arg_scope(weight_decay=1e-7,\n batch_norm_decay=0.999,\n batch_norm_epsilon=0.001,\n use_renorm=False,\n separable_conv3d=False):\n \"\"\"Defines default arg_scope for I3D.\n\n Args:\n weight_decay: The weight decay to use for regularizing the network.\n batch_norm_decay: Decay for batch norm moving average.\n batch_norm_epsilon: Small float added to variance to avoid dividing by zero\n in batch norm.\n use_renorm: Whether to use batch renormalization or not.\n separable_conv3d: Whether to use separable 3d Convs.\n\n Returns:\n sc: An arg_scope to use for the models.\n \"\"\"\n batch_norm_params = {\n # Decay for the moving averages.\n 'decay': batch_norm_decay,\n # epsilon to prevent 0s in variance.\n 'epsilon': batch_norm_epsilon,\n # Turns off fused batch norm.\n 'fused': False,\n 'renorm': use_renorm,\n # collection containing the moving mean and moving variance.\n 'variables_collections': {\n 'beta': None,\n 'gamma': None,\n 'moving_mean': ['moving_vars'],\n 'moving_variance': ['moving_vars'],\n }\n }\n\n with slim.arg_scope(\n [slim.conv3d, conv3d_spatiotemporal],\n weights_regularizer=slim.l2_regularizer(weight_decay),\n activation_fn=tf.nn.relu,\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n with slim.arg_scope(\n [conv3d_spatiotemporal], separable=separable_conv3d) as sc:\n return sc\n\n\ndef i3d_base(inputs, final_endpoint='Mixed_5c',\n scope='InceptionV1'):\n \"\"\"Defines the I3D base architecture.\n\n Note that we use the names as defined in Inception V1 to facilitate checkpoint\n conversion from an image-trained Inception V1 checkpoint to I3D checkpoint.\n\n Args:\n inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,\n channels].\n final_endpoint: Specifies the endpoint to construct the network up to. 
It\n can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',\n 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',\n 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',\n 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']\n scope: Optional variable_scope.\n\n Returns:\n A dictionary from components of the network to the corresponding activation.\n\n Raises:\n ValueError: if final_endpoint is not set to one of the predefined values.\n \"\"\"\n\n return s3dg.s3dg_base(\n inputs,\n first_temporal_kernel_size=7,\n temporal_conv_startat='Conv2d_2c_3x3',\n gating_startat=None,\n final_endpoint=final_endpoint,\n min_depth=16,\n depth_multiplier=1.0,\n data_format='NDHWC',\n scope=scope)\n\n\ndef i3d(inputs,\n num_classes=1000,\n dropout_keep_prob=0.8,\n is_training=True,\n prediction_fn=slim.softmax,\n spatial_squeeze=True,\n reuse=None,\n scope='InceptionV1'):\n \"\"\"Defines the I3D architecture.\n\n The default image size used to train this network is 224x224.\n\n Args:\n inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,\n channels].\n num_classes: number of predicted classes.\n dropout_keep_prob: the percentage of activation values that are retained.\n is_training: whether is training or not.\n prediction_fn: a function to get predictions out of logits.\n spatial_squeeze: if True, logits is of shape is [B, C], if false logits is\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n Returns:\n logits: the pre-softmax activations, a tensor of size\n [batch_size, num_classes]\n end_points: a dictionary from components of the network to the corresponding\n activation.\n \"\"\"\n # Final pooling and prediction\n with tf.variable_scope(\n scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:\n with slim.arg_scope(\n [slim.batch_norm, slim.dropout], is_training=is_training):\n net, end_points = i3d_base(inputs, scope=scope)\n with tf.variable_scope('Logits'):\n kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])\n net = slim.avg_pool3d(\n net, kernel_size, stride=1, scope='AvgPool_0a_7x7')\n net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')\n logits = slim.conv3d(\n net,\n num_classes, [1, 1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='Conv2d_0c_1x1')\n # Temporal average pooling.\n logits = tf.reduce_mean(logits, axis=1)\n if spatial_squeeze:\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\n\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n return logits, end_points\n\n\ni3d.default_image_size = 224\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for slim.nets.resnet_v1.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom nets import resnet_utils\nfrom nets import resnet_v1\n\nslim = tf.contrib.slim\n\n\ndef create_test_input(batch_size, height, width, channels):\n \"\"\"Create test input tensor.\n\n Args:\n batch_size: The number of images per batch or `None` if unknown.\n height: The height of each image or `None` if unknown.\n width: The width of each image or `None` if unknown.\n channels: The number of channels per image or `None` if unknown.\n\n Returns:\n Either a placeholder `Tensor` of dimension\n [batch_size, height, width, channels] if any of the inputs are `None` or a\n constant `Tensor` with the mesh grid values along the spatial dimensions.\n \"\"\"\n if None in [batch_size, height, width, channels]:\n return tf.placeholder(tf.float32, (batch_size, height, width, channels))\n else:\n return tf.cast(\n np.tile(\n np.reshape(\n np.reshape(np.arange(height), [height, 1]) +\n np.reshape(np.arange(width), [1, width]),\n [1, height, width, 1]),\n [batch_size, 1, 1, channels]), tf.float32)\n\n\nclass ResnetUtilsTest(tf.test.TestCase):\n\n def testSubsampleThreeByThree(self):\n x = tf.reshape(tf.cast(tf.range(9), tf.float32), [1, 3, 3, 1])\n x = resnet_utils.subsample(x, 2)\n expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])\n with self.test_session():\n self.assertAllClose(x.eval(), expected.eval())\n\n def testSubsampleFourByFour(self):\n x = tf.reshape(tf.cast(tf.range(16), tf.float32), [1, 4, 4, 1])\n x = resnet_utils.subsample(x, 2)\n expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])\n with self.test_session():\n self.assertAllClose(x.eval(), expected.eval())\n\n def testConv2DSameEven(self):\n n, n2 = 4, 2\n\n # Input image.\n x = create_test_input(1, n, n, 1)\n\n # Convolution kernel.\n w = create_test_input(1, 3, 3, 1)\n w = tf.reshape(w, [3, 3, 1, 1])\n\n tf.get_variable('Conv/weights', initializer=w)\n tf.get_variable('Conv/biases', initializer=tf.zeros([1]))\n tf.get_variable_scope().reuse_variables()\n\n y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')\n y1_expected = tf.cast([[14, 28, 43, 26],\n [28, 48, 66, 37],\n [43, 66, 84, 46],\n [26, 37, 46, 22]], tf.float32)\n y1_expected = tf.reshape(y1_expected, [1, n, n, 1])\n\n y2 = resnet_utils.subsample(y1, 2)\n y2_expected = tf.cast([[14, 43], [43, 84]], tf.float32)\n y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])\n\n y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')\n y3_expected = y2_expected\n\n y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')\n y4_expected = tf.cast([[48, 37], [37, 22]], tf.float32)\n y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])\n\n with 
self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n self.assertAllClose(y1.eval(), y1_expected.eval())\n self.assertAllClose(y2.eval(), y2_expected.eval())\n self.assertAllClose(y3.eval(), y3_expected.eval())\n self.assertAllClose(y4.eval(), y4_expected.eval())\n\n def testConv2DSameOdd(self):\n n, n2 = 5, 3\n\n # Input image.\n x = create_test_input(1, n, n, 1)\n\n # Convolution kernel.\n w = create_test_input(1, 3, 3, 1)\n w = tf.reshape(w, [3, 3, 1, 1])\n\n tf.get_variable('Conv/weights', initializer=w)\n tf.get_variable('Conv/biases', initializer=tf.zeros([1]))\n tf.get_variable_scope().reuse_variables()\n\n y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')\n y1_expected = tf.cast([[14, 28, 43, 58, 34],\n [28, 48, 66, 84, 46],\n [43, 66, 84, 102, 55],\n [58, 84, 102, 120, 64],\n [34, 46, 55, 64, 30]], tf.float32)\n y1_expected = tf.reshape(y1_expected, [1, n, n, 1])\n\n y2 = resnet_utils.subsample(y1, 2)\n y2_expected = tf.cast([[14, 43, 34],\n [43, 84, 55],\n [34, 55, 30]], tf.float32)\n y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])\n\n y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')\n y3_expected = y2_expected\n\n y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')\n y4_expected = y2_expected\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n self.assertAllClose(y1.eval(), y1_expected.eval())\n self.assertAllClose(y2.eval(), y2_expected.eval())\n self.assertAllClose(y3.eval(), y3_expected.eval())\n self.assertAllClose(y4.eval(), y4_expected.eval())\n\n def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):\n \"\"\"A plain ResNet without extra layers before or after the ResNet blocks.\"\"\"\n with tf.variable_scope(scope, values=[inputs]):\n with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):\n net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)\n end_points = slim.utils.convert_collection_to_dict('end_points')\n return net, end_points\n\n def testEndPointsV1(self):\n \"\"\"Test the end points of a tiny v1 bottleneck network.\"\"\"\n blocks = [\n resnet_v1.resnet_v1_block(\n 'block1', base_depth=1, num_units=2, stride=2),\n resnet_v1.resnet_v1_block(\n 'block2', base_depth=2, num_units=2, stride=1),\n ]\n inputs = create_test_input(2, 32, 16, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')\n expected = [\n 'tiny/block1/unit_1/bottleneck_v1/shortcut',\n 'tiny/block1/unit_1/bottleneck_v1/conv1',\n 'tiny/block1/unit_1/bottleneck_v1/conv2',\n 'tiny/block1/unit_1/bottleneck_v1/conv3',\n 'tiny/block1/unit_2/bottleneck_v1/conv1',\n 'tiny/block1/unit_2/bottleneck_v1/conv2',\n 'tiny/block1/unit_2/bottleneck_v1/conv3',\n 'tiny/block2/unit_1/bottleneck_v1/shortcut',\n 'tiny/block2/unit_1/bottleneck_v1/conv1',\n 'tiny/block2/unit_1/bottleneck_v1/conv2',\n 'tiny/block2/unit_1/bottleneck_v1/conv3',\n 'tiny/block2/unit_2/bottleneck_v1/conv1',\n 'tiny/block2/unit_2/bottleneck_v1/conv2',\n 'tiny/block2/unit_2/bottleneck_v1/conv3']\n self.assertItemsEqual(expected, end_points.keys())\n\n def _stack_blocks_nondense(self, net, blocks):\n \"\"\"A simplified ResNet Block stacker without output stride control.\"\"\"\n for block in blocks:\n with tf.variable_scope(block.scope, 'block', [net]):\n for i, unit in enumerate(block.args):\n with tf.variable_scope('unit_%d' % (i + 1), values=[net]):\n net = block.unit_fn(net, rate=1, **unit)\n return net\n\n def testAtrousValuesBottleneck(self):\n 
\"\"\"Verify the values of dense feature extraction by atrous convolution.\n\n Make sure that dense feature extraction by stack_blocks_dense() followed by\n subsampling gives identical results to feature extraction at the nominal\n model output stride using the simple self._stack_blocks_nondense() above.\n \"\"\"\n block = resnet_v1.resnet_v1_block\n blocks = [\n block('block1', base_depth=1, num_units=2, stride=2),\n block('block2', base_depth=2, num_units=2, stride=2),\n block('block3', base_depth=4, num_units=2, stride=2),\n block('block4', base_depth=8, num_units=2, stride=1),\n ]\n nominal_stride = 8\n\n # Test both odd and even input dimensions.\n height = 30\n width = 31\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n with slim.arg_scope([slim.batch_norm], is_training=False):\n for output_stride in [1, 2, 4, 8, None]:\n with tf.Graph().as_default():\n with self.test_session() as sess:\n tf.set_random_seed(0)\n inputs = create_test_input(1, height, width, 3)\n # Dense feature extraction followed by subsampling.\n output = resnet_utils.stack_blocks_dense(inputs,\n blocks,\n output_stride)\n if output_stride is None:\n factor = 1\n else:\n factor = nominal_stride // output_stride\n\n output = resnet_utils.subsample(output, factor)\n # Make the two networks use the same weights.\n tf.get_variable_scope().reuse_variables()\n # Feature extraction at the nominal network rate.\n expected = self._stack_blocks_nondense(inputs, blocks)\n sess.run(tf.global_variables_initializer())\n output, expected = sess.run([output, expected])\n self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)\n\n def testStridingLastUnitVsSubsampleBlockEnd(self):\n \"\"\"Compares subsampling at the block's last unit or block's end.\n\n Makes sure that the final output is the same when we use a stride at the\n last unit of a block vs. 
we subsample activations at the end of a block.\n \"\"\"\n block = resnet_v1.resnet_v1_block\n\n blocks = [\n block('block1', base_depth=1, num_units=2, stride=2),\n block('block2', base_depth=2, num_units=2, stride=2),\n block('block3', base_depth=4, num_units=2, stride=2),\n block('block4', base_depth=8, num_units=2, stride=1),\n ]\n\n # Test both odd and even input dimensions.\n height = 30\n width = 31\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n with slim.arg_scope([slim.batch_norm], is_training=False):\n for output_stride in [1, 2, 4, 8, None]:\n with tf.Graph().as_default():\n with self.test_session() as sess:\n tf.set_random_seed(0)\n inputs = create_test_input(1, height, width, 3)\n\n # Subsampling at the last unit of the block.\n output = resnet_utils.stack_blocks_dense(\n inputs, blocks, output_stride,\n store_non_strided_activations=False,\n outputs_collections='output')\n output_end_points = slim.utils.convert_collection_to_dict(\n 'output')\n\n # Make the two networks use the same weights.\n tf.get_variable_scope().reuse_variables()\n\n # Subsample activations at the end of the blocks.\n expected = resnet_utils.stack_blocks_dense(\n inputs, blocks, output_stride,\n store_non_strided_activations=True,\n outputs_collections='expected')\n expected_end_points = slim.utils.convert_collection_to_dict(\n 'expected')\n\n sess.run(tf.global_variables_initializer())\n\n # Make sure that the final output is the same.\n output, expected = sess.run([output, expected])\n self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)\n\n # Make sure that intermediate block activations in\n # output_end_points are subsampled versions of the corresponding\n # ones in expected_end_points.\n for i, block in enumerate(blocks[:-1:]):\n output = output_end_points[block.scope]\n expected = expected_end_points[block.scope]\n atrous_activated = (output_stride is not None and\n 2 ** i >= output_stride)\n if not atrous_activated:\n expected = resnet_utils.subsample(expected, 2)\n output, expected = sess.run([output, expected])\n self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)\n\n\nclass ResnetCompleteNetworkTest(tf.test.TestCase):\n \"\"\"Tests with complete small ResNet v1 model.\"\"\"\n\n def _resnet_small(self,\n inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n include_root_block=True,\n spatial_squeeze=True,\n reuse=None,\n scope='resnet_v1_small'):\n \"\"\"A shallow and thin ResNet v1 for faster tests.\"\"\"\n block = resnet_v1.resnet_v1_block\n blocks = [\n block('block1', base_depth=1, num_units=3, stride=2),\n block('block2', base_depth=2, num_units=3, stride=2),\n block('block3', base_depth=4, num_units=3, stride=2),\n block('block4', base_depth=8, num_units=2, stride=1),\n ]\n return resnet_v1.resnet_v1(inputs, blocks, num_classes,\n is_training=is_training,\n global_pool=global_pool,\n output_stride=output_stride,\n include_root_block=include_root_block,\n spatial_squeeze=spatial_squeeze,\n reuse=reuse,\n scope=scope)\n\n def testClassificationEndPoints(self):\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n logits, end_points = self._resnet_small(inputs, num_classes,\n global_pool=global_pool,\n spatial_squeeze=False,\n scope='resnet')\n self.assertTrue(logits.op.name.startswith('resnet/logits'))\n self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])\n self.assertTrue('predictions' in end_points)\n 
self.assertListEqual(end_points['predictions'].get_shape().as_list(),\n [2, 1, 1, num_classes])\n self.assertTrue('global_pool' in end_points)\n self.assertListEqual(end_points['global_pool'].get_shape().as_list(),\n [2, 1, 1, 32])\n\n def testClassificationEndPointsWithNoBatchNormArgscope(self):\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n logits, end_points = self._resnet_small(inputs, num_classes,\n global_pool=global_pool,\n spatial_squeeze=False,\n is_training=None,\n scope='resnet')\n self.assertTrue(logits.op.name.startswith('resnet/logits'))\n self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])\n self.assertTrue('predictions' in end_points)\n self.assertListEqual(end_points['predictions'].get_shape().as_list(),\n [2, 1, 1, num_classes])\n self.assertTrue('global_pool' in end_points)\n self.assertListEqual(end_points['global_pool'].get_shape().as_list(),\n [2, 1, 1, 32])\n\n def testEndpointNames(self):\n # Like ResnetUtilsTest.testEndPointsV1(), but for the public API.\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(inputs, num_classes,\n global_pool=global_pool,\n scope='resnet')\n expected = ['resnet/conv1']\n for block in range(1, 5):\n for unit in range(1, 4 if block < 4 else 3):\n for conv in range(1, 4):\n expected.append('resnet/block%d/unit_%d/bottleneck_v1/conv%d' %\n (block, unit, conv))\n expected.append('resnet/block%d/unit_%d/bottleneck_v1' % (block, unit))\n expected.append('resnet/block%d/unit_1/bottleneck_v1/shortcut' % block)\n expected.append('resnet/block%d' % block)\n expected.extend(['global_pool', 'resnet/logits', 'resnet/spatial_squeeze',\n 'predictions'])\n self.assertItemsEqual(end_points.keys(), expected)\n\n def testClassificationShapes(self):\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(inputs, num_classes,\n global_pool=global_pool,\n scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 28, 28, 4],\n 'resnet/block2': [2, 14, 14, 8],\n 'resnet/block3': [2, 7, 7, 16],\n 'resnet/block4': [2, 7, 7, 32]}\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testFullyConvolutionalEndpointShapes(self):\n global_pool = False\n num_classes = 10\n inputs = create_test_input(2, 321, 321, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(inputs, num_classes,\n global_pool=global_pool,\n spatial_squeeze=False,\n scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 41, 41, 4],\n 'resnet/block2': [2, 21, 21, 8],\n 'resnet/block3': [2, 11, 11, 16],\n 'resnet/block4': [2, 11, 11, 32]}\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testRootlessFullyConvolutionalEndpointShapes(self):\n global_pool = False\n num_classes = 10\n inputs = create_test_input(2, 128, 128, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(inputs, num_classes,\n global_pool=global_pool,\n include_root_block=False,\n spatial_squeeze=False,\n scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 
64, 64, 4],\n 'resnet/block2': [2, 32, 32, 8],\n 'resnet/block3': [2, 16, 16, 16],\n 'resnet/block4': [2, 16, 16, 32]}\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testAtrousFullyConvolutionalEndpointShapes(self):\n global_pool = False\n num_classes = 10\n output_stride = 8\n inputs = create_test_input(2, 321, 321, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(inputs,\n num_classes,\n global_pool=global_pool,\n output_stride=output_stride,\n spatial_squeeze=False,\n scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 41, 41, 4],\n 'resnet/block2': [2, 41, 41, 8],\n 'resnet/block3': [2, 41, 41, 16],\n 'resnet/block4': [2, 41, 41, 32]}\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testAtrousFullyConvolutionalValues(self):\n \"\"\"Verify dense feature extraction with atrous convolution.\"\"\"\n nominal_stride = 32\n for output_stride in [4, 8, 16, 32, None]:\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n with tf.Graph().as_default():\n with self.test_session() as sess:\n tf.set_random_seed(0)\n inputs = create_test_input(2, 81, 81, 3)\n # Dense feature extraction followed by subsampling.\n output, _ = self._resnet_small(inputs, None, is_training=False,\n global_pool=False,\n output_stride=output_stride)\n if output_stride is None:\n factor = 1\n else:\n factor = nominal_stride // output_stride\n output = resnet_utils.subsample(output, factor)\n # Make the two networks use the same weights.\n tf.get_variable_scope().reuse_variables()\n # Feature extraction at the nominal network rate.\n expected, _ = self._resnet_small(inputs, None, is_training=False,\n global_pool=False)\n sess.run(tf.global_variables_initializer())\n self.assertAllClose(output.eval(), expected.eval(),\n atol=1e-4, rtol=1e-4)\n\n def testUnknownBatchSize(self):\n batch = 2\n height, width = 65, 65\n global_pool = True\n num_classes = 10\n inputs = create_test_input(None, height, width, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n logits, _ = self._resnet_small(inputs, num_classes,\n global_pool=global_pool,\n spatial_squeeze=False,\n scope='resnet')\n self.assertTrue(logits.op.name.startswith('resnet/logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [None, 1, 1, num_classes])\n images = create_test_input(batch, height, width, 3)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n output = sess.run(logits, {inputs: images.eval()})\n self.assertEqual(output.shape, (batch, 1, 1, num_classes))\n\n def testFullyConvolutionalUnknownHeightWidth(self):\n batch = 2\n height, width = 65, 65\n global_pool = False\n inputs = create_test_input(batch, None, None, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n output, _ = self._resnet_small(inputs, None, global_pool=global_pool)\n self.assertListEqual(output.get_shape().as_list(),\n [batch, None, None, 32])\n images = create_test_input(batch, height, width, 3)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n output = sess.run(output, {inputs: images.eval()})\n self.assertEqual(output.shape, (batch, 3, 3, 32))\n\n def testAtrousFullyConvolutionalUnknownHeightWidth(self):\n batch = 2\n height, width = 65, 65\n global_pool = False\n output_stride = 8\n inputs = create_test_input(batch, None, 
None, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n output, _ = self._resnet_small(inputs,\n None,\n global_pool=global_pool,\n output_stride=output_stride)\n self.assertListEqual(output.get_shape().as_list(),\n [batch, None, None, 32])\n images = create_test_input(batch, height, width, 3)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n output = sess.run(output, {inputs: images.eval()})\n self.assertEqual(output.shape, (batch, 9, 9, 32))\n\n def testDepthMultiplier(self):\n resnets = [\n resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101,\n resnet_v1.resnet_v1_152, resnet_v1.resnet_v1_200\n ]\n resnet_names = [\n 'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200'\n ]\n for resnet, resnet_name in zip(resnets, resnet_names):\n depth_multiplier = 0.25\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n scope_base = resnet_name + '_base'\n _, end_points_base = resnet(\n inputs,\n num_classes,\n global_pool=global_pool,\n min_base_depth=1,\n scope=scope_base)\n scope_test = resnet_name + '_test'\n _, end_points_test = resnet(\n inputs,\n num_classes,\n global_pool=global_pool,\n min_base_depth=1,\n depth_multiplier=depth_multiplier,\n scope=scope_test)\n for block in ['block1', 'block2', 'block3', 'block4']:\n block_name_base = scope_base + '/' + block\n block_name_test = scope_test + '/' + block\n self.assertTrue(block_name_base in end_points_base)\n self.assertTrue(block_name_test in end_points_test)\n self.assertEqual(\n len(end_points_base[block_name_base].get_shape().as_list()), 4)\n self.assertEqual(\n len(end_points_test[block_name_test].get_shape().as_list()), 4)\n self.assertListEqual(\n end_points_base[block_name_base].get_shape().as_list()[:3],\n end_points_test[block_name_test].get_shape().as_list()[:3])\n self.assertEqual(\n int(depth_multiplier *\n end_points_base[block_name_base].get_shape().as_list()[3]),\n end_points_test[block_name_test].get_shape().as_list()[3])\n\n def testMinBaseDepth(self):\n resnets = [\n resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101,\n resnet_v1.resnet_v1_152, resnet_v1.resnet_v1_200\n ]\n resnet_names = [\n 'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200'\n ]\n for resnet, resnet_name in zip(resnets, resnet_names):\n min_base_depth = 5\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = resnet(\n inputs,\n num_classes,\n global_pool=global_pool,\n min_base_depth=min_base_depth,\n depth_multiplier=0,\n scope=resnet_name)\n for block in ['block1', 'block2', 'block3', 'block4']:\n block_name = resnet_name + '/' + block\n self.assertTrue(block_name in end_points)\n self.assertEqual(\n len(end_points[block_name].get_shape().as_list()), 4)\n # The output depth is 4 times base_depth.\n depth_expected = min_base_depth * 4\n self.assertEqual(\n end_points[block_name].get_shape().as_list()[3], depth_expected)\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 Tomas Hodan ([email protected]).\n# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n\n\"\"\"Provides common names and flags used throughout the code.\"\"\"\n\nimport os\nimport copy\nimport collections\nimport yaml\nimport tensorflow as tf\n\n\n# Common names.\n# ------------------------------------------------------------------------------\n# 6D object pose estimation tasks (see Hodan et al. ECCVW 2018).\nLOCALIZATION = 'localization'\nDETECTION = 'detection'\n\n# Pose fitting methods.\nPROGRESSIVE_X = 'progressive_x'\nOPENCV_RANSAC = 'opencv_ransac'\n\n# Predictions.\nPRED_OBJ_LABEL = 'pred_obj_label' # Object labels.\nPRED_OBJ_CONF = 'pred_obj_conf' # Object confidences.\nPRED_FRAG_CONF = 'pred_frag_conf' # Fragment confidences.\nPRED_FRAG_LOC = 'pred_frag_loc' # Precise 3D locations on fragments.\nPRED_CORRESP = 'pred_corresp' # 2D-3D correspondences.\n\n# Ground-truth output.\nGT_OBJ_LABEL = 'gt_obj_label' # Object labels.\nGT_FRAG_LABEL = 'gt_frag_label' # Fragment labels.\nGT_FRAG_LOC = 'gt_frag_loc' # Precise 3D locations on fragments.\nGT_FRAG_WEIGHT = 'gt_frag_weight'\n\n# Lists of ground truth annotations (elements at the same position in the lists\n# belong to the same annotated object instance).\nGT_OBJ_IDS = 'gt_obj_ids' # List of object ID's.\nGT_OBJ_VISIB_FRACT = 'gt_obj_visib_fract' # List of visibility fractions.\nGT_OBJ_MASKS = 'gt_obj_masks' # List of masks of object instances.\nGT_OBJ_QUATS = 'gt_obj_quats' # List of quaternions.\nGT_OBJ_TRANS = 'gt_obj_trans' # List of translation vectors.\n\n# Other common names.\nK = 'K'\nIMAGE = 'image'\nIMAGE_PATH = 'image_path'\nSCENE_ID = 'scene_id'\nIM_ID = 'im_id'\nTEST_SET = 'test'\nPARAMS_FILENAME = 'params.yml'\n\n\n# Common flags.\n# ------------------------------------------------------------------------------\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n# EPOS flags.\nflags.DEFINE_boolean(\n 'frag_cls_agnostic', False,\n 'Whether the fragment classification is object agnostic.')\nflags.DEFINE_boolean(\n 'frag_loc_agnostic', False,\n 'Whether the fragment localization is object agnostic.')\nflags.DEFINE_integer(\n 'num_frags', 64,\n 'Number of fragments per object.')\nflags.DEFINE_float(\n 'min_visib_fract', 0.1,\n 'Minimum visibility to consider an annotated object instance.')\nflags.DEFINE_float(\n 'corr_min_obj_conf', 0.1,\n 'Threshold on the object confidence (tau_a in the EPOS paper).')\nflags.DEFINE_float(\n 'corr_min_frag_rel_conf', 0.5,\n 'Threshold on the relative fragment confidence (tau_b in the EPOS paper).')\nflags.DEFINE_boolean(\n 'corr_project_to_model', False,\n 'Whether to project the predicted points to the object model.')\n\n# Other flags.\nflags.DEFINE_string(\n 'model', None,\n 'Name of the model.')\nflags.DEFINE_string(\n 'dataset', None,\n 'Name of the dataset.')\n# See feature.py for supported model variants.\nflags.DEFINE_string(\n 'model_variant', 'xception_65',\n 'Deep model variant.')\nflags.DEFINE_integer(\n 'logits_kernel_size', 1,\n 'The kernel size for the convolutional kernel that generates logits.')\nflags.DEFINE_multi_float(\n 'image_pyramid', None,\n 'Input scales for multi-scale feature extraction.')\nflags.DEFINE_boolean(\n 'add_image_level_feature', True,\n 'Add image level feature.')\nflags.DEFINE_list(\n 'image_pooling_stride', '1,1',\n 'Image pooling stride [height, width] used in the ASPP image pooling. 
')\nflags.DEFINE_boolean(\n 'aspp_with_batch_norm', True,\n 'Use batch norm parameters for ASPP or not.')\nflags.DEFINE_boolean(\n 'aspp_with_separable_conv', True,\n 'Use separable convolution for ASPP or not.')\n# Defaults to None. Set multi_grid = [1, 2, 4] when using provided\n# 'resnet_v1_{50,101}_beta' checkpoints.\nflags.DEFINE_multi_integer(\n 'multi_grid', None,\n 'Employ a hierarchy of atrous rates for ResNet.')\nflags.DEFINE_float(\n 'depth_multiplier', 1.0,\n 'Multiplier for the depth (number of channels) for all convolution ops used '\n 'in MobileNet.')\nflags.DEFINE_integer(\n 'divisible_by', None,\n 'An integer that ensures the layer # channels are divisible by this value. '\n 'Used in MobileNet.')\nflags.DEFINE_multi_integer(\n 'atrous_rates', [12, 24, 36],\n 'Atrous rates for atrous spatial pyramid pooling.')\nflags.DEFINE_list(\n 'decoder_output_stride', [4],\n 'Comma-separated list of strings with the number specifying output stride of '\n 'low-level features at each model level. Current implementation assumes at '\n 'most one output stride (i.e., either None or a list with only one element). '\n 'If None, decoder is not used.')\nflags.DEFINE_integer(\n 'encoder_output_stride', 8,\n 'The ratio of input to encoder output spatial resolution.')\nflags.DEFINE_boolean(\n 'decoder_use_separable_conv', True,\n 'Employ separable convolution for decoder or not.')\nflags.DEFINE_enum(\n 'merge_method', 'max', ['max', 'avg'],\n 'Scheme to merge multi scale features.')\nflags.DEFINE_boolean(\n 'prediction_with_upsampled_logits', True,\n 'When performing prediction, there are two options: (1) bilinear upsampling '\n 'the logits followed by argmax, or (2) armax followed by nearest upsampling '\n 'the predicted labels. The second option may introduce some'\n '\"blocking effect\", but it is more computationally efficient.')\nflags.DEFINE_bool(\n 'use_bounded_activation', False,\n 'Whether or not to use bounded activations. 
Bounded activations better lend '\n 'themselves to quantized inference.')\nflags.DEFINE_boolean(\n 'upsample_logits', False,\n 'Whether to upsample logits.')\n\n\ndef update_flags(model_params_path):\n \"\"\"Updates flags with values loaded from a YAML file.\n\n Args:\n model_params_path: Path to a YAML file.\n \"\"\"\n if not os.path.exists(model_params_path):\n return\n\n tf.logging.info('Loading flags from: {}'.format(model_params_path))\n if os.path.basename(model_params_path).split('.')[1] not in ['yml', 'yaml']:\n raise ValueError('Only YAML format is currently supported.')\n\n with open(model_params_path, 'r') as f:\n params = yaml.load(f, Loader=yaml.CLoader)\n for par_name, par_val in params.items():\n if par_name in FLAGS.__flags.keys():\n if par_name in ['train_crop_size', 'infer_crop_size', 'eval_crop_size']:\n FLAGS.__flags[par_name].value = [int(x) for x in par_val.split(',')]\n else:\n FLAGS.__flags[par_name].value = par_val\n\n\ndef print_flags():\n \"\"\"Prints all flags and their values.\"\"\"\n tf.logging.info('Flags:')\n tf.logging.info('----------')\n for flag_name, flag_value in FLAGS.__flags.items():\n tf.logging.info('{}: {}'.format(flag_name, flag_value.value))\n tf.logging.info('----------')\n\n\ndef get_outputs_to_num_channels(num_objs, num_frags):\n \"\"\"Returns a map from output type to the number of associated channels.\n\n Args:\n num_objs: Number of objects.\n num_frags: Number of surface fragments per object.\n \"\"\"\n return {\n PRED_OBJ_CONF:\n num_objs + 1,\n PRED_FRAG_CONF:\n (1 if FLAGS.frag_cls_agnostic else num_objs) * num_frags,\n PRED_FRAG_LOC:\n (1 if FLAGS.frag_cls_agnostic else num_objs) * num_frags * 3,\n }\n\n\nclass ModelOptions(\n collections.namedtuple('ModelOptions', [\n 'outputs_to_num_channels',\n 'crop_size',\n 'atrous_rates',\n 'encoder_output_stride',\n 'preprocessed_images_dtype',\n 'merge_method',\n 'add_image_level_feature',\n 'image_pooling_stride',\n 'aspp_with_batch_norm',\n 'aspp_with_separable_conv',\n 'multi_grid',\n 'decoder_output_stride',\n 'decoder_use_separable_conv',\n 'logits_kernel_size',\n 'model_variant',\n 'depth_multiplier',\n 'divisible_by',\n 'prediction_with_upsampled_logits',\n 'use_bounded_activation'\n ])):\n \"\"\"Immutable class to hold model options.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls,\n outputs_to_num_channels,\n crop_size=None,\n atrous_rates=None,\n encoder_output_stride=8,\n preprocessed_images_dtype=tf.float32):\n \"\"\"Constructor to set default values.\n\n Args:\n outputs_to_num_channels: A dictionary from output type to the number of\n classes. 
For example, for the task of object segmentation with 21\n classes, we would have outputs_to_num_channels['semantic']=21.\n crop_size: A tuple [im_height, im_width].\n atrous_rates: A list of atrous convolution rates for ASPP.\n encoder_output_stride: The ratio of input to encoder output resolution.\n preprocessed_images_dtype: The type after the preprocessing function.\n\n Returns:\n A new ModelOptions instance.\n \"\"\"\n decoder_output_stride = None\n if FLAGS.decoder_output_stride:\n decoder_output_stride = [int(x) for x in FLAGS.decoder_output_stride]\n if sorted(decoder_output_stride, reverse=True) != decoder_output_stride:\n raise ValueError('Decoder output stride need to be sorted in the '\n 'descending order.')\n\n image_pooling_stride = [1, 1]\n if FLAGS.image_pooling_stride:\n image_pooling_stride = [int(x) for x in FLAGS.image_pooling_stride]\n\n return super(ModelOptions, cls).__new__(\n cls,\n outputs_to_num_channels,\n crop_size,\n atrous_rates,\n encoder_output_stride,\n preprocessed_images_dtype,\n FLAGS.merge_method,\n FLAGS.add_image_level_feature,\n image_pooling_stride,\n FLAGS.aspp_with_batch_norm,\n FLAGS.aspp_with_separable_conv,\n FLAGS.multi_grid,\n decoder_output_stride,\n FLAGS.decoder_use_separable_conv,\n FLAGS.logits_kernel_size,\n FLAGS.model_variant,\n FLAGS.depth_multiplier,\n FLAGS.divisible_by,\n FLAGS.prediction_with_upsampled_logits,\n FLAGS.use_bounded_activation)\n\n def __deepcopy__(self, memo):\n return ModelOptions(copy.deepcopy(self.outputs_to_num_channels),\n self.crop_size,\n self.atrous_rates,\n self.encoder_output_stride,\n self.preprocessed_images_dtype)\n"
] |
[
[
"tensorflow.multiply",
"tensorflow.nn.sigmoid",
"tensorflow.transpose",
"tensorflow.reduce_mean",
"tensorflow.truncated_normal_initializer",
"tensorflow.squeeze",
"tensorflow.variable_scope",
"tensorflow.tile"
],
[
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.truncated_normal_initializer",
"tensorflow.reduce_mean"
],
[
"tensorflow.get_variable",
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.zeros",
"numpy.arange",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.test.main",
"tensorflow.set_random_seed",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
],
[
"tensorflow.logging.info"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
gugerlir/rainforest
|
[
"85a9d51acf2036245f0cebf7232e735c2cf2dfc4"
] |
[
"rainforest/common/utils.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nSet of functions that can be useful\n\nDaniel Wolfensberger\nMeteoSwiss/EPFL\[email protected]\nDecember 2019\n\"\"\"\n\n# Global imports\nimport datetime\nimport io\nimport os\nfrom collections import OrderedDict\nimport numpy as np\nfrom scipy.stats import energy_distance\nfrom dateutil import parser\nimport glob\nimport logging\nimport yaml\nimport dask.dataframe as dd\nimport re\n\n# Local imports\nfrom .wgs84_ch1903 import GPSConverter\nfrom . import constants\n\ndef hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\n\ndef envyaml(filename):\n \"\"\"\n Reads a yaml configuration file while parsing environment variables.\n Environment variables must be written as ${NAME_OF_VAR} in the yaml file\n \n Parameters\n ----------\n filename : str\n path of the input yaml file\n\n Returns\n -------\n dict\n the yaml content in the form of a python dict\n\n \"\"\"\n pattern = \"(\\\\$\\\\{[A-Za-z0-9]+\\\\})\"\n file = open(filename,'r')\n filebuff = io.StringIO()\n for l in file.readlines():\n matches = re.findall(pattern, l)\n for m in matches:\n l = l.replace(m, os.environ[m.replace('${','').replace('}','')])\n filebuff.write(l)\n filebuff.seek(0)\n \n return yaml.load(filebuff, Loader = yaml.FullLoader)\n\ndef perfscores(est_data, ref_data, bounds = None, array = False):\n \"\"\"\n Computes a set of precipitation performance scores, on different data ranges.\n The scores are\n - scatter: 0.5 * (Qw84(x) - Qw16(x)), where Qw is a quantile weighted\n by ref_data / sum(ref_data) and x is est_data / ref_data in dB scale\n - RMSE: root mean square error (linear error)\n - bias: (ME/mean(ref_data) + 1) in dB\n - ED: the energy distance which is a measure of the distance between\n two distributions (https://en.wikipedia.org/wiki/Energy_distance)\n \n Parameters\n ----------\n est_data : ndarray\n array of estimates (ex. precip from QPE)\n ref_data : ndarray\n array of reference (ex. 
precip from gauge)\n bounds : list (optional)\n list of bounds on ref_data for which to compute the error metrics,\n by default all data will be used (unbounded), note that even if you \n prescribe bounds the scores for the overall data will always be \n added in the output\n array: boolean (optional)\n Whether or not to convert the output dict to a numpy array\n \n Returns\n -------\n all_metrics : dict or ndarray\n a dictionary containing all the scores, organized in the following way\n all_metrics[bound][score] \n \"\"\"\n all_metrics = OrderedDict()\n \n valid = np.logical_and(est_data >= 0, ref_data >= 0)\n est_data = est_data[valid > 0]\n ref_data = ref_data[valid > 0]\n \n est = est_data\n ref = ref_data \n \n \n all_metrics['all'] = _perfscores(est, ref)\n \n if bounds != None:\n for i in range(len(bounds) -1):\n bound_str = '{:2.1f}-{:2.1f}'.format(bounds[i],bounds[i+1])\n cond = np.logical_and(ref_data < bounds[i+1],\n ref_data >= bounds[i])\n if np.sum(cond) > 0:\n est = est_data[cond]\n ref = ref_data[cond]\n \n all_metrics[bound_str] = _perfscores(est, ref)\n \n if array == True:\n arr = []\n for k in all_metrics:\n arr.append(list(all_metrics[k].values()))\n arr = np.array(arr)\n all_metrics = np.array(arr)\n \n return all_metrics\n\ndef _perfscores(est_data, ref_data):\n \"\"\"An unbounded version of the previous function\"\"\"\n doublecond = np.logical_and(ref_data > 0.1, est_data > 0.1)\n rmse = np.sqrt(np.nanmean((est_data-ref_data)**2))\n db_err = 10 * np.log10(est_data[doublecond] / ref_data[doublecond])\n weights = ref_data[doublecond]/np.sum(ref_data[doublecond])\n scatter = 0.5 * (quantile(db_err,weights,0.84) -quantile(db_err,weights,0.16))\n bias_db = 10*np.log10(np.sum(est_data[doublecond]) / np.sum(ref_data[doublecond]))\n\n ed = energy_distance(est_data[np.isfinite(est_data)], ref_data[np.isfinite(est_data)])\n metrics = {'RMSE':rmse,'scatter':scatter,'logBias':bias_db,\n 'ED':ed,'N':len(ref_data)}\n \n return metrics\n\ndef split_by_time(files_rad):\n \"\"\"Separate a list of files by their timestamp\"\"\"\n out = {}\n if type(files_rad) == dict:\n for k in files_rad.keys():\n out[k] = _split_by_time(files_rad[k])\n else:\n out = _split_by_time(files_rad)\n return out \n \ndef _split_by_time(files_rad):\n out = {}\n \n for f in files_rad:\n\n t = timefromfilename(f)\n if t in out.keys():\n out[t].append(f)\n else:\n out[t] = [f]\n # Avoid lists with size 1\n for k in out.keys():\n if len(out[k]) == 1:\n out[k] = out[k][0]\n return out\n\n\ndef timestamp_from_datetime(dt):\n return dt.replace(tzinfo = datetime.timezone.utc).timestamp()\n\ndef timestamp_from_datestr(datestr):\n # Datetstr in YYYYmmdd or YYYYmmddHH or YYYYmmddHHMM format\n datestr = parser.parse(datestr)\n epoch = datetime.datetime(1970,1,1)\n\n return int((datestr - epoch).total_seconds())\n\ndef timefromfilename(fname):\n \"\"\"Returns the datetime of a file based on its name\"\"\"\n bname = os.path.basename(fname)\n tstr = bname[3:12]\n return datetime.datetime.strptime(tstr,'%y%j%H%M')\n\ndef sweepnumber_fromfile(fname):\n \"\"\"Returns the sweep number of a polar file based on its name\"\"\"\n return int(os.path.basename(fname).split('.')[1])\n\ndef round_to_hour(dt):\n \"\"\"Returns the sweep number of a polar file based on its name\"\"\"\n round_delta = 60 * 30\n round_timestamp = dt.timestamp() + round_delta\n round_dt = datetime.datetime.fromtimestamp(round_timestamp)\n return round_dt.replace(microsecond=0, second=0, minute=0)\n\ndef idx_cart(x,y):\n \"\"\"Returns the Cartesian index of 
a set of coordinates x and y\"\"\"\n '''\n Returns the Cartesian index of a set of coordinates x and y\n\n Args:\n arrays: list of np arrays of various sizes\n (must be same rank, but not necessarily same size)\n fill_value (float, optional):\n\n Returns:\n np.ndarray\n '''\n if type(x) != np.ndarray:\n x = np.array([x])\n if type(y) != np.ndarray:\n y = np.array([y]) \n \n x_qpe = constants.X_QPE\n y_qpe = constants.Y_QPE\n\n # For x the columns in the Cartesian lookup tables are lower bounds\n # e.g. x = 563, means that radar pixels are between 563 and 564\n y_llc = (y/constants.CART_GRID_SIZE).astype(int)\n # For y the columns in the Cartesian lookup tables are upper bounds\n # e.g. x = 182, means that radar pixels are between 181 and 182 \n x_llc = (np.ceil(x/constants.CART_GRID_SIZE)).astype(int)\n \n idx = [(np.max(x_qpe) - x_llc).astype(int),\n (y_llc - np.min(y_qpe)).astype(int)]\n\n return np.squeeze(idx) \n \n \ndef stack_uneven(arrays, fill_value = np.nan):\n '''\n Fits mmltiple into a single numpy array, even if they are\n different sizes, assigning a fill_value to parts with no data\n\n Parameters\n ----------\n arrays: list of np arrays \n list of numpy array to stack, they can have different dimensions\n \n fill_value: (float, optional)\n the fill value with which to fill the missing pixels\n\n Returns\n -------\n a np.ndarray with size N x M, where N is the sum of the number of \n rows of all arrays and M is the maximal number of columns in all arrays\n '''\n \n dim0 = [a.shape[0] for a in arrays]\n dim1 = [a.shape[1] for a in arrays]\n \n dim2max = max(dim1)\n \n stacked = np.ones((np.sum(dim0), dim2max)) + fill_value\n \n idx_row = 0\n for arr in arrays:\n stacked[idx_row:idx_row + arr.shape[0], 0:arr.shape[1]] = arr\n idx_row += arr.shape[0]\n \n return stacked\n\n\ndef quantile_1D(data, weights, quantile):\n \"\"\"\n Compute the weighted quantile of a 1D numpy array.\n\n Parameters\n ----------\n data : ndarray\n Input array (one dimension).\n weights : ndarray\n Array with the weights of the same size of `data`.\n quantile : float\n Quantile to compute. It must have a value between 0 and 1.\n\n Returns\n -------\n quantile_1D : float\n The output value.\n \"\"\"\n # Check the data\n if not isinstance(data, np.matrix):\n data = np.asarray(data)\n if not isinstance(weights, np.matrix):\n weights = np.asarray(weights)\n nd = data.ndim\n if nd != 1:\n raise TypeError(\"data must be a one dimensional array\")\n ndw = weights.ndim\n if ndw != 1:\n raise TypeError(\"weights must be a one dimensional array\")\n if data.shape != weights.shape:\n raise TypeError(\"the length of data and weights must be the same\")\n if ((quantile > 1.) or (quantile < 0.)):\n raise ValueError(\"quantile must have a value between 0. and 1.\")\n # Sort the data\n ind_sorted = np.argsort(data)\n sorted_data = data[ind_sorted]\n sorted_weights = weights[ind_sorted]\n # Compute the auxiliary arrays\n Sn = np.cumsum(sorted_weights)\n # TODO: Check that the weights do not sum zero\n #assert Sn != 0, \"The sum of the weights must not be zero\"\n Pn = (Sn-0.5*sorted_weights)/np.sum(sorted_weights)\n # Get the value of the weighted median\n return np.interp(quantile, Pn, sorted_data)\n\n\ndef quantile(data, weights, quantile):\n \"\"\"\n Weighted quantile of an array with respect to the last axis.\n\n Parameters\n ----------\n data : ndarray\n Input array.\n weights : ndarray\n Array with the weights. It must have the same size of the last \n axis of `data`.\n quantile : float\n Quantile to compute. 
It must have a value between 0 and 1.\n\n Returns\n -------\n quantile : float\n The output value.\n \"\"\"\n # TODO: Allow to specify the axis\n nd = data.ndim\n if nd == 0:\n TypeError(\"data must have at least one dimension\")\n elif nd == 1:\n return quantile_1D(data, weights, quantile)\n elif nd > 1:\n n = data.shape\n imr = data.reshape((np.prod(n[:-1]), n[-1]))\n result = np.apply_along_axis(quantile_1D, -1, imr, weights, quantile)\n return result.reshape(n[:-1])\n \n \n\ndef wgs84toCH1903(lat, lon, heights):\n \"\"\"\n Converts a set of WGS84, lat/lon/heights to Swiss CH1903 coordinates,\n east, north and height\n\n Parameters\n ----------\n lat : ndarray\n latitudes in decimal format (degrees)\n lon : ndarray\n longitudes in decimal format (degrees)\n heights : ndarray\n heights a.s.l in WGS84 coordinates\n\n Returns\n -------\n east, north and height coordinates in CHLV190\n \"\"\"\n \n conv = GPSConverter()\n lv03 = conv.WGS84toLV03(lat, lon, heights)\n return lv03[0], lv03[1], lv03[2]\n\ndef LV03toWGS84(east, north, heights):\n \"\"\"\n Converts a set of WGS84, lat/lon/heights to Swiss CH1903 coordinates\n\n Parameters\n ----------\n east : ndarray\n Easterly Swiss coordinates (CHY)\n north : ndarray\n northerly Swiss coordinates (CHX)\n heights : ndarray\n heights a.s.l in WGS84 coordinates\n\n Returns\n -------\n lat, lon and height coordinates in WGS84\n \"\"\"\n \n conv = GPSConverter()\n wgs = conv.LV03toWGS84(east, north, heights)\n return wgs[0], wgs[1], wgs[2]\n\n\ndef chunks(l, n):\n \"\"\"\n Divides a list l into n sublists of similar sizes\n \"\"\"\n o = int(np.round(len(l)/n))\n out = []\n # For item i in a range that is a length of l,\n for i in range(0, n):\n # Create an index range for l of n items:\n if i == n-1:\n sub = l[i*o:]\n else:\n sub = l[i*o:i*o+o]\n \n if len(sub):\n out.append(sub)\n return out\n\n\ndef dict_flatten(mydict):\n \"\"\"\n Flattens a nested dictionary\n \"\"\"\n new_dict = {}\n for key,value in mydict.items():\n if type(value) == dict:\n _dict = {':'.join([key,str(_key)]):_value for _key, _value in\n dict_flatten(value).items()}\n new_dict.update(_dict)\n else:\n new_dict[key]=value\n return new_dict\n \ndef nested_dict_values(d):\n \"\"\"\n Extracts all values from a nested dictionary\n \"\"\"\n listvals = list(nested_dict_gen(d))\n listvals_unwrapped = []\n for l in listvals:\n if type(l) == list or type(l) == np.ndarray:\n for ll in l:\n listvals_unwrapped.append(ll)\n else:\n listvals_unwrapped.append(l)\n return listvals_unwrapped\n\n\ndef nested_dict_gen(d):\n \"\"\"\n The generator for the previous function\n \"\"\"\n for v in d.values():\n if isinstance(v, dict):\n yield from nested_dict_gen(v)\n else:\n yield v\n \ndef nanadd_at(a, indices, b):\n \"\"\" Replaces nans by zero in call to np.add.at \"\"\"\n mask = np.isfinite(b)\n b = b[mask]\n indices = indices[mask]\n indices = tuple([indices[:,0], indices[:,1]])\n return np.add.at(a, indices, b)\n \ndef aggregate_multi(array_3d, agg_operators):\n \"\"\"\n Aggregates a 3D numpy array alongs its first axis, using different\n aggregation operators\n \n Parameters\n ----------\n array_3d : ndarray\n 3D numpy array, of shape (N x M x L)\n agg_operators : ndarray of integers\n array of aggregation operators as defined in the constants.py file,\n must have the same length as the first dimension of array_3D (N)\n \n Returns\n -------\n An aggregated array of size M x L\n \"\"\"\n out = np.zeros(array_3d[0].shape) + np.nan\n op_un, idx = np.unique(agg_operators, return_inverse = True)\n 
for i, op in enumerate(op_un):\n out[:,idx == i] = constants.AVG_METHODS[op](array_3d[:,:,idx == i],\n axis = 0)\n \n return out\n\n\n\ndef rename_fields(data):\n \"\"\"\n Rename pyart fields from pyrad names to simpler names, according to the\n dictionary PYART_NAMES_MAPPING in the constants.py module\n \"\"\"\n old_keys = list(data.fields.keys())\n for k in old_keys:\n if k in constants.PYART_NAMES_MAPPING.keys():\n new_name = constants.PYART_NAMES_MAPPING[k]\n data.fields[new_name] = data.fields.pop(k)\n \ndef read_task_file(task_file): \n \"\"\"\n Reads a database processing task file\n \"\"\"\n tasks_dic = OrderedDict() # We want a sorted dict\n \n with open(task_file,'r') as f:\n for line in f:\n line = line.strip('\\n').split(',')\n line = np.array([s.replace(' ','') for s in line])\n tasks_dic[int(line[0])] = line[1:]\n return tasks_dic\n\ndef read_df(pattern, dbsystem = 'dask', sqlContext = None):\n \"\"\"\n Reads a set of data contained in a folder as a spark or dask DataFrame\n \n Parameters\n ----------\n pattern : str\n Unix style wildcard pattern pointing to the files, for example\n /store/msrad/folder/*.csv will read all csv files in that folder\n dbsystem : str\n Either \"dask\" if you want a Dask DataFrame or \"spark\" if you want a \n spark dataframe\n sqlContext : sqlContext instance\n sqlContext to use, required only if dbystem = 'spark'\n \n Returns\n -------\n A spark or dask DataFrame instance\n \"\"\"\n \n if dbsystem not in ['spark','dask']:\n raise NotImplementedError('Only dbsystem = \"spark\" or \"dask\" are supported!')\n if dbsystem == 'spark' and sqlContext == None:\n raise ValueError('sqlContext must be provided if dbystem = \"spark\"!')\n \n files = glob.glob(pattern)\n df = None\n if '.parq' in files[0] or '.parquet' in files[0]:\n # For some reason wildcards are not accepted with parquet\n if dbsystem == 'spark':\n df = sqlContext.read.parquet(*files)\n else:\n df = dd.read_parquet(pattern) \n elif '.csv' in files[0]:\n if dbsystem == 'spark':\n df = sqlContext.read.csv(pattern,\n header = True, inferSchema = True)\n else:\n if '.gz' in files[0]:\n df = dd.read_csv(pattern, compression = 'gzip')\n else:\n df = dd.read_csv(pattern)\n else:\n logging.error(\"\"\"Invalid data, only csv and parquet files are accepted.\n Make sure that they have a valid suffix (.csv, .csv.gz, .parquet,\n .parq)\"\"\")\n\n return df\n\n\ndef nearest_time(dt, reference):\n \"\"\"\n Gets the nearest earlier reference timestep to a given datetime, for ex.\n if dt = 1 Jan 2020 10:12, and reference is 10 it will return\n 1 Jan 2020 10:10, or if dt = 1 Jan 2020 10:59 and reference is 60\n it will return 1 Jan 2020 10:00\n \n Parameters\n ----------\n dt : datetime\n The datetime to check\n reference : int\n The reference timestep in minutes\n \n Returns\n -------\n The closest earlier timestep in datetime format\n \n \"\"\"\n dt2 = dt - datetime.timedelta(minutes = dt.minute % reference,\n seconds = dt.second,\n microseconds = dt.microsecond) \n if dt2 != dt:\n dt2 += datetime.timedelta(minutes = reference)\n \n return dt2\n\ndef get_qpe_files(input_folder, t0 = None, t1 = None, time_agg = None,\n list_models = None):\n \"\"\"\n Gets the list of all qpe files in a folder (as saved by qpe_compute)\n and separates them by qpe type and timestep\n \n Parameters\n ----------\n input_folder : str\n main directory where the qpe files are saved, it contains one subfolder\n for every qpe model (type) that was used\n t0 : datetime (optional)\n Starting time of the period to retrieve, will be 
used to filter files,\n if not provided starting time will be time of first file\n t1 : datetime (optional)\n End time of the period to retrieve, will be used to filter files,\n if not provided end time will be time of last file\n time_agg : minutes (optional)\n Will aggregate all files to a reference time in minutes (e.g. use 10 to\n put together all files that correspond to a single gauge measurement)\n list_models: (optional)\n List of qpe types to retrieve , if not provided all folders in input_folder\n will be used\n Returns\n -------\n A dictionary where every key is a QPE model and every value is a list\n with all files in chronological order\n \"\"\"\n all_files = {}\n \n for sub in glob.glob(input_folder + '/*'):\n model = os.path.basename(sub)\n\n if list_models != None:\n if model not in list_models:\n continue\n \n files = glob.glob(sub + '/*')\n for f in files:\n try:\n t = str(re.match('.*[a-zA-Z]([0-9]{9}).*',f)[1])\n t = datetime.datetime.strptime(t,'%y%j%H%M')\n if time_agg != None:\n t = nearest_time(t, time_agg)\n \n if t0 != None:\n if t < t0:\n continue\n if t1 != None:\n if t > t1:\n continue\n \n if t not in all_files.keys():\n all_files[t] = {}\n if model not in all_files[t].keys():\n all_files[t][model] = []\n \n all_files[t][model].append(f) \n except:\n pass\n \n return all_files\n"
] |
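
The `quantile_1D` routine in the code field above computes a weighted quantile by sorting the data, forming the cumulative-weight positions Pn = (Sn - 0.5*w) / sum(w), and linearly interpolating the requested quantile on (Pn, sorted data). Below is a minimal standalone sketch of that same rule (the function name and test values are mine, not part of the library); with equal weights it should land close to `np.median`.

```python
# Standalone sketch of the weighted-quantile rule used by `quantile_1D` above:
# sort the data, build Pn = (Sn - 0.5 * w) / sum(w) from the cumulative weights Sn,
# then linearly interpolate the requested quantile on (Pn, sorted_data).
import numpy as np

def weighted_quantile(data, weights, q):
    data = np.asarray(data, dtype=float)
    weights = np.asarray(weights, dtype=float)
    order = np.argsort(data)
    d, w = data[order], weights[order]
    Sn = np.cumsum(w)
    Pn = (Sn - 0.5 * w) / np.sum(w)
    return np.interp(q, Pn, d)

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    x = rng.normal(size=1000)
    # With equal weights the result should be close to the ordinary median
    print(weighted_quantile(x, np.ones_like(x), 0.5), np.median(x))
    # Up-weighting the positive values pulls the weighted median upwards
    print(weighted_quantile(x, np.where(x > 0, 3.0, 1.0), 0.5))
```
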
[
[
"numpy.asarray",
"numpy.squeeze",
"numpy.cumsum",
"numpy.max",
"numpy.nanmean",
"numpy.unique",
"numpy.ceil",
"numpy.apply_along_axis",
"numpy.interp",
"numpy.zeros",
"numpy.min",
"numpy.log10",
"numpy.argsort",
"numpy.array",
"numpy.logical_and",
"numpy.sum",
"numpy.add.at",
"numpy.isfinite",
"numpy.prod"
]
] |
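
The `nearest_time` helper in the same file snaps a datetime onto a reference timestep given in minutes. A small standalone re-implementation for illustration follows (the name `round_to_timestep` is mine). Reading the code above, a timestamp that is not already on a boundary is floored and then bumped by one full step, so it is effectively rounded up to the next boundary (10:12 with a 10-minute reference gives 10:20), which differs from the "nearest earlier" wording and examples in its docstring.

```python
# Minimal sketch of the timestep-rounding logic in `nearest_time` above.
# Note: as written, the logic floors to the previous multiple of `reference`
# minutes and then, whenever dt was not already on a boundary, adds one full
# step, i.e. it rounds up to the next boundary.
import datetime

def round_to_timestep(dt, reference):
    dt2 = dt - datetime.timedelta(minutes=dt.minute % reference,
                                  seconds=dt.second,
                                  microseconds=dt.microsecond)
    if dt2 != dt:
        dt2 += datetime.timedelta(minutes=reference)
    return dt2

if __name__ == '__main__':
    print(round_to_timestep(datetime.datetime(2020, 1, 1, 10, 12), 10))  # 2020-01-01 10:20:00
    print(round_to_timestep(datetime.datetime(2020, 1, 1, 10, 10), 10))  # 2020-01-01 10:10:00 (already on a boundary)
    print(round_to_timestep(datetime.datetime(2020, 1, 1, 10, 59), 60))  # 2020-01-01 11:00:00
```
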
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
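
`stack_uneven` in the same record pads 2-D arrays with different column counts into one tall array. Below is a small sketch of the same idea (illustration only, not the library's API). One detail worth noting: the original initialises the buffer with `np.ones(...) + fill_value`, which only yields `fill_value` when the fill value is NaN; for a finite fill value the padding would come out as `fill_value + 1`, so the sketch uses `np.full` instead.

```python
# Sketch of padding unevenly sized 2-D arrays into one array (cf. `stack_uneven` above).
import numpy as np

def stack_uneven_sketch(arrays, fill_value=np.nan):
    n_rows = sum(a.shape[0] for a in arrays)
    n_cols = max(a.shape[1] for a in arrays)
    # np.full writes fill_value directly, whether it is finite or NaN
    stacked = np.full((n_rows, n_cols), fill_value, dtype=float)
    row = 0
    for a in arrays:
        stacked[row:row + a.shape[0], :a.shape[1]] = a
        row += a.shape[0]
    return stacked

if __name__ == '__main__':
    a = np.arange(6).reshape(2, 3)
    b = np.arange(10).reshape(2, 5)
    print(stack_uneven_sketch([a, b]))          # 4 x 5, missing cells are NaN
    print(stack_uneven_sketch([a, b], -9999.))  # same layout, -9999 padding
```
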
ahmedqasem/image_tools
|
[
"6b6ecbe23769a5c5136b9dcccff1db898bdb7490"
] |
[
"fix_data.py"
] |
[
"import os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\n\r\n\r\ndef load_images_and_labels(path):\r\n \"\"\" scan the data folder in and return a list of paths for images and labels \"\"\"\r\n folders = os.listdir(path)\r\n slices = []\r\n gt = []\r\n for f in folders:\r\n current_folder = os.path.join(path, f)\r\n subfolders = os.listdir(current_folder)\r\n image_folder = os.path.join(current_folder, subfolders[1])\r\n label_folder = os.path.join(current_folder, subfolders[0])\r\n images = os.listdir(image_folder)\r\n labels = os.listdir(label_folder)\r\n\r\n # add images locations\r\n for m in images:\r\n # print(current_folder+os.sep+m)\r\n slices.append(image_folder + os.sep + m)\r\n # add label locations\r\n for l in labels:\r\n gt.append(label_folder + os.sep + l)\r\n\r\n return slices, gt\r\n\r\n\r\ndef red_channel(images):\r\n \"\"\" loads the images from desk, pseudo colors them and return the red channel \"\"\"\r\n red_images = []\r\n for sl in images:\r\n # load image\r\n img = cv2.imread(sl, cv2.IMREAD_GRAYSCALE)\r\n # normalize image\r\n nor_img = ((img - img.min()) * (1 / (img.max() - img.min()) * 255)).astype('uint8')\r\n # psuedocolor image\r\n img_color = cv2.applyColorMap(nor_img, cv2.COLORMAP_HSV)\r\n # choose red channel\r\n red = img_color[:, :, 0]\r\n # red = red / 255\r\n # add red image to red images\r\n red_images.append(red)\r\n\r\n return red_images\r\n\r\ndef original_images(images):\r\n \"\"\" loads the images from desk, and return the image \"\"\"\r\n or_images = []\r\n for sl in images:\r\n # load image\r\n img = cv2.imread(sl, cv2.IMREAD_GRAYSCALE)\r\n # normalize image\r\n nor_img = ((img - img.min()) * (1 / (img.max() - img.min()) * 255)).astype('uint8')\r\n # psuedocolor image\r\n # img_color = cv2.applyColorMap(nor_img, cv2.COLORMAP_HSV)\r\n # choose red channel\r\n # red = img_color[:, :, 0]\r\n # red = red / 255\r\n # add red image to red images\r\n or_images.append(nor_img)\r\n\r\n return or_images\r\n\r\n\r\ndef clean_label(labels):\r\n \"\"\" loads the label from desk, then removes all the noise and return a clean\r\n list of labels \"\"\"\r\n bin_gt = []\r\n for i, g in enumerate(labels):\r\n # load label\r\n mask = cv2.imread(g, cv2.IMREAD_GRAYSCALE)\r\n # threshold around 220 to eliminate noise around edges\r\n ret, bin_mask = cv2.threshold(mask, 220, 255, cv2.THRESH_BINARY)\r\n # normalize image\r\n nor_bin_mask = bin_mask / 255\r\n # add to master array\r\n bin_gt.append(nor_bin_mask)\r\n # bin_gt.append(bin_mask)\r\n # if i == 150:\r\n # break\r\n return bin_gt\r\n\r\n\r\n# enumerate files\r\ndef save_to_desk(images, labels, source_names, p_path, n_path):\r\n \"\"\" scans through the positive and negative images and labels and saves each in the\r\n appropriate folder \"\"\"\r\n for i in range(len(source_names)):\r\n name = source_names[i].split('/')[-1][:10] + '-' + str(i)\r\n # find image\r\n print(source_names[i])\r\n if labels[i].max() > 0:\r\n print('{} positive'.format(name))\r\n # save image and label in p_folder\r\n plt.imsave(p_path + 'images/' + name + '.png', images[i], cmap='gray')\r\n plt.imsave(p_path + 'labels/' + name + '.png', labels[i], cmap='gray')\r\n\r\n # if label = negative\r\n else:\r\n print('{} negative'.format(name))\r\n # save image and label in negative folder\r\n plt.imsave(n_path + 'images/' + name + '.png', images[i], cmap='gray')\r\n plt.imsave(n_path + 'labels/' + name + '.png', labels[i], cmap='gray')\r\n print()\r\n\r\n if i % 10 == 0:\r\n print('saved {} files 
successfully!'.format(i))\r\n print('saved {} files successfully!'.format(len(source_names)))\r\n\r\n\r\ndef Main():\r\n # set folder locations\r\n data_folder = '../data/data_jpg/'\r\n positive_path = '../data/original_combined_data/positive/'\r\n negative_path = '../data/original_combined_data/negative/'\r\n # process images\r\n slices, gt = load_images_and_labels(data_folder)\r\n # if you want the red channel only\r\n #final_images = red_channel(slices)\r\n # if you want the original image\r\n final_images = original_images(slices)\r\n bin_labels = clean_label(gt)\r\n # # save to desk\r\n save_to_desk(final_images, bin_labels, slices, positive_path, negative_path)\r\n\r\n\r\n # print(slices[0])\r\n # print(gt[0])\r\n # print(final_images[133])\r\n # print(final_images[133].shape)\r\n #\r\n # plt.imshow(final_images[133], cmap='gray')\r\n # plt.contour(bin_labels[133])\r\n # plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n Main()"
] |
[
[
"matplotlib.pyplot.imsave"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
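
The `fix_data.py` script above rescales grayscale slices to the 0-255 uint8 range and binarises the label masks with a fixed threshold before saving them. A self-contained sketch of those two steps on a synthetic array follows (the function names are mine, and the OpenCV calls are replaced by plain NumPy equivalents so the snippet runs without any image files on disk).

```python
# Sketch of the two preprocessing steps used in fix_data.py above, on a synthetic
# array instead of images read from disk: (1) min-max rescale a grayscale image to
# the 0-255 uint8 range, (2) binarise a noisy label mask with a fixed threshold.
import numpy as np

def rescale_to_uint8(img):
    # same formula as in the script: (img - min) * (1 / (max - min) * 255)
    img = img.astype(float)
    return ((img - img.min()) * (1.0 / (img.max() - img.min()) * 255)).astype('uint8')

def binarise(mask, thresh=220):
    # NumPy stand-in for cv2.threshold(mask, 220, 255, cv2.THRESH_BINARY) / 255
    return (mask > thresh).astype(float)

if __name__ == '__main__':
    rng = np.random.default_rng(1)
    fake_img = rng.integers(0, 4096, size=(8, 8))    # e.g. a 12-bit slice
    fake_mask = rng.integers(0, 256, size=(8, 8))
    print(rescale_to_uint8(fake_img).dtype, rescale_to_uint8(fake_img).max())
    print(binarise(fake_mask).sum(), 'pixels above the threshold')
```
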
dgketchum/irr_impacts
|
[
"4a09296eb5cc6f36b63ecd4ca2a82b075e1581d5"
] |
[
"bulk_analysis_figs.py"
] |
[
"import os\nfrom calendar import monthrange\nfrom datetime import date\n\nfrom matplotlib import rcParams, pyplot as plt\nfrom pandas import read_csv, concat\nimport numpy as np\n\nfrom hydrograph import hydrograph\nfrom county_list import included_counties\nfrom state_county_names_codes import state_fips_code, state_county_code\n\n\ndef plot_clim_q_resid(q, ai, clim_line, desc_str, years, cc, resid, resid_line, fig_d, cci_per, flow_per):\n rcParams['figure.figsize'] = 16, 10\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax1.scatter(ai, q)\n ax1.plot(ai, clim_line)\n ax1.set(xlabel='ETr / PPT [-]')\n ax1.set(ylabel='q [m^3]')\n\n for i, y in enumerate(years):\n ax1.annotate(y, (ai[i], q[i]))\n plt.suptitle(desc_str)\n\n ax2.set(xlabel='cc [m]')\n ax2.set(ylabel='q epsilon [m^3]')\n ax2.scatter(cc, resid)\n ax2.plot(cc, resid_line)\n for i, y in enumerate(years):\n ax2.annotate(y, (cc[i], resid[i]))\n\n desc_split = desc_str.strip().split('\\n')\n file_name = desc_split[0].replace(' ', '_')\n\n fig_name = os.path.join(fig_d, '{}_cc_{}-{}_q_{}-{}.png'.format(file_name, cci_per[0], cci_per[1],\n flow_per[0], flow_per[1]))\n\n plt.savefig(fig_name)\n plt.close('all')\n\n\ndef plot_water_balance_trends(data, data_line, data_str, years, desc_str, fig_d):\n rcParams['figure.figsize'] = 16, 10\n fig, ax1 = plt.subplots(1, 1)\n\n color = 'tab:green'\n ax1.set_xlabel('Year')\n ax1.scatter(years, data, color=color)\n ax1.plot(years, data_line, color=color)\n ax1.set_ylabel(data_str, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n desc_split = desc_str.strip().split('\\n')\n file_name = desc_split[0].replace(' ', '_')\n\n fig_name = os.path.join(fig_d, '{}_{}.png'.format(file_name, data_str))\n\n plt.savefig(fig_name)\n plt.close('all')\n\n\ndef nass_irrmapper_climate(irr_dir, nass_c, fig_dir, countywise=True):\n\n study_counties = included_counties()\n\n ndf = read_csv(nass_c)\n ndf.dropna(how='any', axis=0, subset=['FIPS'], inplace=True)\n ndf.dropna(how='any', axis=0, subset=['ST_CNTY_STR'], inplace=True)\n ndf.fillna(0.0, inplace=True)\n ndf['GEOID'] = [str(int(x)).rjust(5, '0') for x in ndf['FIPS']]\n ndf.index = ndf['GEOID']\n\n m_start, m_end = 10, 9\n years = [x for x in range(1997, 2017)]\n clim_dates = [(date(y, 4, 1), date(y, 9, monthrange(y, m_end)[1])) for y in years]\n cc_dates = [(date(y, 5, 1), date(y, 10, 31)) for y in years]\n irr_dates = [(date(y, 7, 1), date(y, 7, 31)) for y in years]\n\n l = [os.path.join(irr_dir, x) for x in os.listdir(irr_dir)]\n if countywise:\n for c in l:\n co = os.path.basename(c).split('.')[0]\n idf = hydrograph(c)\n # idf['cci'] = idf['cc'] / idf['irr']\n\n try:\n co_desc = ndf.loc[co]['ST_CNTY_STR'].split('_')\n except (KeyError, AttributeError):\n print('\\n{} not found\\n'.format(co))\n\n co_str, st_str = co_desc[1].title(), co_desc[0]\n ppt = np.array([idf['ppt'][d[0]: d[1]].sum() for d in clim_dates])\n etr = np.array([idf['etr'][d[0]: d[1]].sum() for d in clim_dates])\n ai = etr / ppt\n cc = np.array([idf['cc'][d[0]: d[1]].sum() for d in cc_dates])\n cc[cc == 0.0] = np.nan\n irr = np.array([idf['irr'][d[0]: d[1]].sum() for d in irr_dates]) / 4046.86\n if np.any(irr[5:] < 1000):\n continue\n nrow = [(k[-4:], v) for k, v in ndf.loc[co].items() if 'VALUE' in k]\n n_v, n_y = [x[1] for x in nrow], [int(x[0]) for x in nrow]\n fig, ax = plt.subplots(4, 1)\n fig.set_figheight(8)\n fig.set_figwidth(12)\n fig.tight_layout()\n\n ax[0].plot(years, irr, color='purple', label='IrrMapper')\n ax[0].plot(n_y, n_v, color='pink', label='NASS')\n 
ax[0].legend()\n ax[0].set(ylabel='Acres Irrigated')\n ax[0].set_xlim(years[0], years[-1])\n ax[1].plot(years, ppt, color='blue')\n ax[1].set(ylabel='AMJJA Precipitation [m^3]')\n ax[1].set_xlim(years[0], years[-1])\n ax[2].plot(years, cc, color='black')\n ax[2].set(ylabel='Crop Consumption [m^3]')\n ax[2].set_xlim(years[0], years[-1])\n ax[3].plot(years, ai, color='red')\n ax[3].set(ylabel='AMJJA Aridity Index [-]')\n ax[3].set_xlim(years[0], years[-1])\n\n plt.suptitle('{} Co. {}'.format(co_str, st_str))\n plt.xlim(1985, 2021)\n plt.gcf().subplots_adjust(left=0.1)\n plt.tight_layout()\n\n fig_file = '{}_{}.png'.format(st_str, co_str)\n\n if co in study_counties:\n sub_dir = 'impacts_study'\n else:\n sub_dir = 'non_study'\n\n plt.savefig(os.path.join(fig_dir, sub_dir, fig_file))\n plt.close()\n print(fig_file, sub_dir)\n else:\n western_11 = ['AZ', 'CA', 'CO', 'ID', 'MT', 'NM', 'NV', 'OR', 'UT', 'WA', 'WY']\n inlcude_all = ['ALL'] + western_11\n fips = state_fips_code()\n western_fips = [v for k, v in fips.items() if k in western_11]\n\n for s in inlcude_all:\n\n if s == 'ALL':\n nass_rows = [i for i, r in ndf.iterrows() if r['ST_CNTY_STR'][:2] in western_11]\n else:\n nass_rows = [i for i, r in ndf.iterrows() if r['ST_CNTY_STR'].startswith(s)]\n\n sndf = ndf.loc[nass_rows]\n sndf = sndf[[x for x in sndf.columns if 'VALUE' in x]]\n n_v = sndf.sum(axis=0).values\n n_y = [int(x[-4:]) for x in sndf.columns]\n\n if s == 'ALL':\n csv_l = [x for x in l if os.path.basename(x).split('.')[0][:2] in western_fips]\n else:\n csv_l = [x for x in l if os.path.basename(x).split('.')[0].startswith(state_fips_code()[s])]\n\n if not s == 'ALL':\n if not len(state_county_code()[s].keys()) == len(csv_l):\n csv_l = [x for x in csv_l if os.path.basename(x).split('.')[0] in nass_rows]\n print('{} is short records from EE'.format(s))\n if not len(state_county_code()[s].keys()) == sndf.shape[0]:\n csv_l = [x for x in csv_l if os.path.basename(x).split('.')[0] in nass_rows]\n print('{} is short records from NASS'.format(s))\n\n first = True\n for c in csv_l:\n if first:\n idf = hydrograph(c)\n first = False\n continue\n idf += hydrograph(c)\n\n ppt = np.array([idf['ppt'][d[0]: d[1]].sum() for d in clim_dates])\n etr = np.array([idf['etr'][d[0]: d[1]].sum() for d in clim_dates])\n ai = etr / ppt\n cc = np.array([idf['cc'][d[0]: d[1]].sum() for d in cc_dates])\n cc[cc == 0.0] = np.nan\n irr = np.array([idf['irr'][d[0]: d[1]].sum() for d in irr_dates]) / 4046.86\n\n fig, ax = plt.subplots(4, 1)\n fig.set_figheight(8)\n fig.set_figwidth(12)\n fig.tight_layout()\n\n ax[0].plot(years, irr, color='purple', label='IrrMapper')\n ax[0].plot(n_y, n_v, color='pink', label='NASS')\n ax[0].legend()\n ax[0].set(ylabel='Acres Irrigated')\n ax[0].set_xlim(years[0], years[-1])\n ax[1].plot(years, ppt, color='blue')\n ax[1].set(ylabel='AMJJA Precipitation [m^3]')\n ax[1].set_xlim(years[0], years[-1])\n ax[2].plot(years, cc, color='black')\n ax[2].set(ylabel='Crop Consumption [m^3]')\n ax[2].set_xlim(years[0], years[-1])\n ax[3].plot(years, ai, color='red')\n ax[3].set(ylabel='AMJJA Aridity Index [-]')\n ax[3].set_xlim(years[0], years[-1])\n\n plt.suptitle('{}'.format(s))\n plt.xlim(years[0], years[-1])\n plt.gcf().subplots_adjust(left=0.1)\n plt.tight_layout()\n\n fig_file = '{}.png'.format(s)\n\n sub_dir = 'statewise'\n\n plt.savefig(os.path.join(fig_dir, sub_dir, fig_file))\n plt.close()\n print(fig_file, sub_dir)\n\n\nif __name__ == '__main__':\n root = '/media/research/IrrigationGIS'\n if not os.path.exists(root):\n root = 
'/home/dgketchum/data/IrrigationGIS'\n nass = os.path.join(root, 'nass_data', 'nass_merged.csv')\n co_irr = os.path.join(root, 'time_series/counties_IrrMapperComp_21DEC2021/county_monthly')\n figs = os.path.join(root, 'time_series/counties_IrrMapperComp_21DEC2021/figures')\n nass_irrmapper_climate(co_irr, nass, figs, countywise=False)\n# ========================= EOF ====================================================================\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlim",
"numpy.any",
"matplotlib.pyplot.close",
"matplotlib.pyplot.suptitle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
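
The `nass_irrmapper_climate` routine above sums daily precipitation, reference ET and crop consumption over fixed seasonal windows per year and then forms an aridity index as ETr / PPT. The sketch below reproduces that aggregation pattern on a synthetic daily DataFrame (the column names and the gamma-distributed values are illustrative assumptions, not the project's real inputs).

```python
# Sketch of the seasonal aggregation used above: sum daily PPT and ETr over an
# April-September window for each year, then form the aridity index ETr / PPT.
from datetime import date

import numpy as np
import pandas as pd

rng = np.random.default_rng(2)
idx = pd.date_range('1997-01-01', '2000-12-31', freq='D')
df = pd.DataFrame({'ppt': rng.gamma(2.0, 2.0, len(idx)),
                   'etr': rng.gamma(3.0, 2.0, len(idx))}, index=idx)

years = range(1997, 2001)
windows = [(date(y, 4, 1), date(y, 9, 30)) for y in years]

# per-year seasonal totals, sliced by date labels on the DatetimeIndex
ppt = np.array([df['ppt'][d0:d1].sum() for d0, d1 in windows])
etr = np.array([df['etr'][d0:d1].sum() for d0, d1 in windows])
ai = etr / ppt   # aridity index per year, as plotted in the script above
print(dict(zip(years, np.round(ai, 2))))
```
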
shigengtian/caffe2
|
[
"e19489d6acd17fea8ca98cd8e4b5b680e23a93c5",
"e19489d6acd17fea8ca98cd8e4b5b680e23a93c5",
"e19489d6acd17fea8ca98cd8e4b5b680e23a93c5",
"e19489d6acd17fea8ca98cd8e4b5b680e23a93c5",
"8f41717c46d214aaf62b53e5b3b9b308b5b8db91",
"e19489d6acd17fea8ca98cd8e4b5b680e23a93c5",
"e19489d6acd17fea8ca98cd8e4b5b680e23a93c5",
"e19489d6acd17fea8ca98cd8e4b5b680e23a93c5"
] |
[
"caffe2/python/operator_test/elementwise_op_broadcast_test.py",
"caffe2/python/muji_test.py",
"caffe2/python/operator_test/ceil_op_test.py",
"caffe2/python/core_test.py",
"caffe2/python/operator_test/deform_conv_test.py",
"caffe2/python/operator_test/fc_operator_test.py",
"caffe2/python/operator_test/flexible_top_k_test.py",
"caffe2/python/operator_test/specialized_segment_ops_test.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport unittest\n\nfrom hypothesis import given\nimport numpy as np\n\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import core, workspace\nimport caffe2.python.hypothesis_test_util as hu\n\n\n# TODO(jiayq): make them hypothesis tests for better coverage.\nclass TestElementwiseBroadcast(hu.HypothesisTestCase):\n @given(**hu.gcs)\n def test_broadcast_Add(self, gc, dc):\n # Set broadcast and no axis, i.e. broadcasting last dimensions.\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(4, 5).astype(np.float32)\n op = core.CreateOperator(\"Add\", [\"X\", \"Y\"], \"out\", broadcast=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X + Y)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n\n # broadcasting intermediate dimensions\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(3, 4).astype(np.float32)\n op = core.CreateOperator(\"Add\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X + Y[:, :, np.newaxis])\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n\n # broadcasting the first dimension\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(2).astype(np.float32)\n op = core.CreateOperator(\"Add\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=0)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(\n out, X + Y[:, np.newaxis, np.newaxis, np.newaxis])\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n\n # broadcasting with single elem dimensions at both ends\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(1, 4, 1).astype(np.float32)\n op = core.CreateOperator(\"Add\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X + Y)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n\n @given(**hu.gcs)\n def test_broadcast_Mul(self, gc, dc):\n # Set broadcast and no axis, i.e. 
broadcasting last dimensions.\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(4, 5).astype(np.float32)\n op = core.CreateOperator(\"Mul\", [\"X\", \"Y\"], \"out\", broadcast=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X * Y)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n\n # broadcasting intermediate dimensions\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(3, 4).astype(np.float32)\n op = core.CreateOperator(\"Mul\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X * Y[:, :, np.newaxis])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n # broadcasting the first dimension\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(2).astype(np.float32)\n op = core.CreateOperator(\"Mul\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=0)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(\n out, X * Y[:, np.newaxis, np.newaxis, np.newaxis])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n # broadcasting with single elem dimensions at both ends\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(1, 4, 1).astype(np.float32)\n op = core.CreateOperator(\"Mul\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X * Y)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n\n @given(**hu.gcs)\n def test_broadcast_Sub(self, gc, dc):\n # Set broadcast and no axis, i.e. 
broadcasting last dimensions.\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(4, 5).astype(np.float32)\n op = core.CreateOperator(\"Sub\", [\"X\", \"Y\"], \"out\", broadcast=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X - Y)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n\n # broadcasting intermediate dimensions\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(3, 4).astype(np.float32)\n op = core.CreateOperator(\"Sub\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X - Y[:, :, np.newaxis])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n # broadcasting the first dimension\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(2).astype(np.float32)\n op = core.CreateOperator(\"Sub\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=0)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(\n out, X - Y[:, np.newaxis, np.newaxis, np.newaxis])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n # broadcasting with single elem dimensions at both ends\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(1, 4, 1).astype(np.float32)\n op = core.CreateOperator(\"Sub\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X - Y)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n self.assertGradientChecks(gc, op, [X, Y], 1, [0])\n\n @given(**hu.gcs)\n def test_broadcast_powt(self, gc, dc):\n np.random.seed(101)\n\n #operator\n def powt_op(X, Y):\n return [np.power(X, Y)]\n\n #two gradients Y*X^(Y-1) and X^Y * ln(X)\n def powt_grad(g_out, outputs, fwd_inputs):\n [X, Y] = fwd_inputs\n Z = outputs[0]\n return ([Y * np.power(X, Y - 1), Z * np.log(X)] * g_out)\n\n #1. Set broadcast and no axis, i.e. broadcasting last dimensions.\n X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0\n Y = np.random.rand(4, 5).astype(np.float32) + 2.0\n\n #two gradients Y*X^(Y-1) and X^Y * ln(X)\n #latter gradient is sumed over 1 and 0 dims to account for broadcast\n def powt_grad_broadcast(g_out, outputs, fwd_inputs):\n [GX, GY] = powt_grad(g_out, outputs, fwd_inputs)\n return ([GX, np.sum(np.sum(GY, 1), 0)])\n\n op = core.CreateOperator(\"Pow\", [\"X\", \"Y\"], \"Z\", broadcast=1)\n self.assertReferenceChecks(device_option=gc,\n op=op,\n inputs=[X, Y],\n reference=powt_op,\n output_to_grad=\"Z\",\n grad_reference=powt_grad_broadcast)\n\n #2. 
broadcasting intermediate dimensions\n X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0\n Y = np.random.rand(3, 4).astype(np.float32) + 2.0\n\n #pow op with the latter array increased by one dim\n def powt_op_axis1(X, Y):\n return powt_op(X, Y[:, :, np.newaxis])\n\n #two gradients Y*X^(Y-1) and X^Y * ln(X)\n #latter gradient is sumed over 3 and 0 dims to account for broadcast\n def powt_grad_axis1(g_out, outputs, fwd_inputs):\n [X, Y] = fwd_inputs\n [GX, GY] = powt_grad(g_out, outputs, [X, Y[:, :, np.newaxis]])\n return ([GX, np.sum(np.sum(GY, 3), 0)])\n\n op = core.CreateOperator(\"Pow\", [\"X\", \"Y\"], \"Z\", broadcast=1, axis=1)\n self.assertReferenceChecks(device_option=gc,\n op=op,\n inputs=[X, Y],\n reference=powt_op_axis1,\n output_to_grad=\"Z\",\n grad_reference=powt_grad_axis1)\n\n #3. broadcasting the first dimension\n X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0\n Y = np.random.rand(2).astype(np.float32) + 2.0\n\n #pow op with the latter array increased by one dim\n def powt_op_axis0(X, Y):\n return powt_op(X, Y[:, np.newaxis, np.newaxis, np.newaxis])\n\n #two gradients Y*X^(Y-1) and X^Y * ln(X)\n #latter gradient is sumed over 3, 2 and 1 dims to account for broadcast\n def powt_grad_axis0(g_out, outputs, fwd_inputs):\n [X, Y] = fwd_inputs\n [GX, GY] = powt_grad(g_out,\n outputs,\n [X, Y[:, np.newaxis, np.newaxis, np.newaxis]])\n return ([GX, np.sum(np.sum(np.sum(GY, 3), 2), 1)])\n\n op = core.CreateOperator(\"Pow\", [\"X\", \"Y\"], \"Z\", broadcast=1, axis=0)\n self.assertReferenceChecks(device_option=gc,\n op=op,\n inputs=[X, Y],\n reference=powt_op_axis0,\n output_to_grad=\"Z\",\n grad_reference=powt_grad_axis0)\n\n #4. broadcasting with single elem dimensions at both ends\n X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0\n Y = np.random.rand(1, 4, 1).astype(np.float32) + 2.0\n\n #pow op with the latter array increased by one dim\n def powt_op_mixed(X, Y):\n return powt_op(X, Y[np.newaxis, :, :, :])\n\n #two gradients Y*X^(Y-1) and X^Y * ln(X)\n #latter gradient is sumed over 0 and 1 dims to account for broadcast\n def powt_grad_mixed(g_out, outputs, fwd_inputs):\n [X, Y] = fwd_inputs\n [GX, GY] = powt_grad(g_out, outputs, [X, Y[np.newaxis, :, :, :]])\n return ([GX, np.reshape(np.sum(np.sum(np.sum(GY, 3), 1), 0),\n (1, 4, 1))])\n\n op = core.CreateOperator(\"Pow\", [\"X\", \"Y\"], \"Z\", broadcast=1, axis=1)\n self.assertReferenceChecks(device_option=gc,\n op=op,\n inputs=[X, Y],\n reference=powt_op_mixed,\n output_to_grad=\"Z\",\n grad_reference=powt_grad_mixed)\n\n @given(**hu.gcs)\n def test_broadcast_scalar(self, gc, dc):\n # broadcasting constant\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(1).astype(np.float32)\n op = core.CreateOperator(\"Add\", [\"X\", \"Y\"], \"out\", broadcast=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(\n out, X + Y)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n # broadcasting scalar\n X = np.random.rand(1).astype(np.float32)\n Y = np.random.rand(1).astype(np.float32).reshape([])\n op = core.CreateOperator(\"Add\", [\"X\", \"Y\"], \"out\", broadcast=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(\n out, X + Y)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n @given(**hu.gcs)\n def test_semantic_broadcast(self, gc, dc):\n # NCHW as 
default\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(3).astype(np.float32)\n op = core.CreateOperator(\n \"Add\", [\"X\", \"Y\"], \"out\", broadcast=1, axis_str=\"C\")\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(\n out, X + Y[:, np.newaxis, np.newaxis])\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n # NHWC\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(5).astype(np.float32)\n op = core.CreateOperator(\n \"Add\", [\"X\", \"Y\"], \"out\", broadcast=1, axis_str=\"C\", order=\"NHWC\")\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n np.testing.assert_array_almost_equal(out, X + Y)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n @given(**hu.gcs)\n def test_sum_reduce(self, gc, dc):\n # Set broadcast and no axis, i.e. broadcasting last dimensions.\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(4, 5).astype(np.float32)\n op = core.CreateOperator(\n \"SumReduceLike\", [\"X\", \"Y\"], \"out\", broadcast=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n res = np.sum(X, axis=0)\n res = np.sum(res, axis=0)\n np.testing.assert_array_almost_equal(out, res)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n # Set broadcast and no axis, i.e. broadcasting last dimensions.\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(2, 3).astype(np.float32)\n op = core.CreateOperator(\n \"SumReduceLike\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=0)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n res = np.sum(X, axis=3)\n res = np.sum(res, axis=2)\n np.testing.assert_array_almost_equal(out, res, decimal=3)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n # broadcasting intermediate dimensions\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(3, 4).astype(np.float32)\n op = core.CreateOperator(\n \"SumReduceLike\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n res = np.sum(X, axis=0)\n res = np.sum(res, axis=2)\n np.testing.assert_array_almost_equal(out, res)\n self.assertDeviceChecks(dc, op, [X, Y], [0])\n\n # broadcasting intermediate dimensions\n X = np.random.rand(2, 3, 4, 500).astype(np.float64)\n Y = np.random.rand(1).astype(np.float64)\n op = core.CreateOperator(\n \"SumReduceLike\", [\"X\", \"Y\"], \"out\", broadcast=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n res = np.array(np.sum(X))\n np.testing.assert_array_almost_equal(out, res, decimal=0)\n\n # broadcasting with single elem dimensions at both ends\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n Y = np.random.rand(1, 3, 4, 1).astype(np.float32)\n op = core.CreateOperator(\n \"SumReduceLike\", [\"X\", \"Y\"], \"out\", broadcast=1)\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"Y\", Y)\n workspace.RunOperatorOnce(op)\n out = workspace.FetchBlob(\"out\")\n res = np.sum(X, axis=0)\n res = np.sum(res, axis=2).reshape(Y.shape)\n np.testing.assert_array_almost_equal(out, res)\n self.assertDeviceChecks(dc, op, [X, Y], 
[0])\n\n # fp64 is not supported with the CUDA op\n dc_cpu_only = [d for d in dc if d.device_type != caffe2_pb2.CUDA]\n self.assertDeviceChecks(dc_cpu_only, op, [X, Y], [0])\n\n @unittest.skipIf(not workspace.has_gpu_support, \"No gpu support\")\n @given(**hu.gcs_gpu_only)\n def test_sum_reduce_fp16(self, gc, dc):\n # Set broadcast and no axis, i.e. broadcasting last dimensions.\n X = np.random.rand(2, 3, 4, 5).astype(np.float16)\n Y = np.random.rand(4, 5).astype(np.float16)\n op = core.CreateOperator(\n \"SumReduceLike\", [\"X\", \"Y\"], \"out\", broadcast=1, device_option=gc)\n\n def ref_op(X, Y):\n res = np.sum(X, axis=0)\n res = np.sum(res, axis=0)\n return [res]\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[X, Y],\n reference=ref_op,\n threshold=1e-3)\n\n # Set broadcast and no axis, i.e. broadcasting last dimensions.\n X = np.random.rand(2, 3, 4, 5).astype(np.float16)\n Y = np.random.rand(2, 3).astype(np.float16)\n op = core.CreateOperator(\n \"SumReduceLike\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=0)\n\n def ref_op(X, Y):\n res = np.sum(X, axis=3)\n res = np.sum(res, axis=2)\n return [res]\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[X, Y],\n reference=ref_op,\n threshold=1e-3)\n\n # broadcasting intermediate dimensions\n X = np.random.rand(2, 3, 4, 5).astype(np.float16)\n Y = np.random.rand(3, 4).astype(np.float16)\n op = core.CreateOperator(\n \"SumReduceLike\", [\"X\", \"Y\"], \"out\", broadcast=1, axis=1)\n\n def ref_op(X, Y):\n res = np.sum(X, axis=0)\n res = np.sum(res, axis=2)\n return [res]\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[X, Y],\n reference=ref_op,\n threshold=1e-3)\n\n # broadcasting with single elem dimensions at both ends\n X = np.random.rand(2, 3, 4, 5).astype(np.float16)\n Y = np.random.rand(1, 3, 4, 1).astype(np.float16)\n op = core.CreateOperator(\n \"SumReduceLike\", [\"X\", \"Y\"], \"out\", broadcast=1)\n\n def ref_op(X, Y):\n res = np.sum(X, axis=0)\n res = np.sum(res, axis=2)\n return [res.reshape(Y.shape)]\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[X, Y],\n reference=ref_op,\n threshold=1e-3)\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import numpy as np\nimport unittest\n\nfrom caffe2.python import core, workspace, muji, test_util\n\n\[email protected](not workspace.has_gpu_support, \"no gpu\")\nclass TestMuji(test_util.TestCase):\n def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):\n \"\"\"A base function to test different scenarios.\"\"\"\n net = core.Net(\"mujitest\")\n for id in gpu_ids:\n net.ConstantFill(\n [],\n \"testblob_gpu_\" + str(id),\n shape=[1, 2, 3, 4],\n value=float(id + 1),\n device_option=muji.OnGPU(id)\n )\n allreduce_function(\n net, [\"testblob_gpu_\" + str(i)\n for i in gpu_ids], \"_reduced\", gpu_ids\n )\n workspace.RunNetOnce(net)\n target_value = sum(gpu_ids) + len(gpu_ids)\n all_blobs = workspace.Blobs()\n all_blobs.sort()\n for blob in all_blobs:\n print('{} {}'.format(blob, workspace.FetchBlob(blob)))\n\n for idx in gpu_ids:\n blob = workspace.FetchBlob(\"testblob_gpu_\" + str(idx) + \"_reduced\")\n np.testing.assert_array_equal(\n blob,\n target_value,\n err_msg=\"gpu id %d of %s\" % (idx, str(gpu_ids))\n )\n\n def testAllreduceFallback(self):\n self.RunningAllreduceWithGPUs(\n list(range(workspace.NumCudaDevices())), muji.AllreduceFallback\n )\n\n def testAllreduceSingleGPU(self):\n for i in range(workspace.NumCudaDevices()):\n self.RunningAllreduceWithGPUs([i], muji.Allreduce)\n\n def testAllreduceWithTwoGPUs(self):\n pattern = workspace.GetCudaPeerAccessPattern()\n if pattern.shape[0] >= 2 and np.all(pattern[:2, :2]):\n self.RunningAllreduceWithGPUs([0, 1], muji.Allreduce2)\n else:\n print('Skipping allreduce with 2 gpus. Not peer access ready.')\n\n def testAllreduceWithFourGPUs(self):\n pattern = workspace.GetCudaPeerAccessPattern()\n if pattern.shape[0] >= 4 and np.all(pattern[:4, :4]):\n self.RunningAllreduceWithGPUs([0, 1, 2, 3], muji.Allreduce4)\n else:\n print('Skipping allreduce with 4 gpus. Not peer access ready.')\n\n def testAllreduceWithEightGPUs(self):\n pattern = workspace.GetCudaPeerAccessPattern()\n if (\n pattern.shape[0] >= 8 and np.all(pattern[:4, :4]) and\n np.all(pattern[4:, 4:])\n ):\n self.RunningAllreduceWithGPUs(\n list(range(8)), muji.Allreduce8)\n else:\n print('Skipping allreduce with 8 gpus. Not peer access ready.')\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core\nfrom hypothesis import given\nimport hypothesis.strategies as st\nimport caffe2.python.hypothesis_test_util as hu\nimport numpy as np\n\nimport unittest\n\n\nclass TestCeil(hu.HypothesisTestCase):\n\n @given(X=hu.tensor(),\n engine=st.sampled_from([\"\", \"CUDNN\"]),\n **hu.gcs)\n def test_ceil(self, X, gc, dc, engine):\n op = core.CreateOperator(\"Ceil\", [\"X\"], [\"Y\"], engine=engine)\n\n def ceil_ref(X):\n return (np.ceil(X),)\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[X],\n reference=ceil_ref)\n\n # Check over multiple devices\n self.assertDeviceChecks(dc, op, [X], [0])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom inspect import currentframe, getframeinfo\nimport unittest\n\nimport numpy as np\n\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import core, workspace, test_util\nfrom caffe2.python.task import Node, Task\n\n\nclass TestScopes(test_util.TestCase):\n def testBlobReferenceIsIndependentFromNameScope(self):\n blob_v = core.BlobReference(\"v\")\n with core.NameScope(\"foo\"):\n blob_w = core.BlobReference(\"w\")\n with core.NameScope(\"bar\"):\n blob_x = core.BlobReference(\"x\")\n self.assertEqual(str(blob_v), \"v\")\n self.assertEqual(str(blob_w), \"w\")\n self.assertEqual(str(blob_x), \"x\")\n\n def testNameScopeWithOp(self):\n global_x = core.BlobReference(\"x\")\n global_y = core.BlobReference(\"y\")\n with core.NameScope(\"foo\"):\n # Raw strings should have namescope prepended.\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertEqual(len(op.input), 1)\n self.assertEqual(op.input[0], \"foo/x\")\n self.assertEqual(len(op.output), 1)\n self.assertEqual(op.output[0], \"foo/y\")\n # BlobReferences should not.\n op = core.CreateOperator(\"Relu\", global_x, global_y)\n self.assertEqual(len(op.input), 1)\n self.assertEqual(op.input[0], \"x\")\n self.assertEqual(len(op.output), 1)\n self.assertEqual(op.output[0], \"y\")\n\n def testNameScopeWithReset(self):\n with core.NameScope(\"foo\"):\n # foo/\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertEqual(len(op.input), 1)\n self.assertEqual(op.input[0], \"foo/x\")\n self.assertEqual(len(op.output), 1)\n self.assertEqual(op.output[0], \"foo/y\")\n with core.NameScope(\"bar\"):\n # foo/bar/\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertEqual(len(op.input), 1)\n self.assertEqual(op.input[0], \"foo/bar/x\")\n self.assertEqual(len(op.output), 1)\n self.assertEqual(op.output[0], \"foo/bar/y\")\n # Back to foo/\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertEqual(len(op.input), 1)\n self.assertEqual(op.input[0], \"foo/x\")\n self.assertEqual(len(op.output), 1)\n self.assertEqual(op.output[0], \"foo/y\")\n with core.NameScope(\"bar\", reset=True):\n # bar/\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertEqual(len(op.input), 1)\n self.assertEqual(op.input[0], \"bar/x\")\n self.assertEqual(len(op.output), 1)\n self.assertEqual(op.output[0], \"bar/y\")\n # Back to foo/\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertEqual(len(op.input), 1)\n self.assertEqual(op.input[0], \"foo/x\")\n self.assertEqual(len(op.output), 1)\n self.assertEqual(op.output[0], \"foo/y\")\n\n def testDeviceScope(self):\n # No device\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertFalse(op.HasField('device_option'))\n # explicitly setting a device\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = caffe2_pb2.CUDA\n device_option.cuda_gpu_id = 1\n op = core.CreateOperator(\"Relu\", \"x\", \"y\", device_option=device_option)\n self.assertTrue(op.HasField('device_option'))\n self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n with core.DeviceScope(device_option):\n # from device scope\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertTrue(op.HasField('device_option'))\n self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n # from an overridden 
device option\n override_device = caffe2_pb2.DeviceOption()\n override_device.device_type = caffe2_pb2.CPU\n op = core.CreateOperator(\n \"Relu\", \"x\", \"y\", device_option=override_device)\n self.assertTrue(op.HasField('device_option'))\n self.assertEqual(op.device_option.device_type, caffe2_pb2.CPU)\n # back from normal: no device\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertFalse(op.HasField('device_option'))\n device_option = caffe2_pb2.DeviceOption()\n\n def testNameAndDeviceScopeTogether(self):\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = caffe2_pb2.CUDA\n device_option.cuda_gpu_id = 1\n with core.DeviceScope(device_option):\n with core.NameScope(\"foo\"):\n op = core.CreateOperator(\"Relu\", \"x\", \"y\")\n self.assertTrue(op.HasField('device_option'))\n self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n self.assertEqual(len(op.input), 1)\n self.assertEqual(op.input[0], \"foo/x\")\n self.assertEqual(len(op.output), 1)\n self.assertEqual(op.output[0], \"foo/y\")\n\n\nclass TestCloneNet(test_util.TestCase):\n def testPartialClone(self):\n params = core.Net('params')\n p1 = params.ConstantFill([], ['p1'])\n workspace.CreateNet(params)\n workspace.RunNetOnce(params)\n\n n = core.Net('original')\n a1 = n.AddExternalInput('a1')\n a2 = n.AddExternalInput('a2')\n b1, b2 = n.Concat([a1, a2], ['b1', 'b2'], axis=0)\n c1 = n.Sum([b1, p1], ['c1'])\n c2 = n.Sum([b2], ['c2'])\n d = n.Sum([c1, c2], ['d'])\n\n # test that gradient ops are ignored when partial-cloning\n n.AddGradientOperators([d])\n\n # test some in-place ops\n k = n.Sum([p1], ['k'])\n e = n.Sum([d], ['e'])\n e = n.Sum([e, k], [e])\n e = n.Sum([e], [e])\n f = n.Sum(e, ['f'])\n\n def net_assert(net, num_ops, inputs, outputs, internals):\n self.assertEqual(len(net.Proto().op), num_ops)\n self.assertEqual(set(net.Proto().external_input), inputs)\n self.assertEqual(set(net.Proto().external_output), outputs)\n all_blobs = set(net.Proto().external_input)\n all_blobs |= set(net.Proto().external_output)\n for op in net.Proto().op:\n all_blobs |= set(op.input) | set(op.output)\n self.assertEqual(all_blobs, inputs | outputs | internals)\n # create net to make sure its valid\n for input in inputs:\n workspace.FeedBlob(input, np.array([]))\n workspace.CreateNet(net)\n\n n2, (d22, ) = n.ClonePartial('f1', {a1: 'a11', a2: 'a22'}, [d])\n net_assert(\n n2, 4, {'p1', 'a11', 'a22'}, {'f1/d'},\n {'f1/b1', 'f1/b2', 'f1/c1', 'f1/c2', 'p1'})\n self.assertTrue(isinstance(d22, core.BlobReference))\n self.assertEqual(d22.Net(), n2)\n self.assertEqual(str(d22), 'f1/d')\n\n n3, (d22, ) = n.ClonePartial('f2', [b1, b2], [d])\n net_assert(\n n3, 3, {'p1', 'b1', 'b2'}, {'f2/d'}, {'f2/c1', 'f2/c2', 'p1'})\n self.assertEqual(str(d22), 'f2/d')\n\n n4, (c22, ) = n.ClonePartial('f3', [b1], [c1])\n net_assert(n4, 1, {'p1', 'b1'}, {'f3/c1'}, {'p1'})\n self.assertEqual(str(c22), 'f3/c1')\n\n n5, (c11, c22) = n.ClonePartial('f4', [b1, b2], [c1, c2])\n net_assert(n5, 2, {'p1', 'b1', 'b2'}, {'f4/c1', 'f4/c2'}, {'p1'})\n self.assertEqual(str(c11), 'f4/c1')\n self.assertEqual(str(c22), 'f4/c2')\n\n with self.assertRaises(AssertionError):\n n.ClonePartial('f4', [a1, a2, c2], [d])\n\n n6, (e22, ) = n.ClonePartial('f5', [d], [e])\n net_assert(n6, 4, {'p1', 'd'}, {'f5/e'}, {'f5/k', 'p1'})\n self.assertEqual(str(e22), 'f5/e')\n\n n8, (e22, f22) = n.ClonePartial('f7', [d], [e, f])\n net_assert(n8, 5, {'p1', 'd'}, {'f7/e', 'f7/f'}, {'p1', 'f7/k'})\n 
self.assertEqual(str(e22), 'f7/e')\n self.assertEqual(str(f22), 'f7/f')\n\n params._CheckLookupTables()\n n._CheckLookupTables()\n\n\nclass TestCreateOperator(test_util.TestCase):\n def testCreate(self):\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = caffe2_pb2.CUDA\n device_option.cuda_gpu_id = 1\n op = core.CreateOperator(\n \"Ludicrous\", \"x\", \"y\", name=\"ludicrous\",\n control_input=\"z\", device_option=device_option,\n engine=\"WARP\", arg1=1, arg2=\"2\", arg3=[1, 2, 3])\n self.assertEqual(op.type, \"Ludicrous\")\n self.assertEqual(op.name, \"ludicrous\")\n self.assertEqual(op.engine, \"WARP\")\n self.assertEqual(len(op.input), 1)\n self.assertEqual(op.input[0], \"x\")\n self.assertEqual(len(op.output), 1)\n self.assertEqual(op.output[0], \"y\")\n self.assertEqual(len(op.control_input), 1)\n self.assertEqual(op.control_input[0], \"z\")\n self.assertTrue(op.HasField('device_option'))\n self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n self.assertTrue(len(op.arg), 3)\n\n # can't guarantee ordering of kwargs, so generate a set of args\n # to test with\n arg_map = {}\n for arg in op.arg:\n arg_map[arg.name] = arg\n\n # Check all elements exist that should\n self.assertEqual(\"arg1\" in arg_map, True)\n self.assertEqual(\"arg2\" in arg_map, True)\n self.assertEqual(\"arg3\" in arg_map, True)\n\n # Now test that all args were initialized correctly\n self.assertEqual(arg_map[\"arg1\"].i, 1)\n self.assertEqual(arg_map[\"arg2\"].s, b\"2\")\n self.assertEqual(list(arg_map[\"arg3\"].ints), [1, 2, 3])\n\n def testCreateWithNoneKwarg(self):\n with self.assertRaises(ValueError):\n core.CreateOperator(\"Ludicrous\", \"x\", \"y\", arg1=None)\n\n\nclass TestAutoNaming(test_util.TestCase):\n def assertOperatorListEqual(self, operatorDefList1, operatorDefList2):\n for op in operatorDefList1:\n op.debug_info = \"\"\n for op in operatorDefList2:\n op.debug_info = \"\"\n self.assertEqual(operatorDefList1, operatorDefList2)\n \"\"\"\n Test that operators are named with different names, and that automatically\n named blob names don't clash intra or inter networks.\n \"\"\"\n def test_next_blob(self):\n def create_net():\n net = core.Net('net')\n with core.NameScope('foo'):\n net.Add(['a', 'b'], net.NextScopedBlob('ab'))\n\n net.Add(['c', 'd'], net.NextBlob('cd'))\n return net\n\n net_a = create_net()\n net_b = create_net()\n # created net proto is predicatable.\n self.assertOperatorListEqual(net_a.Proto().op,\n net_b.Proto().op)\n self.assertEqual(net_a.Proto().op[0].output[0], 'foo/ab')\n self.assertEqual(net_a.Proto().op[1].output[0], 'cd')\n\n net_c = core.Net('net')\n # different calls return different blob names\n self.assertNotEqual(str(net_c.NextBlob('b')), str(net_c.NextBlob('b')))\n\n def test_auto_naming(self):\n a = core.Net('net')\n b = core.Net('net')\n self.assertNotEqual(a.Proto().name, b.Proto().name)\n a_in1 = a.AddExternalInput('a')\n b_in1 = b.AddExternalInput('b')\n all_outputs_single = []\n all_outputs_list = []\n\n def add_ops():\n all_outputs_single.append(a.Sum([a_in1, a_in1]))\n all_outputs_single.append(a.Sum([a_in1, a_in1]))\n all_outputs_single.append(b.Sum([b_in1, b_in1]))\n all_outputs_single.append(b.Sum([b_in1, b_in1]))\n all_outputs_list.append(a.Sum([a_in1, a_in1], outputs=2))\n all_outputs_list.append(a.Sum([a_in1, a_in1], outputs=2))\n all_outputs_list.append(b.Sum([b_in1, b_in1], outputs=2))\n all_outputs_list.append(b.Sum([b_in1, b_in1], outputs=2))\n\n add_ops()\n with 
core.NameScope('n1'):\n add_ops()\n\n # Force reset of lookup tables\n a.Proto().name\n\n with core.NameScope('n2'):\n add_ops()\n\n all_outputs = []\n for s in all_outputs_single:\n all_outputs.append(str(s))\n for l in all_outputs_list:\n for o in l:\n all_outputs.append(str(o))\n\n for i, o1 in enumerate(all_outputs):\n for j, o2 in enumerate(all_outputs):\n if i != j:\n self.assertNotEqual(str(o1), str(o2))\n\n a._CheckLookupTables()\n b._CheckLookupTables()\n\n\nclass TestAppendNet(test_util.TestCase):\n\n def test_external_inputs_merged_correctly(self):\n netA = core.Net(\"A\")\n netA.Sum([\"in1\", \"in2\"], [\"sum1\"])\n self.assertTrue(\"in1\" in netA.external_inputs)\n\n netB = core.Net(\"B\")\n netB.Sum([\"in3\", \"in4\"], [\"in1\"])\n netB.AppendNet(netA)\n self.assertFalse(\"in1\" in netB.external_inputs)\n\n def test_external_inputs_merged_correctlyB(self):\n netA = core.Net(\"A\")\n netA.Sum([\"in1\", \"in2\"], [\"sum1\"])\n self.assertTrue(\"in1\" in netA.external_inputs)\n\n netB = core.Net(\"B\")\n netB.Sum([\"in3\", \"in4\"], [\"in1\"])\n netA.AppendNet(netB) # note different order than in prev test\n self.assertTrue(\"in1\" in netA.external_inputs)\n\n\nclass TestExtractPredictorNet(test_util.TestCase):\n\n def test_extract_simple(self):\n from caffe2.python import brew\n from caffe2.python.model_helper import ModelHelper, ExtractPredictorNet\n\n model = ModelHelper(name=\"test\", arg_scope={'order': 'NCHW'})\n [data, label] = brew.image_input(\n model,\n \"reader\", [\"xx/data\", \"label\"],\n is_test=1,\n )\n cnv = brew.conv(model, data, 'cnv', 32, 32, 4)\n a = brew.fc(model, cnv, 'a', 100, 200)\n pred = brew.fc(model, a, 'pred', 200, 5)\n brew.softmax(model, [pred, label], \"softmax\")\n\n (predict_net, export_blobs) = ExtractPredictorNet(\n net_proto=model.net.Proto(),\n input_blobs=[\"xx/data\"],\n output_blobs=[\"pred\"],\n renames={\"xx/data\": \"image\"},\n )\n export_blobs = set(export_blobs)\n\n ops = list(predict_net.Proto().op)\n for op in ops:\n self.assertFalse(op.type == \"Softmax\")\n self.assertFalse(\"xx/data\" in op.input)\n\n # Note: image input should not be included\n self.assertEquals(ops[0].type, \"Conv\")\n self.assertEquals(ops[1].type, \"FC\")\n self.assertEquals(ops[2].type, \"FC\")\n self.assertEquals(len(ops), 3)\n\n # test rename happened\n self.assertEquals(ops[0].input[0], \"image\")\n\n # Check export blobs\n self.assertTrue(\"image\" not in export_blobs)\n self.assertTrue(\"xx/data\" not in export_blobs)\n self.assertEqual(set([str(p) for p in model.params]), export_blobs)\n\n # Check external inputs/outputs\n self.assertTrue(\"image\" in predict_net.Proto().external_input)\n self.assertEquals(set([\"pred\"]), set(predict_net.Proto().external_output))\n self.assertEqual(\n set(predict_net.Proto().external_input) -\n set([str(p) for p in model.params]), set([\"image\"])\n )\n\n\nclass TestOperatorTraceback(test_util.TestCase):\n def op_name_check(self, net, cf, line, func):\n net.PopulateProtoWithFileName()\n filename = getframeinfo(cf).filename\n self.assertEqual(net.Proto().op[0].name, '{}:{}:{}'.format(\n filename, line, func))\n\n def test_operator_constructor_traceback(self):\n net = core.Net(\"test\")\n a, b = net.AddExternalInput(\"a\", \"b\")\n net.Mul([a, b], \"c\"); cf = currentframe(); line = cf.f_lineno\n func = cf.f_code.co_name\n with self.assertRaises(Exception):\n workspace.RunNetOnce(net)\n with self.assertRaises(Exception):\n workspace.CreateNet(net)\n self.op_name_check(net, cf, line, func)\n\n def 
test_operator_runtime_traceback(self):\n net = core.Net(\"test\")\n a = net.AddExternalInput(\"a\")\n workspace.blobs[a] = np.array([1, 2, 3], dtype=np.float32)\n net.Split(a, [\"b\", \"c\"], axis=0); cf = currentframe(); line = cf.f_lineno\n func = cf.f_code.co_name\n with self.assertRaises(Exception):\n workspace.RunNetOnce(net)\n workspace.CreateNet(net)\n with self.assertRaises(Exception):\n workspace.RunNet(net)\n self.op_name_check(net, cf, line, func)\n\n def test_c_workspace_constructor(self):\n net = core.Net(\"test\")\n a, b = net.AddExternalInput(\"a\", \"b\")\n net.Mul([a, b], \"c\"); cf = currentframe(); line = cf.f_lineno\n func = cf.f_code.co_name\n ws = workspace.C.Workspace()\n with self.assertRaises(Exception):\n ws.run(net)\n with self.assertRaises(Exception):\n ws.create_net(net)\n self.op_name_check(net, cf, line, func)\n\n def test_c_workspace_runtime(self):\n net = core.Net(\"test\")\n a = net.AddExternalInput(\"a\")\n net.Split(a, [\"b\", \"c\"], axis=0); cf = currentframe(); line = cf.f_lineno\n func = cf.f_code.co_name\n ws = workspace.C.Workspace()\n ws.create_blob(str(a)).feed(np.array([1, 2, 3], dtype=np.float32))\n ws.create_net(net)\n with self.assertRaises(Exception):\n ws.run(net)\n self.op_name_check(net, cf, line, func)\n\n def test_async_exception_handling(self):\n net = core.Net(\"test\")\n net.Proto().type = 'dag' # this runs operators on background threads\n a = net.AddExternalInput(\"a\")\n net.Split(a, [\"b\", \"c\"], axis=0); cf = currentframe(); line = cf.f_lineno\n func = cf.f_code.co_name\n workspace.FeedBlob(a, np.array([1, 2, 3], dtype=np.float32))\n with self.assertRaises(Exception) as enforceNotMet:\n workspace.RunNetOnce(net)\n self.assertIn('enforce fail', str(enforceNotMet.exception))\n self.op_name_check(net, cf, line, func)\n\n\nclass TestCreatePlan(test_util.TestCase):\n\n def test_create_plan_from_proto_correctly(self):\n from caffe2.python.net_builder import ops\n with Node('trainer'), Task(name='my_task', num_instances=2) as task:\n with ops.task_init():\n globl = ops.Const(0)\n with ops.task_instance_init():\n local = ops.Const(0)\n with ops.loop(100):\n ops.Copy(globl, local)\n with ops.task_instance_exit():\n ops.Add([globl, local], [globl])\n with ops.task_exit():\n ops.Mul([globl, globl], [globl])\n\n plan = core.Plan(task.get_step())\n test_plan = core.Plan.create_from_proto(plan.Proto())\n\n self.assertEqual(len(plan.Steps()), 1)\n self.assertEqual(len(test_plan.Steps()), 1)\n self.assertEqual(plan.Steps()[0].Name(), test_plan.Steps()[0].Name())\n\n self.assertEqual(len(plan.Nets()), len(test_plan.Nets()))\n for idx in range(0, len(plan.Nets())):\n # When we create Net for test_plan, we will end up with new Net\n # name with postfix.\n net_1 = plan.Nets()[idx]\n net_2 = test_plan.Nets()[idx]\n trim_size = len(net_1.Name())\n self.assertEqual(net_1.Name(), net_2.Name()[:trim_size])\n\n\nclass TestOpRegistryKey(test_util.TestCase):\n def test_is_operator(self):\n self.assertTrue(core.IsOperator('Relu'))\n self.assertFalse(core.IsOperator('NOEXIST'))\n\n def test_is_operator_with_engine(self):\n self.assertTrue(core.IsOperatorWithEngine('Relu', 'DEFAULT'))\n self.assertFalse(core.IsOperatorWithEngine('Relu', 'NOEXIST'))\n\n\nclass TestDeviceOption(test_util.TestCase):\n def test_check_equal_node_name(self):\n opt1 = core.DeviceOption(0)\n opt2 = core.DeviceOption(0)\n self.assertTrue(core.device_option_equal(opt1, opt2))\n opt2.node_name = 'test'\n self.assertTrue(core.device_option_equal(opt1, opt2))\n 
self.assertFalse(core.device_option_equal(opt1, opt2, ignore_node_name=False))\n opt1.node_name = 'test'\n self.assertTrue(core.device_option_equal(opt1, opt2, ignore_node_name=False))\n\n def test_check_equal_default_value(self):\n opt1 = caffe2_pb2.DeviceOption()\n opt2 = caffe2_pb2.DeviceOption()\n opt1.device_type = 0\n self.assertTrue(core.device_option_equal(opt1, opt2))\n opt1.cuda_gpu_id = 5\n # opt1 still is on CPU, so the options should be equal\n self.assertTrue(core.device_option_equal(opt1, opt2))\n opt2.device_type = 0\n self.assertTrue(core.device_option_equal(opt1, opt2))\n opt1.device_type = 1\n self.assertFalse(core.device_option_equal(opt1, opt2))\n\n\[email protected](not workspace.has_gpu_support, 'No GPU support')\nclass TestInferDevice(test_util.TestCase):\n\n def setUp(self):\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = caffe2_pb2.CUDA\n device_option.cuda_gpu_id = 1\n self.cuda_option = device_option\n self.cpu_option = caffe2_pb2.DeviceOption()\n\n def _test_op(\n self,\n op_name,\n in_option,\n out_option,\n op_option=None,\n inputs=None,\n outputs=None\n ):\n op_option = self.cuda_option if not op_option else op_option\n inputs = [\"blob_1\"] if not inputs else inputs\n outputs = [\"blob_2\"] if not outputs else outputs\n with core.DeviceScope(op_option):\n op = core.CreateOperator(op_name, inputs, outputs)\n input_dev, output_dev = core.InferOpBlobDevices(op)\n for in_dev in input_dev:\n self.assertEqual(in_dev, in_option)\n for out_dev in output_dev:\n self.assertEqual(out_dev, out_option)\n\n def test_infer_device(self):\n self._test_op(\n \"FC\",\n self.cuda_option,\n self.cuda_option,\n op_option=self.cuda_option,\n inputs=[\"data\", \"fc_w\", \"fc_b\"],\n outputs=[\"fc_1\"]\n )\n\n def test_infer_device_cross_device(self):\n self._test_op(\"CopyGPUToCPU\", self.cuda_option, self.cpu_option)\n self._test_op(\"CopyCPUToGPU\", self.cpu_option, self.cuda_option)\n self._test_op(\"EnsureCPUOutput\", self.cuda_option, self.cpu_option)\n self._test_op(\"CopyFromCPUInput\", self.cpu_option, self.cuda_option)\n self._test_op(\n \"EnsureCPUOutput\",\n self.cpu_option,\n self.cpu_option,\n op_option=self.cpu_option\n )\n self._test_op(\n \"CopyFromCPUInput\",\n self.cpu_option,\n self.cpu_option,\n op_option=self.cpu_option\n )\n\n def test_device_inference_function(self):\n # ConcatOp.\n op_option = self.cuda_option\n with core.DeviceScope(op_option):\n op = core.CreateOperator(\n 'Concat',\n ['X_{}'.format(i) for i in range(4)],\n ['concat_result', 'split_info'],\n axis=1)\n input_dev, output_dev = core.InferOpBlobDevices(op)\n # 2nd output's type is CPU irrespective of Concat op's device option.\n self.assertEqual(output_dev[1], self.cpu_option)\n\n #SplitOp.\n op_option = self.cuda_option\n with core.DeviceScope(op_option):\n op = core.CreateOperator(\n 'Split',\n ['input', 'split'],\n ['X_{}'.format(i) for i in range(4)],\n axis=0)\n input_dev, output_dev = core.InferOpBlobDevices(op)\n # 2nd input's type is CPU irrespective of Split op's device option.\n self.assertEqual(input_dev[1], self.cpu_option)\n\n def test_inject_copy(self):\n net = core.Net(\"test\")\n init_net = core.Net(\"init\")\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = caffe2_pb2.CUDA\n device_option.cuda_gpu_id = 1\n weight = init_net.XavierFill([], 'fc_w', shape=[10, 100])\n bias = init_net.ConstantFill([], 'fc_b', shape=[10, ])\n\n with core.DeviceScope(device_option):\n net.FC([\"data\", weight, bias], \"fc1\")\n\n _, blob_to_device = 
core.InjectCrossDeviceCopies(init_net)\n new_net, blob_to_device = core.InjectCrossDeviceCopies(\n net, blob_to_device\n )\n op = new_net._net.op[-1]\n self.assertEqual(op.type, \"FC\")\n self.assertEqual(op.input[0], \"data_cuda_1\")\n self.assertEqual(op.input[1], \"fc_w_cuda_1\")\n self.assertEqual(op.input[2], \"fc_b_cuda_1\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n self.assertEqual(new_net._net.op[-2].type, \"CopyCPUToGPU\")\n self.assertEqual(new_net._net.op[0].type, \"CopyCPUToGPU\")\n self.assertNotEqual(blob_to_device[\"fc_w\"], device_option)\n\n def test_cross_nets(self):\n net = core.Net(\"test\")\n init_net = core.Net(\"init\")\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = caffe2_pb2.CUDA\n device_option.cuda_gpu_id = 1\n weight = init_net.XavierFill([], 'fc_w', shape=[10, 100])\n bias = init_net.ConstantFill([], 'fc_b', shape=[10, ])\n const = init_net.ConstantFill([], 'const', shape=[], value=1.)\n with core.DeviceScope(device_option):\n const = init_net.Add([const, const], [const])\n fc_out = net.FC([\"data\", weight, bias], \"fc1\")\n net.Add([fc_out, const], [fc_out])\n\n data_remap = {'data': device_option}\n nets, _ = core.InjectDeviceCopiesAmongNets(\n [init_net, net], blob_to_device_init=data_remap\n )\n op = nets[1]._net.op[0]\n self.assertEqual(op.type, \"CopyCPUToGPU\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n self.assertEqual(op.output[0], \"fc_w_cuda_1\")\n op = nets[1]._net.op[1]\n self.assertEqual(op.type, \"CopyCPUToGPU\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n self.assertEqual(op.output[0], \"fc_b_cuda_1\")\n op = nets[1]._net.op[2]\n self.assertEqual(op.type, \"FC\")\n self.assertEqual(op.input[0], \"data\")\n self.assertEqual(op.input[1], \"fc_w_cuda_1\")\n self.assertEqual(op.input[2], \"fc_b_cuda_1\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n op = nets[1]._net.op[3]\n self.assertEqual(op.type, \"Add\")\n self.assertEqual(op.input[0], \"fc1\")\n self.assertEqual(op.input[1], \"const_cuda_1\")\n # check that moved blob is in input to the new net\n for c in [\"data\", \"fc_w\", \"fc_b\", \"const_cuda_1\"]:\n self.assertTrue(c in nets[1]._net.external_input)\n \"\"\"\nFor reference, net.Proto() should be like:\nname: \"\"\nop {\n input: \"fc_w\"\n output: \"fc_w_cuda_1\"\n name: \"\"\n type: \"CopyCPUToGPU\"\n device_option {\n device_type: 1\n cuda_gpu_id: 1\n }\n}\nop {\n input: \"fc_b\"\n output: \"fc_b_cuda_1\"\n name: \"\"\n type: \"CopyCPUToGPU\"\n device_option {\n device_type: 1\n cuda_gpu_id: 1\n }\n}\nop {\n input: \"data\"\n input: \"fc_w_cuda_1\"\n input: \"fc_b_cuda_1\"\n output: \"fc1\"\n name: \"\"\n type: \"FC\"\n device_option {\n device_type: 1\n cuda_gpu_id: 1\n }\n}\nop {\n input: \"fc1\"\n input: \"const_cuda_1\"\n output: \"fc1\"\n name: \"\"\n type: \"Add\"\n device_option {\n device_type: 1\n cuda_gpu_id: 1\n }\n}\nexternal_input: \"data\"\nexternal_input: \"fc_w\"\nexternal_input: \"fc_b\"\nexternal_input: \"const\"\nexternal_input: \"const_cuda_1\"\n\"\"\"\n\n def test_cross_nets_no_change(self):\n net = core.Net(\"test\")\n init_net = core.Net(\"init\")\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = caffe2_pb2.CUDA\n device_option.cuda_gpu_id = 1\n\n with core.DeviceScope(device_option):\n weight = init_net.XavierFill([], 
'fc_w', shape=[10, 100])\n bias = init_net.ConstantFill([], 'fc_b', shape=[10, ])\n net.FC([\"data\", weight, bias], \"fc1\")\n\n data_remap = {'data': device_option}\n nets = core.InjectDeviceCopiesAmongNetsWithoutB2D(\n [init_net, net], blob_to_device_init=data_remap\n )\n op = nets[1]._net.op[0]\n self.assertEqual(op.type, \"FC\")\n self.assertEqual(op.input[0], \"data\")\n self.assertEqual(op.input[1], \"fc_w\")\n self.assertEqual(op.input[2], \"fc_b\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n \"\"\"\nFor reference, net.Proto() should be like:\nname: \"\"\nop {\n input: \"data\"\n input: \"fc_w\"\n input: \"fc_b\"\n output: \"fc1\"\n name: \"\"\n type: \"FC\"\n device_option {\n device_type: 1\n cuda_gpu_id: 1\n }\n}\nexternal_input: \"data\"\nexternal_input: \"fc_w\"\nexternal_input: \"fc_b\"\n\"\"\"\n\n def test_inject_copy_multi_use(self):\n net = core.Net(\"test\")\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = caffe2_pb2.CUDA\n device_option.cuda_gpu_id = 1\n\n with core.DeviceScope(device_option):\n net.Relu(\"data\", \"relu1\")\n net.Relu(\"data\", \"relu2\")\n with core.DeviceScope(device_option):\n net.Relu(\"data\", \"relu3\")\n net.Relu(\"data\", \"relu4\")\n device_option.cuda_gpu_id = 0\n with core.DeviceScope(device_option):\n net.Relu(\"data\", \"relu5\")\n device_option.cuda_gpu_id = 1\n with core.DeviceScope(device_option):\n net.Relu(\"data\", \"relu6\")\n\n new_net, _ = core.InjectCrossDeviceCopies(net)\n op = new_net._net.op[0]\n self.assertEqual(op.type, \"CopyCPUToGPU\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n self.assertEqual(op.output[0], \"data_cuda_1\")\n op = new_net._net.op[1]\n self.assertEqual(op.type, \"Relu\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n self.assertEqual(op.output[0], \"relu1\")\n op = new_net._net.op[2]\n self.assertEqual(op.type, \"Relu\")\n self.assertEqual(op.device_option.device_type, 0)\n self.assertEqual(op.output[0], \"relu2\")\n op = new_net._net.op[3]\n self.assertEqual(op.type, \"Relu\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n self.assertEqual(op.input[0], \"data_cuda_1\")\n self.assertEqual(op.output[0], \"relu3\")\n op = new_net._net.op[4]\n self.assertEqual(op.type, \"Relu\")\n self.assertEqual(op.device_option.device_type, 0)\n self.assertEqual(op.output[0], \"relu4\")\n op = new_net._net.op[5]\n self.assertEqual(op.type, \"CopyCPUToGPU\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 0)\n self.assertEqual(op.output[0], \"data_cuda_0\")\n op = new_net._net.op[6]\n self.assertEqual(op.type, \"Relu\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 0)\n self.assertEqual(op.input[0], \"data_cuda_0\")\n self.assertEqual(op.output[0], \"relu5\")\n op = new_net._net.op[7]\n self.assertEqual(op.type, \"Relu\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 1)\n self.assertEqual(op.input[0], \"data_cuda_1\")\n self.assertEqual(op.output[0], \"relu6\")\n \"\"\"\nFor reference, net.Proto() should be like:\nname: \"\"\nop {\n input: \"data\"\n output: \"data_cuda_1\"\n name: \"\"\n type: \"CopyCPUToGPU\"\n device_option {\n device_type: 1\n cuda_gpu_id: 1\n }\n}\nop {\n input: \"data_cuda_1\"\n 
output: \"relu1\"\n name: \"\"\n type: \"Relu\"\n device_option {\n device_type: 1\n cuda_gpu_id: 1\n }\n}\nop {\n input: \"data\"\n output: \"relu2\"\n name: \"\"\n type: \"Relu\"\n}\nop {\n input: \"data_cuda_1\"\n output: \"relu3\"\n name: \"\"\n type: \"Relu\"\n device_option {\n device_type: 1\n cuda_gpu_id: 1\n }\n}\nop {\n input: \"data\"\n output: \"relu4\"\n name: \"\"\n type: \"Relu\"\n}\nop {\n input: \"data\"\n output: \"data_cuda_0\"\n name: \"\"\n type: \"CopyCPUToGPU\"\n device_option {\n device_type: 1\n cuda_gpu_id: 0\n }\n}\nop {\n input: \"data_cuda_0\"\n output: \"relu5\"\n name: \"\"\n type: \"Relu\"\n device_option {\n device_type: 1\n cuda_gpu_id: 0\n }\n}\nop {\n input: \"data_cuda_1\"\n output: \"relu6\"\n name: \"\"\n type: \"Relu\"\n device_option {\n device_type: 1\n cuda_gpu_id: 1\n }\n}\nexternal_input: \"data\"\n\"\"\"\n\n def test_inject_copy_placeholder_ops(self):\n '''\n Test inject cross device copies with placeholder ops. Placeholder ops\n are decorator/fake ops that don't have operator schema.\n '''\n # Create CPU and GPU devices on 2 nodes.\n cpu_device = []\n gpu_device = []\n for i in range(0, 2):\n cpu_device.append(caffe2_pb2.DeviceOption())\n cpu_device[i].node_name = 'node:' + str(i)\n gpu_device.append(caffe2_pb2.DeviceOption())\n gpu_device[i].device_type = caffe2_pb2.CUDA\n gpu_device[i].cuda_gpu_id = 0\n gpu_device[i].node_name = 'node:' + str(i)\n send_node = 'node:0'\n recv_node = 'node:1'\n placeholder_send = 'Placeholder:Dummy:Send'\n placeholder_recv = 'Placeholder:Dummy:Recv'\n\n # init_net.\n init_net = core.Net(\"init_net\")\n with core.DeviceScope(gpu_device[0]):\n weight = init_net.XavierFill([], 'fc_w', shape=[10, 100])\n bias = init_net.ConstantFill([], 'fc_b', shape=[10, ])\n with core.DeviceScope(cpu_device[0]):\n op = core.CreateOperator(\n placeholder_send, [weight, bias], [],\n dst_node=recv_node, callsite_id=0)\n init_net._net.op.extend([op])\n\n # train_net\n train_net = core.Net(\"train_net\")\n with core.DeviceScope(cpu_device[1]):\n # XXX. replace hardcoded op name. 
Move test to net_transforms.\n op = core.CreateOperator(\n placeholder_recv, [], [weight, bias],\n src_node=send_node, callsite_id=0)\n train_net._net.op.extend([op])\n train_net.FC([\"data\", weight, bias], \"fc1\")\n\n # Inject cross device copies.\n init_net, x_dev_state = core.InjectCrossDeviceCopies(\n init_net,\n placeHolderOps=[placeholder_send, placeholder_recv])\n train_net, x_dev_state = core.InjectCrossDeviceCopies(\n train_net, x_dev_state,\n placeHolderOps=[placeholder_send, placeholder_recv])\n\n # Verify (init_net)\n op = init_net._net.op[2]\n self.assertEqual(op.type, \"CopyGPUToCPU\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 0)\n self.assertEqual(op.output[0], \"fc_w_cpu\")\n op = init_net._net.op[3]\n self.assertEqual(op.type, \"CopyGPUToCPU\")\n self.assertEqual(op.device_option.device_type, 1)\n self.assertEqual(op.device_option.cuda_gpu_id, 0)\n self.assertEqual(op.output[0], \"fc_b_cpu\")\n op = init_net._net.op[4]\n self.assertEqual(op.type, placeholder_send)\n self.assertEqual(op.device_option.device_type, 0)\n self.assertEqual(op.input[0], \"fc_w_cpu\")\n self.assertEqual(op.input[1], \"fc_b_cpu\")\n # Verify (train_net)\n op = train_net._net.op[0]\n self.assertEqual(op.type, placeholder_recv)\n self.assertEqual(op.device_option.device_type, 0)\n self.assertEqual(op.output[0], \"fc_w_cpu\")\n self.assertEqual(op.output[1], \"fc_b_cpu\")\n op = train_net._net.op[3]\n self.assertEqual(op.type, \"FC\")\n self.assertEqual(op.device_option.device_type, 0)\n self.assertEqual(op.input[1], \"fc_w_cpu\")\n self.assertEqual(op.input[2], \"fc_b_cpu\")\n\n def test_blob_inplace(self):\n net = core.Net(\"test\")\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = caffe2_pb2.CUDA\n device_option.cuda_gpu_id = 1\n\n net.Adagrad(['param', 'moment', 'grad', 'lr'], ['param', 'moment'])\n with core.DeviceScope(device_option):\n net.Relu(\"param\", \"param_relu_no_sense\")\n net, _ = core.InjectCrossDeviceCopies(net)\n op = net._net.op[1]\n self.assertEqual(op.type, 'CopyCPUToGPU')\n self.assertEqual(op.input[0], 'param')\n self.assertEqual(op.output[0], 'param_cuda_1')\n op = net._net.op[2]\n self.assertEqual(op.input[0], 'param_cuda_1')\n\n net.Relu('nonsense_input', 'moment')\n # should not raise inplace error\n core.InjectCrossDeviceCopies(net)\n with core.DeviceScope(device_option):\n net.Relu('nonsense_input_gpu', 'moment')\n with self.assertRaises(RuntimeError):\n core.InjectCrossDeviceCopies(net)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom hypothesis import assume, given\nimport hypothesis.strategies as st\n\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import core, workspace\nimport caffe2.python.hypothesis_test_util as hu\n\nimport unittest\n\n\ndef _cudnn_supports(\n dilation=False,\n nhwc=False,\n):\n \"\"\"Return True if cuDNN supports this configuration.\"\"\"\n v = workspace.GetCuDNNVersion()\n if dilation and v < 6000:\n # Dilation not supported until v6\n return False\n if dilation and nhwc:\n # Dilation and NHWC not supported together\n return False\n return True\n\n\ndef _conv_1d_output_size(size, kernel, pad, dilation, stride):\n return max(\n 1,\n int((size + pad * 2 - (dilation * (kernel - 1) + 1)) / stride) + 1\n )\n\n\ndef _conv_2d_output_size(size, kernel, pad_h, pad_w, dilation,\n stride_h, stride_w):\n return [\n _conv_1d_output_size(size, kernel, pad_h, dilation, stride_h),\n _conv_1d_output_size(size, kernel, pad_w, dilation, stride_w)\n ]\n\n\ndef _conv_2d_offsets_dims(\n batch_size,\n size,\n kernel,\n pad_h,\n pad_w,\n dilation,\n stride_h,\n stride_w,\n deformable_group\n):\n dims = [batch_size, 2 * kernel * kernel * deformable_group]\n dims.extend(_conv_2d_output_size(size, kernel, pad_h, pad_w,\n dilation, stride_h, stride_w))\n return dims\n\n\ndef _conv_2d_random_offsets(\n batch_size,\n kernel,\n dims,\n num_deformable_group\n):\n o = []\n for y0 in range(0, kernel):\n for x0 in range(0, kernel):\n # stay away from integer offsets which correspond to \"ridges\" on the\n # interpolated surface resulting in less precise estimates\n x = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)\n y = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)\n o.append(y - y0)\n o.append(x - x0)\n o = o * num_deformable_group\n e = []\n for v in o:\n e.append([[v] * dims[1]] * dims[0])\n return np.array([e] * batch_size).astype(np.float32)\n\n\ndef _conv_2d_shuffle_offsets(\n batch_size,\n kernel,\n dims,\n num_deformable_group,\n input_channels,\n output_channels\n):\n o = []\n w0 = [[0 for x in range(kernel)] for y in range(kernel)]\n for y0 in range(0, kernel):\n for x0 in range(0, kernel):\n x = np.random.randint(0, kernel)\n y = np.random.randint(0, kernel)\n o.append(y - y0)\n o.append(x - x0)\n w0[y][x] += 1\n o = o * num_deformable_group\n e = []\n for v in o:\n e.append([[v] * int(dims[1])] * int(dims[0]))\n w0 = [[w0] * input_channels] * output_channels\n return (\n np.array([e] * batch_size).astype(np.float32),\n np.array(w0).astype(np.float32).transpose((0, 2, 3, 1))\n )\n\n\nclass TestConvolution(hu.HypothesisTestCase):\n\n @unittest.skipIf(not workspace.has_gpu_support, \"No gpu support\")\n @given(stride=st.integers(1, 3),\n pad=st.integers(0, 3),\n kernel=st.integers(1, 5),\n dilation=st.integers(1, 3),\n size=st.integers(7, 10),\n input_channels=st.integers(1, 8),\n output_channels=st.integers(1, 8),\n batch_size=st.integers(1, 3),\n order=st.sampled_from([\"NCHW\"]),\n engine=st.sampled_from([\"\", \"CUDNN\", \"MKLDNN\"]),\n use_bias=st.booleans(),\n deformable_group=st.integers(1, 3),\n **hu.gcs_gpu_only)\n def test_null_offset_convolution(self, stride, pad, kernel, dilation, size,\n input_channels, output_channels, batch_size,\n order, engine, use_bias, deformable_group,\n gc, dc):\n dkernel = dilation * (kernel - 1) + 1\n\n if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':\n assume(_cudnn_supports(dilation=(dilation 
> 1),\n nhwc=(order == 'NHWC')))\n\n assume(engine != \"MKLDNN\" or use_bias is True)\n\n op = core.CreateOperator(\n \"DeformConv\",\n [\"X\", \"o\", \"w\", \"b\"] if use_bias else [\"X\", \"o\", \"w\"],\n [\"Y\"],\n stride=stride,\n kernel=kernel,\n dilation=dilation,\n pad=pad,\n order=order,\n engine=engine,\n deformable_group=deformable_group,\n )\n offset_dims = _conv_2d_offsets_dims(batch_size, size, kernel, pad, pad,\n dilation, stride, stride,\n deformable_group)\n X = np.random.rand(\n batch_size, size, size, input_channels).astype(np.float32) - 0.5\n o = np.zeros(tuple(offset_dims), np.float32)\n w = np.random.rand(\n output_channels, kernel, kernel, input_channels).astype(np.float32)\\\n - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n if order == \"NCHW\":\n X = X.transpose((0, 3, 1, 2))\n w = w.transpose((0, 3, 1, 2))\n\n inputs = [X, o, w, b] if use_bias else [X, o, w]\n\n # Error handling path.\n if size + pad + pad < dkernel or size + pad + pad < dkernel:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if input_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if output_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n\n def reference_conv_op(*args):\n reference_op = core.CreateOperator(\n \"Conv\",\n [\"X\", \"w\", \"b\"] if use_bias else [\"X\", \"w\"],\n [\"Y0\"],\n stride=stride,\n kernel=kernel,\n dilation=dilation,\n pad=pad,\n order=order,\n engine=engine,\n device_option=gc\n )\n workspace.RunOperatorOnce(reference_op)\n reference_blob = workspace.FetchBlob(\"Y0\")\n return (reference_blob,)\n\n self.assertReferenceChecks(gc, op, inputs, reference_conv_op)\n\n @unittest.skipIf(not workspace.has_gpu_support, \"No gpu support\")\n @given(stride=st.integers(1, 3),\n pad=st.integers(0, 0),\n kernel=st.integers(1, 5),\n dilation=st.integers(1, 3),\n size=st.integers(7, 10),\n input_channels=st.integers(1, 8),\n output_channels=st.integers(1, 8),\n batch_size=st.integers(1, 3),\n order=st.sampled_from([\"NCHW\"]),\n engine=st.sampled_from([\"\", \"CUDNN\", \"MKLDNN\"]),\n use_bias=st.booleans(),\n deformable_group=st.integers(1, 4),\n **hu.gcs_gpu_only)\n def test_flat_input_convolution(self, stride, pad, kernel, dilation, size,\n input_channels, output_channels, batch_size,\n order, engine, use_bias,\n deformable_group, gc, dc):\n dkernel = dilation * (kernel - 1) + 1\n\n if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':\n assume(_cudnn_supports(dilation=(dilation > 1),\n nhwc=(order == 'NHWC')))\n\n assume(engine != \"MKLDNN\" or use_bias is True)\n\n op = core.CreateOperator(\n \"DeformConv\",\n [\"X\", \"o\", \"w\", \"b\"] if use_bias else [\"X\", \"o\", \"w\"],\n [\"Y\"],\n stride=stride,\n kernel=kernel,\n dilation=dilation,\n pad=pad,\n order=order,\n engine=engine,\n deformable_group=deformable_group,\n )\n X = np.ones((batch_size, size, size, input_channels), np.float32) - 0.5\n output_size = _conv_2d_output_size(size, kernel, pad, pad,\n dilation, stride, stride)\n o = _conv_2d_random_offsets(batch_size, kernel, output_size,\n deformable_group)\n w = np.ones((output_channels, kernel, kernel, input_channels), np.float32) - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n if order == \"NCHW\":\n X = X.transpose((0, 3, 1, 2))\n w = w.transpose((0, 3, 1, 2))\n\n inputs = [X, o, w, b] if use_bias else [X, 
o, w]\n\n # Error handling path.\n if size + pad + pad < dkernel or size + pad + pad < dkernel:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if input_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if output_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n\n def reference_conv_op(*args):\n reference_op = core.CreateOperator(\n \"Conv\",\n [\"X\", \"w\", \"b\"] if use_bias else [\"X\", \"w\"],\n [\"Y0\"],\n stride=stride,\n kernel=kernel,\n dilation=dilation,\n pad=pad,\n order=order,\n engine=engine,\n device_option=gc\n )\n workspace.RunOperatorOnce(reference_op)\n reference_blob = workspace.FetchBlob(\"Y0\")\n return (reference_blob,)\n\n self.assertReferenceChecks(gc, op, inputs, reference_conv_op)\n\n @unittest.skipIf(not workspace.has_gpu_support, \"No gpu support\")\n @given(stride=st.integers(1, 1),\n pad=st.integers(0, 0),\n kernel=st.integers(1, 5),\n dilation=st.integers(1, 1),\n size=st.integers(7, 10),\n input_channels=st.integers(1, 8),\n output_channels=st.integers(1, 8),\n batch_size=st.integers(1, 3),\n order=st.sampled_from([\"NCHW\"]),\n engine=st.sampled_from([\"\", \"CUDNN\", \"MKLDNN\"]),\n use_bias=st.booleans(),\n deformable_group=st.integers(1, 4),\n **hu.gcs_gpu_only)\n def test_shuffle_input_convolution(self, stride, pad, kernel, dilation, size,\n input_channels, output_channels, batch_size,\n order, engine, use_bias,\n deformable_group, gc, dc):\n dkernel = dilation * (kernel - 1) + 1\n\n if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':\n assume(_cudnn_supports(dilation=(dilation > 1),\n nhwc=(order == 'NHWC')))\n\n assume(engine != \"MKLDNN\" or use_bias is True)\n\n op = core.CreateOperator(\n \"DeformConv\",\n [\"X\", \"o\", \"w\", \"b\"] if use_bias else [\"X\", \"o\", \"w\"],\n [\"Y\"],\n stride=stride,\n kernel=kernel,\n dilation=dilation,\n pad=pad,\n order=order,\n engine=engine,\n deformable_group=deformable_group,\n )\n X = np.random.rand(\n batch_size, size, size, input_channels).astype(np.float32) - 0.5\n output_size = _conv_2d_output_size(size, kernel, pad, pad,\n dilation, stride, stride)\n o, w0 = _conv_2d_shuffle_offsets(batch_size, kernel, output_size,\n deformable_group, input_channels,\n output_channels)\n w = np.ones((output_channels, kernel, kernel, input_channels), np.float32)\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n\n if order == \"NCHW\":\n X = X.transpose((0, 3, 1, 2))\n w = w.transpose((0, 3, 1, 2))\n w0 = w0.transpose((0, 3, 1, 2))\n\n inputs = [X, o, w, b] if use_bias else [X, o, w]\n\n # Error handling path.\n if size + pad + pad < dkernel or size + pad + pad < dkernel:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if input_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if output_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n\n def reference_conv_op(*args):\n with core.DeviceScope(gc):\n workspace.FeedBlob(\"w0\", w0)\n reference_op = core.CreateOperator(\n \"Conv\",\n [\"X\", \"w0\", \"b\"] if use_bias else [\"X\", \"w0\"],\n [\"Y0\"],\n stride=stride,\n kernel=kernel,\n dilation=dilation,\n pad=pad,\n order=order,\n engine=engine,\n device_option=gc\n )\n 
workspace.RunOperatorOnce(reference_op)\n reference_blob = workspace.FetchBlob(\"Y0\")\n return (reference_blob,)\n\n self.assertReferenceChecks(gc, op, inputs, reference_conv_op)\n\n # CUDNN does NOT support different padding values and we skip it\n @unittest.skipIf(not workspace.has_gpu_support, \"No gpu support\")\n @given(stride_h=st.integers(1, 3),\n stride_w=st.integers(1, 3),\n pad_h=st.integers(0, 3),\n pad_w=st.integers(0, 3),\n kernel=st.integers(2, 5),\n size=st.integers(1, 8),\n input_channels=st.integers(1, 3),\n output_channels=st.integers(1, 3),\n batch_size=st.integers(1, 3),\n order=st.sampled_from([\"NCHW\"]),\n engine=st.sampled_from([\"\", \"EIGEN\"]),\n shared_buffer=st.booleans(),\n use_bias=st.booleans(),\n deformable_group=st.integers(1, 3),\n **hu.gcs_gpu_only)\n def test_conv_separate_stride_pad_gradients(self, stride_h, stride_w,\n pad_h, pad_w, kernel, size,\n input_channels, output_channels,\n batch_size, order, engine,\n shared_buffer, use_bias,\n deformable_group, gc, dc):\n op = core.CreateOperator(\n \"DeformConv\",\n [\"X\", \"o\", \"w\", \"b\"] if use_bias else [\"X\", \"o\", \"w\"],\n [\"Y\"],\n stride_h=stride_h,\n stride_w=stride_w,\n pad_t=pad_h,\n pad_l=pad_w,\n pad_b=pad_h,\n pad_r=pad_w,\n kernel=kernel,\n order=order,\n engine=engine,\n shared_buffer=int(shared_buffer),\n deformable_group=deformable_group,\n )\n X = np.random.rand(\n batch_size, size, size, input_channels).astype(np.float32) - 0.5\n output_size = _conv_2d_output_size(size, kernel, pad_h, pad_w, 1,\n stride_h, stride_w)\n o = _conv_2d_random_offsets(batch_size, kernel, output_size,\n deformable_group)\n w = np.random.rand(\n output_channels, kernel, kernel, input_channels).astype(np.float32)\\\n - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n if order == \"NCHW\":\n X = X.transpose((0, 3, 1, 2))\n w = w.transpose((0, 3, 1, 2))\n\n inputs = [X, o, w, b] if use_bias else [X, o, w]\n\n # Error handling path.\n if size + pad_h < kernel or size + pad_w < kernel:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if input_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if output_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n @unittest.skipIf(not workspace.has_gpu_support, \"No gpu support\")\n @given(stride=st.integers(1, 3),\n pad=st.integers(0, 3),\n kernel=st.integers(1, 5),\n dilation=st.integers(1, 3),\n size=st.integers(7, 10),\n input_channels=st.integers(1, 8),\n output_channels=st.integers(1, 8),\n batch_size=st.integers(1, 3),\n order=st.sampled_from([\"NCHW\"]),\n engine=st.sampled_from([\"\", \"CUDNN\", \"MKLDNN\"]),\n use_bias=st.booleans(),\n deformable_group=st.integers(1, 3),\n **hu.gcs_gpu_only)\n def test_conv_gradients(self, stride, pad, kernel, dilation, size,\n input_channels, output_channels, batch_size, order,\n engine, use_bias, deformable_group, gc, dc):\n dkernel = dilation * (kernel - 1) + 1\n\n if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':\n assume(_cudnn_supports(dilation=(dilation > 1),\n nhwc=(order == 'NHWC')))\n\n assume(engine != \"MKLDNN\" or use_bias is True)\n\n op = core.CreateOperator(\n \"DeformConv\",\n [\"X\", \"o\", \"w\", \"b\"] if use_bias else [\"X\", \"o\", 
\"w\"],\n [\"Y\"],\n stride=stride,\n kernel=kernel,\n dilation=dilation,\n pad=pad,\n order=order,\n engine=engine,\n deformable_group=deformable_group,\n )\n X = np.random.rand(\n batch_size, size, size, input_channels).astype(np.float32) - 0.5\n output_size = _conv_2d_output_size(size, kernel, pad, pad,\n dilation, stride, stride)\n o = _conv_2d_random_offsets(batch_size, kernel, output_size, deformable_group)\n w = np.random.rand(\n output_channels, kernel, kernel, input_channels).astype(np.float32)\\\n - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n if order == \"NCHW\":\n X = X.transpose((0, 3, 1, 2))\n w = w.transpose((0, 3, 1, 2))\n\n inputs = [X, o, w, b] if use_bias else [X, o, w]\n # Error handling path.\n if size + pad + pad < dkernel or size + pad + pad < dkernel:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if input_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n if output_channels % deformable_group != 0:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n\nif __name__ == \"__main__\":\n import unittest\n unittest.main()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import core\nfrom hypothesis import assume, given, settings\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\n\n\nclass TestFcOperator(hu.HypothesisTestCase):\n def _run_test(self, n, m, k, transposed, multi_dim, dtype, engine, gc, dc):\n if dtype == np.float16:\n # fp16 only supported with CUDA\n assume(gc.device_type == caffe2_pb2.CUDA)\n dc = [d for d in dc if d.device_type == caffe2_pb2.CUDA]\n\n if engine == 'TENSORCORE':\n # TensorCore only makes sense with CUDA\n assume(gc.device_type == caffe2_pb2.CUDA)\n # ensures TensorCore kernels can be called\n m *= 8\n k *= 8\n n *= 8\n\n X = np.random.rand(m, k).astype(dtype) - 0.5\n if multi_dim:\n if transposed:\n W = np.random.rand(k, n, 1, 1).astype(dtype) - 0.5\n else:\n W = np.random.rand(n, k, 1, 1).astype(dtype) - 0.5\n else:\n if transposed:\n W = np.random.rand(k, n).astype(dtype) - 0.5\n else:\n W = np.random.rand(n, k).astype(dtype) - 0.5\n b = np.random.rand(n).astype(dtype) - 0.5\n\n def fc_op(X, W, b):\n return [np.dot(X, W.reshape(n, k).transpose()) + b.reshape(n)]\n\n def fc_tranposed_op(X, W, b):\n return [np.dot(X, W.reshape(k, n)) + b.reshape(n)]\n\n op = core.CreateOperator(\n 'FCTransposed' if transposed else 'FC',\n ['X', 'W', 'b'],\n 'out',\n engine=engine,\n )\n\n if dtype == np.float16 and gc.device_type == caffe2_pb2.CUDA:\n a = caffe2_pb2.Argument()\n a.i = 1\n a.name = \"float16_compute\"\n op.arg.extend([a])\n\n # Check against numpy reference\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[X, W, b],\n reference=fc_tranposed_op if transposed else fc_op,\n )\n # Check over multiple devices\n self.assertDeviceChecks(dc, op, [X, W, b], [0])\n\n # Gradient checks\n threshold = 0.5 if dtype == np.float16 else 0.005\n stepsize = 0.5 if dtype == np.float16 else 0.05\n for i in range(3):\n self.assertGradientChecks(gc, op, [X, W, b], i, [0],\n threshold=threshold, stepsize=stepsize)\n\n @settings(max_examples=50)\n @given(n=st.integers(1, 5),\n m=st.integers(0, 5),\n k=st.integers(1, 5),\n multi_dim=st.sampled_from([True, False]),\n dtype=st.sampled_from([np.float32, np.float16]),\n engine=st.sampled_from(['', 'TENSORCORE']),\n **hu.gcs)\n def test_fc(self, **kwargs):\n self._run_test(transposed=False, **kwargs)\n\n @settings(max_examples=50)\n @given(n=st.integers(1, 5),\n m=st.integers(0, 5),\n k=st.integers(1, 5),\n multi_dim=st.sampled_from([True, False]),\n dtype=st.sampled_from([np.float32, np.float16]),\n engine=st.sampled_from(['', 'TENSORCORE']),\n **hu.gcs)\n def test_fc_transposed(self, **kwargs):\n self._run_test(transposed=True, **kwargs)\n\n\nif __name__ == \"__main__\":\n import unittest\n unittest.main()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import OrderedDict\nimport numpy as np\n\nfrom caffe2.python import core\nfrom hypothesis import given\nimport caffe2.python.hypothesis_test_util as hu\n\n\nclass TestFlexibleTopK(hu.HypothesisTestCase):\n def flexible_top_k_ref(self, X, k):\n X_flat = X.reshape((-1, X.shape[-1]))\n indices_ref = np.ndarray(shape=sum(k), dtype=np.int32)\n values_ref = np.ndarray(shape=sum(k), dtype=np.float32)\n offset = 0\n for i in range(X_flat.shape[0]):\n od = OrderedDict()\n for j in range(X_flat.shape[1]):\n val = X_flat[i, j]\n if val not in od:\n od[val] = []\n od[val].append(j)\n k_ = 0\n for val, idxs in sorted(od.items(), reverse=True):\n for idx in idxs:\n indices_ref[offset + k_] = idx\n values_ref[offset + k_] = val\n k_ += 1\n if k_ >= k[i]:\n break\n if k_ >= k[i]:\n break\n offset += k[i]\n\n return (values_ref, indices_ref)\n\n @given(X=hu.tensor(min_dim=2), **hu.gcs_cpu_only)\n def test_flexible_top_k(self, X, gc, dc):\n X = X.astype(dtype=np.float32)\n k_shape = (int(X.size / X.shape[-1]), )\n k = np.random.randint(1, high=X.shape[-1] + 1, size=k_shape)\n\n output_list = [\"Values\", \"Indices\"]\n op = core.CreateOperator(\"FlexibleTopK\", [\"X\", \"k\"], output_list,\n device_option=gc)\n\n def bind_ref(X_loc, k):\n ret = self.flexible_top_k_ref(X_loc, k)\n return ret\n\n self.assertReferenceChecks(gc, op, [X, k], bind_ref)\n\n @given(X=hu.tensor(min_dim=2), **hu.gcs_cpu_only)\n def test_flexible_top_k_grad(self, X, gc, dc):\n X = X.astype(np.float32)\n k_shape = (int(X.size / X.shape[-1]), )\n k = np.random.randint(1, high=X.shape[-1] + 1, size=k_shape)\n\n # this try to make sure adding stepsize (0.05)\n # will not change TopK selections at all\n # since dims max_value = 5 as defined in\n # caffe2/caffe2/python/hypothesis_test_util.py\n for i in range(X.shape[-1]):\n X[..., i] = i * 1.0 / X.shape[-1]\n\n op = core.CreateOperator(\n \"FlexibleTopK\", [\"X\", \"k\"], [\"Values\", \"Indices\"], device_option=gc\n )\n\n self.assertGradientChecks(gc, op, [X, k], 0, [0])\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom caffe2.python import core\nfrom hypothesis import given\n\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\nimport unittest\n\n\nclass TestSpecializedSegmentOps(hu.HypothesisTestCase):\n\n @given(batchsize=st.integers(1, 20),\n fptype=st.sampled_from([np.float16, np.float32]),\n fp16asint=st.booleans(),\n blocksize=st.sampled_from([8, 17, 32, 64, 85, 96, 128, 163]),\n normalize_by_lengths=st.booleans(), **hu.gcs)\n def test_sparse_lengths_sum_cpu(\n self, batchsize, fptype, fp16asint, blocksize, normalize_by_lengths, gc, dc):\n\n if normalize_by_lengths == False:\n print(\"<test_sparse_lengths_sum_cpu>\")\n else:\n print(\"<test_sparse_lengths_sum_mean_cpu>\")\n\n tblsize = 300\n if fptype == np.float32:\n Tbl = np.random.rand(tblsize, blocksize).astype(np.float32)\n atol = 1e-5\n else:\n if fp16asint:\n Tbl = (10.0 * np.random.rand(tblsize, blocksize)\n ).round().astype(np.float16)\n atol = 1e-3\n else:\n Tbl = np.random.rand(tblsize, blocksize).astype(np.float16)\n atol = 1e-1\n\n # array of each row length\n Lengths = np.random.randint(1, 30, size=batchsize).astype(np.int32)\n # flat indices\n Indices = np.random.randint(\n 0, tblsize, size=sum(Lengths)).astype(np.int64)\n\n if normalize_by_lengths == False:\n op = core.CreateOperator(\"SparseLengthsSum\", [\n \"Tbl\", \"Indices\", \"Lengths\"], \"out\")\n else:\n op = core.CreateOperator(\"SparseLengthsMean\", [\n \"Tbl\", \"Indices\", \"Lengths\"], \"out\")\n\n self.ws.create_blob(\"Tbl\").feed(Tbl)\n self.ws.create_blob(\"Indices\").feed(Indices)\n self.ws.create_blob(\"Lengths\").feed(Lengths)\n self.ws.run(op)\n\n def sparse_lengths_sum_ref(Tbl, Indices, Lengths):\n rptr = np.cumsum(np.insert(Lengths, [0], [0]))\n out = np.zeros((len(Lengths), blocksize))\n if normalize_by_lengths == False:\n for i in range(0, len(rptr[0:-1])):\n out[i] = Tbl[Indices[rptr[i]:rptr[i + 1]]].sum(axis=0)\n else:\n for i in range(0, len(rptr[0:-1])):\n out[i] = Tbl[Indices[rptr[i]:rptr[i + 1]]\n ].sum(axis=0) * 1.0 / float(Lengths[i])\n\n return out\n\n np.testing.assert_allclose(self.ws.blobs[(\"out\")].fetch(),\n sparse_lengths_sum_ref(Tbl, Indices, Lengths), rtol=1e-3, atol=atol)\n\n @given(batchsize=st.integers(1, 20),\n fptype=st.sampled_from([np.float16, np.float32]),\n fp16asint=st.booleans(),\n blocksize=st.sampled_from([8, 17, 32, 64, 85, 96, 128, 163]),\n **hu.gcs)\n def test_sparse_lengths_weightedsum_cpu(\n self, batchsize, fptype, fp16asint, blocksize, gc, dc):\n\n print(\"<test_sparse_lengths_weightedsum_cpu>\")\n\n tblsize = 300\n if fptype == np.float32:\n Tbl = np.random.rand(tblsize, blocksize).astype(np.float32)\n atol = 1e-5\n else:\n if fp16asint:\n Tbl = (10.0 * np.random.rand(tblsize, blocksize)\n ).round().astype(np.float16)\n atol = 1e-3\n else:\n Tbl = np.random.rand(tblsize, blocksize).astype(np.float16)\n atol = 1e-1\n\n # array of each row length\n Lengths = np.random.randint(1, 30, size=batchsize).astype(np.int32)\n # flat indices\n Indices = np.random.randint(\n 0, tblsize, size=sum(Lengths)).astype(np.int64)\n Weights = np.random.rand(sum(Lengths)).astype(np.float32)\n\n op = core.CreateOperator(\"SparseLengthsWeightedSum\", [\n \"Tbl\", \"Weights\", \"Indices\", \"Lengths\"], \"out\")\n\n self.ws.create_blob(\"Tbl\").feed(Tbl)\n self.ws.create_blob(\"Indices\").feed(Indices)\n 
self.ws.create_blob(\"Lengths\").feed(Lengths)\n self.ws.create_blob(\"Weights\").feed(Weights)\n self.ws.run(op)\n\n def sparse_lengths_weightedsum_ref(Tbl, Weights, Indices, Lengths):\n rptr = np.cumsum(np.insert(Lengths, [0], [0]))\n out = np.zeros((len(Lengths), blocksize))\n for i in range(0, len(rptr[0:-1])):\n w = Weights[rptr[i]:rptr[i + 1]]\n out[i] = (Tbl[Indices[rptr[i]:rptr[i + 1]]]\n * w[:, np.newaxis]).sum(axis=0)\n return out\n\n np.testing.assert_allclose(self.ws.blobs[(\"out\")].fetch(),\n sparse_lengths_weightedsum_ref(Tbl, Weights, Indices, Lengths), rtol=1e-3, atol=atol)\n\n\n @given(batchsize=st.integers(1, 20),\n blocksize=st.sampled_from([8, 16, 17, 26, 32, 64, 85, 96, 128, 148, 163]),\n normalize_by_lengths=st.booleans(), **hu.gcs)\n def test_sparse_lengths_weightedsum_8BitsRowwiseOp_cpu(\n self, batchsize, blocksize, normalize_by_lengths, gc, dc):\n\n if normalize_by_lengths == False:\n print(\"<test_sparse_lengths_weightedsum_SparseLengthsWeightedSum8BitsRowwise_cpu>\")\n else:\n print(\"<test_sparse_lengths_weightedsum_SparseLengthsWeightedMean8BitsRowwise_cpu>\")\n\n tblsize = 300\n Tbl = np.random.randint(7, size = (tblsize, blocksize)).astype(np.uint8)\n atol = 1e-5\n\n # array of each row length\n Lengths = np.random.randint(1, 30, size=batchsize).astype(np.int32)\n # flat indices\n Indices = np.random.randint(\n 0, tblsize, size=sum(Lengths)).astype(np.int64)\n Weights = np.random.rand(sum(Lengths)).astype(np.float32)\n Scale_Bias = np.random.rand(tblsize, 2).astype(np.float32)\n\n if normalize_by_lengths == False:\n op = core.CreateOperator(\"SparseLengthsWeightedSum8BitsRowwise\", [\n \"Tbl\", \"Weights\", \"Indices\", \"Lengths\", \"Scale_Bias\"], \"out\")\n else:\n op = core.CreateOperator(\"SparseLengthsWeightedMean8BitsRowwise\", [\n \"Tbl\", \"Weights\", \"Indices\", \"Lengths\", \"Scale_Bias\"], \"out\")\n\n self.ws.create_blob(\"Tbl\").feed(Tbl)\n self.ws.create_blob(\"Weights\").feed(Weights)\n self.ws.create_blob(\"Indices\").feed(Indices)\n self.ws.create_blob(\"Lengths\").feed(Lengths)\n self.ws.create_blob(\"Scale_Bias\").feed(Scale_Bias)\n self.ws.run(op)\n\n def sparse_lengths_weightedsum_8BitsRowwiseOp_cpu_ref(Tbl, Weights, Indices, Lengths, Scale_Bias):\n rptr = np.cumsum(np.insert(Lengths, [0], [0]))\n out = np.zeros((len(Lengths), blocksize))\n for i in range(0, len(rptr[0:-1])):\n w = Weights[rptr[i]:rptr[i + 1]]\n s = Scale_Bias[Indices[rptr[i]:rptr[i + 1]], 0][:, np.newaxis]\n b = Scale_Bias[Indices[rptr[i]:rptr[i + 1]], 1][:, np.newaxis]\n f = 1.0\n if normalize_by_lengths == True:\n f = 1.0 / float(Lengths[i])\n out[i] = (w[:, np.newaxis] *\n (s * Tbl[Indices[rptr[i]:rptr[i + 1]]] + b)).sum(axis=0) * f\n return out\n\n np.testing.assert_allclose(self.ws.blobs[(\"out\")].fetch(),\n sparse_lengths_weightedsum_8BitsRowwiseOp_cpu_ref(Tbl, Weights, Indices, Lengths, Scale_Bias),\n rtol=1e-3, atol=atol)\n\n\n\n @given(batchsize=st.integers(1, 20),\n blocksize=st.sampled_from([8, 16, 17, 26, 32, 64, 85, 96, 128, 148, 163]),\n normalize_by_lengths=st.booleans(), **hu.gcs)\n def test_sparse_lengths_sum_8BitsRowwiseOp_cpu(\n self, batchsize, blocksize, normalize_by_lengths, gc, dc):\n\n if normalize_by_lengths == False:\n print(\"<test_sparse_lengths_sum_SparseLengthsSum8BitsRowwise_cpu>\")\n else:\n print(\"<test_sparse_lengths_sum_SparseLengthsMean8BitsRowwise_cpu>\")\n\n tblsize = 300\n Tbl = np.random.randint(7, size = (tblsize, blocksize)).astype(np.uint8)\n atol = 1e-5\n\n # array of each row length\n Lengths = np.random.randint(1, 30, 
size=batchsize).astype(np.int32)\n # flat indices\n Indices = np.random.randint(\n 0, tblsize, size=sum(Lengths)).astype(np.int64)\n Scale_Bias = np.random.rand(tblsize, 2).astype(np.float32)\n\n if normalize_by_lengths == False:\n op = core.CreateOperator(\"SparseLengthsSum8BitsRowwise\", [\n \"Tbl\", \"Indices\", \"Lengths\", \"Scale_Bias\"], \"out\")\n else:\n op = core.CreateOperator(\"SparseLengthsMean8BitsRowwise\", [\n \"Tbl\", \"Indices\", \"Lengths\", \"Scale_Bias\"], \"out\")\n\n self.ws.create_blob(\"Tbl\").feed(Tbl)\n self.ws.create_blob(\"Indices\").feed(Indices)\n self.ws.create_blob(\"Lengths\").feed(Lengths)\n self.ws.create_blob(\"Scale_Bias\").feed(Scale_Bias)\n self.ws.run(op)\n\n def sparse_lengths_sum_8BitsRowwiseOp_cpu_reg(Tbl, Indices, Lengths, Scale_Bias):\n rptr = np.cumsum(np.insert(Lengths, [0], [0]))\n out = np.zeros((len(Lengths), blocksize))\n for i in range(0, len(rptr[0:-1])):\n s = Scale_Bias[Indices[rptr[i]:rptr[i + 1]], 0][:, np.newaxis]\n b = Scale_Bias[Indices[rptr[i]:rptr[i + 1]], 1][:, np.newaxis]\n f = 1.0\n if normalize_by_lengths == True:\n f = 1.0 / float(Lengths[i])\n out[i] = (s * Tbl[Indices[rptr[i]:rptr[i + 1]]] + b).sum(axis=0) * f\n return out\n\n np.testing.assert_allclose(self.ws.blobs[(\"out\")].fetch(),\n sparse_lengths_sum_8BitsRowwiseOp_cpu_reg(Tbl, Indices, Lengths, Scale_Bias),\n rtol=1e-3, atol=atol)\n\n\n @given(batchsize=st.integers(1, 20),\n blocksize=st.sampled_from([8, 16, 17, 26, 32, 64, 85, 96, 128, 148, 163]),\n normalize_by_lengths=st.booleans(), **hu.gcs)\n def test_sparse_lengths_sum_8BitsRowwiseOp_cpu_invalid_index(\n self, batchsize, blocksize, normalize_by_lengths, gc, dc):\n\n tblsize = 300\n Tbl = np.random.randint(7, size = (tblsize, blocksize)).astype(np.uint8)\n\n # array of each row length\n Lengths = np.random.randint(1, 30, size=batchsize).astype(np.int32)\n # flat indices\n Indices = np.random.randint(\n 0, tblsize, size=sum(Lengths)).astype(np.int64)\n Indices[0] += 1000\n Scale_Bias = np.random.rand(tblsize, 2).astype(np.float32)\n\n if normalize_by_lengths == False:\n op = core.CreateOperator(\"SparseLengthsSum8BitsRowwise\", [\n \"Tbl\", \"Indices\", \"Lengths\", \"Scale_Bias\"], \"out\")\n else:\n op = core.CreateOperator(\"SparseLengthsMean8BitsRowwise\", [\n \"Tbl\", \"Indices\", \"Lengths\", \"Scale_Bias\"], \"out\")\n\n self.ws.create_blob(\"Tbl\").feed(Tbl)\n self.ws.create_blob(\"Indices\").feed(Indices)\n self.ws.create_blob(\"Lengths\").feed(Lengths)\n self.ws.create_blob(\"Scale_Bias\").feed(Scale_Bias)\n with self.assertRaises(RuntimeError):\n self.ws.run(op)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.log",
"numpy.random.seed",
"numpy.power",
"numpy.random.rand",
"numpy.sum",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.all"
],
[
"numpy.ceil"
],
[
"numpy.array"
],
[
"numpy.ones",
"numpy.random.rand",
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.random.rand"
],
[
"numpy.random.randint"
],
[
"numpy.random.rand",
"numpy.insert",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
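A note on the output-size formula used throughout the deform-conv test cell in the row above: _conv_1d_output_size computes max(1, int((size + 2*pad - (dilation*(kernel-1) + 1)) / stride) + 1). As an illustrative, standalone worked example of that arithmetic (not part of the dataset row itself):

    def conv_1d_output_size(size, kernel, pad, dilation, stride):
        # same formula as _conv_1d_output_size in the test file above
        return max(1, int((size + 2 * pad - (dilation * (kernel - 1) + 1)) / stride) + 1)

    # 7-wide input, 3-wide kernel, pad 1, dilation 1, stride 2:
    # (7 + 2 - 3) / 2 + 1 = 4 output positions
    assert conv_1d_output_size(7, 3, 1, 1, 2) == 4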
pengzhou93/dancenet
|
[
"89bc44fe723d12bee87d87e1bcb5d19d6dcb8eb1"
] |
[
"dancegen.py"
] |
[
"\n# coding: utf-8\n\n# In[1]:\n\n\nfrom model import vae,decoder\nfrom keras.layers import Dropout\nfrom keras.layers import LSTM\nfrom keras.layers import Dense\nfrom keras.layers import Reshape\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.optimizers import adam\nfrom keras.callbacks import ModelCheckpoint\nimport cv2\nimport numpy as np\nimport mdn\nfrom sklearn.preprocessing import MinMaxScaler\n\n\n# # Set paths\n\n# In[2]:\n\n\nENCODED_DATA_PATH = 'models/data/lv.npy'\nVAE_PATH = 'models/weights/vae_cnn.h5'\nDANCENET_PATH = 'models/weights/gendance.h5'\n\n\n# # Load encoded data\n\n# In[3]:\n\n\ndata = np.load(ENCODED_DATA_PATH)\nprint(data.shape)\n\n\n# # Normalize data\n\n# In[4]:\n\n\ndata = np.array(data).reshape(-1,128)\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaler = scaler.fit(data)\ndata = scaler.transform(data)\n\n\n# In[5]:\n\n\nnumComponents = 24\noutputDim = 128\n\n\n# # LSTM + MDN \n\n# In[6]:\n\n\ninputs = Input(shape=(128,))\nx = Reshape((1,128))(inputs)\nx = LSTM(512, return_sequences=True,input_shape=(1,128))(x)\nx = Dropout(0.40)(x)\nx = LSTM(512, return_sequences=True)(x)\nx = Dropout(0.40)(x)\nx = LSTM(512)(x)\nx = Dropout(0.40)(x)\nx = Dense(1000,activation='relu')(x)\noutputs = mdn.MDN(outputDim, numComponents)(x)\nmodel = Model(inputs=inputs,outputs=outputs)\nprint(model.summary())\n\n\n# In[7]:\n\n\nopt = adam(lr=0.0005)\nmodel.compile(loss=mdn.get_mixture_loss_func(outputDim,numComponents),optimizer=opt)\n\n\n# In[8]:\n\n\ntrain = False #change to True to train from scratch\n\nif train:\n X = data[0:len(data)-1]\n Y = data[1:len(data)]\n checkpoint = ModelCheckpoint(DANCENET_PATH, monitor='loss', verbose=1, save_best_only=True, mode='auto')\n callbacks_list = [checkpoint]\n model.fit(X,Y,batch_size=1024, verbose=1, shuffle=False, validation_split=0.20, epochs=10000, callbacks=callbacks_list)\n\n\n# # Load weights\n\n# In[9]:\n\n\nvae.load_weights(VAE_PATH)\nmodel.load_weights(DANCENET_PATH)\n\n\n# # Generate Video\n\n# In[10]:\n\n\nfourcc = cv2.VideoWriter_fourcc(*'mp4v')\nvideo = cv2.VideoWriter(\"out.mp4\", fourcc, 30.0, (208, 120))\nlv_in = data[0]\n\nfor i in range(500):\n input = np.array(lv_in).reshape(1,128)\n lv_out = model.predict(input)\n shape = np.array(lv_out).shape[1]\n lv_out = np.array(lv_out).reshape(shape)\n lv_out = mdn.sample_from_output(lv_out,128,numComponents,temp=0.01)\n lv_out = scaler.inverse_transform(lv_out)\n img = decoder.predict(np.array(lv_out).reshape(1,128))\n img = np.array(img).reshape(120,208,1)\n img = img * 255\n img = np.array(img).astype(\"uint8\")\n img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)\n lv_in = lv_out\n video.write(img)\nvideo.release()\n\n"
] |
[
[
"numpy.load",
"numpy.array",
"sklearn.preprocessing.MinMaxScaler"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
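The dancegen.py cell in the row above generates video by repeatedly predicting mixture-density parameters, sampling a latent vector with mdn.sample_from_output, and feeding that sample back in as the next input. Below is a minimal NumPy-only sketch of that sample-and-feed-back loop for a plain Gaussian mixture; it illustrates the idea under made-up parameters, is not the keras-mdn-layer API, and every name in it is hypothetical:

    import numpy as np

    rng = np.random.default_rng(0)

    def mdn_params(latent):
        # stand-in for model.predict(latent): mixture parameters conditioned on the input
        means = np.stack([latent + 0.1, latent - 0.1])   # two components
        stds = np.full_like(means, 0.05)
        weights = np.array([0.5, 0.5])
        return weights, means, stds

    def sample_gmm(weights, means, stds):
        # pick a component, then draw from its Gaussian
        k = rng.choice(len(weights), p=weights / weights.sum())
        return rng.normal(means[k], stds[k])

    latent = np.zeros(2)                                  # seed vector, like data[0] above
    trajectory = []
    for _ in range(5):                                    # autoregressive rollout
        weights, means, stds = mdn_params(latent)
        latent = sample_gmm(weights, means, stds)
        trajectory.append(latent)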
Rblack999/github-slideshow
|
[
"f424ba5acdeb9a4d418c30f06ec5992f94c565a3"
] |
[
"Biologic/Old_PyExpLabSys/sp150_CV_export.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 5 12:43:51 2019\r\n\r\n@author: Blackr\r\n\"\"\"\r\n\"\"\"Cyclic Voltammetry (CV) technique class.\r\n\r\nThe CV technique returns data on fields (in order):\r\n\r\n* time (float)\r\n* Ec (float)\r\n* I (float)\r\n* Ewe (float)\r\n* cycle (int)\r\n\"\"\"\r\n''' E_we\r\n ^\r\n | E_1\r\n | /\\\r\n | / \\\r\n | / \\ E_f\r\n | E_i/ \\ /\r\n | \\ /\r\n | \\/\r\n | E_2\r\n +----------------------> t\r\n\r\n Args:\r\n vs_initial (list): List (or tuple) of 5 booleans indicating\r\n whether the current step is vs. the initial one\r\n voltage_step (list): List (or tuple) of 5 floats (Ei, E1, E2, Ei,\r\n Ef) indicating the voltage steps (V)\r\n scan_rate (list): List (or tuple) of 5 floats indicating the scan\r\n rates (mV/s)\r\n record_every_dE (float): Record every dE (V)\r\n average_over_dE (bool): Whether averaging should be performed over\r\n dE\r\n N_cycles (int): The number of cycles\r\n begin_measuring_I (float): Begin step accumulation, 1 is 100%\r\n end_measuring_I (float): Begin step accumulation, 1 is 100%\r\n I_Range (str): A string describing the I range, see the\r\n :data:`I_RANGES` module variable for possible values\r\n E_range (str): A string describing the E range to use, see the\r\n :data:`E_RANGES` module variable for possible values\r\n Bandwidth (str): A string describing the bandwidth setting, see the\r\n :data:`BANDWIDTHS` module variable for possible values'''\r\n\r\n\"\"\"CV example\"\"\"\r\n'''A program to run a typical CV experiment and export the data to a .csv file'''\r\n'''Currently have 32-bit vs. 64-bit interpreter problems with pandas library, so dump to .csv and use other to put into pandas database'''\r\nimport time\r\nimport numpy\r\nfrom bio_logic import SP150, CV\r\n\r\ndef run_cv():\r\n \"\"\"Test the CV technique\"\"\"\r\n ip_address = 'USB0' # REPLACE THIS WITH A VALID IP\r\n # Instantiate the instrument and connect to it\r\n sp150 = SP150(ip_address, 'C:\\\\EC-Lab Development Package\\\\EC-Lab Development Package\\\\EClib.dll')\r\n sp150.connect()\r\n sp150.load_firmware([1])\r\n # Instantiate the technique. Make sure to give values for all the\r\n # arguments where the default values does not fit your purpose. The\r\n # default values can be viewed in the API documentation for the\r\n # technique.\r\n cv = CV(vs_initial=(False,) * 5,\r\n voltage_step=(2, 0.5, -0.7, 0.0, 0.0),\r\n scan_rate=(10.0,) * 5,\r\n record_every_dE=0.01,\r\n N_cycles=3)\r\n\r\n # Load the technique onto channel 0 of the potentiostat and start it\r\n sp150.load_technique(0, cv)\r\n sp150.start_channel(0)\r\n\r\n Time = numpy.array([])\r\n Ewe = numpy.array([])\r\n Ec = numpy.array([])\r\n I = numpy.array([])\r\n cycle = numpy.array([])\r\n while True:\r\n # Get the currently available data on channel 0 (only what has\r\n # been gathered since last get_data)\r\n data_out = sp150.get_data(0)\r\n\r\n # If there is none, assume the technique has finished\r\n if data_out is None:\r\n break\r\n\r\n # The data is available in lists as attributes on the data\r\n # object. 
The available data fields are listed in the API\r\n # documentation for the technique.\r\n # print(\"Time:\", data_out.time)\r\n # print(\"Ewe:\", data_out.Ewe)\r\n\r\n # If numpy is installed, the data can also be retrieved as\r\n # numpy arrays\r\n # printing the values to follow for testing\r\n print('Time:', data_out.time_numpy)\r\n print('Ewe:', data_out.Ewe_numpy)\r\n print('Ec', data_out.Ec_numpy)\r\n print('I', data_out.I_numpy)\r\n print('cycle', data_out.cycle_numpy)\r\n # Updating the variables with the appended data per data call\r\n Ewe = numpy.append(Ewe, data_out.Ewe_numpy_numpy)\r\n Time = numpy.append(Time, data_out.time_numpy)\r\n Ec = numpy.append(Ec, data_out.Ec_numpy)\r\n I = numpy.append(I, data_out.I_numpy)\r\n cycle = numpy.append(cycle, data_out.cycle_numpy)\r\n # Sleep\r\n # dataframe of each variable\r\n df = (Time, Ewe, Ec, I, cycle)\r\n\r\n #Due to compatibility issues (in my head, this can be fixed), writing data to a .csv for importing into pandas\r\n # Note the order of header and the df as indicated\r\n numpy.savetxt(\"testCV.csv\", numpy.transpose(df), delimiter=\",\", header = 'Time,Ewe,Ec,I,cycle', comments = '')\r\n\r\n sp150.stop_channel(0)\r\n sp150.disconnect()\r\n\r\n\r\nif __name__ == '__main__':\r\n run_cv()\r\n"
] |
[
[
"numpy.append",
"numpy.array",
"numpy.transpose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
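The sp150_CV_export.py cell in the row above explains that, because of a 32-bit/64-bit interpreter mismatch with pandas, the CV data is written to testCV.csv (header Time,Ewe,Ec,I,cycle) for later import into pandas. A minimal sketch of that follow-up step, assuming the CSV produced by run_cv() is present in the working directory:

    import pandas as pd

    # load the file written by numpy.savetxt in run_cv()
    df = pd.read_csv("testCV.csv")            # columns: Time, Ewe, Ec, I, cycle

    # one voltammogram per cycle: inspect potential window and peak current
    for cycle, sweep in df.groupby("cycle"):
        print(cycle, sweep["Ewe"].min(), sweep["Ewe"].max(), sweep["I"].abs().max())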
Syniez/Joint_360depth
|
[
"fcdec95bf3ad109767d27396434b51cf3aad2b4b",
"4f28c3b5b7f648173480052e205e898c6c7a5151"
] |
[
"evaluate/previous_works/svsyn/spherical/grid.py",
"evaluate/previous_works/HoHoNet/lib/model/horizon_upsample/upsample1d.py"
] |
[
"import torch\r\nimport numpy\r\n\r\ndef create_image_grid(width, height, data_type=torch.float32): \r\n v_range = (\r\n torch.arange(0, height) # [0 - h]\r\n .view(1, height, 1) # [1, [0 - h], 1]\r\n .expand(1, height, width) # [1, [0 - h], W]\r\n .type(data_type) # [1, H, W]\r\n )\r\n u_range = (\r\n torch.arange(0, width) # [0 - w]\r\n .view(1, 1, width) # [1, 1, [0 - w]]\r\n .expand(1, height, width) # [1, H, [0 - w]]\r\n .type(data_type) # [1, H, W]\r\n )\r\n return torch.stack((u_range, v_range), dim=1) # [1, 2, H, W]\r\n\r\ndef coord_u(uvgrid):\r\n return uvgrid[:, 0, :, :].unsqueeze(1)\r\n\r\ndef coord_v(uvgrid):\r\n return uvgrid[:, 1, :, :].unsqueeze(1)\r\n\r\ndef create_spherical_grid(width, horizontal_shift=(-numpy.pi - numpy.pi / 2.0),\r\n vertical_shift=(-numpy.pi / 2.0), data_type=torch.float32):\r\n height = int(width // 2.0)\r\n v_range = (\r\n torch.arange(0, height) # [0 - h]\r\n .view(1, height, 1) # [1, [0 - h], 1]\r\n .expand(1, height, width) # [1, [0 - h], W]\r\n .type(data_type) # [1, H, W]\r\n )\r\n u_range = (\r\n torch.arange(0, width) # [0 - w]\r\n .view(1, 1, width) # [1, 1, [0 - w]]\r\n .expand(1, height, width) # [1, H, [0 - w]]\r\n .type(data_type) # [1, H, W]\r\n )\r\n u_range *= (2 * numpy.pi / width) # [0, 2 * pi]\r\n v_range *= (numpy.pi / height) # [0, pi]\r\n u_range += horizontal_shift # [-hs, 2 * pi - hs] -> standard values are [-3 * pi / 2, pi / 2]\r\n v_range += vertical_shift # [-vs, pi - vs] -> standard values are [-pi / 2, pi / 2]\r\n return torch.stack((u_range, v_range), dim=1) # [1, 2, H, W]\r\n\r\ndef phi(sgrid): # longitude or azimuth\r\n return sgrid[:, 0, :, :].unsqueeze(1)\r\n\r\ndef azimuth(sgrid): # longitude or phi\r\n return sgrid[:, 0, :, :].unsqueeze(1)\r\n\r\ndef longitude(sgrid): # phi or azimuth\r\n return sgrid[:, 0, :, :].unsqueeze(1)\r\n\r\ndef theta(sgrid): # latitude or elevation\r\n return sgrid[:, 1, :, :].unsqueeze(1)\r\n\r\ndef elevation(sgrid): # theta or elevation\r\n return sgrid[:, 1, :, :].unsqueeze(1)\r\n\r\ndef latitude(sgrid): # latitude or theta\r\n return sgrid[:, 1, :, :].unsqueeze(1)",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..utils import PanoUpsampleW\n\n\nclass Upsample1D(nn.Sequential):\n def __init__(self, ic, oc):\n super(Upsample1D, self).__init__(\n PanoUpsampleW(4),\n nn.Conv1d(ic, oc, 3, padding=1, bias=False),\n nn.BatchNorm1d(oc),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, feat):\n feat1d = feat['1D']\n for module in self:\n feat1d = module(feat1d)\n feat['1D'] = feat1d\n return feat\n"
] |
[
[
"torch.stack",
"torch.arange"
],
[
"torch.nn.BatchNorm1d",
"torch.nn.ReLU",
"torch.nn.Conv1d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
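create_spherical_grid in the grid.py cell above scales pixel indices to angles and then applies the default shifts, so longitude covers [-3π/2, π/2) and latitude covers [-π/2, π/2), as its inline comments state. A small standalone check of those ranges (illustrative only; it recomputes the same arithmetic rather than importing the repo):

    import math
    import torch

    width, height = 8, 4
    u = torch.arange(width, dtype=torch.float32) * (2 * math.pi / width) - math.pi - math.pi / 2
    v = torch.arange(height, dtype=torch.float32) * (math.pi / height) - math.pi / 2

    assert math.isclose(u.min().item(), -3 * math.pi / 2, rel_tol=1e-6)   # float32 tolerance
    assert u.max().item() < math.pi / 2
    assert math.isclose(v.min().item(), -math.pi / 2, rel_tol=1e-6)
    assert v.max().item() < math.pi / 2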
Royzon/YOLOV4_MCMOT
|
[
"cd4c8b1b60f9cf809579609caa29d408432845ba"
] |
[
"mAPEvaluate/TestmApDetect.py"
] |
[
"# encoding=utf-8\n\nimport os\nimport cv2\nimport time\nimport numpy\nimport copy\nimport shutil\n\nimport mAPEvaluate.darknet as dn\nimport mAPEvaluate.cmp_det_label_sf as cdl\n\nfrom mAPEvaluate.ReadAndSaveDarknetDetRes import read_det_res, save_det_res\nfrom mAPEvaluate.ReadAnnotations import load_label\nfrom mAPEvaluate.voc_eval import voc_eval\n\n\n# 读取文件列表\ndef Load_file_list(files):\n fl = open(files, \"r\")\n file_lists = []\n while True:\n lines = fl.readlines()\n if len(lines) == 0:\n break\n # print(path_list)\n\n for line in lines:\n line = line.strip('\\n')\n # ph = line.split(\"/\")\n # file_name = ph[-1]\n # file_name = os.path.basename(line)\n # file_name = file_name.replace(\".jpg\", \"\")\n file_lists.append(line)\n # print(file_name)\n # print(path_lists)\n fl.close()\n return file_lists\n\n\ndef listdir(path):\n list_name = []\n for file in os.listdir(path):\n file_path = os.path.join(path, file)\n if os.path.isdir(file_path):\n continue\n # listdir(file_path, list_name)\n else:\n list_name.append(file_path)\n return list_name\n\n\ndef img_path2label_path(img_path):\n \"\"\"\n :param img_path:\n :return:\n \"\"\"\n image_dir = os.path.dirname(img_path)\n p = image_dir.split('/')\n root_dir = \"/\".join(p[:-1])\n label_dir = os.path.join(root_dir, 'Annotations')\n image_name = os.path.basename(img_path)\n image_name = image_name.replace(\".jpg\", \"\")\n label_path = os.path.join(label_dir, image_name + '.xml')\n\n return label_path\n\n\ndef get_file_name(file_path):\n file_name = os.path.basename(file_path)\n p = file_name.split('.')\n name = ''\n for i in range(len(p) - 1):\n name += p[i]\n # file_name = p[]\n\n return name\n\n\ndef getMetaCfgName(file_path):\n # 寻找file_path的同文件夹里的.data文件\n p = os.path.dirname(file_path)\n for file in os.listdir(p):\n if '.data' in file:\n data_path = file\n data_path = p + '/' + data_path\n if 'test.cfg' in file:\n cfg_path = file\n cfg_path = p + '/' + cfg_path\n\n return data_path.encode('utf-8'), cfg_path.encode('utf-8')\n\n\ndef batch_detection():\n pass\n\n\ndef batch_analysis(weights_list_file,\n img_list_file,\n thresh,\n iou_thresh,\n result_dir):\n \"\"\"\n :param weights_list_file:\n :param img_list_file:\n :param thresh:\n :param iou_thresh:\n :param result_dir:\n :return:\n \"\"\"\n image_list = Load_file_list(img_list_file)\n image_num = len(image_list)\n weights_list = Load_file_list(weights_list_file)\n result = []\n for weights in weights_list:\n weights_name = get_file_name(weights)\n\n # print('weights_name: ',weights)\n\n meta_file, cfg_file = getMetaCfgName(weights)\n # meta = dn.load_meta(meta_file)\n # net = dn.load_net(cfg_file,bytes(weights,'utf-8'),0)\n\n # 选择对应的dn\n meta = dn.load_meta(meta_file)\n # net = dn.load_net(cfg_file, bytes(weights, 'utf-8'), 0)\n\n object_type = [meta.names[i].decode('utf-8').strip() for i in range(meta.classes)]\n\n # @even: tmp modification\n weights_name = 'tmp' # sub_dir name\n\n results_dir = os.path.join(result_dir, weights_name)\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n # # @even: comment detections now\n # # detect result and save to text\n # time_all = 0\n # for j, img_path in enumerate(image_list):\n # print('detect: ' + str(j + 1) + '/' + str(len(image_list)))\n # label_path = img_path2label_path(img_path)\n # image_name = get_file_name(img_path)\n # det_save_path = os.path.join(result_path, image_name + '.txt')\n # # det = dn.detect_ext(net, meta, bytes(image_path,'utf-8'),thresh)\n #\n # # 选择对应的dn\n # det, time_1 = dn.detect_ext(net, meta, 
bytes(img_path, 'utf-8'), thresh)\n # time_all = time_all + time_1\n #\n # # save detection result to text\n # save_det_res(det, det_save_path, object_type)\n # time.sleep(0.001)\n # print('xxxxxxxxxxx', 'FPS, ', len(image_list) / time_all)\n # # dn.free_net(net)\n\n # compare label and detection result\n for i, obj_type in enumerate(object_type):\n\n # if obj_type != 'fr':\n # continue\n\n total_label = 0\n total_detect = 0\n total_corr = 0\n total_iou = 0\n cmp_result = []\n det_ = []\n anno_path = []\n\n det_all = [['name', 'obj_type', 'score', 0, 0, 0, 0]] # 此处为xywh(中心), 应该变为xmin, ymin, xmax, ymax\n\n img_set_file = []\n for j, img_path in enumerate(image_list):\n label_path = img_path2label_path(img_path)\n image_name = os.path.split(img_path)[-1][:-4] # get_file_name(img_path)\n img_set_file.append(image_name)\n img_save_path = os.path.join(results_dir, image_name + '.jpg')\n det_save_path = os.path.join(results_dir, image_name + '.txt')\n\n # detpath.append(det_save_path)\n anno_path.append(label_path)\n # print(img_save_path)\n label = []\n if os.path.exists(label_path):\n label = load_label(label_path, object_type)\n\n # read\n det = read_det_res(det_save_path)\n for d in det:\n if d[0] > len(object_type) - 1:\n d[0] = ' '\n continue\n\n d[0] = object_type[d[0]] # 类别编号 -> 类别名称\n\n for d in det:\n x_min = float(copy.deepcopy(d[2])) - float(copy.deepcopy(d[4])) * 0.5\n y_min = float(copy.deepcopy(d[3])) - float(copy.deepcopy(d[5])) * 0.5\n x_max = float(copy.deepcopy(d[2])) + float(copy.deepcopy(d[4])) * 0.5\n y_max = float(copy.deepcopy(d[3])) + float(copy.deepcopy(d[5])) * 0.5\n\n # ----- img_name type conf x_min y_min x_max y_max\n d_ = [image_name, d[0], d[1], x_min, y_min, x_max, y_max]\n det_.append(d_)\n\n if len(det_) != 0:\n det_all = numpy.vstack((det_all, det_))\n det_ = []\n\n if i > 0:\n img_path = img_save_path\n\n # print(j, image_path)\n img = cv2.imread(img_path)\n if img is None:\n print(\"load image error&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\")\n continue\n\n cmp_res = cdl.cmp_data(obj_type, det, label, thresh, iou_thresh, img)\n\n cmp_res.update({'image_name': image_name})\n total_corr += cmp_res['correct']\n total_iou += cmp_res['avg_iou'] * cmp_res['label_num']\n\n cmp_result.append(cmp_res)\n print(\n '%s: %d/%d label: %d detect: %d correct: %d recall: %f avg_iou: %f accuracy: %f precision: %f' %\n (str(obj_type), j + 1, image_num, cmp_res['label_num'], cmp_res['detect_num'],\n cmp_res['correct'], cmp_res['recall'], cmp_res['avg_iou'],\n cmp_res['accuracy'], cmp_res['precision']))\n total_label += cmp_res['label_num']\n total_detect += cmp_res['detect_num']\n cv2.imwrite(img_save_path, img)\n img = []\n time.sleep(0.001)\n\n # 求出AP值\n # ap=0\n det_all = numpy.delete(det_all, 0, axis=0)\n det_obj_type = [obj for obj in det_all if obj[1] == obj_type]\n if len(det_obj_type) == 0:\n ap = 0\n else:\n ap = voc_eval(det_obj_type, anno_path, img_set_file, obj_type, iou_thresh)\n det_all = []\n\n # 数据集分析结果\n avg_recall = 0\n if total_label > 0:\n avg_recall = total_corr / float(total_label)\n avg_iou = 0\n if total_iou > 0:\n avg_iou = total_iou / total_label\n avg_acc = 0\n if total_label + total_detect - total_corr > 0:\n avg_acc = float(total_corr) / (total_label + total_detect - total_corr)\n avg_precision = 0\n if total_detect > 0:\n avg_precision = float(total_corr) / total_detect\n total_result = [total_label, total_detect, total_corr, avg_recall, avg_iou, avg_acc, avg_precision]\n cdl.ExportAnaRes(obj_type, cmp_result, total_result, img_path, 
results_dir)\n\n print(\n \"total_label: %d total_detect: %d total_corr: %d recall: %f average iou: %f accuracy: %f precision: %f ap: %f\\n\" % \\\n (total_result[0], total_result[1], total_result[2], total_result[3], total_result[4], total_result[5],\n total_result[6], ap))\n\n result.append([weights_name] + [obj_type] + total_result + [float(ap)])\n\n # 输出所有类别总的结果\n cdl.ExportAnaResAll(result, result_dir)\n time.sleep(0.001)\n\n\nif __name__ == \"__main__\":\n\n dn.set_gpu(0)\n # weights_list_file = \"/users/duanyou/c5/puer/weights.txt\"\n # weights_list_file = \"/users/duanyou/c5/v4_all_train/weights.txt\"\n weights_list_file = \"/users/duanyou/c5/v4_half_train/weights.txt\"\n\n # yancheng_test\n # data_path = \"/users/duanyou/c5/yancheng\"\n # image_list_file = os.path.join(data_path,\"test.txt\")\n # result_dir = os.path.join(\"/users/duanyou/c5/results_new/results_yancheng/\")\n # if not os.path.exists(result_dir):\n # os.mkdir(result_dir)\n # batch_analysis(weights_list_file,image_list_file,0.20,0.45,result_dir)\n\n # all_test\n data_path = \"/users/duanyou/c5/all_pretrain\"\n image_list_file = os.path.join(data_path, 'test1.txt') # c5_test.txt or test1.txt\n result_dir = os.path.join(\"/users/duanyou/c5/results_new/results_all/\")\n if not os.path.exists(result_dir):\n os.mkdir(result_dir)\n batch_analysis(weights_list_file, image_list_file, 0.20, 0.45, result_dir)\n\n # # changsha_test\n # data_path = \"/users/duanyou/c5/changsha\"\n # image_list_file = os.path.join(data_path,\"test.txt\")\n # result_dir = os.path.join(\"/users/duanyou/c5/results_new/results_changsha/\")\n # if not os.path.exists(result_dir):\n # os.mkdir(result_dir)\n # batch_analysis(weights_list_file,image_list_file,0.20,0.45,result_dir)\n\n # hezhoupucheng_test\n # data_path = \"/users/duanyou/c5/hezhoupucheng\"\n # image_list_file = os.path.join(data_path,\"test.txt\")\n # result_dir = os.path.join(\"/users/duanyou/c5/results_new/results_hezhoupucheng/\")\n # if not os.path.exists(result_dir):\n # os.mkdir(result_dir)\n # batch_analysis(weights_list_file,image_list_file,0.20,0.45,result_dir)\n\n # # ----- puer_test\n # data_path = \"/users/duanyou/c5/puer\"\n # image_list_file = os.path.join(data_path, \"test.txt\")\n # result_dir = os.path.join(\"/users/duanyou/c5/results_new/results_puer/\")\n # if not os.path.exists(result_dir):\n # os.mkdir(result_dir)\n # batch_analysis(weights_list_file, image_list_file, 0.20, 0.45, result_dir)\n\n # # some_img_test\n # data_path = \"/users/duanyou/backup_c5/test_2\"\n # image_list_file = os.path.join(data_path, \"test.txt\")\n # result_dir = os.path.join(\"/users/duanyou/backup_c5/test_2/result/\")\n #\n # if not os.path.exists(result_dir):\n # os.mkdir(result_dir)\n #\n # batch_analysis(weights_list_file, image_list_file, 0.20, 0.45, result_dir)\n"
] |
[
[
"numpy.delete",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
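The mAPEvaluate script in the row above runs each weights file over an image list, compares detections against VOC-style labels at a confidence threshold and an IoU threshold, and prints per-class recall, average IoU, accuracy, precision, and AP; before any comparison it converts darknet's centre-based (x, y, w, h) boxes to (x_min, y_min, x_max, y_max) corners. As a reading aid, here is a minimal standalone sketch of the IoU test and of the aggregate formulas that `batch_analysis` reports. The names `box_iou` and `aggregate_metrics` are illustrative only and do not exist in that repository.

# Hedged sketch: the aggregate metrics used by batch_analysis above,
# reduced to a tiny self-contained example. Names are hypothetical.

def box_iou(a, b):
    """IoU of two boxes given as (x_min, y_min, x_max, y_max)."""
    ix_min, iy_min = max(a[0], b[0]), max(a[1], b[1])
    ix_max, iy_max = min(a[2], b[2]), min(a[3], b[3])
    iw, ih = max(0.0, ix_max - ix_min), max(0.0, iy_max - iy_min)
    inter = iw * ih
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

def aggregate_metrics(total_label, total_detect, total_corr, total_iou):
    """Same per-class summary formulas as in batch_analysis."""
    recall = total_corr / float(total_label) if total_label > 0 else 0.0
    avg_iou = total_iou / total_label if total_iou > 0 else 0.0
    denom = total_label + total_detect - total_corr
    accuracy = float(total_corr) / denom if denom > 0 else 0.0
    precision = float(total_corr) / total_detect if total_detect > 0 else 0.0
    return recall, avg_iou, accuracy, precision

if __name__ == "__main__":
    print(box_iou((0, 0, 10, 10), (5, 5, 15, 15)))   # ~0.1429
    print(aggregate_metrics(100, 90, 80, 60.0))      # (0.8, 0.6, 0.727..., 0.888...)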
rockwellw/dask
|
[
"c1f6d61368af6f9c67d39d23406eb267a0308bb9",
"c1f6d61368af6f9c67d39d23406eb267a0308bb9",
"c1f6d61368af6f9c67d39d23406eb267a0308bb9"
] |
[
"dask/array/utils.py",
"dask/dataframe/partitionquantiles.py",
"dask/sizeof.py"
] |
[
"import difflib\nimport functools\nimport math\nimport numbers\nimport os\nimport warnings\n\nimport numpy as np\nfrom toolz import frequencies, concat\n\nfrom .core import Array\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import has_keyword, ignoring, is_arraylike\n\ntry:\n AxisError = np.AxisError\nexcept AttributeError:\n try:\n np.array([0]).sum(axis=5)\n except Exception as e:\n AxisError = type(e)\n\n\ndef normalize_to_array(x):\n if \"cupy\" in str(type(x)): # TODO: avoid explicit reference to cupy\n return x.get()\n else:\n return x\n\n\ndef meta_from_array(x, ndim=None, dtype=None):\n \"\"\" Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n \"\"\"\n # If using x._meta, x must be a Dask Array, some libraries (e.g. zarr)\n # implement a _meta attribute that are incompatible with Dask Array._meta\n if hasattr(x, \"_meta\") and isinstance(x, Array):\n x = x._meta\n\n if dtype is None and x is None:\n raise ValueError(\"You must specify the meta or dtype of the array\")\n\n if np.isscalar(x):\n x = np.array(x)\n\n if x is None:\n x = np.ndarray\n\n if isinstance(x, type):\n x = x(shape=(0,) * (ndim or 0), dtype=dtype)\n\n if (\n not hasattr(x, \"shape\")\n or not hasattr(x, \"dtype\")\n or not isinstance(x.shape, tuple)\n ):\n return x\n\n if isinstance(x, list) or isinstance(x, tuple):\n ndims = [\n 0\n if isinstance(a, numbers.Number)\n else a.ndim\n if hasattr(a, \"ndim\")\n else len(a)\n for a in x\n ]\n a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]\n return a if isinstance(x, list) else tuple(x)\n\n if ndim is None:\n ndim = x.ndim\n\n try:\n meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]\n if meta.ndim != ndim:\n if ndim > x.ndim:\n meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))]\n meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]\n elif ndim == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * ndim)\n except Exception:\n meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n if dtype and meta.dtype != dtype:\n meta = meta.astype(dtype)\n\n return meta\n\n\ndef compute_meta(func, _dtype, *args, **kwargs):\n with np.errstate(all=\"ignore\"), warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n\n args_meta = [meta_from_array(x) if is_arraylike(x) else x for x in args]\n kwargs_meta = {\n k: meta_from_array(v) if is_arraylike(v) else v for k, v in kwargs.items()\n }\n\n # todo: look for alternative to this, causes issues when using map_blocks()\n # with np.vectorize, such as dask.array.routines._isnonzero_vec().\n if isinstance(func, np.vectorize):\n meta = func(*args_meta)\n else:\n try:\n # some reduction functions need to know they are computing meta\n if has_keyword(func, \"computing_meta\"):\n kwargs_meta[\"computing_meta\"] = True\n meta = func(*args_meta, **kwargs_meta)\n except TypeError as e:\n if (\n \"unexpected keyword argument\" in str(e)\n or \"is an invalid keyword for\" in str(e)\n or \"Did not understand the following kwargs\" in str(e)\n ):\n raise\n else:\n return None\n except Exception:\n return None\n\n if _dtype and getattr(meta, 
\"dtype\", None) != _dtype:\n with ignoring(AttributeError):\n meta = meta.astype(_dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return meta\n\n\ndef allclose(a, b, equal_nan=False, **kwargs):\n a = normalize_to_array(a)\n b = normalize_to_array(b)\n if getattr(a, \"dtype\", None) != \"O\":\n return np.allclose(a, b, equal_nan=equal_nan, **kwargs)\n if equal_nan:\n return a.shape == b.shape and all(\n np.isnan(b) if np.isnan(a) else a == b for (a, b) in zip(a.flat, b.flat)\n )\n return (a == b).all()\n\n\ndef same_keys(a, b):\n def key(k):\n if isinstance(k, str):\n return (k, -1, -1, -1)\n else:\n return k\n\n return sorted(a.dask, key=key) == sorted(b.dask, key=key)\n\n\ndef _not_empty(x):\n return x.shape and 0 not in x.shape\n\n\ndef _check_dsk(dsk):\n \"\"\" Check that graph is well named and non-overlapping \"\"\"\n if not isinstance(dsk, HighLevelGraph):\n return\n\n assert all(isinstance(k, (tuple, str)) for k in dsk.layers)\n freqs = frequencies(concat(dsk.dicts.values()))\n non_one = {k: v for k, v in freqs.items() if v != 1}\n assert not non_one, non_one\n\n\ndef assert_eq_shape(a, b, check_nan=True):\n for aa, bb in zip(a, b):\n if math.isnan(aa) or math.isnan(bb):\n if check_nan:\n assert math.isnan(aa) == math.isnan(bb)\n else:\n assert aa == bb\n\n\ndef _get_dt_meta_computed(x, check_shape=True, check_graph=True):\n x_original = x\n x_meta = None\n x_computed = None\n\n if isinstance(x, Array):\n assert x.dtype is not None\n adt = x.dtype\n if check_graph:\n _check_dsk(x.dask)\n x_meta = getattr(x, \"_meta\", None)\n x = x.compute(scheduler=\"sync\")\n x_computed = x\n if hasattr(x, \"todense\"):\n x = x.todense()\n if not hasattr(x, \"dtype\"):\n x = np.array(x, dtype=\"O\")\n if _not_empty(x):\n assert x.dtype == x_original.dtype\n if check_shape:\n assert_eq_shape(x_original.shape, x.shape, check_nan=False)\n else:\n if not hasattr(x, \"dtype\"):\n x = np.array(x, dtype=\"O\")\n adt = getattr(x, \"dtype\", None)\n\n return x, adt, x_meta, x_computed\n\n\ndef assert_eq(a, b, check_shape=True, check_graph=True, check_meta=True, **kwargs):\n a_original = a\n b_original = b\n\n a, adt, a_meta, a_computed = _get_dt_meta_computed(\n a, check_shape=check_shape, check_graph=check_graph\n )\n b, bdt, b_meta, b_computed = _get_dt_meta_computed(\n b, check_shape=check_shape, check_graph=check_graph\n )\n\n if str(adt) != str(bdt):\n # Ignore check for matching length of flexible dtypes, since Array._meta\n # can't encode that information\n if adt.type == bdt.type and not (adt.type == np.bytes_ or adt.type == np.str_):\n diff = difflib.ndiff(str(adt).splitlines(), str(bdt).splitlines())\n raise AssertionError(\n \"string repr are different\" + os.linesep + os.linesep.join(diff)\n )\n\n try:\n assert a.shape == b.shape\n if check_meta:\n if hasattr(a, \"_meta\") and hasattr(b, \"_meta\"):\n assert_eq(a._meta, b._meta)\n if hasattr(a_original, \"_meta\"):\n assert a_original._meta.ndim == a.ndim\n if a_meta is not None:\n assert type(a_original._meta) == type(a_meta)\n if not (np.isscalar(a_meta) or np.isscalar(a_computed)):\n assert type(a_meta) == type(a_computed)\n if hasattr(b_original, \"_meta\"):\n assert b_original._meta.ndim == b.ndim\n if b_meta is not None:\n assert type(b_original._meta) == type(b_meta)\n if not (np.isscalar(b_meta) or np.isscalar(b_computed)):\n assert type(b_meta) == type(b_computed)\n assert allclose(a, b, **kwargs)\n return True\n except TypeError:\n pass\n\n c = a == b\n\n if isinstance(c, np.ndarray):\n assert c.all()\n else:\n assert 
c\n\n return True\n\n\ndef safe_wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS):\n \"\"\"Like functools.wraps, but safe to use even if wrapped is not a function.\n\n Only needed on Python 2.\n \"\"\"\n if all(hasattr(wrapped, attr) for attr in assigned):\n return functools.wraps(wrapped, assigned=assigned)\n else:\n return lambda x: x\n\n\ndef empty_like_safe(a, shape, **kwargs):\n \"\"\"\n Return np.empty_like(a, shape=shape, **kwargs) if the shape argument\n is supported (requires NumPy >= 1.17), otherwise falls back to\n using the old behavior, returning np.empty(shape, **kwargs).\n \"\"\"\n try:\n return np.empty_like(a, shape=shape, **kwargs)\n except TypeError:\n return np.empty(shape, **kwargs)\n\n\ndef full_like_safe(a, fill_value, shape, **kwargs):\n \"\"\"\n Return np.full_like(a, fill_value, shape=shape, **kwargs) if the\n shape argument is supported (requires NumPy >= 1.17), otherwise\n falls back to using the old behavior, returning\n np.full(shape, fill_value, **kwargs).\n \"\"\"\n try:\n return np.full_like(a, fill_value, shape=shape, **kwargs)\n except TypeError:\n return np.full(shape, fill_value, **kwargs)\n\n\ndef ones_like_safe(a, shape, **kwargs):\n \"\"\"\n Return np.ones_like(a, shape=shape, **kwargs) if the shape argument\n is supported (requires NumPy >= 1.17), otherwise falls back to\n using the old behavior, returning np.ones(shape, **kwargs).\n \"\"\"\n try:\n return np.ones_like(a, shape=shape, **kwargs)\n except TypeError:\n return np.ones(shape, **kwargs)\n\n\ndef zeros_like_safe(a, shape, **kwargs):\n \"\"\"\n Return np.zeros_like(a, shape=shape, **kwargs) if the shape argument\n is supported (requires NumPy >= 1.17), otherwise falls back to\n using the old behavior, returning np.zeros(shape, **kwargs).\n \"\"\"\n try:\n return np.zeros_like(a, shape=shape, **kwargs)\n except TypeError:\n return np.zeros(shape, **kwargs)\n\n\ndef validate_axis(axis, ndim):\n \"\"\" Validate an input to axis= keywords \"\"\"\n if isinstance(axis, (tuple, list)):\n return tuple(validate_axis(ax, ndim) for ax in axis)\n if not isinstance(axis, numbers.Integral):\n raise TypeError(\"Axis value must be an integer, got %s\" % axis)\n if axis < -ndim or axis >= ndim:\n raise AxisError(\n \"Axis %d is out of bounds for array of dimension %d\" % (axis, ndim)\n )\n if axis < 0:\n axis += ndim\n return axis\n\n\ndef _is_nep18_active():\n class A:\n def __array_function__(self, *args, **kwargs):\n return True\n\n try:\n return np.concatenate([A()])\n except ValueError:\n return False\n\n\nIS_NEP18_ACTIVE = _is_nep18_active()\n",
"\"\"\"Determine new partition divisions using approximate percentiles.\n\nWe use a custom algorithm to calculate approximate, evenly-distributed\npercentiles of arbitrarily-ordered data for any dtype in a distributed\nfashion with one pass over the data. This is used to determine new\npartition divisions when changing the index of a dask.dataframe. We claim\nno statistical guarantees, but we use a variety of heuristics to try to\nprovide reliable, robust results that are \"good enough\" and can scale to\nlarge number of partitions.\n\nOur approach is similar to standard approaches such as t- and q-digest,\nGK, and sampling-based algorithms, which consist of three parts:\n\n1. **Summarize:** create summaries of subsets of data\n2. **Merge:** combine summaries to make a new summary\n3. **Compress:** periodically compress a summary into a smaller summary\n\nWe summarize the data in each partition by calculating several percentiles.\nThe value at each percentile is given a weight proportional to the length\nof the partition and the differences between the current percentile and\nthe adjacent percentiles. Merging summaries is simply a ``merge_sorted``\nof the values and their weights, which we do with a reduction tree.\n\nPercentiles is a good choice for our case, because we are given a numpy\narray of the partition's data, and percentiles is a relatively cheap\noperation. Moreover, percentiles are, by definition, much less\nsusceptible to the underlying distribution of the data, so the weights\ngiven to each value--even across partitions--should be comparable.\n\nLet us describe this to a child of five. We are given many small cubes\n(of equal size) with numbers on them. Split these into many piles. This\nis like the original data. Let's sort and stack the cubes from one of the\npiles. Next, we are given a bunch of unlabeled blocks of different sizes,\nand most are much larger than the the original cubes. Stack these blocks\nuntil they're the same height as our first stack. Let's write a number on\neach block of the new stack. To do this, choose the number of the cube in\nthe first stack that is located in the middle of an unlabeled block. We\nare finished with this stack once all blocks have a number written on them.\nRepeat this for all the piles of cubes. Finished already? Great! Now\ntake all the stacks of the larger blocks you wrote on and throw them into\na single pile. We'll be sorting these blocks next, which may be easier if\nyou carefully move the blocks over and organize... ah, nevermind--too late.\nOkay, sort and stack all the blocks from that amazing, disorganized pile\nyou just made. This will be very tall, so we had better stack it sideways\non the floor like so. This will also make it easier for us to split the\nstack into groups of approximately equal size, which is our final task...\n\nThis, in a nutshell, is the algorithm we deploy. The main difference\nis that we don't always assign a block the number at its median (ours\nfluctuates around the median). The numbers at the edges of the final\ngroups is what we use as divisions for repartitioning. We also need\nthe overall min and max, so we take the 0th and 100th percentile of\neach partition, and another sample near each edge so we don't give\ndisproportionate weights to extreme values.\n\nChoosing appropriate percentiles to take in each partition is where things\nget interesting. The data is arbitrarily ordered, which means it may be\nsorted, random, or follow some pathological distribution--who knows. 
We\nhope all partitions are of similar length, but we ought to expect some\nvariation in lengths. The number of partitions may also be changing\nsignificantly, which could affect the optimal choice of percentiles. For\nimproved robustness, we use both evenly-distributed and random percentiles.\nIf the number of partitions isn't changing, then the total number of\npercentiles across all partitions scales as ``npartitions**1.5``. Although\nwe only have a simple compression operation (step 3 above) that combines\nweights of equal values, a more sophisticated one could be added if needed,\nsuch as for extremely large ``npartitions`` or if we find we need to\nincrease the sample size for each partition.\n\n\"\"\"\nimport math\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_datetime64tz_dtype\n\nfrom toolz import merge, merge_sorted, take\n\nfrom ..utils import random_state_data\nfrom ..base import tokenize\nfrom .core import Series\nfrom .utils import is_categorical_dtype\n\n\ndef sample_percentiles(num_old, num_new, chunk_length, upsample=1.0, random_state=None):\n \"\"\"Construct percentiles for a chunk for repartitioning.\n\n Adapt the number of total percentiles calculated based on the number\n of current and new partitions. Returned percentiles include equally\n spaced percentiles between [0, 100], and random percentiles. See\n detailed discussion below.\n\n Parameters\n ----------\n num_old: int\n Number of partitions of the current object\n num_new: int\n Number of partitions of the new object\n chunk_length: int\n Number of rows of the partition\n upsample : float\n Multiplicative factor to increase the number of samples\n\n Returns\n -------\n qs : numpy.ndarray of sorted percentiles between 0, 100\n\n Constructing ordered (i.e., not hashed) partitions is hard. Calculating\n approximate percentiles for generic objects in an out-of-core fashion is\n also hard. Fortunately, partition boundaries don't need to be perfect\n in order for partitioning to be effective, so we strive for a \"good enough\"\n method that can scale to many partitions and is reasonably well-behaved for\n a wide variety of scenarios.\n\n Two similar approaches come to mind: (1) take a subsample of every\n partition, then find the best new partitions for the combined subsamples;\n and (2) calculate equally-spaced percentiles on every partition (a\n relatively cheap operation), then merge the results. We do both, but\n instead of random samples, we use random percentiles.\n\n If the number of partitions isn't changing, then the ratio of fixed\n percentiles to random percentiles is 2 to 1. If repartitioning goes from\n a very high number of partitions to a very low number of partitions, then\n we use more random percentiles, because a stochastic approach will be more\n stable to potential correlations in the data that may cause a few equally-\n spaced partitions to under-sample the data.\n\n The more partitions there are, then the more total percentiles will get\n calculated across all partitions. Squaring the number of partitions\n approximately doubles the number of total percentiles calculated, so\n num_total_percentiles ~ sqrt(num_partitions). We assume each partition\n is approximately the same length. This should provide adequate resolution\n and allow the number of partitions to scale.\n\n For numeric data, one could instead use T-Digest for floats and Q-Digest\n for ints to calculate approximate percentiles. 
Our current method works\n for any dtype.\n \"\"\"\n # *waves hands*\n random_percentage = 1 / (1 + (4 * num_new / num_old) ** 0.5)\n num_percentiles = upsample * num_new * (num_old + 22) ** 0.55 / num_old\n num_fixed = int(num_percentiles * (1 - random_percentage)) + 2\n num_random = int(num_percentiles * random_percentage) + 2\n\n if num_fixed + num_random + 5 >= chunk_length:\n return np.linspace(0, 100, chunk_length + 1)\n\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n\n q_fixed = np.linspace(0, 100, num_fixed)\n q_random = random_state.rand(num_random) * 100\n q_edges = [60 / (num_fixed - 1), 100 - 60 / (num_fixed - 1)]\n qs = np.concatenate([q_fixed, q_random, q_edges, [0, 100]])\n qs.sort()\n # Make the divisions between percentiles a little more even\n qs = 0.5 * (qs[:-1] + qs[1:])\n return qs\n\n\ndef tree_width(N, to_binary=False):\n \"\"\"Generate tree width suitable for ``merge_sorted`` given N inputs\n\n The larger N is, the more tasks are reduced in a single task.\n\n In theory, this is designed so all tasks are of comparable effort.\n \"\"\"\n if N < 32:\n group_size = 2\n else:\n group_size = int(math.log(N))\n num_groups = N // group_size\n if to_binary or num_groups < 16:\n return 2 ** int(math.log(N / group_size, 2))\n else:\n return num_groups\n\n\ndef tree_groups(N, num_groups):\n \"\"\"Split an integer N into evenly sized and spaced groups.\n\n >>> tree_groups(16, 6)\n [3, 2, 3, 3, 2, 3]\n \"\"\"\n # Bresenham, you so smooth!\n group_size = N // num_groups\n dx = num_groups\n dy = N - group_size * num_groups\n D = 2 * dy - dx\n rv = []\n for _ in range(num_groups):\n if D < 0:\n rv.append(group_size)\n else:\n rv.append(group_size + 1)\n D -= 2 * dx\n D += 2 * dy\n return rv\n\n\ndef create_merge_tree(func, keys, token):\n \"\"\"Create a task tree that merges all the keys with a reduction function.\n\n Parameters\n ----------\n func: callable\n Reduction function that accepts a single list of values to reduce.\n keys: iterable\n Keys to reduce from the source dask graph.\n token: object\n Included in each key of the returned dict.\n\n This creates a k-ary tree where k depends on the current level and is\n greater the further away a node is from the root node. This reduces the\n total number of nodes (thereby reducing scheduler overhead), but still\n has beneficial properties of trees.\n\n For reasonable numbers of keys, N < 1e5, the total number of nodes in the\n tree is roughly ``N**0.78``. 
For 1e5 < N < 2e5, is it roughly ``N**0.8``.\n \"\"\"\n level = 0\n prev_width = len(keys)\n prev_keys = iter(keys)\n rv = {}\n while prev_width > 1:\n width = tree_width(prev_width)\n groups = tree_groups(prev_width, width)\n keys = [(token, level, i) for i in range(width)]\n rv.update(\n (key, (func, list(take(num, prev_keys)))) for num, key in zip(groups, keys)\n )\n prev_width = width\n prev_keys = iter(keys)\n level += 1\n return rv\n\n\ndef percentiles_to_weights(qs, vals, length):\n \"\"\"Weigh percentile values by length and the difference between percentiles\n\n >>> percentiles = np.array([0, 25, 50, 90, 100])\n >>> values = np.array([2, 3, 5, 8, 13])\n >>> length = 10\n >>> percentiles_to_weights(percentiles, values, length)\n ([2, 3, 5, 8, 13], [125.0, 250.0, 325.0, 250.0, 50.0])\n\n The weight of the first element, ``2``, is determined by the difference\n between the first and second percentiles, and then scaled by length:\n\n >>> 0.5 * length * (percentiles[1] - percentiles[0])\n 125.0\n\n The second weight uses the difference of percentiles on both sides, so\n it will be twice the first weight if the percentiles are equally spaced:\n\n >>> 0.5 * length * (percentiles[2] - percentiles[0])\n 250.0\n \"\"\"\n if length == 0:\n return ()\n diff = np.ediff1d(qs, 0.0, 0.0)\n weights = 0.5 * length * (diff[1:] + diff[:-1])\n return vals.tolist(), weights.tolist()\n\n\ndef merge_and_compress_summaries(vals_and_weights):\n \"\"\"Merge and sort percentile summaries that are already sorted.\n\n Each item is a tuple like ``(vals, weights)`` where vals and weights\n are lists. We sort both by vals.\n\n Equal values will be combined, their weights summed together.\n \"\"\"\n vals_and_weights = [x for x in vals_and_weights if x]\n if not vals_and_weights:\n return ()\n it = merge_sorted(*[zip(x, y) for x, y in vals_and_weights])\n vals = []\n weights = []\n vals_append = vals.append\n weights_append = weights.append\n val, weight = prev_val, prev_weight = next(it)\n for val, weight in it:\n if val == prev_val:\n prev_weight += weight\n else:\n vals_append(prev_val)\n weights_append(prev_weight)\n prev_val, prev_weight = val, weight\n if val == prev_val:\n vals_append(prev_val)\n weights_append(prev_weight)\n return vals, weights\n\n\ndef process_val_weights(vals_and_weights, npartitions, dtype_info):\n \"\"\"Calculate final approximate percentiles given weighted vals\n\n ``vals_and_weights`` is assumed to be sorted. We take a cumulative\n sum of the weights, which makes them percentile-like (their scale is\n [0, N] instead of [0, 100]). Next we find the divisions to create\n partitions of approximately equal size.\n\n It is possible for adjacent values of the result to be the same. Since\n these determine the divisions of the new partitions, some partitions\n may be empty. This can happen if we under-sample the data, or if there\n aren't enough unique values in the column. Increasing ``upsample``\n keyword argument in ``df.set_index`` may help.\n \"\"\"\n dtype, info = dtype_info\n\n if not vals_and_weights:\n try:\n return np.array(None, dtype=dtype)\n except Exception:\n # dtype does not support None value so allow it to change\n return np.array(None, dtype=np.float_)\n\n vals, weights = vals_and_weights\n vals = np.array(vals)\n weights = np.array(weights)\n\n # We want to create exactly `npartition` number of groups of `vals` that\n # are approximately the same weight and non-empty if possible. We use a\n # simple approach (more accurate algorithms exist):\n # 1. 
Remove all the values with weights larger than the relative\n # percentile width from consideration (these are `jumbo`s)\n # 2. Calculate percentiles with \"interpolation=left\" of percentile-like\n # weights of the remaining values. These are guaranteed to be unique.\n # 3. Concatenate the values from (1) and (2), sort, and return.\n #\n # We assume that all values are unique, which happens in the previous\n # step `merge_and_compress_summaries`.\n\n if len(vals) == npartitions + 1:\n rv = vals\n elif len(vals) < npartitions + 1:\n # The data is under-sampled\n if np.issubdtype(vals.dtype, np.number) and not is_categorical_dtype(dtype):\n # Interpolate extra divisions\n q_weights = np.cumsum(weights)\n q_target = np.linspace(q_weights[0], q_weights[-1], npartitions + 1)\n rv = np.interp(q_target, q_weights, vals)\n else:\n # Distribute the empty partitions\n duplicated_index = np.linspace(\n 0, len(vals) - 1, npartitions - len(vals) + 1, dtype=int\n )\n duplicated_vals = vals[duplicated_index]\n rv = np.concatenate([vals, duplicated_vals])\n rv.sort()\n else:\n target_weight = weights.sum() / npartitions\n jumbo_mask = weights >= target_weight\n jumbo_vals = vals[jumbo_mask]\n\n trimmed_vals = vals[~jumbo_mask]\n trimmed_weights = weights[~jumbo_mask]\n trimmed_npartitions = npartitions - len(jumbo_vals)\n\n # percentile-like, but scaled by weights\n q_weights = np.cumsum(trimmed_weights)\n q_target = np.linspace(0, q_weights[-1], trimmed_npartitions + 1)\n\n left = np.searchsorted(q_weights, q_target, side=\"left\")\n right = np.searchsorted(q_weights, q_target, side=\"right\") - 1\n # stay inbounds\n np.maximum(right, 0, right)\n lower = np.minimum(left, right)\n trimmed = trimmed_vals[lower]\n\n rv = np.concatenate([trimmed, jumbo_vals])\n rv.sort()\n\n if is_categorical_dtype(dtype):\n rv = pd.Categorical.from_codes(rv, info[0], info[1])\n elif is_datetime64tz_dtype(dtype):\n rv = pd.DatetimeIndex(rv).tz_localize(dtype.tz)\n elif \"datetime64\" in str(dtype):\n rv = pd.DatetimeIndex(rv, dtype=dtype)\n elif rv.dtype != dtype:\n rv = rv.astype(dtype)\n return rv\n\n\ndef percentiles_summary(df, num_old, num_new, upsample, state):\n \"\"\"Summarize data using percentiles and derived weights.\n\n These summaries can be merged, compressed, and converted back into\n approximate percentiles.\n\n Parameters\n ----------\n df: pandas.Series\n Data to summarize\n num_old: int\n Number of partitions of the current object\n num_new: int\n Number of partitions of the new object\n upsample: float\n Scale factor to increase the number of percentiles calculated in\n each partition. 
Use to improve accuracy.\n \"\"\"\n from dask.array.percentile import _percentile\n\n length = len(df)\n if length == 0:\n return ()\n random_state = np.random.RandomState(state)\n qs = sample_percentiles(num_old, num_new, length, upsample, random_state)\n data = df.values\n interpolation = \"linear\"\n if is_categorical_dtype(data):\n data = data.codes\n interpolation = \"nearest\"\n vals, n = _percentile(data, qs, interpolation=interpolation)\n if interpolation == \"linear\" and np.issubdtype(data.dtype, np.integer):\n vals = np.round(vals).astype(data.dtype)\n vals_and_weights = percentiles_to_weights(qs, vals, length)\n return vals_and_weights\n\n\ndef dtype_info(df):\n info = None\n if is_categorical_dtype(df):\n data = df.values\n info = (data.categories, data.ordered)\n return df.dtype, info\n\n\ndef partition_quantiles(df, npartitions, upsample=1.0, random_state=None):\n \"\"\" Approximate quantiles of Series used for repartitioning\n \"\"\"\n assert isinstance(df, Series)\n # currently, only Series has quantile method\n # Index.quantile(list-like) must be pd.Series, not pd.Index\n return_type = Series\n\n qs = np.linspace(0, 1, npartitions + 1)\n token = tokenize(df, qs, upsample)\n if random_state is None:\n random_state = int(token, 16) % np.iinfo(np.int32).max\n state_data = random_state_data(df.npartitions, random_state)\n\n df_keys = df.__dask_keys__()\n\n name0 = \"re-quantiles-0-\" + token\n dtype_dsk = {(name0, 0): (dtype_info, df_keys[0])}\n\n name1 = \"re-quantiles-1-\" + token\n val_dsk = {\n (name1, i): (\n percentiles_summary,\n key,\n df.npartitions,\n npartitions,\n upsample,\n state,\n )\n for i, (state, key) in enumerate(zip(state_data, df_keys))\n }\n\n name2 = \"re-quantiles-2-\" + token\n merge_dsk = create_merge_tree(merge_and_compress_summaries, sorted(val_dsk), name2)\n if not merge_dsk:\n # Compress the data even if we only have one partition\n merge_dsk = {(name2, 0, 0): (merge_and_compress_summaries, [list(val_dsk)[0]])}\n\n merged_key = max(merge_dsk)\n\n name3 = \"re-quantiles-3-\" + token\n last_dsk = {\n (name3, 0): (\n pd.Series, # TODO: Use `type(df._meta)` when cudf adds `tolist()`\n (process_val_weights, merged_key, npartitions, (name0, 0)),\n qs,\n None,\n df.name,\n )\n }\n\n dsk = merge(df.dask, dtype_dsk, val_dsk, merge_dsk, last_dsk)\n new_divisions = [0.0, 1.0]\n return return_type(dsk, name3, df._meta, new_divisions)\n",
"import sys\nfrom distutils.version import LooseVersion\n\nfrom .utils import Dispatch\n\ntry: # PyPy does not support sys.getsizeof\n sys.getsizeof(1)\n getsizeof = sys.getsizeof\nexcept (AttributeError, TypeError): # Monkey patch\n\n def getsizeof(x):\n return 100\n\n\nsizeof = Dispatch(name=\"sizeof\")\n\n\[email protected](object)\ndef sizeof_default(o):\n return getsizeof(o)\n\n\[email protected](list)\[email protected](tuple)\[email protected](set)\[email protected](frozenset)\ndef sizeof_python_collection(seq):\n return getsizeof(seq) + sum(map(sizeof, seq))\n\n\[email protected](dict)\ndef sizeof_python_dict(d):\n if len(d) > 10:\n return getsizeof(d) + 1000 * len(d)\n else:\n return getsizeof(d) + sum(map(sizeof, d.keys())) + sum(map(sizeof, d.values()))\n\n\[email protected]_lazy(\"cupy\")\ndef register_cupy():\n import cupy\n\n @sizeof.register(cupy.ndarray)\n def sizeof_cupy_ndarray(x):\n return int(x.nbytes)\n\n\[email protected]_lazy(\"numba\")\ndef register_numba():\n import numba.cuda\n\n @sizeof.register(numba.cuda.cudadrv.devicearray.DeviceNDArray)\n def sizeof_numba_devicendarray(x):\n return int(x.nbytes)\n\n\[email protected]_lazy(\"rmm\")\ndef register_rmm():\n import rmm\n\n # Only included in 0.11.0+\n if hasattr(rmm, \"DeviceBuffer\"):\n\n @sizeof.register(rmm.DeviceBuffer)\n def sizeof_rmm_devicebuffer(x):\n return int(x.nbytes)\n\n\[email protected]_lazy(\"numpy\")\ndef register_numpy():\n import numpy as np\n\n @sizeof.register(np.ndarray)\n def sizeof_numpy_ndarray(x):\n return int(x.nbytes)\n\n\[email protected]_lazy(\"pandas\")\ndef register_pandas():\n import pandas as pd\n import numpy as np\n\n def object_size(x):\n if not len(x):\n return 0\n sample = np.random.choice(x, size=20, replace=True)\n sample = list(map(sizeof, sample))\n return sum(sample) / 20 * len(x)\n\n @sizeof.register(pd.DataFrame)\n def sizeof_pandas_dataframe(df):\n p = sizeof(df.index)\n for name, col in df.iteritems():\n p += col.memory_usage(index=False)\n if col.dtype == object:\n p += object_size(col._values)\n return int(p) + 1000\n\n @sizeof.register(pd.Series)\n def sizeof_pandas_series(s):\n p = int(s.memory_usage(index=True))\n if s.dtype == object:\n p += object_size(s._values)\n if s.index.dtype == object:\n p += object_size(s.index)\n return int(p) + 1000\n\n @sizeof.register(pd.Index)\n def sizeof_pandas_index(i):\n p = int(i.memory_usage())\n if i.dtype == object:\n p += object_size(i)\n return int(p) + 1000\n\n @sizeof.register(pd.MultiIndex)\n def sizeof_pandas_multiindex(i):\n p = int(sum(object_size(l) for l in i.levels))\n for c in i.codes if hasattr(i, \"codes\") else i.labels:\n p += c.nbytes\n return int(p) + 1000\n\n\[email protected]_lazy(\"scipy\")\ndef register_spmatrix():\n from scipy import sparse\n\n @sizeof.register(sparse.dok_matrix)\n def sizeof_spmatrix_dok(s):\n return s.__sizeof__()\n\n @sizeof.register(sparse.spmatrix)\n def sizeof_spmatrix(s):\n return sum(sizeof(v) for v in s.__dict__.values())\n\n\[email protected]_lazy(\"pyarrow\")\ndef register_pyarrow():\n import pyarrow as pa\n\n def _get_col_size(data):\n p = 0\n if not isinstance(data, pa.ChunkedArray):\n data = data.data # pyarrow <0.15.0\n for chunk in data.iterchunks():\n for buffer in chunk.buffers():\n if buffer:\n p += buffer.size\n return p\n\n @sizeof.register(pa.Table)\n def sizeof_pyarrow_table(table):\n p = sizeof(table.schema.metadata)\n for col in table.itercolumns():\n p += _get_col_size(col)\n return int(p) + 1000\n\n @sizeof.register(pa.ChunkedArray)\n def 
sizeof_pyarrow_chunked_array(data):\n return int(_get_col_size(data)) + 1000\n\n # Handle pa.Column for pyarrow < 0.15\n if pa.__version__ < LooseVersion(\"0.15.0\"):\n\n @sizeof.register(pa.Column)\n def sizeof_pyarrow_column(col):\n return int(_get_col_size(col)) + 1000\n"
] |
[
[
"numpy.ones_like",
"numpy.allclose",
"numpy.isnan",
"numpy.empty_like",
"numpy.full",
"numpy.full_like",
"numpy.ones",
"numpy.zeros_like",
"numpy.isscalar",
"numpy.errstate",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"pandas.Categorical.from_codes",
"numpy.maximum",
"numpy.minimum",
"numpy.linspace",
"numpy.ediff1d",
"numpy.issubdtype",
"numpy.cumsum",
"pandas.DatetimeIndex",
"numpy.concatenate",
"numpy.round",
"pandas.api.types.is_datetime64tz_dtype",
"numpy.interp",
"numpy.searchsorted",
"numpy.iinfo",
"numpy.array",
"numpy.random.RandomState"
],
[
"numpy.random.choice"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
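The long module docstring in dask/dataframe/partitionquantiles.py above describes a summarize/merge/compress scheme: each partition contributes percentile values weighted by the partition length and by the spacing of neighbouring percentiles, and the merged, cumulated weights are then cut into roughly equal groups to obtain new divisions. Below is a minimal numpy-only sketch of that weighting step; `percentile_weights` is a hypothetical standalone mirror of `percentiles_to_weights`, and the division step is shown with a simplified interpolation rather than the exact searchsorted logic of `process_val_weights`.

# Hedged sketch of the percentile-weighting idea documented in
# dask/dataframe/partitionquantiles.py above; standalone numpy only.
import numpy as np

def percentile_weights(qs, length):
    """Weight each percentile value by partition length and by the gap
    to its neighbouring percentiles (mirrors percentiles_to_weights)."""
    diff = np.ediff1d(qs, to_begin=0.0, to_end=0.0)
    return 0.5 * length * (diff[1:] + diff[:-1])

qs = np.array([0, 25, 50, 90, 100], dtype=float)
vals = np.array([2, 3, 5, 8, 13], dtype=float)
length = 10
weights = percentile_weights(qs, length)
print(weights.tolist())   # [125.0, 250.0, 325.0, 250.0, 50.0]

# Cutting the cumulative weights into equal chunks then yields new
# partition divisions (simplified here with np.interp).
cum = np.cumsum(weights)
targets = np.linspace(0, cum[-1], 4 + 1)   # e.g. 4 new partitions
divisions = np.interp(targets, cum, vals)
print(divisions)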
myknotruby/recommenders
|
[
"d4a41de4e10cf0fa4db7f4e5c4d6bccc6629e201",
"d4a41de4e10cf0fa4db7f4e5c4d6bccc6629e201"
] |
[
"tests/unit/reco_utils/recommender/test_ncf_singlenode.py",
"reco_utils/recommender/sar/sar_singlenode.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nimport shutil\nimport numpy as np\nimport pytest\n\ntry:\n from reco_utils.recommender.ncf.ncf_singlenode import NCF\n from reco_utils.recommender.ncf.dataset import Dataset\n from reco_utils.common.constants import (\n DEFAULT_USER_COL,\n DEFAULT_ITEM_COL,\n SEED,\n )\nexcept ImportError:\n pass # skip this import if we are in cpu environment\n\n\nN_NEG = 5\nN_NEG_TEST = 10\n\n\[email protected]\[email protected](\n \"model_type, n_users, n_items\", [(\"NeuMF\", 1, 1), (\"GMF\", 10, 10), (\"MLP\", 4, 8)]\n)\ndef test_init(model_type, n_users, n_items):\n model = NCF(\n n_users=n_users, n_items=n_items, model_type=model_type, n_epochs=1, seed=SEED\n )\n # model type\n assert model.model_type == model_type.lower()\n # number of users in dataset\n assert model.n_users == n_users\n # number of items in dataset\n assert model.n_items == n_items\n # dimension of gmf user embedding\n assert model.embedding_gmf_P.shape == [n_users, model.n_factors]\n # dimension of gmf item embedding\n assert model.embedding_gmf_Q.shape == [n_items, model.n_factors]\n # dimension of mlp user embedding\n assert model.embedding_mlp_P.shape == [n_users, model.n_factors]\n # dimension of mlp item embedding\n assert model.embedding_mlp_Q.shape == [n_items, model.n_factors]\n\n # TODO: more parameters\n\n\[email protected]\[email protected](\n \"model_type, n_users, n_items\", [(\"NeuMF\", 5, 5), (\"GMF\", 5, 5), (\"MLP\", 5, 5)]\n)\ndef test_regular_save_load(model_type, n_users, n_items):\n ckpt = \".%s\" % model_type\n if os.path.exists(ckpt):\n shutil.rmtree(ckpt)\n\n model = NCF(\n n_users=n_users, n_items=n_items, model_type=model_type, n_epochs=1, seed=SEED\n )\n model.save(ckpt)\n if model.model_type == \"neumf\":\n P = model.sess.run(model.embedding_gmf_P)\n Q = model.sess.run(model.embedding_mlp_Q)\n elif model.model_type == \"gmf\":\n P = model.sess.run(model.embedding_gmf_P)\n Q = model.sess.run(model.embedding_gmf_Q)\n elif model.model_type == \"mlp\":\n P = model.sess.run(model.embedding_mlp_P)\n Q = model.sess.run(model.embedding_mlp_Q)\n\n del model\n model = NCF(\n n_users=n_users, n_items=n_items, model_type=model_type, n_epochs=1, seed=SEED\n )\n\n if model.model_type == \"neumf\":\n model.load(neumf_dir=ckpt)\n P_ = model.sess.run(model.embedding_gmf_P)\n Q_ = model.sess.run(model.embedding_mlp_Q)\n elif model.model_type == \"gmf\":\n model.load(gmf_dir=ckpt)\n P_ = model.sess.run(model.embedding_gmf_P)\n Q_ = model.sess.run(model.embedding_gmf_Q)\n elif model.model_type == \"mlp\":\n model.load(mlp_dir=ckpt)\n P_ = model.sess.run(model.embedding_mlp_P)\n Q_ = model.sess.run(model.embedding_mlp_Q)\n\n # test load function\n assert np.array_equal(P, P_)\n assert np.array_equal(Q, Q_)\n\n if os.path.exists(ckpt):\n shutil.rmtree(ckpt)\n\n\[email protected]\[email protected](\"n_users, n_items\", [(5, 5), (4, 8)])\ndef test_neumf_save_load(n_users, n_items):\n model_type = \"gmf\"\n ckpt_gmf = \".%s\" % model_type\n if os.path.exists(ckpt_gmf):\n shutil.rmtree(ckpt_gmf)\n model = NCF(n_users=n_users, n_items=n_items, model_type=model_type, n_epochs=1)\n model.save(ckpt_gmf)\n P_gmf = model.sess.run(model.embedding_gmf_P)\n Q_gmf = model.sess.run(model.embedding_gmf_Q)\n del model\n\n model_type = \"mlp\"\n ckpt_mlp = \".%s\" % model_type\n if os.path.exists(ckpt_mlp):\n shutil.rmtree(ckpt_mlp)\n model = NCF(n_users=n_users, n_items=n_items, model_type=model_type, n_epochs=1)\n 
model.save(\".%s\" % model_type)\n P_mlp = model.sess.run(model.embedding_mlp_P)\n Q_mlp = model.sess.run(model.embedding_mlp_Q)\n del model\n\n model_type = \"neumf\"\n model = NCF(n_users=n_users, n_items=n_items, model_type=model_type, n_epochs=1)\n model.load(gmf_dir=ckpt_gmf, mlp_dir=ckpt_mlp)\n\n P_gmf_ = model.sess.run(model.embedding_gmf_P)\n Q_gmf_ = model.sess.run(model.embedding_gmf_Q)\n\n P_mlp_ = model.sess.run(model.embedding_mlp_P)\n Q_mlp_ = model.sess.run(model.embedding_mlp_Q)\n\n assert np.array_equal(P_gmf, P_gmf_)\n assert np.array_equal(Q_gmf, Q_gmf_)\n assert np.array_equal(P_mlp, P_mlp_)\n assert np.array_equal(Q_mlp, Q_mlp_)\n\n if os.path.exists(ckpt_gmf):\n shutil.rmtree(ckpt_gmf)\n if os.path.exists(ckpt_mlp):\n shutil.rmtree(ckpt_mlp)\n\n # TODO: test loading fc-concat\n\n\[email protected]\[email protected](\"model_type\", [\"NeuMF\", \"GMF\", \"MLP\"])\ndef test_fit(python_dataset_ncf, model_type):\n train, test = python_dataset_ncf\n data = Dataset(train=train, test=test, n_neg=N_NEG, n_neg_test=N_NEG_TEST)\n model = NCF(\n n_users=data.n_users, n_items=data.n_items, model_type=model_type, n_epochs=1\n )\n model.fit(data)\n\n\[email protected]\[email protected](\"model_type\", [\"NeuMF\", \"GMF\", \"MLP\"])\ndef test_predict(python_dataset_ncf, model_type):\n # test data format\n train, test = python_dataset_ncf\n data = Dataset(train=train, test=test, n_neg=N_NEG, n_neg_test=N_NEG_TEST)\n model = NCF(\n n_users=data.n_users, n_items=data.n_items, model_type=model_type, n_epochs=1\n )\n model.fit(data)\n\n test_users, test_items = list(test[DEFAULT_USER_COL]), list(test[DEFAULT_ITEM_COL])\n\n assert type(model.predict(test_users[0], test_items[0])) == float\n\n res = model.predict(test_users, test_items, is_list=True)\n\n assert type(res) == list\n assert len(res) == len(test)\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\nimport numpy as np\nimport pandas as pd\nimport logging\nfrom scipy import sparse\n\nfrom reco_utils.common.python_utils import (\n jaccard,\n lift,\n exponential_decay,\n get_top_k_scored_items,\n rescale,\n)\nfrom reco_utils.common import constants\n\n\nCOOCCUR = \"cooccurrence\"\nJACCARD = \"jaccard\"\nLIFT = \"lift\"\n\nlogger = logging.getLogger()\n\n\nclass SARSingleNode:\n \"\"\"Simple Algorithm for Recommendations (SAR) implementation\n\n SAR is a fast scalable adaptive algorithm for personalized recommendations based on user transaction history\n and items description. The core idea behind SAR is to recommend items like those that a user already has\n demonstrated an affinity to. It does this by 1) estimating the affinity of users for items, 2) estimating\n similarity across items, and then 3) combining the estimates to generate a set of recommendations for a given user.\n \"\"\"\n\n def __init__(\n self,\n col_user=constants.DEFAULT_USER_COL,\n col_item=constants.DEFAULT_ITEM_COL,\n col_rating=constants.DEFAULT_RATING_COL,\n col_timestamp=constants.DEFAULT_TIMESTAMP_COL,\n col_prediction=constants.DEFAULT_PREDICTION_COL,\n similarity_type=JACCARD,\n time_decay_coefficient=30,\n time_now=None,\n timedecay_formula=False,\n threshold=1,\n normalize=False,\n ):\n \"\"\"Initialize model parameters\n\n Args:\n col_user (str): user column name\n col_item (str): item column name\n col_rating (str): rating column name\n col_timestamp (str): timestamp column name\n col_prediction (str): prediction column name\n similarity_type (str): ['cooccurrence', 'jaccard', 'lift'] option for computing item-item similarity\n time_decay_coefficient (float): number of days till ratings are decayed by 1/2\n time_now (int | None): current time for time decay calculation\n timedecay_formula (bool): flag to apply time decay\n threshold (int): item-item co-occurrences below this threshold will be removed\n normalize (bool): option for normalizing predictions to scale of original ratings\n \"\"\"\n self.col_rating = col_rating\n self.col_item = col_item\n self.col_user = col_user\n self.col_timestamp = col_timestamp\n self.col_prediction = col_prediction\n\n if similarity_type not in [COOCCUR, JACCARD, LIFT]:\n raise ValueError(\n 'Similarity type must be one of [\"cooccurrence\" | \"jaccard\" | \"lift\"]'\n )\n self.similarity_type = similarity_type\n self.time_decay_half_life = (\n time_decay_coefficient * 24 * 60 * 60\n ) # convert to seconds\n self.time_decay_flag = timedecay_formula\n self.time_now = time_now\n self.threshold = threshold\n self.user_affinity = None\n self.item_similarity = None\n self.item_frequencies = None\n\n # threshold - items below this number get set to zero in co-occurrence counts\n if self.threshold <= 0:\n raise ValueError(\"Threshold cannot be < 1\")\n\n # set flag to capture unity-rating user-affinity matrix for scaling scores\n self.normalize = normalize\n self.col_unity_rating = \"_unity_rating\"\n\n # column for mapping user / item ids to internal indices\n self.col_item_id = \"_indexed_items\"\n self.col_user_id = \"_indexed_users\"\n\n # obtain all the users and items from both training and test data\n self.n_users = None\n self.n_items = None\n\n # The min and max of the rating scale, obtained from the training data.\n self.rating_min = None\n self.rating_max = None\n\n # mapping for item to matrix element\n self.user2index = None\n self.item2index = None\n\n # the 
opposite of the above map - map array index to actual string ID\n self.index2item = None\n\n def compute_affinity_matrix(self, df, rating_col):\n \"\"\" Affinity matrix.\n\n The user-affinity matrix can be constructed by treating the users and items as\n indices in a sparse matrix, and the events as the data. Here, we're treating\n the ratings as the event weights. We convert between different sparse-matrix\n formats to de-duplicate user-item pairs, otherwise they will get added up.\n\n Args:\n df (pandas.DataFrame): Indexed df of users and items\n rating_col (str): Name of column to use for ratings\n\n Returns:\n sparse.csr: Affinity matrix in Compressed Sparse Row (CSR) format.\n \"\"\"\n\n return sparse.coo_matrix(\n (df[rating_col], (df[self.col_user_id], df[self.col_item_id])),\n shape=(self.n_users, self.n_items),\n ).tocsr()\n\n def compute_time_decay(self, df, decay_column):\n \"\"\"Compute time decay on provided column.\n\n Args:\n df (pandas.DataFrame): DataFrame of users and items\n decay_column (str): column to decay\n\n Returns:\n pandas.DataFrame: with column decayed\n \"\"\"\n\n # if time_now is None use the latest time\n if self.time_now is None:\n self.time_now = df[self.col_timestamp].max()\n\n # apply time decay to each rating\n df[decay_column] *= exponential_decay(\n value=df[self.col_timestamp],\n max_val=self.time_now,\n half_life=self.time_decay_half_life,\n )\n\n # group time decayed ratings by user-item and take the sum as the user-item affinity\n return df.groupby([self.col_user, self.col_item]).sum().reset_index()\n\n def compute_coocurrence_matrix(self, df):\n \"\"\" Co-occurrence matrix.\n\n The co-occurrence matrix is defined as :math:`C = U^T * U`\n\n where U is the user_affinity matrix with 1's as values (instead of ratings).\n\n Args:\n df (pandas.DataFrame): DataFrame of users and items\n\n Returns:\n numpy.ndarray: Co-occurrence matrix\n \"\"\"\n\n user_item_hits = sparse.coo_matrix(\n (np.repeat(1, df.shape[0]), (df[self.col_user_id], df[self.col_item_id])),\n shape=(self.n_users, self.n_items),\n ).tocsr()\n\n item_cooccurrence = user_item_hits.transpose().dot(user_item_hits)\n item_cooccurrence = item_cooccurrence.multiply(\n item_cooccurrence >= self.threshold\n )\n\n return item_cooccurrence.astype(df[self.col_rating].dtype)\n\n def set_index(self, df):\n \"\"\"Generate continuous indices for users and items to reduce memory usage.\n\n Args:\n df (pandas.DataFrame): dataframe with user and item ids\n \"\"\"\n\n # generate a map of continuous index values to items\n self.index2item = dict(enumerate(df[self.col_item].unique()))\n\n # invert the mapping from above\n self.item2index = {v: k for k, v in self.index2item.items()}\n\n # create mapping of users to continuous indices\n self.user2index = {x[1]: x[0] for x in enumerate(df[self.col_user].unique())}\n\n # set values for the total count of users and items\n self.n_users = len(self.user2index)\n self.n_items = len(self.index2item)\n\n def fit(self, df):\n \"\"\"Main fit method for SAR.\n\n Args:\n df (pandas.DataFrame): User item rating dataframe\n \"\"\"\n\n # generate continuous indices if this hasn't been done\n if self.index2item is None:\n self.set_index(df)\n\n logger.info(\"Collecting user affinity matrix\")\n if not np.issubdtype(df[self.col_rating].dtype, np.number):\n raise TypeError(\"Rating column data type must be numeric\")\n\n # copy the DataFrame to avoid modification of the input\n select_columns = [self.col_user, self.col_item, self.col_rating]\n if self.time_decay_flag:\n 
select_columns += [self.col_timestamp]\n temp_df = df[select_columns].copy()\n\n if self.time_decay_flag:\n logger.info(\"Calculating time-decayed affinities\")\n temp_df = self.compute_time_decay(df=temp_df, decay_column=self.col_rating)\n else:\n # without time decay use the latest user-item rating in the dataset as the affinity score\n logger.info(\"De-duplicating the user-item counts\")\n temp_df = temp_df.drop_duplicates(\n [self.col_user, self.col_item], keep=\"last\"\n )\n\n logger.info(\"Creating index columns\")\n # add mapping of user and item ids to indices\n temp_df.loc[:, self.col_item_id] = temp_df[self.col_item].apply(\n lambda item: self.item2index.get(item, np.NaN)\n )\n temp_df.loc[:, self.col_user_id] = temp_df[self.col_user].apply(\n lambda user: self.user2index.get(user, np.NaN)\n )\n\n if self.normalize:\n self.rating_min = temp_df[self.col_rating].min()\n self.rating_max = temp_df[self.col_rating].max()\n logger.info(\"Calculating normalization factors\")\n temp_df[self.col_unity_rating] = 1.0\n if self.time_decay_flag:\n temp_df = self.compute_time_decay(\n df=temp_df, decay_column=self.col_unity_rating\n )\n self.unity_user_affinity = self.compute_affinity_matrix(\n df=temp_df, rating_col=self.col_unity_rating\n )\n\n # affinity matrix\n logger.info(\"Building user affinity sparse matrix\")\n self.user_affinity = self.compute_affinity_matrix(\n df=temp_df, rating_col=self.col_rating\n )\n\n # calculate item co-occurrence\n logger.info(\"Calculating item co-occurrence\")\n item_cooccurrence = self.compute_coocurrence_matrix(df=temp_df)\n\n # free up some space\n del temp_df\n\n self.item_frequencies = item_cooccurrence.diagonal()\n\n logger.info(\"Calculating item similarity\")\n if self.similarity_type is COOCCUR:\n logger.info(\"Using co-occurrence based similarity\")\n self.item_similarity = item_cooccurrence\n elif self.similarity_type is JACCARD:\n logger.info(\"Using jaccard based similarity\")\n self.item_similarity = jaccard(item_cooccurrence).astype(\n df[self.col_rating].dtype\n )\n elif self.similarity_type is LIFT:\n logger.info(\"Using lift based similarity\")\n self.item_similarity = lift(item_cooccurrence).astype(\n df[self.col_rating].dtype\n )\n else:\n raise ValueError(\"Unknown similarity type: {}\".format(self.similarity_type))\n\n # free up some space\n del item_cooccurrence\n\n logger.info(\"Done training\")\n\n def score(self, test, remove_seen=False):\n \"\"\"Score all items for test users.\n\n Args:\n test (pandas.DataFrame): user to test\n remove_seen (bool): flag to remove items seen in training from recommendation\n\n Returns:\n numpy.ndarray: Value of interest of all items for the users.\n \"\"\"\n\n # get user / item indices from test set\n user_ids = list(\n map(\n lambda user: self.user2index.get(user, np.NaN),\n test[self.col_user].unique(),\n )\n )\n if any(np.isnan(user_ids)):\n raise ValueError(\"SAR cannot score users that are not in the training set\")\n\n # calculate raw scores with a matrix multiplication\n logger.info(\"Calculating recommendation scores\")\n test_scores = self.user_affinity[user_ids, :].dot(self.item_similarity)\n\n # ensure we're working with a dense ndarray\n if isinstance(test_scores, sparse.spmatrix):\n test_scores = test_scores.toarray()\n\n if self.normalize:\n counts = self.unity_user_affinity[user_ids, :].dot(self.item_similarity)\n user_min_scores = (\n np.tile(counts.min(axis=1)[:, np.newaxis], test_scores.shape[1])\n * self.rating_min\n )\n user_max_scores = (\n np.tile(counts.max(axis=1)[:, 
np.newaxis], test_scores.shape[1])\n * self.rating_max\n )\n test_scores = rescale(\n test_scores,\n self.rating_min,\n self.rating_max,\n user_min_scores,\n user_max_scores,\n )\n\n # remove items in the train set so recommended items are always novel\n if remove_seen:\n logger.info(\"Removing seen items\")\n test_scores += self.user_affinity[user_ids, :] * -np.inf\n\n return test_scores\n\n def get_popularity_based_topk(self, top_k=10, sort_top_k=True):\n \"\"\"Get top K most frequently occurring items across all users.\n\n Args:\n top_k (int): number of top items to recommend.\n sort_top_k (bool): flag to sort top k results.\n\n Returns:\n pandas.DataFrame: top k most popular items.\n \"\"\"\n\n test_scores = np.array([self.item_frequencies])\n\n logger.info(\"Getting top K\")\n top_items, top_scores = get_top_k_scored_items(\n scores=test_scores, top_k=top_k, sort_top_k=sort_top_k\n )\n\n return pd.DataFrame(\n {\n self.col_item: [self.index2item[item] for item in top_items.flatten()],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n def get_item_based_topk(self, items, top_k=10, sort_top_k=True):\n \"\"\"Get top K similar items to provided seed items based on similarity metric defined.\n This method will take a set of items and use them to recommend the most similar items to that set\n based on the similarity matrix fit during training.\n This allows recommendations for cold-users (unseen during training), note - the model is not updated.\n\n The following options are possible based on information provided in the items input:\n 1. Single user or seed of items: only item column (ratings are assumed to be 1)\n 2. Single user or seed of items w/ ratings: item column and rating column\n 3. Separate users or seeds of items: item and user column (user ids are only used to separate item sets)\n 4. 
Separate users or seeds of items with ratings: item, user and rating columns provided\n\n Args:\n items (pandas.DataFrame): DataFrame with item, user (optional), and rating (optional) columns\n top_k (int): number of top items to recommend\n sort_top_k (bool): flag to sort top k results\n\n Returns:\n pandas.DataFrame: sorted top k recommendation items\n \"\"\"\n\n # convert item ids to indices\n item_ids = np.asarray(\n list(\n map(\n lambda item: self.item2index.get(item, np.NaN),\n items[self.col_item].values,\n )\n )\n )\n\n # if no ratings were provided assume they are all 1\n if self.col_rating in items.columns:\n ratings = items[self.col_rating]\n else:\n ratings = pd.Series(np.ones_like(item_ids))\n\n # create local map of user ids\n if self.col_user in items.columns:\n test_users = items[self.col_user]\n user2index = {x[1]: x[0] for x in enumerate(items[self.col_user].unique())}\n user_ids = test_users.map(user2index)\n else:\n # if no user column exists assume all entries are for a single user\n test_users = pd.Series(np.zeros_like(item_ids))\n user_ids = test_users\n n_users = user_ids.drop_duplicates().shape[0]\n\n # generate pseudo user affinity using seed items\n pseudo_affinity = sparse.coo_matrix(\n (ratings, (user_ids, item_ids)), shape=(n_users, self.n_items)\n ).tocsr()\n\n # calculate raw scores with a matrix multiplication\n test_scores = pseudo_affinity.dot(self.item_similarity)\n\n # remove items in the seed set so recommended items are novel\n test_scores[user_ids, item_ids] = -np.inf\n\n top_items, top_scores = get_top_k_scored_items(\n scores=test_scores, top_k=top_k, sort_top_k=sort_top_k\n )\n\n df = pd.DataFrame(\n {\n self.col_user: np.repeat(\n test_users.drop_duplicates().values, top_items.shape[1]\n ),\n self.col_item: [self.index2item[item] for item in top_items.flatten()],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n # drop invalid items\n return df.replace(-np.inf, np.nan).dropna()\n\n def recommend_k_items(self, test, top_k=10, sort_top_k=True, remove_seen=False):\n \"\"\"Recommend top K items for all users which are in the test set\n\n Args:\n test (pandas.DataFrame): users to test\n top_k (int): number of top items to recommend\n sort_top_k (bool): flag to sort top k results\n remove_seen (bool): flag to remove items seen in training from recommendation\n\n Returns:\n pandas.DataFrame: top k recommendation items for each user\n \"\"\"\n\n test_scores = self.score(test, remove_seen=remove_seen)\n\n top_items, top_scores = get_top_k_scored_items(\n scores=test_scores, top_k=top_k, sort_top_k=sort_top_k\n )\n\n df = pd.DataFrame(\n {\n self.col_user: np.repeat(\n test[self.col_user].drop_duplicates().values, top_items.shape[1]\n ),\n self.col_item: [self.index2item[item] for item in top_items.flatten()],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n # drop invalid items\n return df.replace(-np.inf, np.nan).dropna()\n\n def predict(self, test):\n \"\"\"Output SAR scores for only the users-items pairs which are in the test set\n\n Args:\n test (pandas.DataFrame): DataFrame that contains users and items to test\n\n Returns:\n pandas.DataFrame: DataFrame contains the prediction results\n \"\"\"\n\n test_scores = self.score(test)\n user_ids = np.asarray(\n list(\n map(\n lambda user: self.user2index.get(user, np.NaN),\n test[self.col_user].values,\n )\n )\n )\n\n # create mapping of new items to zeros\n item_ids = np.asarray(\n list(\n map(\n lambda item: self.item2index.get(item, np.NaN),\n test[self.col_item].values,\n )\n )\n )\n 
nans = np.isnan(item_ids)\n if any(nans):\n logger.warning(\n \"Items found in test not seen during training, new items will have score of 0\"\n )\n test_scores = np.append(test_scores, np.zeros((self.n_users, 1)), axis=1)\n item_ids[nans] = self.n_items\n item_ids = item_ids.astype(\"int64\")\n\n df = pd.DataFrame(\n {\n self.col_user: test[self.col_user].values,\n self.col_item: test[self.col_item].values,\n self.col_prediction: test_scores[user_ids, item_ids],\n }\n )\n return df\n"
] |
[
[
"numpy.array_equal"
],
[
"scipy.sparse.coo_matrix",
"numpy.ones_like",
"numpy.isnan",
"numpy.issubdtype",
"pandas.DataFrame",
"numpy.zeros_like",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
paperstiger/MoE-public
|
[
"0f6f89d25dbbc83058e4f2750c70d79a3744856e",
"0f6f89d25dbbc83058e4f2750c70d79a3744856e"
] |
[
"Navigation-DRL/externalmodel.py",
"Navigation-DRL/arguments.py"
] |
[
"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2017 Gao Tang <[email protected]>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nexternalmodel.py\n\nDefine several user-defined models.\n\"\"\"\nimport gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nfrom scipy.integrate import odeint\nimport numpy as np\nimport numba\nimport sys\n\n\nclass oneDBug(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self):\n self.dt = 0.1\n self.viewer = None\n self.dimx, self.dimu = 2, 1\n self.xlb = np.array([-3, -5])\n self.xub = np.array([3, 5])\n self.action_space = spaces.Box(-0.5*np.ones(self.dimu), 0.5*np.ones(self.dimu))\n self.observation_space = spaces.Box(self.xlb, self.xub)\n\n def _get_obs(self):\n # return self.state.copy()\n return np.clip(self.state, self.xlb, self.xub)\n\n def getCost(self):\n obs = self._get_obs()\n cost = np.sum(obs**2)\n return cost\n\n def _step(self, a):\n self.state[0] += self.dt * self.state[1]\n self.state[1] += self.dt * a[0]\n return self._get_obs(), -self.getCost(), False, None\n\n def _reset(self):\n self.state = np.random.uniform([-1, -1], [1, 1])\n return self._get_obs()\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return None\n\n screen_width, screen_height = 500, 500\n scale = screen_width / (self.xub[0] - self.xlb[0])\n carlen, carwidth = 40.0/scale, 20.0/scale\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n print('Create rendering env now')\n self.viewer = rendering.Viewer(screen_width, screen_height)\n self.viewer.set_bounds(5*self.xlb[0], 5*self.xub[0], 5*self.xlb[0], 5*self.xub[0])\n\n l, r, t, b = -carlen/2, carlen/2, carwidth/2, -carwidth/2\n car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n self.cartrans = rendering.Transform()\n car.add_attr(self.cartrans)\n self.viewer.add_geom(car)\n\n x, v = self.state\n self.viewer.draw_line((0, -1), (0, 1))\n self.cartrans.set_translation(x, 0)\n sys.stdout.write('\\rx {} v {}'.format(x, v))\n sys.stdout.flush()\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n\nclass NdimBug(gym.Env):\n \"\"\"Implementation of a N-Dim bug that has simple dynamics but complicated environment\"\"\"\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self, dim, dt=0.1, xbd=None, ubd=None, tfweight=1, Q=None, R=None, max_dt=10, x0bd=None, clip_obs=False, **kw):\n \"\"\"Constructor for the class.\n\n Parameters\n ----------\n dim: int, dimension of space, state has 2 dim, control has 1 dim\n dt: float, integration time\n xbd: tuple of ndarray, bounds on states\n ubd: tuple of ndarray, bounds on control\n tfweight: float, the weight on time in cost function\n Q: ndarray, the lqr cost on state\n R: ndarray, the lqr cost on control\n max_dt: float, maximum time for one episode. 
Combine with dt for steps\n x0bd: tuple of ndarray / None, bounds on initial states\n clip_obs: if True, the observation is clipped to be inside xbd\n \"\"\"\n self.dt = dt\n self.viewer = None\n self.dimx, self.dimu = 2 * dim, dim\n if xbd is not None:\n self.xlb, self.xub = xbd\n else:\n self.xlb = -np.ones(self.dimx, dtype=np.float32)\n self.xub = -self.xlb\n if ubd is not None:\n self.ulb, self.uub = ubd\n else:\n self.ulb = -np.ones(dim, dtype=np.float32)\n self.uub = np.ones(dim, dtype=np.float32)\n self.action_space = spaces.Box(self.ulb, self.uub)\n self.observation_space = spaces.Box(self.xlb, self.xub)\n self.state = np.zeros(self.dimx, dtype=np.float32)\n self.tfweight = tfweight\n if Q is None:\n self.Q = np.ones(self.dimx)\n else:\n self.Q = Q\n if R is None:\n self.R = np.ones(self.dimu)\n else:\n self.R = R\n self._cur_step = 0\n self.max_step = int(np.ceil(max_dt / dt))\n self.x0lb, self.x0ub = x0bd\n self.clip_obs = clip_obs\n self.done_state = None\n self._reset()\n\n def _get_obs(self):\n \"\"\"Return an observation we have full state estimation\"\"\"\n if self.clip_obs:\n return np.clip(self.state, self.xlb, self.xub)\n else:\n return self.state\n\n def get_obs(self):\n return self._get_obs()\n\n def getCost(self, state, action):\n objQ = np.sum(state**2 * self.Q)\n objR = np.sum(action**2 * self.R)\n fixcost = self.tfweight # hope this encourage quick response\n return (objQ + objR + fixcost) * self.dt\n\n def _step(self, a):\n self.state[:self.dimu] += self.dt * self.state[self.dimu:] + 0.5 * a * self.dt ** 2\n self.state[self.dimu:] += self.dt * a\n cost = self.getCost(self.state, a)\n state_norm = np.linalg.norm(self.state)\n finish = False\n if state_norm < 1:\n cost -= (1 - state_norm)\n if state_norm < 0.1:\n finish = True\n cost -= 5\n if self._cur_step >= self.max_step:\n cost += 5\n finish = True\n if finish:\n self.done_state = self.state.copy()\n self._cur_step += 1\n return self._get_obs(), -cost, finish, {}\n\n def step(self, a):\n return self._step(a)\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def seed(self, seed=None):\n return self._seed(seed)\n\n def _reset(self):\n self.state = np.random.uniform(self.x0lb, self.x0ub)\n self._cur_step = 0\n return self._get_obs()\n\n def reset(self):\n return self._reset()\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return None\n\n screen_width, screen_height = 500, 500\n scale = screen_width / (self.xub[0] - self.xlb[0])\n carlen, carwidth = 40.0/scale, 20.0/scale\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n print('Create rendering env now')\n self.viewer = rendering.Viewer(screen_width, screen_height)\n self.viewer.set_bounds(5*self.xlb[0], 5*self.xub[0], 5*self.xlb[0], 5*self.xub[0])\n\n l, r, t, b = -carlen/2, carlen/2, carwidth/2, -carwidth/2\n car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n self.cartrans = rendering.Transform()\n car.add_attr(self.cartrans)\n self.viewer.add_geom(car)\n\n x, v = self.state[:self.dimu], self.state[self.dimu:]\n self.viewer.draw_line((0, -1), (0, 1))\n self.cartrans.set_translation(x[0], x[1])\n sys.stdout.write('\\rx {} v {}'.format(x, v))\n sys.stdout.flush()\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n def render(self, mode='human', close=False):\n self._render(mode, close)\n\n\nclass DubinCarEnv(gym.Env):\n metadata = {\n 'render.modes': ['human', 
'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self, x0=None, wrap=True, x0bd=None):\n self.dt = 0.1\n self.viewer = None\n # define problem dimension\n self.dimx, self.dimu = 4, 2\n self.drawxlb = np.array([-15.0, -15.0])\n self.drawxub = np.array([15.0, 15.0])\n self.xlb = np.array([-20, -20, -np.pi, -5.1])\n self.xub = np.array([20, 20, np.pi, 5.1])\n if x0 is None:\n if x0bd is None:\n self.x0lb = np.array([-10, -10, -np.pi, -3.1])\n self.x0ub = np.array([10, 10, np.pi, 3.1])\n else:\n self.x0lb, self.x0ub = x0bd\n else:\n self.x0lb = self.x0ub = x0\n # define costs\n self.Q = 0.1 * np.ones(self.dimx)\n self.Q[3] = 0.1\n self.Q[2] = 0.05\n self.R = 0.1 * np.ones(self.dimu)\n self.ulb = -5 * np.ones(self.dimu)\n self.uub = 5 * np.ones(self.dimu)\n self.tfweight = 1\n self.action_space = spaces.Box(self.ulb, self.uub)\n self.observation_space = spaces.Box(np.array([-100, -100, -1, -1, -10], dtype=np.float32),\n np.array([100, 100, 1, 1, 10], dtype=np.float32))\n self.default_state = np.array([10.0, 10.0, 0.0, 0.0])\n self.wrap = wrap\n # self._seed()\n self.viewer = None\n self.state = None\n self.done_state = None\n self.max_step = 100\n self.cur_step = 0\n self._reset()\n\n def getCost(self, state, action):\n cpstate = state.copy()\n cpstate[2] = np.mod(state[2] + np.pi, 2*np.pi) - np.pi\n objQ = np.sum(cpstate**2 * self.Q)\n objR = np.sum(action**2 * self.R)\n fixcost = self.tfweight # hope this encourage quick response\n return (objQ + objR + fixcost) * self.dt\n\n def _step(self, action):\n # u = (self.uub - self.ulb)/2.0*action + (self.uub + self.ulb)/2.0\n u = action\n y = odeint(self.dyn, self.state, np.array([0.0, self.dt]), args=(u,))\n costs = self.getCost(self.state, u)\n self.state = y[-1]\n x, y, theta, v = self.state\n finish = 0\n # if np.abs(x) > 15 or np.abs(y) > 15 or np.abs(v) > 10 or np.abs(theta) > 4*np.pi:\n # if np.abs(x) > 15 or np.abs(y) > 15: # or np.abs(yv) > 10:\n # finish = 1\n # costs = 10\n test_state = self.state.copy()\n test_state[2] = np.mod(test_state[2] + np.pi, 2*np.pi) - np.pi\n # print(test_state[2])\n state_norm = np.linalg.norm(test_state)\n if state_norm < 1:\n costs = -(1 - state_norm) # give reward when close\n if state_norm < 0.1:\n finish = 1\n costs = -1\n self.cur_step += 1\n if self.cur_step == self.max_step:\n finish = 1\n if finish == 1:\n self.done_state = self.state.copy()\n return self._get_obs(), -costs, finish, {}\n\n def step(self, action):\n return self._step(action)\n\n def _get_obs(self):\n # agl = self.state[2]\n # outstate = np.clip(self.state, self.xlb, self.xub)\n # outstate[2] = agl\n # x, y, theta, v = outstate\n x, y, theta, v = self.state\n if self.wrap:\n return np.array([x, y, np.sin(theta), np.cos(theta), v])\n else:\n outtheta = np.mod(theta + np.pi, 2*np.pi) - np.pi\n return np.array([x, y, outtheta, v])\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _reset(self, x0=None):\n if x0 is None:\n self.state = np.random.uniform(low=self.x0lb, high=self.x0ub)\n else:\n self.state = x0\n # self.state = self.np_random.uniform([-1, -5, -0.3, -0.5], [1, 5, 0.3, 0.5])\n # self.state = self.default_state + np.random.normal(size=self.dimx)*0.01\n self.cur_step = 0\n return self._get_obs()\n\n def reset(self, x0=None):\n return self._reset(x0)\n\n def render(self, mode='human', close=False):\n return self._render(mode, close)\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer 
= None\n return\n\n screen_width, screen_height = 500, 500\n scale = screen_width / (self.drawxub[0] - self.drawxlb[0])\n carlen, carwidth = 40/scale, 20/scale\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n print('Create rendering env now')\n self.viewer = rendering.Viewer(screen_width, screen_height)\n self.viewer.set_bounds(self.drawxlb[0], self.drawxub[0], self.drawxlb[1], self.drawxub[1])\n\n l, r, t, b = -carlen/2, carlen/2, carwidth/2, -carwidth/2\n # car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n car = rendering.FilledPolygon([(l, b), (l, t), (r, 0)])\n car.set_color(.8, .3, .3)\n self.cartrans = rendering.Transform()\n car.add_attr(self.cartrans)\n self.viewer.add_geom(car)\n # targetcar = rendering.FilledPolygon([(l, b), (l, t), (r, 0)])\n # targetcar.set_color(0, 1, 0)\n # defaultcartrans = rendering.Transform()\n # targetcar.add_attr(defaultcartrans)\n # self.viewer.add_geom(targetcar)\n\n x, y, theta, v = self.state\n self.cartrans.set_rotation(-theta + np.pi/2)\n self.viewer.draw_line((-1, 0), (1, 0))\n self.viewer.draw_line((0, -1), (0, 1))\n # self.cartrans.set_translation(x*scale + screen_width/2, y*scale + screen_height/2)\n # self.cartrans.set_translation(screen_width/scale/2. + x, screen_height/scale/2. + y)\n self.cartrans.set_translation(x, y)\n # self.cartrans.set_translation(30, 30)\n sys.stdout.write('step {} x {} y {} theta {} v {}\\n'.format(self.cur_step, x, y, theta, v))\n sys.stdout.flush()\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n def dyn(self, x, t0, u):\n sta = np.sin(x[2])\n cta = np.cos(x[2])\n v = x[3]\n return np.array([v*sta, v*cta, u[0]*v, u[1]])\n\n\ndef main():\n import time\n env = DubinCarEnv()\n env.seed(13)\n env.reset()\n print(env.state)\n for _ in range(1):\n action = np.random.normal(size=2)\n env.step(action)\n env.render()\n print(env.state)\n time.sleep(0.05)\n # raw_input(\"Press Enter to continue\")\n #env = gym.make('Acrobot-v1')\n #env.reset()\n #for _ in xrange(100):\n # # action = np.random.normal(size=1)\n # action = np.random.randint(3)\n # env.step(action)\n # env.render()\n # time.sleep(0.1)\n\n\nif __name__ == '__main__':\n main()\n",
"import argparse\n\nimport torch\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='RL')\n parser.add_argument('--algo', default='ppo',\n help='algorithm to use: a2c | ppo | acktr')\n parser.add_argument('--lr', type=float, default=7e-4,\n help='learning rate (default: 7e-4)')\n parser.add_argument('--eps', type=float, default=1e-5,\n help='RMSprop optimizer epsilon (default: 1e-5)')\n parser.add_argument('--alpha', type=float, default=0.99,\n help='RMSprop optimizer apha (default: 0.99)')\n parser.add_argument('--gamma', type=float, default=0.99,\n help='discount factor for rewards (default: 0.99)')\n parser.add_argument('--use-gae', action='store_true', default=False,\n help='use generalized advantage estimation')\n parser.add_argument('--tau', type=float, default=0.95,\n help='gae parameter (default: 0.95)')\n parser.add_argument('--entropy-coef', type=float, default=0.01,\n help='entropy term coefficient (default: 0.01)')\n parser.add_argument('--value-loss-coef', type=float, default=0.5,\n help='value loss coefficient (default: 0.5)')\n parser.add_argument('--max-grad-norm', type=float, default=0.5,\n help='max norm of gradients (default: 0.5)')\n parser.add_argument('--seed', type=int, default=1,\n help='random seed (default: 1)')\n parser.add_argument('--num-processes', type=int, default=4,\n help='how many training CPU processes to use (default: 16)')\n parser.add_argument('--num-steps', type=int, default=100,\n help='number of forward steps in A2C (default: 5)')\n parser.add_argument('--ppo-epoch', type=int, default=4,\n help='number of ppo epochs (default: 4)')\n parser.add_argument('--num-mini-batch', type=int, default=32,\n help='number of batches for ppo (default: 32)')\n parser.add_argument('--clip-param', type=float, default=0.2,\n help='ppo clip parameter (default: 0.2)')\n parser.add_argument('--num-stack', type=int, default=1,\n help='number of frames to stack (default: 4)')\n parser.add_argument('--log-interval', type=int, default=10,\n help='log interval, one log per n updates (default: 10)')\n parser.add_argument('--save-interval', type=int, default=100,\n help='save interval, one save per n updates (default: 10)')\n parser.add_argument('--vis-interval', type=int, default=100,\n help='vis interval, one log per n updates (default: 100)')\n parser.add_argument('--num-frames', type=int, default=10e6,\n help='number of frames to train (default: 10e6)')\n parser.add_argument('--env-name', default='Dubin-v6',\n help='environment to train on (default: Dubin-v2)')\n parser.add_argument('--log-dir', default='/tmp/gym/',\n help='directory to save agent logs (default: /tmp/gym)')\n parser.add_argument('--save-dir', default='./trained_models/',\n help='directory to save agent logs (default: ./trained_models/)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--add-timestep', action='store_true', default=False,\n help='add timestep to observations')\n parser.add_argument('--recurrent-policy', action='store_true', default=False,\n help='use a recurrent policy')\n parser.add_argument('--no-vis', action='store_true', default=False,\n help='disables visdom visualization')\n parser.add_argument('--port', type=int, default=8097,\n help='port to run the server on (default: 8097)')\n args = parser.parse_args()\n\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n args.vis = not args.no_vis\n\n return args\n"
] |
[
[
"numpy.clip",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.ones",
"numpy.ceil",
"numpy.random.normal",
"numpy.mod",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sehoonha/optskills
|
[
"d3b3ea46e7987f31f98e8b1772f927d3de32e569"
] |
[
"optskills/problems/sim_jump.py"
] |
[
"import numpy as np\nfrom numpy.linalg import norm\nfrom sim_problem import SimProblem, SPDController, JTController, STR\n\n\nclass SimJumpController(object):\n def __init__(self, _world):\n self.world = _world\n self.spd = SPDController(self.skel(), 250.0, 20.0, self.world.dt)\n self.spd.target = self.skel().q\n self.jt = JTController(self.skel())\n\n self.dim = 5\n # self.dim = 3\n self.params = (np.random.rand(self.dim) - 0.5) * 2.0\n\n self.reset()\n # for i, dof in enumerate(self.skel().dofs):\n # print i, dof.name\n # for i, body in enumerate(self.skel().bodies):\n # print i, body.name\n\n def skel(self):\n return self.world.skels[-1]\n\n def reset(self):\n self.target_index = -1\n\n w = (self.params - (-1.0)) / 2.0 # Change to 0 - 1 Scale\n lo = np.array([-3.0, 0.0, -3.0, -300.0, -0.5])\n hi = np.array([3.0, -3.0, 3.0, 300.0, 0.5])\n # lo = np.array([-3.0, 0.0, -3.0])\n # hi = np.array([3.0, -3.0, 3.0])\n params = lo * (1 - w) + hi * w\n # print('self.params = %s' % self.params)\n # print('normalized params = %s' % params)\n (q0, q1, q2, f0, q3) = params\n # (q0, q1, q2) = params\n # f0 = 100.0\n\n # Set the first pose\n pose0 = self.skel().q\n pose0[6] = pose0[9] = q0 # Thighs\n pose0[14] = pose0[15] = q1 # Knees\n pose0[17] = pose0[19] = q2 # Heels\n pose0[28], pose0[31] = 0.5, -0.5 # Shoulder\n\n # Set the second pose\n pose1 = self.skel().q\n pose1[28], pose1[31] = -2.0, 2.0 # Shoulder\n pose1[29], pose1[32] = 0.5, -0.5 # Shoulder\n\n # Set the third pose\n pose2 = self.skel().q\n pose2[6] = pose2[9] = q3 # Thighs\n\n self.target_time = [0.0, 0.4, 0.8, 9.9e8]\n self.targets = [pose0, pose1, pose2]\n self.forces = [[],\n [([\"h_toe_left\", \"h_toe_right\"], [0, -f0, 0])],\n []]\n\n def control(self):\n next_t = self.target_time[self.target_index + 1]\n if self.world.t >= next_t:\n self.target_index += 1\n self.spd.target = self.targets[self.target_index]\n\n vf = np.zeros(self.skel().ndofs)\n for f in self.forces[self.target_index]:\n bodies = f[0]\n force = f[1]\n vf += self.jt.control(bodies, force)\n\n return self.spd.control() + vf\n\n\nclass SimJump(SimProblem):\n def __init__(self):\n super(SimJump, self).__init__('skel/fullbody1.skel')\n self.__init__simulation__()\n\n self.dim = self.controller.dim\n self.eval_counter = 0 # Well, increasing when simulated\n self.params = None\n\n def __init__simulation__(self):\n self.init_state = self.skel().x\n self.init_state[1] = -0.50 * 3.14\n self.init_state[4] = 0.88\n self.init_state[5] = 0.0\n\n self.reset()\n self.controller = SimJumpController(self.world)\n # self.controller = SPDController(self.skel(), 400.0, 40.0, h)\n # self.controller.target = self.skel().q\n\n def simulate(self, sample):\n self.eval_counter += 1\n\n self.reset()\n self.set_params(sample)\n while not self.terminated():\n self.step()\n # print 'result:', self.params, self.collect_result()\n return self.collect_result()\n\n def evaluate(self, result, task):\n # Calculate the validity of COM\n C = np.array(result['C'])\n C[1] = result['maxCy']\n lo = np.array([0.0, 1.00, 0.0])\n hi = np.array([0.0, 1.50, 0.0])\n w = task\n C_hat = lo * (1 - w) + hi * w\n weight = np.array([0.2, 1.0, 0.2])\n obj = norm((C - C_hat) * weight) ** 2\n\n # Test height penalty\n height_penalty = 0.0 if result['C'][1] > 0.5 else 1.0\n obj += height_penalty\n\n # Calculate unbalanced penalty\n T = result['T']\n Hy = result['Hy']\n obj_balanced = 10.0\n if T is not None and Hy > 0.5:\n weight = np.array([0.5, 0.0, 0.5])\n obj_balanced = norm((T - C) * weight) ** 2\n\n # Calculate 
parameter penalty\n params = result['params']\n penalty = 0.0\n if params is not None:\n for i in range(self.dim):\n v = params[i]\n penalty += max(0.0, v - 1.0) ** 2\n penalty += min(0.0, v - (-1.0)) ** 2\n return obj + obj_balanced + penalty\n\n def set_random_params(self):\n self.set_params(2.0 * (np.random.rand(self.dim) - 0.5))\n\n def set_params(self, x):\n self.params = x\n self.controller.params = x\n self.controller.reset()\n\n def collect_result(self):\n res = {}\n res['C'] = self.skel().C\n res['T'] = self.skel().COP\n res['Hy'] = self.skel().body('h_head').C[1]\n res['maxCy'] = max([C[1] for C in self.com_trajectory])\n res['params'] = self.params\n return res\n\n def terminated(self):\n return (self.world.t > 1.3)\n\n def __str__(self):\n res = self.collect_result()\n status = \"\"\n status += '[SimJump at %.4f' % self.world.t\n for key, value in res.iteritems():\n if key == 'C':\n status += ' %s : %s' % (key, STR(value, 2))\n elif key == 'T':\n status += ' %s : %s' % (key, STR(value, 2))\n elif key == 'params':\n status += ' %s : %s' % (key, STR(value, 3))\n else:\n status += ' %s : %.4f' % (key, value)\n\n # Print Values\n status += ' value = {'\n tasks = np.linspace(0.0, 1.0, 6)\n values = [self.evaluate(res, t) for t in tasks]\n status += ' '.join(['%.4f' % v for v in values])\n status += '}'\n\n status += ']'\n return status\n\n def __repr__(self):\n return 'problems.SimJump()'\n"
] |
[
[
"numpy.random.rand",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
csmithud/pfra-hydromet
|
[
"443da7105a35b004b33b361fcfaa459b9690e4b3"
] |
[
"core/hydromet_traditional.py"
] |
[
"import pathlib as pl\nimport os\nimport numpy as np\nimport pandas as pd\nfrom scipy.integrate import quad\nfrom scipy.optimize import minimize\nfrom scipy import interpolate, stats, special\nfrom matplotlib import pyplot as plt\nfrom cycler import cycler\nfrom IPython.display import display\nfrom hydromet import S_24hr, IA_24hr\nfrom zipfile import ZipFile\nimport requests\nimport urllib.parse\nimport urllib.request\nimport geopandas as gpd\nfrom IPython.display import clear_output\nimport io\n\nimport sys\nsys.path.append('..')\nfrom hydromet_stratified import*\n\n# #----------------------------------------------------------------------------------------------------------------------#\n# # Functions called by EventTable_Stratified.ipynb.\n# #----------------------------------------------------------------------------------------------------------------------#\n\n# def Q_SCS(R: np.array, CN: float, mu: float) -> float:\n# \"\"\"SCS-CN runoff formula.\n# \"\"\"\n# S = 1000.0/CN-10.0\n# return (R-mu*S)**2/(R-mu*S+S)\n\n\n# def Norm_Constant_GEV(x: np.ndarray, PMP: float) -> float:\n# \"\"\"Constant for distribution truncation at the PMP value.\n# \"\"\" \n# return 1.0/stats.genextreme.cdf(PMP, x[2], x[0], x[1])\n\n\n# def Norm_Constant_LN(SD: float, mu: float, PMP: float) -> float:\n# \"\"\"Constant for distribution truncation at the PMP value. \n# \"\"\" \n# return 1.0/stats.lognorm.cdf(PMP, SD, scale = np.exp(mu))\n\n\n# def PDF_GEV(R: np.ndarray, x: np.ndarray, PMP: float) -> np.ndarray:\n# \"\"\"\n# \"\"\"\n# return Norm_Constant_GEV(x, PMP)*stats.genextreme.pdf(R, x[2], x[0], x[1])\n\n\n# def CDF_GEV(R: np.ndarray, x: np.ndarray, PMP: float) -> float:\n# \"\"\"\n# \"\"\"\n# return Norm_Constant_GEV(x, PMP)*stats.genextreme.cdf(R, x[2], x[0], x[1])\n\n\n# def PPF_GEV(P: np.ndarray, x: np.ndarray, PMP: float) -> np.ndarray:\n# \"\"\"\n# \"\"\"\n# return stats.genextreme.ppf(P/Norm_Constant_GEV(x, PMP), x[2], x[0], x[1])\n\n\n# def GEV_Parameters(df: pd.DataFrame, GEV_Parameters: np.ndarray, bounds: tuple, ID: str, PMP: float) -> pd.DataFrame:\n# \"\"\"Function defines an objective function for finding the GEV parameters and then determines the best GEV parameters \n# that minimize the difference between the GEV and comparison data.\n# \"\"\" \n# def objective_func_GEV(x: np.ndarray) -> float: \n# \"\"\"Calculates the sum of the squared residuals between the return interval and return interval calculated from \n# the GEV CDF with the differences normalized by the return interval. 
\n# \"\"\" \n# return sum(np.square((RI-1/(1-CDF_GEV(row[ID], x, PMP)))/RI) for RI, row in df.iterrows())\n# solution = minimize(objective_func_GEV, GEV_Parameters, method='SLSQP', bounds=bounds, options={'disp': False})\n# df_GEV_parameters = pd.DataFrame(data=solution.x, index=[\"mu\", \"sigma\", \"xi\"], columns=[\"GEV {}\".format(ID)])\n# return df_GEV_parameters\n\n\n# def GEV_parameters_Fit(raw_precip: pd.DataFrame, ID: str, PMP: float) -> pd.DataFrame:\n# \"\"\"This function provides initial value for finding the GEV parameters and then finds the best GEV parameters using \n# the function GEV_parameters.\n# \"\"\"\n# year = raw_precip.index.values\n# weights = np.append(1/year[:-1]-1/year[1:], 1/year[-1])\n# Avg = (weights*raw_precip[ID]).sum()\n# GEV_parameters = np.array([Avg*0.8, 0.5, -0.25])\n# bounds = ((Avg*0.7, Avg*1.0), (0.01, 1.1), (-0.5, 0.0))\n# df_GEV_parameters = GEV_Parameters(raw_precip, GEV_parameters, bounds, ID, PMP)\n# return df_GEV_parameters\n\n\n# def Avg_R_integrand(R: float, GEV_parameters: np.ndarray, PMP: float) -> float:\n# \"\"\"This function defines the integrand for calculating an average based on the GEV distribution.\n# \"\"\"\n# return R*PDF_GEV(R, GEV_parameters, PMP)\n\n\n# def Avg_R(lower_bound: float, upper_bound: float, GEV_parameters: np.ndarray, PMP: float) -> float:\n# \"\"\"Calculates the average value of the GEV distribution of rainfall or runoff based on an upper and lower bound.\n# \"\"\"\n# return quad(Avg_R_integrand, lower_bound, upper_bound, args=(GEV_parameters, PMP))\n\n# def GEV_RI(RI: np.ndarray, GEV_parameters: np.ndarray, PMP: float) -> np.ndarray:\n# \"\"\"Provides rainfall or runoff as a function of the return interval (RI).\n# \"\"\"\n# return PPF_GEV(1-1.0/RI, GEV_parameters, PMP)\n\n\n# def objective_func_bound_GEV(RI_lower: float, RI_upper: float, RI_middle: float, GEV_parameters: np.ndarray, \n# PMP: float) -> float:\n# \"\"\"Calculates the square of the error between the average rainfall or runoff calculated from the bin floor and \n# ceiling (given in terms of RI) and the rainfall or runoff of the return period of interest.\n# \"\"\" \n# return np.square(Avg_R(GEV_RI(RI_lower, GEV_parameters, PMP), GEV_RI(RI_upper, GEV_parameters, PMP), GEV_parameters, \n# PMP)[0]/(1.0/RI_lower-1.0/RI_upper) - GEV_RI(RI_middle, GEV_parameters, PMP))\n\n\n# def bound_lower_GEV(RI_upper: float, RI_middle: float, GEV_parameters: np.ndarray, initial_value: float,\n# PMP: float) -> float:\n# \"\"\"Finds the rainfall or runoff bin floor given the bin ceiling and average return interval of the bin.\n# \"\"\"\n# return minimize(objective_func_bound_GEV, initial_value, args = (RI_upper, RI_middle, GEV_parameters, PMP),\n# method='SLSQP', bounds=[(1.0, RI_upper*0.999)], options={'disp': False})\n\n\n# def PDF_QlS(Q: np.ndarray, S: float, mu: float, GEV_parameters: np.ndarray, PMP: float) -> np.ndarray:\n# \"\"\"This function provides the runoff PDF conditional on max potential retention, where Q=runoff, S=max potential \n# retention, and mu=initial abstraction parameter.\n# \"\"\"\n# return (Q+2.0*S+np.sqrt(Q)*np.sqrt(Q+4.0*S))/(2.0*np.sqrt(Q)*np.sqrt(Q+4.0*S))*\\\n# PDF_GEV(1.0/2.0*(Q+np.sqrt(Q)*np.sqrt(Q+4.0*S)+2.0*S*mu), GEV_parameters, PMP)\n\n\n# def PDF_Q(Q: float, mu: float, GEV_parameters: np.ndarray, PMP: float, partition_avg: np.ndarray,\n# Delta_P: float, error_PQ: float) -> float: \n# \"\"\"\n# \"\"\"\n# return sum(Delta_P*PDF_QlS(Q, S_avg_partition, mu, GEV_parameters, PMP) for S_avg_partition in \n# partition_avg)/(1-error_PQ)\n\n# 
def Qzero_integrand(S: float, mu: float, alpha: float, beta: float, S_limit: float, GEV_parameters: np.ndarray,\n# PMP: float, error_PQ: float) -> float:\n# \"\"\"Defines the integrand for calculating the probability of zero runoff.\n# \"\"\"\n# return (CDF_GEV(S*mu, GEV_parameters, PMP)-CDF_GEV(0, GEV_parameters, PMP))\\\n# *(1.0/S_limit)*stats.beta(alpha, beta).pdf(S/S_limit)/(1-error_PQ)\n\n\n# def P_Qzero(mu: float, alpha: float, beta: float, S_limit: float, GEV_parameters: np.ndarray, PMP: float, \n# error_PQ: float) -> float:\n# \"\"\"Defines discrete probability of zero runoff (integrated).\n# \"\"\"\n# return quad(Qzero_integrand, 0, S_limit, args =(mu, alpha, beta, S_limit, GEV_parameters, PMP, error_PQ)) \n\n\n# def CDF_Q(Q: float, mu: float, alpha: float, beta: float, S_limit: float, GEV_parameters: np.ndarray, PMP: float, \n# partition_avg: np.ndarray, Delta_P: float, error_PQ: float) -> float:\n# \"\"\"Defines the cumulative distribution function for runoff. PDF PDF_Q(u) is integrated from zero to an arbitrary \n# runoff Q.\n# \"\"\"\n# return quad(PDF_Q, 0.0, Q, args=(mu, GEV_parameters, PMP, partition_avg, Delta_P, error_PQ))[0]\\\n# +P_Qzero(mu, alpha, beta, S_limit, GEV_parameters, PMP, error_PQ)[0]\n\n# def Avg_Q_integrand(Q: float, mu: float, GEV_parameters: np.ndarray, PMP: float, partition_avg: np.ndarray, \n# Delta_P: float, error_PQ: float) -> float:\n# \"\"\"\n# \"\"\"\n# return Q*PDF_Q(Q, mu, GEV_parameters, PMP, partition_avg, Delta_P, error_PQ)\n\n\n# def Avg_Q(lower_bound: float, upper_bound: float, mu: float, GEV_parameters: np.ndarray, PMP: float, \n# partition_avg: np.ndarray, Delta_P: float, error_PQ: float) -> float:\n# \"\"\"\n# \"\"\"\n# return quad(Avg_Q_integrand, lower_bound, upper_bound, args=(mu, GEV_parameters, PMP, partition_avg, Delta_P, \n# error_PQ))\n\n\n# def runoff_RI(RI: float, f_RI_Q: interpolate.interp1d) -> float:\n# \"\"\"Defines runoff as a function of the return interval (RI).\n# \"\"\"\n# return f_RI_Q(RI)\n\n\n# def objective_func_bound_runoff_L(RI_lower: float, RI_upper: float, RI_middle: float, mu: float, \n# GEV_parameters: np.ndarray, PMP: float, partition_avg: np.ndarray, Delta_P: float, \n# f_RI_Q: interpolate.interp1d, error_PQ: float) -> float:\n# \"\"\"Calculates the square of the error between the average runoff calculated from the bin floor and ceiling (given in \n# terms of RI) and the runoff of the return period of interest.\n# \"\"\" \n# return np.square(Avg_Q(runoff_RI(RI_lower, f_RI_Q), runoff_RI(RI_upper, f_RI_Q), mu, GEV_parameters, PMP, \n# partition_avg, Delta_P, error_PQ)[0]/(1.0/RI_lower-1.0/RI_upper)-runoff_RI(RI_middle, f_RI_Q)) \n\n\n# def Bound_L(RI_upper: float, RI_middle: float, mu: float, GEV_parameters: np.ndarray, PMP: float, \n# partition_avg: np.ndarray, Delta_P: float, initial_value: float, f_RI_Q: interpolate.interp1d, \n# error_PQ: float) -> float:\n# \"\"\"Calculates runoff bin floor given the bin ceiling and average return interval of the bin.\n# \"\"\"\n# return minimize(objective_func_bound_runoff_L, initial_value, \n# args = (RI_upper, RI_middle, mu, GEV_parameters, PMP, partition_avg, Delta_P, f_RI_Q, error_PQ),\n# method='SLSQP', bounds=[(1.0, RI_upper)], options={'disp': False})\n\n\n# def PDF_S(S: np.ndarray, alpha: float, beta: float, S_limit: float)-> float:\n# \"\"\"Defines the distribution of the max potential retention.\n# \"\"\"\n# return (1.0/S_limit)*stats.beta(alpha, beta).pdf(S/S_limit)\n\n\n# def S_avg_integrand(S: float, alpha: float, beta: float, S_limit: float) -> float:\n# 
\"\"\"Defines the integrand for finding the average value over each partition.\n# \"\"\"\n# return S*PDF_S(S, alpha, beta, S_limit)\n\n\n# def S_avg_partition(alpha: float, beta: float, S_limit: float, lower_bound: float, upper_bound: float) -> float:\n# \"\"\"Defines the integration for the average value over each partition.\n# \"\"\"\n# return quad(S_avg_integrand, lower_bound, upper_bound, args=(alpha, beta, S_limit))\n\n\n# def PDF_SlQ(S: np.ndarray, Q: float, mu: float, GEV_parameters: np.ndarray, PMP: float, partition_avg: np.ndarray, \n# Delta_P: float, alpha: float, beta: float, S_limit: float, error_PQ: float) -> float:\n# \"\"\"Defines the PDF of the max potential retention, S, conditional on runoff, Q.\n# \"\"\"\n# return PDF_QlS(Q, S, mu, GEV_parameters, PMP)*PDF_S(S, alpha, beta, S_limit)/PDF_Q(Q, mu, GEV_parameters, PMP, \n# partition_avg, Delta_P, error_PQ)\n\n\n# def CDF_SlQ(S: float, Q: float, mu: float, GEV_parameters: np.ndarray, PMP: float, partition_avg: np.ndarray, \n# Delta_P: float, alpha: float, beta: float, S_limit: float, error_PQ: float) -> float:\n# \"\"\"Defines the CDF of the max potential retention, S, conditional on runoff Q.\n# \"\"\"\n# return quad(PDF_SlQ, 0, S, args=(Q, mu, GEV_parameters, PMP, partition_avg, Delta_P, alpha, beta, S_limit, \n# error_PQ))[0]\n\n\n# def Avg_SlQ_integrand(S: float, Q: float, mu: float, GEV_parameters: np.ndarray, PMP: float, partition_avg: np.ndarray, \n# Delta_P: float, alpha: float, beta: float, S_limit: float, error_PQ: float) -> float:\n# \"\"\"Defines the integrand for calculating the average max potential retention.\n# \"\"\"\n# return S*PDF_SlQ(S, Q, mu, GEV_parameters, PMP, partition_avg, Delta_P, alpha, beta, S_limit, error_PQ)\n\n\n# def Avg_SlQ(Q: float, mu: float, GEV_parameters: np.ndarray, PMP: float, partition_avg: np.ndarray, Delta_P: float, \n# alpha: float, beta: float, S_limit: float, error_PQ: float, lower_bound: float, \n# upper_bound: float) -> float:\n# \"\"\"Derives the average values of the max potential retention by integrating between an upper and lower bound.\n# \"\"\"\n# return quad(Avg_SlQ_integrand, lower_bound, upper_bound, args=(Q, mu, GEV_parameters, PMP, partition_avg, Delta_P, \n# alpha, beta, S_limit, error_PQ))[0]\n\n\n# def objective_func_median_S(S: float, Q: float, mu: float, GEV_parameters: np.ndarray, PMP: float, \n# partition_avg: np.ndarray, Delta_P: float, alpha: float, beta: float, \n# S_limit: float, error_PQ: float) -> float:\n# \"\"\"Calculates the square of the error between the the CDF value and the median value of 0.5.\n# \"\"\" \n# return np.square(CDF_SlQ(S, Q, mu, GEV_parameters, PMP, partition_avg, Delta_P, alpha, beta, S_limit, error_PQ)-0.5)\n\n\n# def Median_S(Q: float, mu: float, GEV_parameters: np.ndarray, PMP: float, partition_avg: np.ndarray, Delta_P: float, \n# alpha: float, beta: float, S_limit: float, error_PQ: float, bounds: list, Initial_Value: float) -> float:\n# \"\"\"\n# \"\"\"\n# return minimize(objective_func_median_S, Initial_Value, \n# args = (Q, mu, GEV_parameters, PMP, partition_avg, Delta_P, alpha, beta, S_limit, error_PQ),\n# method='SLSQP', bounds=bounds, options={'disp': False})\n\n\n# def partition_S_avgs(n_partition: int, Delta_P: float, alpha: float, beta: float, S_limit: float) -> np.ndarray:\n# \"\"\"Calculates the average value of the max potential retention for n partitions of the distribution.\n# \"\"\"\n# Bounds = np.linspace(0.0, 1.0, n_partition+1)\n# Bounds_S = S_limit*stats.beta(alpha, beta).ppf(Bounds)\n# Bounds_Lower = 
Bounds_S[:-1]\n# Bounds_Upper = Bounds_S[1:]\n# partition_avg = np.array([S_avg_partition(alpha, beta, S_limit, lower, upper)[0] \n# for lower, upper in zip(Bounds_Lower, Bounds_Upper)])/Delta_P\n# return partition_avg\n\n\n# def weights_Rainfall(Return_Intervals: np.ndarray, GEV_parameters: np.ndarray, PMP: float, RI_upper_bound: float, \n# NOAA_precip: pd.DataFrame, ID: str, CN: float, mu: float) -> pd.DataFrame:\n# \"\"\"Calculate the weights of the rainfall events. If the RI of interest are already in the mean curve table, RI \n# values for the rainfall are taken directly from the input data (NOAA_precip or mean precip curve) instead of \n# being calculated from the fitted GEV.\n# \"\"\"\n# Size = Return_Intervals.size\n# Bin_Bounds_R_topdown = np.zeros(Size+1)\n# Bin_Bounds_R_topdown[Size] = RI_upper_bound\n# for i in range(0, Size):\n# Bin_Bounds_R_topdown[Size-i-1] = bound_lower_GEV(Bin_Bounds_R_topdown[Size-i], Return_Intervals[Size-i-1], \n# GEV_parameters, Return_Intervals[Size-i-1]*0.2, PMP).x[0] \n# lower_bound = GEV_RI(RI_upper_bound, GEV_parameters, PMP)\n# Avg_PlusR = Avg_R(lower_bound, PMP, GEV_parameters, PMP)[0]/(1.0/RI_upper_bound)\n# Prob_Plus = CDF_GEV(Avg_PlusR, GEV_parameters, PMP) \n# RI_index = np.append(Return_Intervals, 1.0/(1-Prob_Plus)).astype(int)\n# weights_R_topdown = (1.0/Bin_Bounds_R_topdown[:-1]-1.0/Bin_Bounds_R_topdown[1:]).astype(float)\n# weights_R_topdown = np.append(weights_R_topdown, 1.0/RI_upper_bound)\n# data = np.vstack((Bin_Bounds_R_topdown, np.append(Bin_Bounds_R_topdown[1:], np.inf), weights_R_topdown)).T\n# df_weights = pd.DataFrame(data=data, index=RI_index, columns=['Bin Floor', 'Bin Ceiling', 'Event Weight'])\n# RI_data = NOAA_precip[NOAA_precip.index.isin(Return_Intervals)].index.values.astype(int)\n# RI_index_calc = RI_index[np.isin(RI_index, RI_data, invert=True)]\n# Precip_calculate = GEV_RI(RI_index_calc, GEV_parameters, PMP)\n# df2 = pd.DataFrame(data = Precip_calculate, index = RI_index_calc, columns=[ID]) \n# df_R_NOAA_E = NOAA_precip[NOAA_precip.index.isin(RI_data)].copy()\n# df_precip = df_R_NOAA_E.append(df2)\n# df_precip = pd.DataFrame(df_precip[ID])\n# Q = Q_SCS(df_precip[ID].values, CN, mu)\n# df_precip['Runoff'] = Q \n# return pd.concat([df_weights, df_precip], axis=1)\n\n\n# def runoff_GEV(mu: float, GEV_parameters, PMP: float, alpha: float, beta: float, S_limit: float, \n# partition_avg: np.ndarray, Delta_P: float, error_PQ: float, n_partitions_Q: int=40) -> tuple:\n# \"\"\"Calculates the values of runoff versus return period, fits a GEV distribution to the results, and returns \n# dataframes for both the GEV parameters and runoff as a function of the return interval.\n# \"\"\"\n# Q_line = np.linspace(0.01, PMP - 0.01, n_partitions_Q+1)\n# Return_PeriodQ = 1.0/(1-np.transpose([CDF_Q(Q, mu, alpha, beta, S_limit, GEV_parameters, PMP, partition_avg, \n# Delta_P, error_PQ) for Q in Q_line]))\n# df_runoff = pd.DataFrame(Q_line, index=Return_PeriodQ, columns=['Runoff'])\n# df_GEV_parameters_R = GEV_parameters_Fit(df_runoff, 'Runoff', PMP)\n# return df_runoff, df_GEV_parameters_R\n\n\n# def runoff_weights(Return_Intervals: np.ndarray, RI_upper_bound: float, mu: float, GEV_Parameters_Runoff: pd.DataFrame,\n# GEV_Parameters_Rain: pd.DataFrame, PMP: float, partition_avg: np.ndarray, Delta_P: float, \n# error_PQ: float) -> pd.DataFrame:\n# \"\"\"Calculate the weights of the runoff events assuming that a GEV distribution is the best fit to the runoff \n# distribution that was derived analytically (and implemented with numerical 
integration) based on the GEV PDF of \n# rainfall and the distribution of the max potential retention.\n# \"\"\" \n# Size = Return_Intervals.size\n# Bin_Bounds = np.zeros(Size+1)\n# Bin_Bounds[Size] = RI_upper_bound\n# for i in range(0, Size):\n# Bin_Bounds[Size-i-1] = bound_lower_GEV(Bin_Bounds[Size-i], Return_Intervals[Size-i-1], GEV_Parameters_Runoff, \n# np.array([Return_Intervals[Size-i-1]*0.2]), PMP).x[0] \n# lower_bound = GEV_RI(RI_upper_bound, GEV_Parameters_Runoff, PMP)\n# Avg_Plus = Avg_Q(lower_bound, PMP, mu, GEV_Parameters_Rain, PMP, partition_avg, Delta_P, \n# error_PQ)[0]/(1.0/RI_upper_bound)\n# Prob_Plus = CDF_GEV(Avg_Plus, GEV_Parameters_Runoff, PMP)\n# RI_index = np.append(Return_Intervals, 1.0/(1.0-Prob_Plus)).astype(int)\n# Event_Amount = GEV_RI(RI_index, GEV_Parameters_Runoff, PMP)\n# df_runoff = pd.DataFrame(data=Event_Amount, index=RI_index.astype(int), columns=['Runoff'])\n# weights = (1.0/Bin_Bounds[:-1]-1.0/Bin_Bounds[1:]).astype(float)\n# weights = np.append(weights, 1/RI_upper_bound)\n# data = np.vstack((Bin_Bounds, np.append(Bin_Bounds[1:], np.inf), weights)).T\n# df_weights = pd.DataFrame(data=data, index=RI_index.astype(int), columns=['Bin Floor', 'Bin Ceiling', \n# 'Event Weight']) \n# return pd.concat([df_weights, df_runoff], axis=1)\n\n\n# def Scenarios_Avg_S_Median_S(df_weights_runoff: pd.DataFrame, mu: float, GEV_parameters: np.ndarray, PMP: float, \n# partition_avg: np.ndarray, Delta_P: float, alpha: float, beta: float, \n# S_limit: float, error_PQ: float) -> pd.DataFrame:\n# \"\"\"Calculate median and average max potential retention scenarios for given runoff.\n# \"\"\"\n# Runoff_Q = df_weights_runoff['Runoff'].values\n# Return_Intervals_Q = df_weights_runoff.index.values.astype(int)\n# Avg_S_list = [Avg_SlQ(Q1, mu, GEV_parameters, PMP, partition_avg, Delta_P, alpha, beta, S_limit, error_PQ, 0.0, \n# S_limit) for Q1 in Runoff_Q]\n# R_Avg_S = [1.0/2.0*(Q+np.sqrt(Q)*np.sqrt(Q+4.0*S)+2.0*S*mu) for Q, S in zip(Runoff_Q, Avg_S_list)]\n# Median_S_list = [Median_S(Q1, mu, GEV_parameters, PMP, partition_avg, Delta_P, alpha, beta, S_limit, error_PQ, \n# [(0.25, S_limit)], (0+S_limit)/3).x[0] for Q1 in Runoff_Q]\n# R_Median_S = [1.0/2.0*(Q+np.sqrt(Q)*np.sqrt(Q+4.0*S)+2.0*S*mu) for Q, S in zip(Runoff_Q, Median_S_list)]\n# new_data = np.vstack((Avg_S_list, R_Avg_S, Median_S_list, R_Median_S)).T\n# df_SR1 = pd.DataFrame(data=new_data, index=Return_Intervals_Q, \n# columns=['Avg. S', 'Rainfall (Avg. 
S)', 'Median S', 'Rainfall (Median S)']) \n# return pd.concat([df_weights_runoff, df_SR1], axis=1)\n\n\n# def Scenarios_low_and_high_S(df_runoff_SR1: pd.DataFrame, mu: float, GEV_parameters: np.ndarray, PMP: float, \n# partition_avg: np.ndarray, Delta_P: float, alpha: float, beta: float, \n# S_limit: float, error_PQ: float) -> pd.DataFrame:\n# \"\"\"Calculate scenarios for high and low maximum potential retention.\n# \"\"\"\n# weights_runoff = df_runoff_SR1['Event Weight'].values\n# Runoff_Q = df_runoff_SR1['Runoff'].values\n# Return_Intervals_Q = df_runoff_SR1.index.values.astype(int)\n# Median_S_list = df_runoff_SR1['Median S'].values\n# Avg_S_Lower50_list = [Avg_SlQ(Q1, mu, GEV_parameters, PMP, partition_avg, Delta_P, alpha, beta, S_limit, error_PQ, \n# 0.0, S1)/0.5 for Q1, S1 in zip(Runoff_Q, Median_S_list)]\n# Avg_S_Upper50_list = [Avg_SlQ(Q1, mu, GEV_parameters, PMP, partition_avg, Delta_P, alpha, beta, S_limit, error_PQ, \n# S1, S_limit)/0.5 for Q1, S1 in zip(Runoff_Q, Median_S_list)]\n# R_Avg_S_Lower50 = [1.0/2.0*(Q+np.sqrt(Q)*np.sqrt(Q+4.0*S)+2.0*S*mu) for Q, S in zip(Runoff_Q, Avg_S_Lower50_list)]\n# R_Avg_S_Upper50 = [1.0/2.0*(Q+np.sqrt(Q)*np.sqrt(Q+4.0*S)+2.0*S*mu) for Q, S in zip(Runoff_Q, Avg_S_Upper50_list)]\n# new_data = np.vstack((weights_runoff*0.5, Runoff_Q, Avg_S_Lower50_list, R_Avg_S_Lower50, Avg_S_Upper50_list, \n# R_Avg_S_Upper50)).T\n# df_SR2 = pd.DataFrame(data=new_data, index=Return_Intervals_Q,\n# columns=['Event Weight', 'Runoff', 'Avg. S (Lower 50%)', 'Rainfall (Lower 50%)', \n# 'Avg. S (Upper 50%)', 'Rainfall (Upper 50%)'])\n# return df_SR2\n\n\n# def precip_hyetograph_nrcs(df: pd.DataFrame) -> pd.DataFrame:\n# \"\"\"This function takes the dataframe precipitation table extracted from NOAA Atlas 14 and calculates the nested \n# hyetograph for storm events classified by recurrence intervals. The function first retrieves the ratio of \n# rainfall and incremental intensity; then proceeds to get the ratio, slope, and slope difference; and finally fits \n# a parabolic curve from 0 to 9 hours that passes through the ratios at 0, 6, and 9 hours. The function then fits \n# curves for the remaining data until 12 hours. 
NOTE: this function is limited to 24 hours and needs to be updated\n# to be flexible for dfferent storm durations.\n# \"\"\"\n# ratio_to_24h = pd.DataFrame(np.arange(start=0, stop=241, step=1), columns = ['time']).set_index(['time'])\n# dif = df.diff()\n# dif.at['05m','value'] = df.at['05m','value']\n# df['ratio'] = df/df.at['24h','value']\n# i_val = {'05m': 12, '10m': 12, '15m': 12, '30m': 4, '60m': 2, '02h': 1, '03h': 1, '06h': 1./3., '12h': 1./6., \n# '24h': 1./12.}\n# intensity_val = pd.DataFrame.from_dict(i_val, orient='index')\n# df.insert(1, 'increm_intensity', dif['value']*intensity_val[0], True)\n# raw_rf = {'time':[0, 6, 9, 10.5, 11, 11.5, 11.75, 11.875, 11.917, 12, 12.083, 12.125, 12.25, 12.5, 13, 13.5, 15, 18, \n# 24]}\n# raw_df = pd.DataFrame(raw_rf, columns = ['time'])\n# temp_0 = 0.5 - df.sort_values('ratio', ascending=False)['ratio']*0.5 \n# temp_12 = 0.5\n# temp_24 = 1 - temp_0.sort_values(0, ascending=False)\n# raw_df.loc[0:9, 'ratio']= temp_0.values\n# raw_df.loc[9:18, 'ratio'] = temp_24.values\n# raw_df.loc[9, 'ratio'] = temp_12\n# raw_df['slope_raw'] = raw_df['ratio'].diff()/raw_df['time'].diff()\n# raw_df.loc[0, 'slope_raw'] = 0\n# raw_df['slope_dif'] = raw_df.loc[0:9]['slope_raw'].diff() \n# df2 = raw_df.set_index(['time'])\n# a = ((2.0/3.0)*df2.at[9.0, 'ratio']-df2.at[6.0, 'ratio'])/18.0\n# b = (df2.at[6.0,'ratio']-36.0*a)/6.0\n# low_12h = 4.0*df.loc['24h','value']*(1.0/36.0+2.0/9.0*df.loc['06h','value']/df.loc['24h','value'])\n# up_12h = 2.0/3.0*df.loc['24h','value']*(5.0/6.0+2.0/3.0*df.loc['06h','value']/df.loc['24h','value'])\n# if b < 0.0:\n# a=df2.at[9.0,'ratio']/81.0\n# b=0.0\n# if 18.0*a+b<0:\n# a=(-1.0*b/18.0)\n# b=df2.at[9.0,'ratio']/4.5 \n# a2 = (9.0/10.5*df2.at[10.5,'ratio']-df2.at[9.0,'ratio'])/13.5\n# b2 = (df2.at[9.0,'ratio']-81.0*a2)/9.0\n# up_2 = 2.0*df.loc['24h','value']*(0.5-(df2.at[11.5, 'ratio']+3.0*df2.at[10.5, 'ratio'])/4.0)+0.01\n# low_2 = 2.0*df.loc['24h','value']*(0.5-(3.0*df2.at[11.5, 'ratio']+df2.at[10.5, 'ratio'])/4.0)+0.01\n# if df.loc['02h', 'value']<low_2:\n# test1 = low_2\n# else:\n# test1 = df.loc['02h', 'value']\n# if df.loc['02h', 'value']> up_2:\n# test2 = up_2\n# else:\n# test2 = df.loc['02h','value']\n# if test1 > test2:\n# test3 = test1\n# else:\n# test3 = test2\n# if test2 > test3:\n# test4 = test2\n# else:\n# test4 = test3\n# if test4>up_2:\n# test_f = up_2\n# else:\n# test_f = test4\n# a3 = 2.0*(df2.at[11.5, 'ratio']-2*(0.5-0.5*test_f/df.loc['24h', 'value'])+ df2.at[10.5, 'ratio'])\n# b3 = df2.at[11.5, 'ratio']-df2.at[10.5, 'ratio']-22.0*a3\n# c3 = (0.5-0.5*test_f/df.loc['24h','value'])-121.0*a3-11.0*b3 \n# ratio_to_24h.loc[0:90, 'ratio'] = a*np.power(ratio_to_24h.loc[0:90].index/10.0, 2)+\\\n# b*ratio_to_24h.loc[0:90].index/10.0\n# ratio_to_24h.loc[91:105, 'ratio'] = a2*np.power(ratio_to_24h.loc[91:105].index/10.0, 2)+\\\n# b2*ratio_to_24h.loc[91:105].index/10.0\n# ratio_to_24h.loc[106:115, 'ratio'] = a3*np.power(ratio_to_24h.loc[106:115].index/10.0, 2)+\\\n# b3*ratio_to_24h.loc[106:115].index/10.0 + c3\n# ratio_to_24h['slope'] = ratio_to_24h['ratio'].diff()/0.1 \n# if -0.867*ratio_to_24h.loc[115, 'slope']+0.4337 < 0.399: \n# fac_116 = -0.867*ratio_to_24h.loc[115, 'slope']+0.4337\n# else:\n# fac_116 = 0.399\n# if -0.4917*ratio_to_24h.loc[115,'slope']+0.8182 < 0.799: \n# fac_117 = -0.4917*ratio_to_24h.loc[115,'slope']+0.8182\n# else:\n# fac_117 = 0.799\n# ratio_to_24h.at[116, 'ratio'] = df2.at[11.5, 'ratio']+fac_116*(df2.at[11.75,'ratio']-df2.at[11.5, 'ratio'])\n# ratio_to_24h.at[117, 'ratio'] = df2.at[11.5, 
'ratio']+fac_117*(df2.at[11.75,'ratio']-df2.at[11.5, 'ratio']) \n# ratio_to_24h.at[118, 'ratio'] = df2.at[11.75, 'ratio']+0.4*(df2.at[11.875,'ratio']-df2.at[11.75, 'ratio'])\n# ratio_to_24h.at[119, 'ratio'] = df2.at[11.875, 'ratio']+0.6*(df2.at[11.917,'ratio']-df2.at[11.875, 'ratio'])\n# ratio_to_24h.loc[121:240, 'ratio'] = 1-ratio_to_24h.loc[0:119, 'ratio'].sort_index(ascending=False).values\n# ratio_to_24h.loc[120, 'ratio'] = ratio_to_24h.at[121, 'ratio']-(df.at['05m', 'ratio']+1.0/5.0*\n# (df.at['10m','ratio']-df.at['05m','ratio']))\n# ratio_to_24h.loc[0, 'ratio'] = 0\n# ratio_to_24h['slope'] = ratio_to_24h['ratio'].diff()/0.1\n# ratio_to_24h.at[0, 'slope'] = 0\n# ratio_to_24h['t_step'] = ratio_to_24h.index*0.1\n# ratio_to_24h.index = ratio_to_24h.index*0.1\n# return ratio_to_24h\n\n\n# def get_hyeto_input_data_nrcs(temporal_precip_table_dir: str, event: int,\n# display_print: bool=True) -> pd.DataFrame:\n# '''Extracts the temporal distribution from precipitation frequency data for the specified duration from an Excel \n# sheet and returns the data as a dataframe. \n# '''\n# hyeto_precip = 'nrcs_hye_{}'.format(event)\n# df = pd.read_excel(temporal_precip_table_dir, sheet_name=hyeto_precip, index_col=0)\n# if display_print: \n# print(display(df.head(2)))\n# return df\n\n# def get_hyeto_input_data_atlas(temporal_precip_table_dir: str, quartile: str,\n# display_print: bool=True) -> tuple:\n# '''Extracts the temporal distribution from precipitation frequency data for the specified duration from an Excel \n# sheet and returns the data as a dataframe. \n# '''\n# hyeto_precip = 'atlas_hye_{}'.format(quartile)\n# df = pd.read_excel(temporal_precip_table_dir, sheet_name=hyeto_precip, index_col=0)\n# weights_df = pd.read_excel(temporal_precip_table_dir, sheet_name='atlas_hye_weights', index_col=0)\n# if display_print: \n# print(display(df.head(2)))\n# return df, weights_df\n \n# def hydro_out_to_dic(curve_df: pd.DataFrame, BCN: str) -> dict:\n# '''This function takes the dataframe and adds additional data required for the dss file and json file creation.\n# '''\n# dic = {}\n# df_dic = curve_df.to_dict()\n# dates = list(curve_df.index)\n# ordin = curve_df.index.name.title()\n# events = {}\n# for k, v in df_dic.items():\n# if 'E' in k:\n# events[k] = list(v.values())\n# key = 'H24'\n# val = {'time_idx_ordinate': ordin, \n# 'run_duration_days': str(2),\n# 'time_idx': dates, \n# 'pluvial_BC_units': 'inch/ts', \n# 'BCName': {BCN: events}} \n# dic[key] = val\n# return dic\n\n\n# def Rename_Final_Events_Precip_Stratified(curve_weight: dict, hydrology: int) -> dict:\n# '''Creates a unique event name based on the recurrence interval, infiltration condition, and temporal distribution.\n# '''\n# assert hydrology in [1, 2, 3, 4], \"Naming convention not set for hydrology\"\n# rename_map = {}\n# num = 1\n# for k in curve_weight.keys():\n# ID = 'E{0}{1}'.format(hydrology, str(num).zfill(3))\n# rename_map[k] = ID \n# num+=1\n# return rename_map \n\ndef combine_results_traditional(var: str, outputs_dir: str, BCN: str, duration: int, hydrology_IDs: list,\n run_dur_dic: dict=None, remove_ind_dur: bool = True) -> dict:\n '''Combines the excess rainfall *.csv files for each duration into a \n single dictionary for all durations. 
A small value of 0.0001 is added so the result is not printed in scientific notation.\n '''\n pd.reset_option('^display.', silent=True)\n assert var in ['Excess_Rainfall', 'Weights'], 'Cannot combine results'\n dic = {}\n df_lst = []\n for ID in hydrology_IDs:\n scen = '{0}_Dur{1}_{2}'.format(BCN, duration, ID)\n file = outputs_dir/'{}_{}.csv'.format(var, scen)\n df = pd.read_csv(file, index_col = 0)\n if var == 'Excess_Rainfall':\n df_dic = df.to_dict()\n dates = list(df.index)\n ordin = df.index.name.title()\n events = {}\n for k, v in df_dic.items():\n m = list(v.values())\n m1= [ float(i)+0.0001 if float(i)< 0.0001 and 0< float(i) else float(i) for i in m]\n events[k] = m1\n key ='{0}'.format(str(ID).zfill(2))\n val = {'time_idx_ordinate': ordin, \n 'run_duration_days': run_dur_dic[str(duration)],\n 'time_idx': dates, \n 'pluvial_BC_units': 'inch/ts', \n 'BCName': {BCN: events}} \n dic[key] = val\n elif var == 'Weights':\n df_lst.append(df)\n if remove_ind_dur:\n os.remove(file) \n if var == 'Weights':\n all_dfs = pd.concat(df_lst)\n weights_dic = all_dfs.to_dict()\n dic = {'BCName': {BCN: weights_dic['Weight']}}\n #print('Total Weight:', all_dfs['Weight'].sum())\n return dic\n\n#----------------------------------------------------------------------------------------------------------------------#\n# Functions for calculating inputs to the mean precipitation curve calculation.\n#----------------------------------------------------------------------------------------------------------------------#\n\n# def return_interval_data(raw_precip: pd.DataFrame, Return_Intervals_MC: np.ndarray, df_GEV_parameters: pd.DataFrame, \n# PMP: float) -> pd.DataFrame:\n# \"\"\"Calculates the additional precipitation values for RI not in the original NOAA data. The additional precipitation \n# value are merged with the NOAA data. 
In addition for each RI, the parameters are calculated for a log-normal \n# distribution that represents the variability (uncertainty) of precipitation based on the 90-percent confidence \n# interval retrieved from the NOAA data.\n# \"\"\"\n# Non_Exceedance_Prob = 1-1/Return_Intervals_MC \n# GEV_parameters_M = df_GEV_parameters['GEV Median'].values\n# GEV_parameters_L = df_GEV_parameters['GEV Lower (90%)'].values\n# GEV_parameters_U = df_GEV_parameters['GEV Upper (90%)'].values\n# Precip_additional_M = PPF_GEV(Non_Exceedance_Prob, GEV_parameters_M, PMP)\n# Precip_additional_L = PPF_GEV(Non_Exceedance_Prob, GEV_parameters_L, PMP)\n# Precip_additional_U = PPF_GEV(Non_Exceedance_Prob, GEV_parameters_U, PMP)\n# Precip_additional = np.vstack((Precip_additional_M, Precip_additional_L, Precip_additional_U)).T\n# df1 = pd.DataFrame(data=Precip_additional, index=Return_Intervals_MC, \n# columns=['Median', 'Lower (90%)', 'Upper (90%)']) \n# df2 = pd.concat([raw_precip, df1]).sort_index(kind='mergesort')\n# df2['Log SD (Lower)'] = (np.log(df2['Median'].values) - np.log(df2['Lower (90%)'].values))/1.645\n# df2['Log SD (Upper)'] = (np.log(df2['Upper (90%)'].values) - np.log(df2['Median'].values))/1.645\n# df2['Max Log SD'] = np.maximum(df2['Log SD (Lower)'].values, df2['Log SD (Upper)'].values)\n# median = df2['Median'].values\n# mu_LN = np.log(median)\n# SD = df2['Max Log SD'].values\n# df2['mu LN'] = [mu_truncated_LN(SD1, PMP, median1, mu1).x[0] for median1, mu1, SD1 in zip(median, mu_LN, SD)]\n# df2['Lower (68%)'] = np.exp(-df2['Max Log SD'].values)*df2['Median'].values\n# df2['Upper (68%)'] = np.exp(df2['Max Log SD'].values)*df2['Median'].values\n# return df2\n\ndef return_traditional_interval_data(raw_precip: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Calculates the 1% plus and minus precipitation values. \n values are merged with the NOAA data. 
In addition for each RI, the parameters are calculated for a log-normal \n distribution that represents the variability (uncertainty) of precipitation based on the 90-percent confidence \n interval retrieved from the NOAA data.\n \"\"\"\n CL = 0.68\n df2 = raw_precip\n df2['Log SD (Lower)'] = (np.log(df2['Median'].values) - np.log(df2['Lower (90%)'].values))/1.645\n df2['Log SD (Upper)'] = (np.log(df2['Upper (90%)'].values) - np.log(df2['Median'].values))/1.645\n df2['Max Log SD'] = np.maximum(df2['Log SD (Lower)'].values, df2['Log SD (Upper)'].values)\n# SD = df2['Max Log SD'].values\n# mu_LN = np.log(df2['Median'].values)\n \n df2['Lower (68%)'] = stats.lognorm.ppf(0.5-CL/2, df2['Max Log SD'], scale = df2['Median'])\n df2['Upper (68%)'] = stats.lognorm.ppf(0.5+CL/2, df2['Max Log SD'], scale = df2['Median'])\n# df2['Lower (68%)'] = np.exp(-df2['Max Log SD'].values)*df2['Median'].values\n# df2['Upper (68%)'] = np.exp(df2['Max Log SD'].values)*df2['Median'].values\n return df2[['Median','Lower (68%)','Upper (68%)']]\n\n# def mu_truncated_LN(sigma: float, PMP: float, median: float, Initial_Value: float) -> float:\n# \"\"\"Find the mu parameter when the median of the truncated (at the PMP) lognormal is equal to the true median value.\n# \"\"\"\n# def objective_mu_LN(mu1: float, sigma: float, PMP: float, median: float) -> float:\n# \"\"\"\n# \"\"\"\n# return np.square(median - np.exp(mu1-np.sqrt(2)*sigma*special.erfcinv(1/2*special.erfc((mu1-np.log(PMP))/\n# (np.sqrt(2)*sigma)))))\n# return minimize(objective_mu_LN, Initial_Value, args = (sigma, PMP, median), method='SLSQP', \n# bounds=[(0.0, Initial_Value*2)], options={'disp': False})\n\n\n# def mean_curve_input_table(CL: np.ndarray, return_interval_data: pd.DataFrame, PMP: float, \n# outputs_dir: pl.WindowsPath) -> pd.DataFrame:\n# \"\"\"This function takes the return interval data and creates an input table of values for calculating the mean \n# curve. 
The function returns a table of precipitation values for the different AEP and confidence limits (CL) \n# based on a lognormal distribution that represents the variability of precipitation based on the 90-percent \n# confidence interval limits provided by NOAA Atlas 14.\n# \"\"\"\n# mu_LN = return_interval_data['mu LN'].values\n# SD = return_interval_data['Max Log SD'].values\n# data = [stats.lognorm.ppf(CL/Norm_Constant_LN(SD1, mu1, PMP), SD1, scale = np.exp(mu1)) for mu1, SD1 in \n# zip(mu_LN, SD)]\n# df_input = pd.DataFrame(data=data, columns = CL, \n# index = 1/return_interval_data.index.values).sort_index(axis=0, ascending=True)\n# df_input.index.name = 'AEP'\n# df_input = df_input.drop([1])\n# df_input.to_csv(outputs_dir)\n# return df_input\n\n#----------------------------------------------------------------------------------------------------------------------#\n# Plotting Functions\n#----------------------------------------------------------------------------------------------------------------------#\n\n# def plot_GEV_precip_curves(precip_data: pd.DataFrame, df_GEV_parameters: pd.DataFrame, PMP: float, \n# Label1: str='') -> None:\n# \"\"\"This functions plots the GEV distributions and also associated GEV return frequency curves on top of the \n# precpitation curve data taken either from NOAA Atlas 14 or from the mean precipitation curve output.\n# \"\"\"\n# color = ['r', 'k', 'k']\n# _, ax = plt.subplots(1, 2, figsize=(10,4))\n# for i, (_, columndata) in enumerate(df_GEV_parameters.iteritems()):\n# Precip = np.linspace(PPF_GEV(1e-100, columndata.values, PMP), PPF_GEV(0.9999999, columndata.values, PMP), 1000)\n# Return_Period = 1.0/(1-CDF_GEV(Precip, columndata.values, PMP))\n# ax[0].plot(Precip, PDF_GEV(Precip, columndata.values, PMP), color[i] , lw=2, alpha=0.6)\n# ax[1].plot(Return_Period, Precip, color[i], lw=2.5, alpha=0.6)\n# for _, columndata in precip_data.iteritems():\n# columndata.plot(style=['+-', 'o-', '.--'], logx=True)\n# ax[0].set_xlabel(f'{Label1} [inches]')\n# ax[0].set_ylabel('GEV PDF $p_R(R)$')\n# ax[0].set_title(f'24-hour {Label1}')\n# ax[1].set_xscale('log')\n# ax[1].set_xlabel('Return Period [years]')\n# ax[1].set_ylabel(f'{Label1} [inches]')\n# ax[1].set_title(f'24-hour {Label1}') \n# return None\n\n \n# def plot_runoff_maxRetention_distributions(GEV_parameters_E: np.ndarray, PMP: float, fitted_cn: pd.DataFrame) -> None:\n# \"\"\"Plots the distribution of runoff conditional on the max potential retention and plots the distribution of the max \n# potential retention.\n# \"\"\"\n# custom_cycler = cycler('color', ['0.1', '0.25', '0.4', '0.55']) + cycler('lw', [1, 1, 1, 1])\n# S_limit = 1000.0/fitted_cn.iloc[0]['CN Lower Limit']-10.0\n# alpha = fitted_cn.iloc[0]['alpha']\n# beta = fitted_cn.iloc[0]['beta']\n# mu = fitted_cn.iloc[0]['mu']\n# Q = np.linspace(PPF_GEV(1e-100, GEV_parameters_E, PMP), PPF_GEV(0.99, GEV_parameters_E, PMP), 100)\n# S = np.linspace(0.0, S_limit, 100)\n# _, ax = plt.subplots(1, 2, figsize=(10,4))\n# ax[0].set_prop_cycle(custom_cycler)\n# SA = np.linspace(0.1, 3.5, 5)\n# ax[0].plot(Q, np.transpose([PDF_QlS(Q, S1, mu, GEV_parameters_E, PMP) for S1 in SA]))\n# ax[0].grid(linestyle='--')\n# ax[0].set_ylim((0, 1.1))\n# ax[0].set_xlabel('Runoff, Q [inches]')\n# ax[0].set_ylabel('$p_Q(Q | S)$') \n# ax[0].set_title('Conditional Runoff Distribution')\n# ax[1].set_prop_cycle(custom_cycler)\n# ax[1].plot(S, (1.0/S_limit)*stats.beta(alpha, beta).pdf(S/S_limit))\n# ax[1].grid(linestyle='--')\n# ax[1].set_xlabel('Max Potential Retention, S 
[inches]')\n# ax[1].set_ylabel('$p_S(S)$') \n# ax[1].set_title('Max Potential Retention Distribution')\n# plt.tight_layout()\n# return None\n \n# def plot_runoff_distributions_final(GEV_parameters_Rain: np.ndarray, GEV_parameters_Runoff: np.ndarray, PMP: float, \n# fitted_cn: pd.DataFrame, partition_avg: np.ndarray, Delta_P: float, \n# error_PQ: float) -> None:\n# \"\"\"Plots the runoff distribution and the runoff return frequency curve in comparison to the original rainfall return \n# frequency curve.\n# \"\"\"\n# mu = fitted_cn.iloc[0]['mu']\n# Q1 = np.linspace(0.01, 6, 1000)\n# Return_Period = np.geomspace(1.1, 10**7, 100000)\n# Precip = GEV_RI(Return_Period, GEV_parameters_Rain, PMP)\n# Runoff = GEV_RI(Return_Period, GEV_parameters_Runoff, PMP)\n# _, ax = plt.subplots(1, 2, figsize=(10,4))\n# ax[0].grid(linestyle='--')\n# ax[0].set_xlabel('Runoff, Q [inches]')\n# ax[0].set_ylabel('$p_Q(Q)$')\n# ax[0].set_title('Runoff Distribution')\n# ax[0].plot(Q1, PDF_Q(Q1, mu, GEV_parameters_Rain, PMP, partition_avg, Delta_P, error_PQ), lw = 1, color = '0.1')\n# ax[1].set_xscale('log')\n# ax[1].grid(linestyle='--')\n# ax[1].set_ylim((0, PMP))\n# ax[1].set_xlabel('Return Period [years]')\n# ax[1].set_ylabel('Depth [inches]')\n# ax[1].set_title('24-hour Event') \n# ax[1].plot(Return_Period, Runoff, 'b', lw=2, alpha=0.45, label = 'Runoff')\n# ax[1].plot(Return_Period, Precip, 'r', lw=2, alpha=0.6, label='Rainfall')\n# ax[1].legend()\n# plt.tight_layout()\n# return None\n \n# def plot_max_potential_retention_cond_runoff(GEV_parameters_E: np.ndarray, PMP: float, fitted_cn: pd.DataFrame, \n# partition_avg: np.ndarray, Delta_P: float, error_PQ: float) -> None:\n# \"\"\"Plots the distribution of the max potential retention conditional on different runoff values.\n# \"\"\"\n# custom_cycler = cycler('color', ['0.1', '0.25', '0.4', '0.55']) + cycler('lw', [1, 1, 1, 1])\n# S_limit = 1000.0/fitted_cn.iloc[0]['CN Lower Limit']-10\n# alpha = fitted_cn.iloc[0]['alpha']\n# beta = fitted_cn.iloc[0]['beta']\n# mu = fitted_cn.iloc[0]['mu']\n# S1 = np.linspace(0.01, S_limit, 1000)\n# QA = np.linspace(0.5, PMP, 50)\n# PDF_S = np.transpose([PDF_SlQ(S1, Q1, mu, GEV_parameters_E, PMP, partition_avg, Delta_P, alpha, beta, S_limit, \n# error_PQ) for Q1 in QA])\n# _, ax = plt.subplots(1, 1, figsize=(6,4))\n# ax.set_prop_cycle(custom_cycler)\n# ax.plot(S1, PDF_S)\n# ax.grid(linestyle = '--')\n# ax.set_ylim((0, np.max(PDF_S)))\n# ax.set_xlabel('Max Potential Retention, S [inches]')\n# ax.set_ylabel('$p_S(S | Q)$')\n# ax.set_title('Conditional Max Potential Retention Distribution')\n# plt.tight_layout()\n# return None\n\n\n\ndef precip_to_runoff_nrcs(hydro_events:np.ndarray,nrcs_precip_table_dir: pl.WindowsPath,\n precip_data: pd.DataFrame, CN: int, display_print = False):\n \"\"\"Takes the events, precipitation data, nrcs temporal distribution, CN and applies the CN reduction method to\n obtain a runoff curve for each recurrence interval\n \"\"\"\n #runoff_distros1 = {}\n prep_curves = pd.DataFrame(columns = hydro_events.astype(float))\n for event in hydro_events:\n dist_df = get_hyeto_input_data_nrcs(nrcs_precip_table_dir, event, display_print)\n dist_df['precip'] = dist_df['ratio']*precip_data['Median'].loc[event]\n s = S_24hr(CN)\n ia = IA_24hr(s)\n #runoff_distros1[event] = excess_precip(dist_df,ia, s)\n dist_df = excess_precip(dist_df,ia, s)\n prep_curves[event] = dist_df['hyeto_input']\n t_step = dist_df.index[1]\n return prep_curves, t_step\n\ndef precip_distributed_nrcs(hydro_events:np.ndarray,nrcs_precip_table_dir: 
pl.WindowsPath,\n precip_data: pd.DataFrame, display_print = False):\n \"\"\"Takes the events, precipitation data, nrcs temporal distribution, CN and applies the CN reduction method to\n obtain a runoff curve for each recurrence interval\n \"\"\"\n #runoff_distros1 = {}\n prep_curves = pd.DataFrame(columns = hydro_events.astype(float))\n for event in hydro_events:\n dist_df = get_hyeto_input_data_nrcs(nrcs_precip_table_dir, event, display_print)\n dist_df['precip'] = dist_df['ratio']*precip_data['Median'].loc[event]\n dist_df['hyeto_input'] = dist_df['precip'].diff()\n dist_df['hyeto_input'] = dist_df['hyeto_input'].fillna(0.0)\n prep_curves[event] = dist_df['hyeto_input']\n t_step = dist_df.index[1]\n return prep_curves, t_step\n\ndef precip_to_runoff_atlas(hydro_events:np.ndarray,atlas14_precip_table_dir: pl.WindowsPath,\n precip_data: pd.DataFrame, CN: int, quartile:int, display_print = False):\n \"\"\"Takes the events, precipitation data, nrcs temporal distribution, CN and applies the CN reduction method to\n obtain a runoff curve for each recurrence interval\n \"\"\"\n #runoff_distros1 = {}\n Atlas14_hyetographs = {1:'q1',2: 'q2',3: 'q3',4: 'q4'}\n prep_curves = pd.DataFrame(columns = hydro_events)\n for event in hydro_events:\n dist_df, weight_df = get_hyeto_input_data_atlas(atlas14_precip_table_dir, Atlas14_hyetographs[quartile], display_print)\n dist_df['precip'] = dist_df[Atlas14_hyetographs[quartile]]*precip_data['Median'].loc[event]\n s = S_24hr(CN)\n ia = IA_24hr(s)\n dist_df = excess_precip(dist_df,ia, s)\n prep_curves[event] = dist_df['hyeto_input']\n t_step = dist_df.index[1]\n return prep_curves, t_step\n\n\ndef precip_distributed_atlas(hydro_events:np.ndarray,atlas14_precip_table_dir: pl.WindowsPath,\n precip_data: pd.DataFrame, quartile:int, display_print = False):\n \"\"\"Takes the events, precipitation data, nrcs temporal distribution, CN and applies the CN reduction method to\n obtain a runoff curve for each recurrence interval\n \"\"\"\n #runoff_distros1 = {}\n Atlas14_hyetographs = {1:'q1',2: 'q2',3: 'q3',4: 'q4'}\n prep_curves = pd.DataFrame(columns = hydro_events)\n for event in hydro_events:\n dist_df, weight_df = get_hyeto_input_data_atlas(atlas14_precip_table_dir, Atlas14_hyetographs[quartile], display_print)\n dist_df['precip'] = dist_df[Atlas14_hyetographs[quartile]]*precip_data['Median'].loc[event]\n dist_df['hyeto_input'] = dist_df['precip'].diff()\n dist_df['hyeto_input'] = dist_df['hyeto_input'].fillna(0.0)\n prep_curves[event] = dist_df['hyeto_input']\n t_step = dist_df.index[1]\n return prep_curves, t_step\n\ndef extend_time(prep_curves: pd.DataFrame,time_extend: float,time_step: float) -> pd.DataFrame:\n \"\"\"extends the hyetograph by a select period of time. the timestep is the spacing between\n simulation intervals (typically 0.1 or 0.5 hours)\n \"\"\"\n extend_curves = prep_curves.loc[0.0:time_extend]*0\n extend_curves.index = extend_curves.index+(24+time_step)\n return prep_curves.append(extend_curves).rename_axis('hours')\n\ndef precip(dist_df: pd.DataFrame,ia: float, s: float) -> pd.DataFrame:\n '''Calculates runoff using the curve number approach for a dataframe. 
See equation 10-9\n of NEH 630, Chapter 10\n (https://www.wcc.nrcs.usda.gov/ftpref/wntsc/H&H/NEHhydrology/ch10.pdf) \n '''\n dist_df['excess_precip'] = np.where(dist_df['precip']<= ia, 0, (np.square(dist_df['precip']-ia))/(dist_df['precip']-ia+s))\n dist_df['hyeto_input'] = dist_df['excess_precip'].diff()\n dist_df['hyeto_input'] = dist_df['hyeto_input'].fillna(0.0)\n return dist_df\n\ndef weights_traditional(hydro_events_dict:dict) -> dict:\n #Code for making list of years into a list of weights\n recurrence_years = list(hydro_events_dict.values())\n weights=[]\n adj_weights = {}\n uni = sorted(list(set(recurrence_years)))\n for i, year in reversed(list(enumerate(uni))):\n w=round(1/year-sum(weights),9) \n weights.append(w)\n weights.reverse()\n for name, rec in hydro_events_dict.items():\n ind = uni.index(rec)\n count = recurrence_years.count(rec)\n adj_weights[name] = weights[ind]*(1/count)\n return adj_weights\n\ndef weights_noaa(Reccurence_Intervals):\n #Code for making list of years into a list of weights\n weights=[]\n adj_weights = {}\n uni = sorted(list(set(Reccurence_Intervals)))\n for i, year in reversed(list(enumerate(uni))):\n w=round(1/year-sum(weights),9) \n weights.append(w)\n weights.reverse()\n return weights\n\ndef events_initialize(events_lib:str):\n \n if events_lib == 'FEMA':\n fema_intervals = ['10', '25', '50', '100', '500','100_minus','100_plus'] # Return intervals for FEMA study\n #recurrence_intervals = np.array([2, 5, 10, 25, 50, 100, 200, 500, 1000]) # Return intervals for calculating runoff values.\n hydro_events_dict = {'10':10, '25':25, '50': 50, '100':100, '500':500,'100_minus': 100, '100_plus': 100}\n return fema_intervals, hydro_events_dict\n if events_lib == 'NOAA':\n recurrence_intervals = np.array([2, 5, 10, 25, 50, 100, 200, 500, 1000]) # Return intervals for calculating runoff values.\n return recurrence_intervals\n\ndef nhd_download(select_data:str,vector_dir:pl.Path) -> str:\n '''\n creates link to nhd zip file on USGS The National Map. Checks link before attempting \n to download. 
Prints updates and returns a list of the files downloaded.\n '''\n ##update in future to get S3 bucket information and only include available files\n s3_url = 'https://prd-tnm.s3.amazonaws.com'\n product_name = 'NHDPLUS_H_'+select_data[:4]+'_HU4_GDB.zip'\n link = s3_url+'/StagedProducts/Hydrography/NHDPlusHR/Beta/GDB/'+product_name\n gdb = product_name[:-4]+'.gdb'\n gdb_dir = vector_dir/gdb\n if vector_dir.exists() is False:\n os.mkdir(vector_dir)\n else:\n if gdb_dir.exists() is True:\n print('gdb is already downloaded')\n return gdb_dir\n print('opening: '+link)\n alive = url_is_alive(link)\n if alive is True:\n os.mkdir(gdb_dir)\n r = requests.get(link, allow_redirects=True)\n memfile = io.BytesIO(r.content)\n with ZipFile(memfile,'r') as openzip:\n print('saving to: '+str(gdb_dir))\n openzip.extractall(path=vector_dir)\n return gdb_dir\n else:\n print('download attempt failed')\n return 'attempt failed'\n \ndef url_is_alive(url:str)-> bool:\n '''\n Checks that a given URL is reachable.\n :param url: A URL\n :rtype: bool\n '''\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n\n try:\n urllib.request.urlopen(request)\n return True\n except urllib.request.HTTPError:\n return False\n\ndef get_noaa_precip_values(vol_code,durations, verbose = True):\n noaa_url = 'https://hdsc.nws.noaa.gov/pub/hdsc/data/{}/'.format(vol_code)\n\n req = urllib.request.Request(noaa_url)\n data = urllib.request.urlopen(req).read().decode().split()\n\n copy_zips = []\n for duration in durations:\n zips = [d for d in data if ('.zip' in d) and ('{}'.format(duration) in d) and ('ams' not in d)]\n copy_zips.append(zips)\n if verbose: \n print('{} files found for {}'.format(len(zips), duration))\n\n all_zips_list = list(np.array(copy_zips).flat)\n\n for i, zip_name in enumerate(all_zips_list):\n all_zips_list[i]= zip_name.split(\"\\\"\", 1)[1].split(\"\\\"\", 1)[0]\n return all_zips_list, noaa_url"
] |
[
[
"pandas.reset_option",
"numpy.square",
"pandas.concat",
"pandas.read_csv",
"numpy.maximum",
"numpy.log",
"scipy.stats.lognorm.ppf",
"pandas.DataFrame",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
fbcotter/dtcwt_gainlayer
|
[
"32ec3e21066edc2a0d5edefaf70f43d031d1b4ac"
] |
[
"experiments/networks/largekernel_nets.py"
] |
[
"\"\"\" This module builds different networks with nonlinearities\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as func\nfrom dtcwt_gainlayer import WaveConvLayer\nfrom dtcwt_gainlayer.layers.dwt import WaveConvLayer as WaveConvLayer_dwt\nfrom collections import OrderedDict\n\n\nnets = {\n 'ref': ['conv', 'pool', 'conv', 'pool', 'conv'],\n 'gainA': ['gain', 'pool', 'conv', 'pool', 'conv'],\n 'gainB': ['conv', 'pool', 'gain', 'pool', 'conv'],\n 'gainC': ['conv', 'pool', 'conv', 'pool', 'gain'],\n 'gainD': ['gain', 'pool', 'gain', 'pool', 'conv'],\n 'gainE': ['conv', 'pool', 'gain', 'pool', 'gain'],\n 'gainF': ['gain', 'pool', 'gain', 'pool', 'gain'],\n}\n\n\nclass GainlayerNet(nn.Module):\n \"\"\" Builds a VGG-like network with gain layers\n\n Args:\n dataset (str): cifar10, cifar100, tiny_imagenet. Needed to know\n how to shape the network backend.\n type (str): key into the nets dictionary defining the layer order\n num_channels (int): number of output channels for the first scale. This\n value doubles after pooling.\n wd (float): l2 weight decay for pixel and lowpass gains.\n wd1 (float): l1 weight decay for complex bandpass gains\n pixel_k (int): pixel convolution kernel size\n lp_k (int): lowpass convolution kernel size\n bp_ks (tuple(int)): bandpass convolution kernel sizes. Length of this\n tuple defines how many wavelet scales to take. If you want to skip\n the first scale, you can set bp_ks=(0,1)\n\n Note:\n The wd and wd1 parameters prime the network for when you\n call :meth:`NonlinearNet.get_reg` to get the regularization\n term.\n\n \"\"\"\n def __init__(self, dataset, type, num_channels=64, wd=1e-4, wd1=None,\n pixel_k=5, lp_k=3, bp_ks=(1,), use_dwt=False):\n super().__init__()\n\n if dataset == 'cifar10':\n self.num_classes = 10\n elif dataset == 'cifar100':\n self.num_classes = 100\n elif dataset == 'tiny_imagenet':\n self.num_classes = 200\n self.wd = wd\n self.wd1 = wd1\n self._wave_params = []\n self._default_params = []\n layers = nets[type]\n\n if use_dwt:\n WaveLayer = lambda Cin, Cout: WaveConvLayer_dwt(\n Cin, Cout, lp_k, bp_ks)\n else:\n WaveLayer = lambda Cin, Cout: WaveConvLayer(\n Cin, Cout, lp_k, bp_ks, wd=wd, wd1=wd1)\n\n blks = []\n layer, pool = 0, 1\n Cin, Cout = 3, num_channels\n for blk in layers:\n if blk == 'conv':\n name = 'conv' + chr(ord('A') + layer)\n blk = nn.Sequential(\n nn.Conv2d(Cin, Cout, pixel_k, padding=2, stride=1),\n nn.BatchNorm2d(Cout), nn.ReLU())\n self._default_params.extend(list(blk.parameters()))\n Cin = Cout\n layer += 1\n elif blk == 'gain':\n name = 'gain' + chr(ord('A') + layer)\n blk = nn.Sequential(\n WaveLayer(Cin, Cout),\n nn.BatchNorm2d(Cout),\n nn.ReLU())\n self._wave_params.extend(list(blk.parameters()))\n Cin = Cout\n layer += 1\n elif blk == 'pool':\n name = 'pool' + str(pool)\n blk = nn.MaxPool2d(2)\n pool += 1\n Cout = 2*Cin\n blks.append((name, blk))\n\n # Build the backend of the network\n if dataset == 'cifar10' or dataset == 'cifar100':\n self.net = nn.Sequential(OrderedDict(blks))\n self.avg = nn.AvgPool2d(8)\n self.fc1 = nn.Linear(Cout, self.num_classes)\n elif dataset == 'tiny_imagenet':\n blk1 = nn.MaxPool2d(2)\n blk2 = nn.Sequential(\n nn.Conv2d(Cout, 2*Cout, pixel_k, padding=2, stride=1),\n nn.BatchNorm2d(2*Cout),\n nn.ReLU())\n blks = blks + [\n ('pool3', blk1),\n ('conv_final', blk2),]\n self.net = nn.Sequential(OrderedDict(blks))\n self.avg = nn.AvgPool2d(8)\n self.fc1 = nn.Linear(2*Cout, self.num_classes)\n self._default_params.extend(list(blk2.parameters()))\n 
self._default_params.extend(list(self.fc1.parameters()))\n\n def parameters(self):\n \"\"\" Return all parameters that do not belong to any wavelet based\n learning \"\"\"\n return self._default_params\n\n def wave_parameters(self):\n \"\"\" Return all parameters that belong to wavelet based learning \"\"\"\n return self._wave_params\n\n def get_reg(self):\n \"\"\" Applies custom regularization.\n\n The default in pytorch is to apply l2 to everything with the same weight\n decay. We allow for more customizability.\n \"\"\"\n loss = 0\n for name, m in self.net.named_children():\n if name.startswith('wave'):\n loss += m[0].GainLayer.get_reg()\n elif name.startswith('conv'):\n loss += 0.5 * self.wd * torch.sum(m[0].weight**2)\n loss += 0.5 * self.wd * torch.sum(self.fc1.weight**2)\n return loss\n\n def clip_grads(self, value=1):\n \"\"\" Clips gradients to be in the range [-value, value].\n\n Can be useful to do if you are getting nans in training. Also sets nans\n to be 0.\n \"\"\"\n grads = []\n for name, m in self.net.named_children():\n if name.startswith('wave'):\n grads.extend([g for g in m[0].GainLayer.g])\n # Set nans in grads to 0\n for g in filter(lambda g: g.grad is not None, grads):\n g.grad.data[g.grad.data != g.grad.data] = 0\n torch.nn.utils.clip_grad_value_(grads, value)\n\n def forward(self, x):\n \"\"\" Define the default forward pass\"\"\"\n out = self.net(x)\n out = self.avg(out)\n out = out.view(out.size(0), -1)\n out = self.fc1(out)\n\n return func.log_softmax(out, dim=-1)\n"
] |
[
[
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.utils.clip_grad_value_",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
siddharth-143/Python
|
[
"293f4643a3a13e3b82d23fd8922db54dbb0f12bc",
"293f4643a3a13e3b82d23fd8922db54dbb0f12bc"
] |
[
"python-questions-for-pratices/Question-60.py",
"OpenCV2/Feature_Detection_and_Description/Corner_Detection_with_Shi-Tomasi_coner_method.py"
] |
[
"\"\"\"\nQuestion 60 :\n Write a program to read an ASCII string and convert it ta\n a unicode string.\n\n Hints : Use unicode() function to convert.\n\"\"\"\n\n# Solution :\nfrom numpy.core import unicode\n\ns = input(\"Enter a string : \")\nu = unicode(\"utf-8\", s)\nprint(u)\n\n\"\"\"\nOutput :\n code doesn't work\n If you know the solution. \n let me know....\n\"\"\"",
"\"\"\"\n Corner Detection with Shi-Tomasi Coner Detection method\n\"\"\"\n\n# Python progran to illustrate\n# corner detection with\n# Shi-Tomasi detection method\n\n# organizing imports\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# path to input image specified and\n# image is loaded with imread command\nimg = cv2.imread(\"../images/1.jpeg\")\n\n# convert image to grayscale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# Shi-Tomasi corner detection function\n# we are detecting only 100 best corner here\n# you can change the number to get desired result\ncorner = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)\n\n# convert corner value to integer\n# so that we will be able to draw circles on them\ncorner = np.int0(corner)\n\n# draw red color circles on all corners\nfor i in corner:\n x, y = i.ravel()\n cv2.circle(img, (x, y), 3, (255, 0, 0), -1)\n\n# resulting image\nplt.imshow(img)\nplt.show()\n\n# de-allocate any associated memory usage\nif cv2.waitKey(0) & 0xff == 27:\n cv2.destroyAllWindows()"
] |
[
[
"numpy.core.unicode"
],
[
"numpy.int0",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
congcy/TVINet
|
[
"c4b3a6d1c7a0d65ded62a53d6236493207993dbd"
] |
[
"code/mdlnet.py"
] |
[
"'''\n Jackie Yuan\n 2018.06\n'''\n\nfrom __future__ import print_function\nfrom keras.callbacks import ModelCheckpoint\nimport sgydata\nimport myunet\nimport numpy as np\n\n# input image dimensions\nimg_rows, img_cols = 1600, 200\nimg_rows2, img_cols2 = 200, 200\nnchannels=3\n\n# input data\n#shot1\nnumsgy1,x_train1,y_train=sgydata.load_sgylist(sgylist='../config/train_marmo_pvel_syn_nt2000_ns_01_bk.txt', \n floc='../config/train_marmo_label.txt',shuffle='false')\n#shot2\nnumsgy2,x_train2,y_train=sgydata.load_sgylist(sgylist='../config/train_marmo_pvel_syn_nt2000_ns_03_bk.txt', \n floc='../config/train_marmo_label.txt',shuffle='false')\n\n#shot3\nnumsgy3,x_train3,y_train=sgydata.load_sgylist(sgylist='../config/train_marmo_pvel_syn_nt2000_ns_05_bk.txt', \n floc='../config/train_marmo_label.txt',shuffle='false')\n\nnums1,x_test1,y_test=sgydata.load_sgylist(sgylist='../config/test_marmo_pvel_syn_nt2000_ns_01_bk.txt', \n floc='../config/test_marmo_label.txt')\n\nnums2,x_test2,y_test=sgydata.load_sgylist(sgylist='../config/test_marmo_pvel_syn_nt2000_ns_03_bk.txt', \n floc='../config/test_marmo_label.txt')\n\nnums3,x_test3,y_test=sgydata_ycc.load_sgylist(sgylist='../config/test_marmo_pvel_syn_nt2000_ns_05_bk.txt', \n floc='../config/test_marmo_label.txt')\n\n# reshape training data\nx_train1 = x_train1.reshape(x_train1.shape[0], img_cols, img_rows, 1)\nx_train2 = x_train2.reshape(x_train2.shape[0], img_cols, img_rows, 1)\nx_train3 = x_train3.reshape(x_train3.shape[0], img_cols, img_rows, 1)\n\ny_train = y_train.reshape(y_train.shape[0], img_cols2, img_rows2, 1)\n\n# reshape test data\nx_test1 = x_test1.reshape(x_test1.shape[0], img_cols, img_rows, 1)\nx_test2 = x_test2.reshape(x_test2.shape[0], img_cols, img_rows, 1)\nx_test3 = x_test3.reshape(x_test3.shape[0], img_cols, img_rows, 1)\n\ny_test = y_test.reshape(y_test.shape[0], img_cols2, img_rows2, 1)\n\n# combine data\nx_train = np.concatenate((x_train1,x_train2,x_train3),axis=3)\nx_test = np.concatenate((x_test1,x_test2,x_test3),axis=3)\n\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\n\n# design, train and test model\nmodel = myunet.get_unet(img_cols, img_rows, nchannels)\nprint(\"got unet\")\n\nmodel_checkpoint = ModelCheckpoint('../results/unet.hdf5', monitor='loss',verbose=1, save_best_only=True)\nprint('Fitting model...')\nhistory_callback=model.fit(x_train, y_train, batch_size=2, nb_epoch=100, verbose=1,\n validation_split=0.1, shuffle=True, callbacks=[model_checkpoint])\n\n\n# predict and output test data and image\nstr1='../results'\nstr3='/imgs_mask_test.npy'\nstr4=\"/loss_history_marmo_cnns.txt\"\nstr5=\"/val_loss_history_marmo_cnns.txt\"\nstr6=\"/mdlnet_marmo_cnns.h5\";\nprint('predict test data')\nimgs_mask_test = model.predict(x_test, batch_size=1, verbose=1)\nnp.save(str1+str3, imgs_mask_test)\n\n# save model and img\nprint(\"array to image\")\nimgs = np.load(str1+str3)\nfor i in range(imgs.shape[0]):\n img = imgs[i]\n np.savetxt(\"../results/%d_gaussian_marmo_cnns.txt\"%(i),img[...,0])\n \nloss_history = history_callback.history[\"loss\"]\nval_loss_history = history_callback.history[\"val_loss\"]\nnumpy_loss_history = np.array(loss_history)\nnumpy_val_loss_history = np.array(val_loss_history)\nnp.savetxt(str1+str4, numpy_loss_history, delimiter=\",\")\nnp.savetxt(str1+str5, numpy_val_loss_history, delimiter=\",\")\n\nprint(\"save model\")\nmodel.save(str1+str6) \n\n"
] |
[
[
"numpy.save",
"numpy.concatenate",
"numpy.savetxt",
"numpy.load",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Werberty/Projeto-CoppeliaSim
|
[
"2f60565f2e5e91d2d86e79576eb0f80a1106a18c",
"2f60565f2e5e91d2d86e79576eb0f80a1106a18c"
] |
[
"Campos_potenciais/teste_camposPotenc.py",
"Testes/exemplo2.py"
] |
[
"try:\n import sim\nexcept:\n print('--------------------------------------------------------------')\n print('\"sim.py\" could not be imported. This means very probably that')\n print('either \"sim.py\" or the remoteApi library could not be found.')\n print('Make sure both are in the same folder as this file,')\n print('or appropriately adjust the file \"sim.py\"')\n print('--------------------------------------------------------------')\n print('')\n\nimport time\n\nimport numpy as np\n\nprint('Program started')\nsim.simxFinish(-1) # just in case, close all opened connections\nclientID = sim.simxStart('127.0.0.1', 19999, True, True,\n 5000, 5) # Connect to CoppeliaSim\n\n\ndef readSensorData(clientId=-1,\n range_data_signal_id=\"hokuyo_range_data\",\n angle_data_signal_id=\"hokuyo_angle_data\"):\n\n # the first call should be non-blocking to avoid getting out-of-sync angle data\n returnCodeRanges, string_range_data = sim.simxGetStringSignal(\n clientId, range_data_signal_id, sim.simx_opmode_streaming)\n\n # the second call should block to avoid out-of-sync scenarios\n # between your python script and the simulator's main loop\n # (your script may be slower than the simulator's main loop, thus\n # slowing down data processing)\n returnCodeAngles, string_angle_data = sim.simxGetStringSignal(\n clientId, angle_data_signal_id, sim.simx_opmode_blocking)\n\n # check the if both data were obtained correctly\n if returnCodeRanges == 0 and returnCodeAngles == 0:\n # unpack data from range and sensor messages\n raw_range_data = sim.simxUnpackFloats(string_range_data)\n raw_angle_data = sim.simxUnpackFloats(string_angle_data)\n\n return raw_range_data, raw_angle_data\n\n # return none in case were nothing was gotten from the simulator\n return None\n\n\ndef vetor_de_atracao():\n pass \n\n\nif clientID != -1:\n print('Connected to remote API server')\n\n robotname = 'Pioneer_p3dx'\n returnCode, robotHandle = sim.simxGetObjectHandle(\n clientID, robotname, sim.simx_opmode_oneshot_wait)\n\n returnCode, l_wheel = sim.simxGetObjectHandle(\n clientID, robotname + '_leftMotor', sim.simx_opmode_oneshot_wait)\n returnCode, r_wheel = sim.simxGetObjectHandle(\n clientID, robotname + '_rightMotor', sim.simx_opmode_oneshot_wait)\n\n # Goal configuration (x, y, theta)\n qgoal = np.array([3, -3, np.deg2rad(90)])\n # qgoal = np.array([-2, -4, np.deg2rad(180)])\n\n # # Frame que representa o Goal\n # returnCode, goalFrame = sim.simxGetObjectHandle(\n # clientID, 'Goal', sim.simx_opmode_oneshot_wait)\n # returnCode = sim.simxSetObjectPosition(\n # clientID, goalFrame, -1, [qgoal[0], qgoal[1], 0], sim.simx_opmode_oneshot_wait)\n # returnCode = sim.simxSetObjectOrientation(\n # clientID, goalFrame, -1, [0, 0, qgoal[2]], sim.simx_opmode_oneshot_wait)\n\n # Handle para os dados do LASER\n laser_range_data = \"hokuyo_range_data\"\n laser_angle_data = \"hokuyo_angle_data\"\n\n # Geralmente a primeira leitura é inválida (atenção ao Operation Mode)\n # Em loop até garantir que as leituras serão válidas\n returnCode = 1\n while returnCode != 0:\n returnCode, range_data = sim.simxGetStringSignal(\n clientID, laser_range_data, sim.simx_opmode_streaming + 10)\n\n # Específico do robô\n L = 0.331\n r = 0.09751\n maxv = 1.0\n maxw = np.deg2rad(45)\n\n t = 0\n # Lembrar de habilitar o 'Real-time mode'\n # startTime = time.time()\n # lastTime = startTime\n rho = np.inf\n while rho > 0.1:\n\n # now = time.time()\n # dt = now - lastTime\n\n returnCode, robotPos = sim.simxGetObjectPosition(\n clientID, robotHandle, -1, 
sim.simx_opmode_oneshot_wait)\n returnCode, robotOri = sim.simxGetObjectOrientation(\n clientID, robotHandle, -1, sim.simx_opmode_oneshot_wait)\n robotConfig = np.array([robotPos[0], robotPos[1], robotOri[2]])\n\n dx, dy = qgoal[:2] - robotConfig[:2]\n\n # Apenas para interromper o loop\n rho = np.sqrt(dx**2 + dy**2)\n\n # Fazendo leitura dos sensores\n raw_range_data, raw_angle_data = readSensorData(\n clientID, laser_range_data, laser_angle_data)\n laser_data = np.array([raw_angle_data, raw_range_data]).T\n\n # dx, dy = [np.cos(laser_data[0, 0]), np.sin(laser_data[0, 1])]\n\n # dx, dy = 0, 0\n # for laser in laser_data:\n # x = laser[1] * np.cos(laser[0])\n # y = laser[1] * np.sin(laser[0])\n # dx += x\n # dy += y\n # # print(dx, dy)\n \n kr = 1 \n kt = 2 \n\n v = kr*(dx*np.cos(robotConfig[2]) + dy*np.sin(robotConfig[2]))\n w = kt*(np.arctan2(dy, dx) - robotConfig[2])\n\n # Limit v,w to +/- max\n v = max(min(v, maxv), -maxv)\n w = max(min(w, maxw), -maxw)\n\n # Cinemática Inversa\n wr = ((2.0*v) + (w*L))/(2.0*r)\n wl = ((2.0*v) - (w*L))/(2.0*r)\n\n # Enviando velocidades\n sim.simxSetJointTargetVelocity(\n clientID, l_wheel, wl, sim.simx_opmode_oneshot_wait)\n sim.simxSetJointTargetVelocity(\n clientID, r_wheel, wr, sim.simx_opmode_oneshot_wait)\n\n # t = t + dt\n # lastTime = now\n\n sim.simxSetJointTargetVelocity(\n clientID, r_wheel, 0, sim.simx_opmode_oneshot_wait)\n sim.simxSetJointTargetVelocity(\n clientID, l_wheel, 0, sim.simx_opmode_oneshot_wait)\n\n # Now close the connection to CoppeliaSim:\n sim.simxFinish(clientID)\n\nelse:\n print('Failed connecting to remote API server')\n\nprint('Program ended')\n",
"try:\n import sim\nexcept:\n print ('--------------------------------------------------------------')\n print ('\"sim.py\" could not be imported. This means very probably that')\n print ('either \"sim.py\" or the remoteApi library could not be found.')\n print ('Make sure both are in the same folder as this file,')\n print ('or appropriately adjust the file \"sim.py\"')\n print ('--------------------------------------------------------------')\n print ('')\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport math\n\n'''\nreadSensorData - It will try to capture the range and angle data from the simulator.\n The request for the range data is sent in streaming mode to force\n it to sync with the angle data request which acts as a mutex.\n\ninputs:\n -clientId: simulator client id obtained through a successfull connection with the simulator.\n -range_data_signal_id: string containing the range data signal pipe name.\n -angle_data_signal_id: string containing the angle data signal pipe name.\noutputs:\n -returns None if no data is recovered.\n -returns two arrays, one with data range and the other with their angles, if data was \n retrieved successfully.\n'''\ndef readSensorData(clientId=-1, \n range_data_signal_id=\"hokuyo_range_data\", \n angle_data_signal_id=\"hokuyo_angle_data\"):\n\n # the first call should be non-blocking to avoid getting out-of-sync angle data\n returnCodeRanges, string_range_data = sim.simxGetStringSignal(clientId, range_data_signal_id, sim.simx_opmode_streaming)\n\n # the second call should block to avoid out-of-sync scenarios\n # between your python script and the simulator's main loop\n # (your script may be slower than the simulator's main loop, thus\n # slowing down data processing)\n returnCodeAngles, string_angle_data = sim.simxGetStringSignal(clientId, angle_data_signal_id, sim.simx_opmode_blocking)\n\n # check the if both data were obtained correctly\n if returnCodeRanges == 0 and returnCodeAngles == 0:\n # unpack data from range and sensor messages\n raw_range_data = sim.simxUnpackFloats(string_range_data)\n raw_angle_data = sim.simxUnpackFloats(string_angle_data)\n\n return raw_range_data, raw_angle_data\n\n # return none in case were nothing was gotten from the simulator\n return None\n\n\ndef draw_laser_data(laser_data, max_sensor_range=5):\n \n fig = plt.figure(figsize=(6,6), dpi=100)\n ax = fig.add_subplot(111, aspect='equal')\n \n for i in range(len(laser_data)):\n ang, dist = laser_data[i]\n \n # Quando o feixe não acerta nada, retorna o valor máximo (definido na simulação)\n # Logo, usar um pequeno limiar do máximo para considerar a leitura\n if (max_sensor_range - dist) > 0.1:\n x = dist * np.cos(ang)\n y = dist * np.sin(ang)\n c = 'r'\n if ang < 0: \n c = 'b'\n ax.plot(x, y, 'o', color=c)\n\n ax.plot(0, 0, 'k>', markersize=10)\n \n ax.grid()\n ax.set_xlim([-max_sensor_range, max_sensor_range])\n ax.set_ylim([-max_sensor_range, max_sensor_range])\n\n\nprint ('Program started')\nsim.simxFinish(-1) # just in case, close all opened connections\nclientID=sim.simxStart('127.0.0.1',19999,True,True,5000,5) # Connect to CoppeliaSim\n\nif clientID!=-1:\n print ('Connected to remote API server')\n \n # Iniciando a simulação\n # Deve usar a porta do 'continuous remote API server services' (remoteApiConnections.txt)\n # e = sim.simxStartSimulation(clientID,sim.simx_opmode_blocking)\n\n # Handle para o ROBÔ \n robotname = 'Pioneer_p3dx'\n returnCode, robotHandle = sim.simxGetObjectHandle(clientID, robotname, sim.simx_opmode_oneshot_wait) 
\n \n # Handle para as juntas das RODAS\n returnCode, l_wheel = sim.simxGetObjectHandle(clientID, robotname + '_leftMotor', sim.simx_opmode_oneshot_wait)\n returnCode, r_wheel = sim.simxGetObjectHandle(clientID, robotname + '_rightMotor', sim.simx_opmode_oneshot_wait) \n \n # Handle para os dados do LASER\n laser_range_data = \"hokuyo_range_data\"\n laser_angle_data = \"hokuyo_angle_data\"\n \n # Geralmente a primeira leitura é inválida (atenção ao Operation Mode)\n # Em loop até garantir que as leituras serão válidas\n returnCode = 1\n while returnCode != 0:\n returnCode, range_data = sim.simxGetStringSignal(clientID, laser_range_data, sim.simx_opmode_streaming + 10)\n \n # Prosseguindo com as leituras\n raw_range_data, raw_angle_data = readSensorData(clientID, laser_range_data, laser_angle_data)\n laser_data = np.array([raw_angle_data, raw_range_data]).T\n \n print(laser_data)\n draw_laser_data(laser_data)\n \n returnCode, pos = sim.simxGetObjectPosition(clientID, robotHandle, -1, sim.simx_opmode_oneshot_wait) \n print('Pos: ', pos)\n \n #raise SystemExit()\n \n # Dados do Pioneer\n L = 0.381 # Metros\n r = 0.0975 # Metros\n\n t = 0\n # Lembrar de habilitar o 'Real-time mode'\n startTime=time.time()\n lastTime = startTime\n while t < 60:\n \n now = time.time()\n dt = now - lastTime\n \n # Fazendo leitura do laser \n raw_range_data, raw_angle_data = readSensorData(clientID, laser_range_data, laser_angle_data)\n laser_data = np.array([raw_angle_data, raw_range_data]).T\n \n # Velocidade básica (linear, angular)\n v = 0\n w = np.deg2rad(0)\n\n frente = int(len(laser_data)/2)\n lado_direito = int(len(laser_data)*1/4)\n lado_esquerdo = int(len(laser_data)*3/4)\n \n if laser_data[frente, 1] > 2:\n v = .5\n w = 0\n elif laser_data[lado_direito, 1] > 2:\n v = 0\n w = np.deg2rad(-30)\n elif laser_data[lado_esquerdo, 1] > 2:\n v = 0\n w = np.deg2rad(30)\n \n # Isso é o modelo cinemático, estudaremos detalhadamente depois!\n wl = v/r - (w*L)/(2*r)\n wr = v/r + (w*L)/(2*r)\n \n # Enviando velocidades\n sim.simxSetJointTargetVelocity(clientID, l_wheel, wl, sim.simx_opmode_streaming + 5)\n sim.simxSetJointTargetVelocity(clientID, r_wheel, wr, sim.simx_opmode_streaming + 5) \n \n t = t + dt \n lastTime = now\n\n # Parando o robô \n sim.simxSetJointTargetVelocity(clientID, r_wheel, 0, sim.simx_opmode_oneshot_wait)\n sim.simxSetJointTargetVelocity(clientID, l_wheel, 0, sim.simx_opmode_oneshot_wait) \n \n # Parando a simulação \n sim.simxStopSimulation(clientID,sim.simx_opmode_blocking) \n \n # Now close the connection to CoppeliaSim:\n sim.simxFinish(clientID)\n \nelse:\n print ('Failed connecting to remote API server')\n \nprint ('Program ended')\n"
] |
[
[
"numpy.sqrt",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"numpy.deg2rad",
"numpy.array"
],
[
"numpy.cos",
"numpy.sin",
"numpy.deg2rad",
"numpy.array",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zabarah/MRCNN
|
[
"93fdd01e1e38200763a2d6adbc5b076aa0218d2c"
] |
[
"MRCNN/Evalution.py"
] |
[
"\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport os\nos.chdir(\"Target File\") \nimport sklearn.metrics as skm\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom keras import backend as cmp\n\n\n##########################\ndef ResOfSitesDiv(y,z):\n '''\n First select The hyper, hypo, and medium methylation sites of \n the original methylation data in the test set and then do the \n same operation to select corresponding predicted data.\n '''\n \n y1 = y[y>=0.9]#As hypermethylation sites\n y2 = y[y<=0.1]#As hypomethylation sites\n ix_y = []\n for i in range(len(y1)):\n if(0.4<y1[i]<0.6):\n ix_y.append(i)\n y3 = y[ix_y] #As middle-methylation sites\n \n z1 = z[z>=0.9]#As hypermethylation sites\n z2 = z[z<=0.1]#As hypomethylation sites\n ix_z = []\n for i in range(len(y1)):\n if(0.4<y1[i]<0.6):\n ix_z.append(i)\n z3 = y[ix_z] #As middle-methylation sites\n return y1,z1,y2,z2,y3,z3\n\n \n \n \n \ndef ResMetrics(y,z):\n MSE = mse(y, z)** 0.5\n MAE = mae(y,z) \n ResLoss = np.concatenate((MSE,MAE),axis=0)\n return ResLoss \n\n\n \n\n\n\n\ndef contingency_table(y, z):\n y = cmp.round(y)\n z = cmp.round(z)\n\n def count_matches(a, b):\n tmp = cmp.concatenate([a, b])\n return cmp.sum(cmp.cast(cmp.all(tmp, -1), cmp.floatx()))\n\n ones = cmp.ones_licmpe(y)\n zeros = cmp.zeros_licmpe(y)\n y_ones = cmp.equal(y, ones)\n y_zeros = cmp.equal(y, zeros)\n z_ones = cmp.equal(z, ones)\n z_zeros = cmp.equal(z, zeros)\n\n tp = count_matches(y_ones, z_ones)\n tn = count_matches(y_zeros, z_zeros)\n fp = count_matches(y_zeros, z_ones)\n fn = count_matches(y_ones, z_zeros)\n\n return (tp, tn, fp, fn)\n\n\n'''\nOptional classification metrics\n'''\n\ndef tpr(y, z):\n tp, tn, fp, fn = contingency_table(y, z)\n return tp / (tp + fn)\n\n\ndef tnr(y, z):\n tp, tn, fp, fn = contingency_table(y, z)\n return tn / (tn + fp)\n\n\ndef fpr(y, z):\n tp, tn, fp, fn = contingency_table(y, z)\n return fp / (fp + tn)\n\n\ndef fnr(y, z):\n tp, tn, fp, fn = contingency_table(y, z)\n return fn / (fn + tp)\n\n\ndef acc(y, z):\n tp, tn, fp, fn = contingency_table(y, z)\n return (tp + tn) / (tp + tn + fp + fn)\n\t\ndef auc(y, z, round=True):\n if round:\n y = y.round()\n if len(y) == 0 or len(np.unique(y)) < 2:\n return np.nan\n return skm.roc_auc_score(y, z)\n\t\t\n\t\ndef prec(y, z):\n tp, tn, fp, fn = contingency_table(y, z)\n return tp / (tp + fp)\n\n\ndef f1(y, z):\n _tpr = tpr(y, z)\n _prec = prec(y, z)\n return 2 * (_prec * _tpr) / (_prec + _tpr)\n\n\t\ndef mcc(y, z):\n tp, tn, fp, fn = contingency_table(y, z)\n return (tp * tn - fp * fn) / cmp.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\n\n\n\n\n\n\t\n\n\n\t\n"
] |
[
[
"sklearn.metrics.roc_auc_score",
"numpy.unique",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.mean_squared_error",
"numpy.concatenate"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mt-huebsch/pymatgen
|
[
"92da4a6a3d7c7a2f4cfed19a49794d59f15b42e7"
] |
[
"pymatgen/io/vasp/sets.py"
] |
[
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nThis module defines the VaspInputSet abstract base class and a concrete\nimplementation for the parameters developed and tested by the core team\nof pymatgen, including the Materials Virtual Lab, Materials Project and the MIT\nhigh throughput project. The basic concept behind an input set is to specify\na scheme to generate a consistent set of VASP inputs from a structure\nwithout further user intervention. This ensures comparability across\nruns.\n\nRead the following carefully before implementing new input sets:\n\n1. 99% of what needs to be done can be done by specifying user_incar_settings\n to override some of the defaults of various input sets. Unless there is an\n extremely good reason to add a new set, DO NOT add one. E.g., if you want\n to turn the hubbard U off, just set \"LDAU\": False as a user_incar_setting.\n2. All derivative input sets should inherit from one of the usual MPRelaxSet or\n MITRelaxSet, and proper superclass delegation should be used where possible.\n In particular, you are not supposed to implement your own as_dict or\n from_dict for derivative sets unless you know what you are doing.\n Improper overriding the as_dict and from_dict protocols is the major\n cause of implementation headaches. If you need an example, look at how the\n MPStaticSet or MPNonSCFSets are constructed.\n\nThe above are recommendations. The following are UNBREAKABLE rules:\n\n1. All input sets must take in a structure or list of structures as the first\n argument.\n2. user_incar_settings, user_kpoints_settings and user_<whatever>_settings are\n ABSOLUTE. Any new sets you implement must obey this. If a user wants to\n override your settings, you assume he knows what he is doing. Do not\n magically override user supplied settings. You can issue a warning if you\n think the user is wrong.\n3. All input sets must save all supplied args and kwargs as instance variables.\n E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This\n ensures the as_dict and from_dict work correctly.\n\"\"\"\n\nimport abc\nimport glob\nimport itertools\nimport os\nimport re\nimport shutil\nimport warnings\nfrom copy import deepcopy\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Union\nfrom zipfile import ZipFile\n\nimport numpy as np\nfrom monty.dev import deprecated\nfrom monty.io import zopen\nfrom monty.json import MSONable\nfrom monty.serialization import loadfn\n\nfrom pymatgen.analysis.structure_matcher import StructureMatcher\nfrom pymatgen.core.periodic_table import Element, Species\nfrom pymatgen.core.sites import PeriodicSite\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.io.lobster import Lobsterin\nfrom pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar, VaspInput\nfrom pymatgen.io.vasp.outputs import Outcar, Vasprun\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.symmetry.bandstructure import HighSymmKpath\n\nMODULE_DIR = Path(__file__).resolve().parent\n\n\nclass VaspInputSet(MSONable, metaclass=abc.ABCMeta):\n \"\"\"\n Base class representing a set of Vasp input parameters with a structure\n supplied as init parameters. Typically, you should not inherit from this\n class. 
Start from DictSet or MPRelaxSet or MITRelaxSet.\n \"\"\"\n\n @property\n @abc.abstractmethod\n def incar(self):\n \"\"\"Incar object\"\"\"\n pass\n\n @property\n @abc.abstractmethod\n def kpoints(self):\n \"\"\"Kpoints object\"\"\"\n pass\n\n @property\n @abc.abstractmethod\n def poscar(self):\n \"\"\"Poscar object\"\"\"\n pass\n\n @property\n def potcar_symbols(self):\n \"\"\"\n List of POTCAR symbols.\n \"\"\"\n # pylint: disable=E1101\n elements = self.poscar.site_symbols\n potcar_symbols = []\n settings = self._config_dict[\"POTCAR\"]\n\n if isinstance(settings[elements[-1]], dict):\n for el in elements:\n potcar_symbols.append(settings[el][\"symbol\"] if el in settings else el)\n else:\n for el in elements:\n potcar_symbols.append(settings.get(el, el))\n\n return potcar_symbols\n\n @property\n def potcar(self):\n \"\"\"\n Potcar object.\n \"\"\"\n # pylint: disable=E1101\n potcar = Potcar(self.potcar_symbols, functional=self.potcar_functional)\n\n # warn if the selected POTCARs do not correspond to the chosen\n # potcar_functional\n for psingle in potcar:\n if self.potcar_functional not in psingle.identify_potcar()[0]:\n warnings.warn(\n \"POTCAR data with symbol {} is not known by pymatgen to\\\n correspond with the selected potcar_functional {}. This POTCAR\\\n is known to correspond with functionals {}. Please verify that\\\n you are using the right POTCARs!\".format(\n psingle.symbol,\n self.potcar_functional,\n psingle.identify_potcar(mode=\"data\")[0],\n ),\n BadInputSetWarning,\n )\n\n return potcar\n\n @property # type: ignore\n @deprecated(message=\"Use the get_vasp_input() method instead.\")\n def all_input(self):\n \"\"\"\n Returns all input files as a dict of {filename: vasp object}\n\n Returns:\n dict of {filename: object}, e.g., {'INCAR': Incar object, ...}\n \"\"\"\n return {\n \"INCAR\": self.incar,\n \"KPOINTS\": self.kpoints,\n \"POSCAR\": self.poscar,\n \"POTCAR\": self.potcar,\n }\n\n def get_vasp_input(self) -> VaspInput:\n \"\"\"\n\n Returns:\n VaspInput\n \"\"\"\n return VaspInput(\n incar=self.incar,\n kpoints=self.kpoints,\n poscar=self.poscar,\n potcar=self.potcar,\n )\n\n def write_input(\n self,\n output_dir,\n make_dir_if_not_present=True,\n include_cif=False,\n potcar_spec=False,\n zip_output=False,\n ):\n \"\"\"\n Writes a set of VASP input to a directory.\n\n Args:\n output_dir (str): Directory to output the VASP input files\n make_dir_if_not_present (bool): Set to True if you want the\n directory (and the whole path) to be created if it is not\n present.\n include_cif (bool): Whether to write a CIF file in the output\n directory for easier opening by VESTA.\n potcar_spec (bool): Instead of writing the POTCAR, write a \"POTCAR.spec\".\n This is intended to help sharing an input set with people who might\n not have a license to specific Potcar files. 
Given a \"POTCAR.spec\",\n the specific POTCAR file can be re-generated using pymatgen with the\n \"generate_potcar\" function in the pymatgen CLI.\n zip_output (bool): If True, output will be zipped into a file with the\n same name as the InputSet (e.g., MPStaticSet.zip)\n \"\"\"\n if potcar_spec:\n if make_dir_if_not_present and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n with zopen(os.path.join(output_dir, \"POTCAR.spec\"), \"wt\") as f:\n f.write(\"\\n\".join(self.potcar_symbols))\n\n for k, v in {\n \"INCAR\": self.incar,\n \"POSCAR\": self.poscar,\n \"KPOINTS\": self.kpoints,\n }.items():\n if v is not None:\n with zopen(os.path.join(output_dir, k), \"wt\") as f:\n f.write(v.__str__())\n else:\n vinput = self.get_vasp_input()\n vinput.write_input(\n output_dir, make_dir_if_not_present=make_dir_if_not_present\n )\n\n cifname = \"\"\n if include_cif:\n s = vinput[\"POSCAR\"].structure\n cifname = Path(output_dir) / (\"%s.cif\" % re.sub(r\"\\s\", \"\", s.formula))\n s.to(filename=cifname)\n\n if zip_output:\n filename = self.__class__.__name__ + \".zip\"\n with ZipFile(filename, \"w\") as zip:\n for file in [\n \"INCAR\",\n \"POSCAR\",\n \"KPOINTS\",\n \"POTCAR\",\n \"POTCAR.spec\",\n cifname,\n ]:\n try:\n zip.write(file)\n os.remove(file)\n except FileNotFoundError:\n pass\n\n def as_dict(self, verbosity=2):\n \"\"\"\n Args:\n verbosity: Verbosity for generated dict. If 1, structure is\n excluded.\n\n Returns:\n MSONable dict\n \"\"\"\n d = MSONable.as_dict(self)\n if verbosity == 1:\n d.pop(\"structure\", None)\n return d\n\n\ndef _load_yaml_config(fname):\n config = loadfn(str(MODULE_DIR / (\"%s.yaml\" % fname)))\n if \"PARENT\" in config:\n parent_config = _load_yaml_config(config[\"PARENT\"])\n for k, v in parent_config.items():\n if k not in config:\n config[k] = v\n elif isinstance(v, dict):\n v_new = config.get(k, {})\n v_new.update(v)\n config[k] = v_new\n return config\n\n\nclass DictSet(VaspInputSet):\n \"\"\"\n Concrete implementation of VaspInputSet that is initialized from a dict\n settings. This allows arbitrary settings to be input. In general,\n this is rarely used directly unless there is a source of settings in yaml\n format (e.g., from a REST interface). It is typically used by other\n VaspInputSets for initialization.\n\n Special consideration should be paid to the way the MAGMOM initialization\n for the INCAR is done. The initialization differs depending on the type of\n structure and the configuration settings. The order in which the magmom is\n determined is as follows:\n\n 1. If the site itself has a magmom setting, that is used.\n 2. If the species on the site has a spin setting, that is used.\n 3. If the species itself has a particular setting in the config file, that\n is used, e.g., Mn3+ may have a different magmom than Mn4+.\n 4. Lastly, the element symbol itself is checked in the config file. 
If\n there are no settings, VASP's default of 0.6 is used.\n \"\"\"\n\n def __init__(\n self,\n structure,\n config_dict,\n files_to_transfer=None,\n user_incar_settings=None,\n user_kpoints_settings=None,\n user_potcar_settings=None,\n constrain_total_magmom=False,\n sort_structure=True,\n potcar_functional=None,\n user_potcar_functional=None,\n force_gamma=False,\n reduce_structure=None,\n vdw=None,\n use_structure_charge=False,\n standardize=False,\n sym_prec=0.1,\n international_monoclinic=True,\n validate_magmom=True,\n ):\n \"\"\"\n Args:\n structure (Structure): The Structure to create inputs for.\n config_dict (dict): The config dictionary to use.\n files_to_transfer (dict): A dictionary of {filename: filepath}. This\n allows the transfer of files from a previous calculation.\n user_incar_settings (dict): User INCAR settings. This allows a user\n to override INCAR settings, e.g., setting a different MAGMOM for\n various elements or species. Note that in the new scheme,\n ediff_per_atom and hubbard_u are no longer args. Instead, the\n config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former\n scales with # of atoms, the latter does not. If both are\n present, EDIFF is preferred. To force such settings, just supply\n user_incar_settings={\"EDIFF\": 1e-5, \"LDAU\": False} for example.\n The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since\n pymatgen defines different values depending on what anions are\n present in the structure, so these keys can be defined in one\n of two ways, e.g. either {\"LDAUU\":{\"O\":{\"Fe\":5}}} to set LDAUU\n for Fe to 5 in an oxide, or {\"LDAUU\":{\"Fe\":5}} to set LDAUU to\n 5 regardless of the input structure.\n\n If a None value is given, that key is unset. For example,\n {\"ENCUT\": None} will remove ENCUT from the incar settings.\n user_kpoints_settings (dict or Kpoints): Allow user to override kpoints\n setting by supplying a dict E.g., {\"reciprocal_density\": 1000}.\n User can also supply Kpoints object. Default is None.\n user_potcar_settings (dict: Allow user to override POTCARs. E.g.,\n {\"Gd\": \"Gd_3\"}. This is generally not recommended. Default is None.\n constrain_total_magmom (bool): Whether to constrain the total magmom\n (NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all\n species. Defaults to False.\n sort_structure (bool): Whether to sort the structure (using the\n default sort order of electronegativity) before generating input\n files. Defaults to True, the behavior you would want most of the\n time. This ensures that similar atomic species are grouped\n together.\n user_potcar_functional (str): Functional to use. Default (None) is to use\n the functional in the config dictionary. Valid values:\n \"PBE\", \"PBE_52\", \"PBE_54\", \"LDA\", \"LDA_52\", \"LDA_54\", \"PW91\",\n \"LDA_US\", \"PW91_US\".\n force_gamma (bool): Force gamma centered kpoint generation. Default\n (False) is to use the Automatic Density kpoint scheme, which\n will use the Gamma centered generation scheme for hexagonal\n cells, and Monkhorst-Pack otherwise.\n reduce_structure (None/str): Before generating the input files,\n generate the reduced structure. Default (None), does not\n alter the structure. Valid values: None, \"niggli\", \"LLL\".\n vdw: Adds default parameters for van-der-Waals functionals supported\n by VASP to INCAR. 
Supported functionals are: DFT-D2, undamped\n DFT-D3, DFT-D3 with Becke-Jonson damping, Tkatchenko-Scheffler,\n Tkatchenko-Scheffler with iterative Hirshfeld partitioning,\n MBD@rSC, dDsC, Dion's vdW-DF, DF2, optPBE, optB88, optB86b and\n rVV10.\n use_structure_charge (bool): If set to True, then the public\n variable used for setting the overall charge of the\n structure (structure.charge) is used to set the NELECT\n variable in the INCAR\n Default is False (structure's overall charge is not used)\n standardize (float): Whether to standardize to a primitive standard\n cell. Defaults to False.\n sym_prec (float): Tolerance for symmetry finding.\n international_monoclinic (bool): Whether to use international convention\n (vs Curtarolo) for monoclinic. Defaults True.\n validate_magmom (bool): Ensure that the missing magmom values are filled\n in with the vasp default value of 1.0\n \"\"\"\n if reduce_structure:\n structure = structure.get_reduced_structure(reduce_structure)\n if sort_structure:\n structure = structure.get_sorted_structure()\n if validate_magmom:\n get_valid_magmom_struct(structure, spin_mode=\"auto\", inplace=True)\n\n self._structure = structure\n self._config_dict = deepcopy(config_dict)\n self.files_to_transfer = files_to_transfer or {}\n self.constrain_total_magmom = constrain_total_magmom\n self.sort_structure = sort_structure\n self.force_gamma = force_gamma\n self.reduce_structure = reduce_structure\n self.user_incar_settings = user_incar_settings or {}\n self.user_kpoints_settings = user_kpoints_settings or {}\n self.user_potcar_settings = user_potcar_settings\n self.vdw = vdw.lower() if vdw is not None else None\n self.use_structure_charge = use_structure_charge\n self.standardize = standardize\n self.sym_prec = sym_prec\n self.international_monoclinic = international_monoclinic\n\n if (\n self.user_incar_settings.get(\"KSPACING\")\n and user_kpoints_settings is not None\n ):\n warnings.warn(\n \"You have specified KSPACING and also supplied kpoints \"\n \"settings. KSPACING only has effect when there is no \"\n \"KPOINTS file. Since both settings were given, pymatgen\"\n \"will generate a KPOINTS file and ignore KSPACING.\"\n \"Remove the `user_kpoints_settings` argument to enable KSPACING.\",\n BadInputSetWarning,\n )\n\n if self.vdw:\n vdw_par = loadfn(str(MODULE_DIR / \"vdW_parameters.yaml\"))\n try:\n self._config_dict[\"INCAR\"].update(vdw_par[self.vdw])\n except KeyError:\n raise KeyError(\n \"Invalid or unsupported van-der-Waals \"\n \"functional. Supported functionals are \"\n \"%s.\" % vdw_par.keys()\n )\n # read the POTCAR_FUNCTIONAL from the .yaml\n self.potcar_functional = self._config_dict.get(\"POTCAR_FUNCTIONAL\", \"PBE\")\n\n if potcar_functional is not None and user_potcar_functional is not None:\n raise ValueError(\n \"Received both 'potcar_functional' and \"\n \"'user_potcar_functional arguments. 'potcar_functional \"\n \"is deprecated.\"\n )\n if potcar_functional:\n warnings.warn(\n \"'potcar_functional' argument is deprecated. 
Use \"\n \"'user_potcar_functional' instead.\",\n FutureWarning,\n )\n self.potcar_functional = potcar_functional\n elif user_potcar_functional:\n self.potcar_functional = user_potcar_functional\n\n # warn if a user is overriding POTCAR_FUNCTIONAL\n if self.potcar_functional != self._config_dict.get(\"POTCAR_FUNCTIONAL\"):\n warnings.warn(\n \"Overriding the POTCAR functional is generally not recommended \"\n \" as it significantly affect the results of calculations and \"\n \"compatibility with other calculations done with the same \"\n \"input set. Note that some POTCAR symbols specified in \"\n \"the configuration file may not be available in the selected \"\n \"functional.\",\n BadInputSetWarning,\n )\n\n if self.user_potcar_settings:\n warnings.warn(\n \"Overriding POTCARs is generally not recommended as it \"\n \"significantly affect the results of calculations and \"\n \"compatibility with other calculations done with the same \"\n \"input set. In many instances, it is better to write a \"\n \"subclass of a desired input set and override the POTCAR in \"\n \"the subclass to be explicit on the differences.\",\n BadInputSetWarning,\n )\n for k, v in self.user_potcar_settings.items():\n self._config_dict[\"POTCAR\"][k] = v\n\n @property\n def structure(self) -> Structure:\n \"\"\"\n :return: Structure\n \"\"\"\n if self.standardize and self.sym_prec:\n return standardize_structure(\n self._structure,\n sym_prec=self.sym_prec,\n international_monoclinic=self.international_monoclinic,\n )\n return self._structure\n\n @property\n def incar(self) -> Incar:\n \"\"\"\n :return: Incar\n \"\"\"\n settings = dict(self._config_dict[\"INCAR\"])\n for k, v in self.user_incar_settings.items():\n if v is None:\n try:\n del settings[k]\n except KeyError:\n settings[k] = v\n elif k == \"KSPACING\" and self.user_kpoints_settings != {}:\n pass # Ignore KSPACING if user_kpoints_settings are given\n else:\n settings[k] = v\n structure = self.structure\n incar = Incar()\n comp = structure.composition\n elements = sorted(\n [el for el in comp.elements if comp[el] > 0], key=lambda e: e.X\n )\n most_electroneg = elements[-1].symbol\n poscar = Poscar(structure)\n hubbard_u = settings.get(\"LDAU\", False)\n\n for k, v in settings.items():\n if k == \"MAGMOM\":\n mag = []\n for site in structure:\n if hasattr(site, \"magmom\"):\n mag.append(site.magmom)\n elif hasattr(site.specie, \"spin\"):\n mag.append(site.specie.spin)\n elif str(site.specie) in v:\n if site.specie.symbol == \"Co\":\n warnings.warn(\n \"Co without oxidation state is initialized low spin by default. If this is \"\n \"not desired, please set the spin on the magmom on the site directly to \"\n \"ensure correct initialization\"\n )\n mag.append(v.get(str(site.specie)))\n else:\n if site.specie.symbol == \"Co\":\n warnings.warn(\n \"Co without oxidation state is initialized low spin by default. 
If this is \"\n \"not desired, please set the spin on the magmom on the site directly to \"\n \"ensure correct initialization\"\n )\n mag.append(v.get(site.specie.symbol, 0.6))\n incar[k] = mag\n elif k in (\"LDAUU\", \"LDAUJ\", \"LDAUL\"):\n if hubbard_u:\n if hasattr(structure[0], k.lower()):\n m = {\n site.specie.symbol: getattr(site, k.lower())\n for site in structure\n }\n incar[k] = [m[sym] for sym in poscar.site_symbols]\n # lookup specific LDAU if specified for most_electroneg atom\n elif most_electroneg in v.keys() and isinstance(\n v[most_electroneg], dict\n ):\n incar[k] = [\n v[most_electroneg].get(sym, 0)\n for sym in poscar.site_symbols\n ]\n # else, use fallback LDAU value if it exists\n else:\n incar[k] = [\n v.get(sym, 0)\n if isinstance(v.get(sym, 0), (float, int))\n else 0\n for sym in poscar.site_symbols\n ]\n elif k.startswith(\"EDIFF\") and k != \"EDIFFG\":\n if \"EDIFF\" not in settings and k == \"EDIFF_PER_ATOM\":\n incar[\"EDIFF\"] = float(v) * structure.num_sites\n else:\n incar[\"EDIFF\"] = float(settings[\"EDIFF\"])\n else:\n incar[k] = v\n has_u = hubbard_u and sum(incar[\"LDAUU\"]) > 0\n if has_u:\n # modify LMAXMIX if LSDA+U and you have d or f electrons\n # note that if the user explicitly sets LMAXMIX in settings it will\n # override this logic.\n if \"LMAXMIX\" not in settings.keys():\n # contains f-electrons\n if any([el.Z > 56 for el in structure.composition]):\n incar[\"LMAXMIX\"] = 6\n # contains d-electrons\n elif any([el.Z > 20 for el in structure.composition]):\n incar[\"LMAXMIX\"] = 4\n else:\n for key in list(incar.keys()):\n if key.startswith(\"LDAU\"):\n del incar[key]\n\n if self.constrain_total_magmom:\n nupdown = sum([mag if abs(mag) > 0.6 else 0 for mag in incar[\"MAGMOM\"]])\n incar[\"NUPDOWN\"] = nupdown\n\n if self.use_structure_charge:\n incar[\"NELECT\"] = self.nelect\n\n # Ensure adequate number of KPOINTS are present for the tetrahedron\n # method (ISMEAR=-5). If KSPACING is in the INCAR file the number\n # of kpoints is not known before calling VASP, but a warning is raised\n # when the KSPACING value is > 0.5 (2 reciprocal Angstrom).\n # An error handler in Custodian is available to\n # correct overly large KSPACING values (small number of kpoints)\n # if necessary.\n # if \"KSPACING\" not in self.user_incar_settings.keys():\n if self.kpoints is not None:\n if np.product(self.kpoints.kpts) < 4 and incar.get(\"ISMEAR\", 0) == -5:\n incar[\"ISMEAR\"] = 0\n\n if (\n self.user_incar_settings.get(\"KSPACING\", 0) > 0.5\n and incar.get(\"ISMEAR\", 0) == -5\n ):\n warnings.warn(\n \"Large KSPACING value detected with ISMEAR = -5. Ensure that VASP \"\n \"generates an adequate number of KPOINTS, lower KSPACING, or \"\n \"set ISMEAR = 0\",\n BadInputSetWarning,\n )\n\n if all([k.is_metal for k in structure.composition.keys()]):\n if incar.get(\"NSW\", 0) > 0 and incar.get(\"ISMEAR\", 1) < 1:\n warnings.warn(\n \"Relaxation of likely metal with ISMEAR < 1 \"\n \"detected. 
Please see VASP recommendations on \"\n \"ISMEAR for metals.\",\n BadInputSetWarning,\n )\n\n return incar\n\n @property\n def poscar(self) -> Poscar:\n \"\"\"\n :return: Poscar\n \"\"\"\n return Poscar(self.structure)\n\n @property\n def nelect(self) -> float:\n \"\"\"\n Gets the default number of electrons for a given structure.\n \"\"\"\n nelectrons_by_element = {p.element: p.nelectrons for p in self.potcar}\n nelect = sum([num_atoms * nelectrons_by_element[str(el)]\n for el, num_atoms in self.structure.composition.element_composition.items()])\n\n if self.use_structure_charge:\n return nelect - self.structure.charge\n return nelect\n\n @property\n def kpoints(self) -> Union[Kpoints, None]:\n \"\"\"\n Returns a KPOINTS file using the fully automated grid method. Uses\n Gamma centered meshes for hexagonal cells and Monk grids otherwise.\n\n If KSPACING is set in user_incar_settings (or the INCAR file), no\n file is created because VASP will automatically generate the kpoints.\n\n Algorithm:\n Uses a simple approach scaling the number of divisions along each\n reciprocal lattice vector proportional to its length.\n \"\"\"\n # Return None if KSPACING is present in the INCAR, because this will\n # cause VASP to generate the kpoints automatically\n if self.user_incar_settings.get(\"KSPACING\") or self._config_dict[\"INCAR\"].get(\n \"KSPACING\"\n ):\n if self.user_kpoints_settings == {}:\n return None\n\n settings = self.user_kpoints_settings or self._config_dict.get(\"KPOINTS\")\n\n if isinstance(settings, Kpoints):\n return settings\n\n # Return None if KSPACING is present in the INCAR, because this will\n # cause VASP to generate the kpoints automatically\n if (\n self.user_incar_settings.get(\"KSPACING\")\n and self.user_kpoints_settings == {}\n ):\n return None\n\n # If grid_density is in the kpoints_settings use\n # Kpoints.automatic_density\n if settings.get(\"grid_density\"):\n return Kpoints.automatic_density(\n self.structure, int(settings[\"grid_density\"]), self.force_gamma\n )\n\n # If reciprocal_density is in the kpoints_settings use\n # Kpoints.automatic_density_by_vol\n if settings.get(\"reciprocal_density\"):\n return Kpoints.automatic_density_by_vol(\n self.structure, int(settings[\"reciprocal_density\"]), self.force_gamma\n )\n\n # If length is in the kpoints_settings use Kpoints.automatic\n if settings.get(\"length\"):\n return Kpoints.automatic(settings[\"length\"])\n\n # Raise error. Unsure of which kpoint generation to use\n raise ValueError(\n \"Invalid KPoint Generation algo : Supported Keys are \"\n \"grid_density: for Kpoints.automatic_density generation, \"\n \"reciprocal_density: for KPoints.automatic_density_by_vol \"\n \"generation, and length : for Kpoints.automatic generation\"\n )\n\n def estimate_nbands(self) -> int:\n \"\"\"\n Estimate the number of bands that VASP will initialize a\n calculation with by default. 
Note that in practice this\n can depend on # of cores (if not set explicitly)\n \"\"\"\n\n nions = len(self.structure)\n\n # from VASP's point of view, the number of magnetic atoms is\n # the number of atoms with non-zero magmoms, so use Incar as\n # source of truth\n nmag = len([m for m in self.incar['MAGMOM'] if not np.allclose(m, 0)])\n\n # by definition, if non-spin polarized ignore nmag\n if (not nmag) or (self.incar['ISPIN'] == 1):\n nbands = np.ceil(self.nelect / 2 + nions / 2)\n else:\n nbands = np.ceil(0.6 * self.nelect + nmag)\n\n return int(nbands)\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return self.__class__.__name__\n\n def write_input(\n self,\n output_dir: str,\n make_dir_if_not_present: bool = True,\n include_cif: bool = False,\n potcar_spec: bool = False,\n zip_output: bool = False,\n ):\n \"\"\"\n Writes out all input to a directory.\n\n Args:\n output_dir (str): Directory to output the VASP input files\n make_dir_if_not_present (bool): Set to True if you want the\n directory (and the whole path) to be created if it is not\n present.\n include_cif (bool): Whether to write a CIF file in the output\n directory for easier opening by VESTA.\n potcar_spec (bool): Instead of writing the POTCAR, write a \"POTCAR.spec\".\n This is intended to help with sharing an input set with people who might\n not have a license to specific Potcar files. Given a \"POTCAR.spec\",\n the specific POTCAR file can be re-generated using pymatgen with the\n \"generate_potcar\" function in the pymatgen CLI.\n \"\"\"\n super().write_input(\n output_dir=output_dir,\n make_dir_if_not_present=make_dir_if_not_present,\n include_cif=include_cif,\n potcar_spec=potcar_spec,\n zip_output=zip_output,\n )\n for k, v in self.files_to_transfer.items():\n with zopen(v, \"rb\") as fin, zopen(str(Path(output_dir) / k), \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n\n def calculate_ng(self, max_prime_factor: int = 7, must_inc_2: bool = True) -> Tuple:\n \"\"\"\n Calculates the NGX, NGY, and NGZ values using the information available in the INCAR and POTCAR.\n This is meant to help with making an initial guess for the FFT grid so we can interact with the Charge density API\n\n Args:\n max_prime_factor (int): the valid prime factors of the grid size in each direction\n VASP has many different settings for this to handle many compiling options.\n For typical MPI options all prime factors up to 7 are allowed\n \"\"\"\n\n # TODO throw error for Ultrasoft potentials\n\n _RYTOEV = 13.605826\n _AUTOA = 0.529177249\n _PI = 3.141592653589793238\n\n # TODO Only do this for VASP 6 for now. 
Older versions require more advanced logic\n\n # get the ENCUT val\n if \"ENCUT\" in self.incar and self.incar[\"ENCUT\"] > 0:\n encut = self.incar[\"ENCUT\"]\n else:\n encut = max([i_species.enmax for i_species in self.all_input[\"POTCAR\"]])\n #\n\n _CUTOF = [\n np.sqrt(encut / _RYTOEV) / (2 * _PI / (anorm / _AUTOA))\n for anorm in self.poscar.structure.lattice.abc\n ]\n\n _PREC = \"Normal\" # VASP default\n if \"PREC\" in self.incar:\n _PREC = self.incar[\"PREC\"]\n\n if _PREC[0].lower() in {\"l\", \"m\", \"h\"}:\n raise NotImplementedError(\n \"PREC = LOW/MEDIUM/HIGH from VASP 4.x is not supported. Please use NORMAL/SINGLE/ACCURATE\"\n )\n\n if _PREC[0].lower() in {\"a\", \"s\"}: # TODO This only works in VASP 6.x\n _WFACT = 4\n else:\n _WFACT = 3\n\n def next_g_size(cur_g_size):\n g_size = int(_WFACT * cur_g_size + 0.5)\n return next_num_with_prime_factors(g_size, max_prime_factor, must_inc_2)\n\n ng_vec = [*map(next_g_size, _CUTOF)]\n\n if _PREC[0].lower() in {\"a\", \"n\"}: # TODO This works for VASP 5.x and 6.x\n finer_g_scale = 2\n else:\n finer_g_scale = 1\n\n return ng_vec, [ng_ * finer_g_scale for ng_ in ng_vec]\n\n\n# Helper functions to determine valid FFT grids for VASP\ndef next_num_with_prime_factors(\n n: int, max_prime_factor: int, must_inc_2: bool = True\n) -> int:\n \"\"\"\n Return the next number greater than or equal to n that only has the desired prime factors\n\n Args:\n n (int): Initial guess at the grid density\n max_prime_factor (int): the maximum prime factor\n must_inc_2 (bool): 2 must be a prime factor of the result\n\n Returns:\n int: first product of the prime_factors that is >= n\n \"\"\"\n if max_prime_factor < 2:\n raise ValueError(\"Must choose a maximum prime factor of at least 2\")\n prime_factors = primes_less_than(max_prime_factor)\n for new_val in itertools.count(start=n):\n if must_inc_2 and new_val % 2 != 0:\n continue\n cur_val_ = new_val\n for j in prime_factors:\n while cur_val_ % j == 0:\n cur_val_ //= j\n if cur_val_ == 1:\n return new_val\n raise ValueError(\"No factorable number found, not possible.\")\n\n\ndef primes_less_than(max_val: int) -> List[int]:\n \"\"\"\n Get the primes less than or equal to the max value\n \"\"\"\n res = []\n for i in range(2, max_val + 1):\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n res.append(i)\n return res\n\n\nclass MITRelaxSet(DictSet):\n \"\"\"\n Standard implementation of VaspInputSet utilizing parameters in the MIT\n High-throughput project.\n The parameters are chosen specifically for a high-throughput project,\n which means in general pseudopotentials with fewer electrons were chosen.\n\n Please refer::\n\n A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,\n K. A. Persson, G. Ceder. A high-throughput infrastructure for density\n functional theory calculations. Computational Materials Science,\n 2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023\n \"\"\"\n\n CONFIG = _load_yaml_config(\"MITRelaxSet\")\n\n def __init__(self, structure, **kwargs):\n \"\"\"\n :param structure: Structure\n :param kwargs: Same as those supported by DictSet.\n \"\"\"\n super().__init__(structure, MITRelaxSet.CONFIG, **kwargs)\n self.kwargs = kwargs\n\n\nclass MPRelaxSet(DictSet):\n \"\"\"\n Implementation of VaspInputSet utilizing parameters in the public\n Materials Project. 
Typically, the pseudopotentials chosen contain more\n electrons than the MIT parameters, and the k-point grid is ~50% more dense.\n The LDAUU parameters are also different due to the different psps used,\n which result in different fitted values.\n \"\"\"\n\n CONFIG = _load_yaml_config(\"MPRelaxSet\")\n\n def __init__(self, structure, **kwargs):\n \"\"\"\n :param structure: Structure\n :param kwargs: Same as those supported by DictSet.\n \"\"\"\n super().__init__(structure, MPRelaxSet.CONFIG, **kwargs)\n self.kwargs = kwargs\n\n\nclass MPScanRelaxSet(DictSet):\n \"\"\"\n Class for writing a relaxation input set using the accurate and numerically\n efficient r2SCAN variant of the Strongly Constrained and Appropriately Normed\n (SCAN) metaGGA density functional.\n\n Notes:\n 1. This functional is currently not officially supported in VASP. Source\n code may be obtained by contacting the authors of the manuscript in the\n References section. The original SCAN functional, available from VASP 5.4.3\n onwards, maybe used instead by passing `user_incar_settings={\"METAGGA\": \"SCAN\"}`\n when instantiating this InputSet. r2SCAN and SCAN are expected to yield\n very similar results.\n\n 2. Meta-GGA calculations require POTCAR files that include\n information on the kinetic energy density of the core-electrons,\n i.e. \"PBE_52\" or \"PBE_54\". Make sure the POTCARs include the\n following lines (see VASP wiki for more details):\n\n $ grep kinetic POTCAR\n kinetic energy-density\n mkinetic energy-density pseudized\n kinetic energy density (partial)\n\n References:\n James W. Furness, Aaron D. Kaplan, Jinliang Ning, John P. Perdew, and Jianwei Sun.\n Accurate and Numerically Efficient r2SCAN Meta-Generalized Gradient Approximation.\n The Journal of Physical Chemistry Letters 0, 11 DOI: 10.1021/acs.jpclett.0c02405\n \"\"\"\n\n CONFIG = _load_yaml_config(\"MPSCANRelaxSet\")\n\n def __init__(self, structure, bandgap=0, **kwargs):\n \"\"\"\n Args:\n structure (Structure): Input structure.\n bandgap (int): Bandgap of the structure in eV. The bandgap is used to\n compute the appropriate k-point density and determine the\n smearing settings.\n\n Metallic systems (default, bandgap = 0) use a KSPACING value of 0.22\n and Methfessel-Paxton order 2 smearing (ISMEAR=2, SIGMA=0.2).\n\n Non-metallic systems (bandgap > 0) use the tetrahedron smearing\n method (ISMEAR=-5, SIGMA=0.05). The KSPACING value is\n calculated from the bandgap via Eqs. 25 and 29 of Wisesa, McGill,\n and Mueller [1] (see References). Note that if 'user_incar_settings'\n or 'user_kpoints_settings' override KSPACING, the calculation from\n bandgap is not performed.\n\n vdw (str): set \"rVV10\" to enable SCAN+rVV10, which is a versatile\n van der Waals density functional by combing the SCAN functional\n with the rVV10 non-local correlation functional. rvv10 is the only\n dispersion correction available for SCAN at this time.\n **kwargs: Same as those supported by DictSet.\n\n References:\n [1] P. Wisesa, K.A. McGill, T. Mueller, Efficient generation of\n generalized Monkhorst-Pack grids through the use of informatics,\n Phys. Rev. B. 93 (2016) 1–10. 
doi:10.1103/PhysRevB.93.155109.\n \"\"\"\n super().__init__(structure, MPScanRelaxSet.CONFIG, **kwargs)\n self.bandgap = bandgap\n self.kwargs = kwargs\n\n if self.potcar_functional not in [\"PBE_52\", \"PBE_54\"]:\n raise ValueError(\"SCAN calculations require PBE_52 or PBE_54!\")\n\n updates = {}\n # select the KSPACING and smearing parameters based on the bandgap\n if self.bandgap == 0:\n updates[\"KSPACING\"] = 0.22\n updates[\"SIGMA\"] = 0.2\n updates[\"ISMEAR\"] = 2\n else:\n rmin = 25.22 - 1.87 * bandgap # Eq. 25\n kspacing = 2 * np.pi * 1.0265 / (rmin - 1.0183) # Eq. 29\n # cap the KSPACING at a max of 0.44, per internal benchmarking\n if kspacing > 0.44:\n kspacing = 0.44\n updates[\"KSPACING\"] = kspacing\n updates[\"ISMEAR\"] = -5\n updates[\"SIGMA\"] = 0.05\n\n # Don't overwrite things the user has supplied\n if self.user_incar_settings.get(\"KSPACING\"):\n del updates[\"KSPACING\"]\n\n if self.user_incar_settings.get(\"ISMEAR\"):\n del updates[\"ISMEAR\"]\n\n if self.user_incar_settings.get(\"SIGMA\"):\n del updates[\"SIGMA\"]\n\n if self.vdw:\n if self.vdw != \"rvv10\":\n warnings.warn(\n \"Use of van der Waals functionals other than rVV10 \"\n \"with SCAN is not supported at this time. \"\n )\n # delete any vdw parameters that may have been added to the INCAR\n vdw_par = loadfn(str(MODULE_DIR / \"vdW_parameters.yaml\"))\n for k, v in vdw_par[self.vdw].items():\n try:\n del self._config_dict[\"INCAR\"][k]\n except KeyError:\n pass\n\n self._config_dict[\"INCAR\"].update(updates)\n\n\nclass MPMetalRelaxSet(MPRelaxSet):\n \"\"\"\n Implementation of VaspInputSet utilizing parameters in the public\n Materials Project, but with tuning for metals. Key things are a denser\n k-point density and first-order Methfessel-Paxton smearing (ISMEAR = 1, SIGMA = 0.2).\n \"\"\"\n\n CONFIG = _load_yaml_config(\"MPRelaxSet\")\n\n def __init__(self, structure, **kwargs):\n \"\"\"\n :param structure: Structure\n :param kwargs: Same as those supported by DictSet.\n \"\"\"\n super().__init__(structure, **kwargs)\n self._config_dict[\"INCAR\"].update({\"ISMEAR\": 1, \"SIGMA\": 0.2})\n self._config_dict[\"KPOINTS\"].update({\"reciprocal_density\": 200})\n self.kwargs = kwargs\n\n\nclass MPHSERelaxSet(DictSet):\n \"\"\"\n Same as the MPRelaxSet, but with HSE parameters.\n \"\"\"\n\n CONFIG = _load_yaml_config(\"MPHSERelaxSet\")\n\n def __init__(self, structure, **kwargs):\n \"\"\"\n :param structure: Structure\n :param kwargs: Same as those supported by DictSet.\n \"\"\"\n super().__init__(structure, MPHSERelaxSet.CONFIG, **kwargs)\n self.kwargs = kwargs\n\n\nclass MPStaticSet(MPRelaxSet):\n \"\"\"\n Creates input files for a static calculation.\n \"\"\"\n\n def __init__(\n self,\n structure,\n prev_incar=None,\n prev_kpoints=None,\n lepsilon=False,\n lcalcpol=False,\n reciprocal_density=100,\n small_gap_multiply=None,\n **kwargs\n ):\n \"\"\"\n Args:\n structure (Structure): Structure from previous run.\n prev_incar (Incar): Incar file from previous run.\n prev_kpoints (Kpoints): Kpoints from previous run.\n lepsilon (bool): Whether to add static dielectric calculation\n lcalcpol (bool): Whether to turn on evaluation of the Berry phase approximations\n for electronic polarization\n reciprocal_density (int): For static calculations, we usually set the\n reciprocal density by volume. This is a convenience arg to change\n that, rather than using user_kpoints_settings. 
Defaults to 100,\n which is ~50% more than that of standard relaxation calculations.\n small_gap_multiply ([float, float]): If the gap is less than\n 1st index, multiply the default reciprocal_density by the 2nd\n index.\n **kwargs: kwargs supported by MPRelaxSet.\n \"\"\"\n super().__init__(structure, **kwargs)\n if isinstance(prev_incar, str):\n prev_incar = Incar.from_file(prev_incar)\n if isinstance(prev_kpoints, str):\n prev_kpoints = Kpoints.from_file(prev_kpoints)\n\n self.prev_incar = prev_incar\n self.prev_kpoints = prev_kpoints\n self.reciprocal_density = reciprocal_density\n self.kwargs = kwargs\n self.lepsilon = lepsilon\n self.lcalcpol = lcalcpol\n self.small_gap_multiply = small_gap_multiply\n\n @property\n def incar(self):\n \"\"\"\n :return: Incar\n \"\"\"\n parent_incar = super().incar\n incar = (\n Incar(self.prev_incar)\n if self.prev_incar is not None\n else Incar(parent_incar)\n )\n\n incar.update(\n {\n \"IBRION\": -1,\n \"ISMEAR\": -5,\n \"LAECHG\": True,\n \"LCHARG\": True,\n \"LORBIT\": 11,\n \"LVHAR\": True,\n \"LWAVE\": False,\n \"NSW\": 0,\n \"ALGO\": \"Normal\",\n }\n )\n\n if self.lepsilon:\n incar[\"IBRION\"] = 8\n incar[\"LEPSILON\"] = True\n\n # LPEAD=T: numerical evaluation of overlap integral prevents\n # LRF_COMMUTATOR errors and can lead to better expt. agreement\n # but produces slightly different results\n incar[\"LPEAD\"] = True\n\n # Note that DFPT calculations MUST unset NSW. NSW = 0 will fail\n # to output ionic.\n incar.pop(\"NSW\", None)\n incar.pop(\"NPAR\", None)\n\n if self.lcalcpol:\n incar[\"LCALCPOL\"] = True\n\n for k in [\"MAGMOM\", \"NUPDOWN\"] + list(self.user_incar_settings.keys()):\n # For these parameters as well as user specified settings, override\n # the incar settings.\n if parent_incar.get(k, None) is not None:\n incar[k] = parent_incar[k]\n else:\n incar.pop(k, None)\n\n # use new LDAUU when possible b/c the Poscar might have changed\n # representation\n if incar.get(\"LDAU\"):\n u = incar.get(\"LDAUU\", [])\n j = incar.get(\"LDAUJ\", [])\n if sum([u[x] - j[x] for x, y in enumerate(u)]) > 0:\n for tag in (\"LDAUU\", \"LDAUL\", \"LDAUJ\"):\n incar.update({tag: parent_incar[tag]})\n # ensure to have LMAXMIX for GGA+U static run\n if \"LMAXMIX\" not in incar:\n incar.update({\"LMAXMIX\": parent_incar[\"LMAXMIX\"]})\n\n # Compare ediff between previous and staticinputset values,\n # choose the tighter ediff\n incar[\"EDIFF\"] = min(incar.get(\"EDIFF\", 1), parent_incar[\"EDIFF\"])\n return incar\n\n @property\n def kpoints(self) -> Optional[Kpoints]:\n \"\"\"\n :return: Kpoints\n \"\"\"\n self._config_dict[\"KPOINTS\"][\"reciprocal_density\"] = self.reciprocal_density\n kpoints = super().kpoints\n\n # Prefer to use k-point scheme from previous run\n # except for when lepsilon = True is specified\n if kpoints is not None:\n if self.prev_kpoints and self.prev_kpoints.style != kpoints.style:\n if (self.prev_kpoints.style == Kpoints.supported_modes.Monkhorst) and (\n not self.lepsilon\n ):\n k_div = [kp + 1 if kp % 2 == 1 else kp for kp in kpoints.kpts[0]]\n kpoints = Kpoints.monkhorst_automatic(k_div)\n else:\n kpoints = Kpoints.gamma_automatic(kpoints.kpts[0])\n return kpoints\n\n def override_from_prev_calc(self, prev_calc_dir=\".\"):\n \"\"\"\n Update the input set to include settings from a previous calculation.\n\n Args:\n prev_calc_dir (str): The path to the previous calculation directory.\n\n Returns:\n The input set with the settings (structure, k-points, incar, etc)\n updated using the previous VASP run.\n \"\"\"\n vasprun, 
outcar = get_vasprun_outcar(prev_calc_dir)\n\n self.prev_incar = vasprun.incar\n self.prev_kpoints = vasprun.kpoints\n\n if self.standardize:\n warnings.warn(\n \"Use of standardize=True with from_prev_run is not \"\n \"recommended as there is no guarantee the copied \"\n \"files will be appropriate for the standardized \"\n \"structure.\"\n )\n\n self._structure = get_structure_from_prev_run(vasprun, outcar)\n\n # multiply the reciprocal density if needed\n if self.small_gap_multiply:\n gap = vasprun.eigenvalue_band_properties[0]\n if gap <= self.small_gap_multiply[0]:\n self.reciprocal_density = (\n self.reciprocal_density * self.small_gap_multiply[1]\n )\n\n return self\n\n @classmethod\n def from_prev_calc(cls, prev_calc_dir, **kwargs):\n \"\"\"\n Generate a set of Vasp input files for static calculations from a\n directory of previous Vasp run.\n\n Args:\n prev_calc_dir (str): Directory containing the outputs(\n vasprun.xml and OUTCAR) of previous vasp run.\n **kwargs: All kwargs supported by MPStaticSet, other than prev_incar\n and prev_structure and prev_kpoints which are determined from\n the prev_calc_dir.\n \"\"\"\n input_set = cls(_dummy_structure, **kwargs)\n return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)\n\n\nclass MPScanStaticSet(MPScanRelaxSet):\n \"\"\"\n Creates input files for a static calculation using the accurate and numerically\n efficient r2SCAN variant of the Strongly Constrainted and Appropriately Normed\n (SCAN) metaGGA functional.\n \"\"\"\n\n def __init__(\n self,\n structure,\n bandgap=0,\n prev_incar=None,\n lepsilon=False,\n lcalcpol=False,\n **kwargs\n ):\n \"\"\"\n Args:\n structure (Structure): Structure from previous run.\n bandgap (float): Bandgap of the structure in eV. The bandgap is used to\n compute the appropriate k-point density and determine the\n smearing settings.\n prev_incar (Incar): Incar file from previous run.\n lepsilon (bool): Whether to add static dielectric calculation\n lcalcpol (bool): Whether to turn on evaluation of the Berry phase approximations\n for electronic polarization.\n **kwargs: kwargs supported by MPScanRelaxSet.\n \"\"\"\n super().__init__(structure, bandgap, **kwargs)\n if isinstance(prev_incar, str):\n prev_incar = Incar.from_file(prev_incar)\n\n self.prev_incar = prev_incar\n self.kwargs = kwargs\n self.lepsilon = lepsilon\n self.lcalcpol = lcalcpol\n\n @property\n def incar(self):\n \"\"\"\n :return: Incar\n \"\"\"\n parent_incar = super().incar\n incar = (\n Incar(self.prev_incar)\n if self.prev_incar is not None\n else Incar(parent_incar)\n )\n\n incar.update(\n {\"LREAL\": False, \"NSW\": 0, \"LORBIT\": 11, \"LVHAR\": True, \"ISMEAR\": -5}\n )\n\n if self.lepsilon:\n incar[\"IBRION\"] = 8\n incar[\"LEPSILON\"] = True\n\n # LPEAD=T: numerical evaluation of overlap integral prevents\n # LRF_COMMUTATOR errors and can lead to better expt. agreement\n # but produces slightly different results\n incar[\"LPEAD\"] = True\n\n # Note that DFPT calculations MUST unset NSW. 
NSW = 0 will fail\n # to output ionic.\n incar.pop(\"NSW\", None)\n incar.pop(\"NPAR\", None)\n\n if self.lcalcpol:\n incar[\"LCALCPOL\"] = True\n\n for k in list(self.user_incar_settings.keys()):\n # For user specified settings, override\n # the incar settings.\n if parent_incar.get(k, None) is not None:\n incar[k] = parent_incar[k]\n else:\n incar.pop(k, None)\n\n return incar\n\n def override_from_prev_calc(self, prev_calc_dir=\".\"):\n \"\"\"\n Update the input set to include settings from a previous calculation.\n\n Args:\n prev_calc_dir (str): The path to the previous calculation directory.\n\n Returns:\n The input set with the settings (structure, k-points, incar, etc)\n updated using the previous VASP run.\n \"\"\"\n vasprun, outcar = get_vasprun_outcar(prev_calc_dir)\n\n self.prev_incar = vasprun.incar\n\n self._structure = get_structure_from_prev_run(vasprun, outcar)\n\n return self\n\n @classmethod\n def from_prev_calc(cls, prev_calc_dir, **kwargs):\n \"\"\"\n Generate a set of Vasp input files for static calculations from a\n directory of previous Vasp run.\n\n Args:\n prev_calc_dir (str): Directory containing the outputs(\n vasprun.xml and OUTCAR) of previous vasp run.\n **kwargs: All kwargs supported by MPScanStaticSet, other than prev_incar\n which is determined from the prev_calc_dir.\n \"\"\"\n input_set = cls(_dummy_structure, **kwargs)\n return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)\n\n\nclass MPHSEBSSet(MPHSERelaxSet):\n \"\"\"\n Implementation of a VaspInputSet for HSE band structure computations.\n Remember that HSE band structures must be self-consistent in VASP. A\n band structure along symmetry lines for instance needs BOTH a uniform\n grid with appropriate weights AND a path along the lines with weight 0.\n\n Thus, the \"Uniform\" mode is just like regular static SCF but allows\n adding custom kpoints (e.g., corresponding to known VBM/CBM) to the\n uniform grid that have zero weight (e.g., for better gap estimate).\n\n The \"Gap\" mode behaves just like the \"Uniform\" mode, however, if starting\n from a previous calculation, the VBM and CBM k-points will automatically\n be added to ``added_kpoints``.\n\n The \"Line\" mode is just like Uniform mode, but additionally adds\n k-points along symmetry lines with zero weight.\n \"\"\"\n\n def __init__(\n self,\n structure,\n user_incar_settings=None,\n added_kpoints=None,\n mode=\"Gap\",\n reciprocal_density=None,\n copy_chgcar=True,\n kpoints_line_density=20,\n **kwargs\n ):\n \"\"\"\n Args:\n structure (Structure): Structure to compute\n user_incar_settings (dict): A dict specifying additional incar\n settings\n added_kpoints (list): a list of kpoints (list of 3 number list)\n added to the run. The k-points are in fractional coordinates\n mode (str): \"Line\" - generate k-points along symmetry lines for\n bandstructure. 
\"Uniform\" - generate uniform k-points grid.\n reciprocal_density (int): k-point density to use for uniform mesh.\n copy_chgcar (bool): Whether to copy the CHGCAR of a previous run.\n kpoints_line_density (int): k-point density for high symmetry lines\n **kwargs (dict): Any other parameters to pass into DictSet.\n \"\"\"\n super().__init__(structure, **kwargs)\n self.user_incar_settings = user_incar_settings or {}\n self._config_dict[\"INCAR\"].update(\n {\n \"NSW\": 0,\n \"ISMEAR\": 0,\n \"SIGMA\": 0.05,\n \"ISYM\": 3,\n \"LCHARG\": False,\n \"NELMIN\": 5,\n }\n )\n self.added_kpoints = added_kpoints if added_kpoints is not None else []\n self.mode = mode\n\n if (\n not reciprocal_density\n or \"reciprocal_density\" not in self.user_kpoints_settings\n ):\n self.reciprocal_density = 50\n else:\n self.reciprocal_density = (\n reciprocal_density or self.user_kpoints_settings[\"reciprocal_density\"]\n )\n\n self.kpoints_line_density = kpoints_line_density\n self.copy_chgcar = copy_chgcar\n\n @property\n def kpoints(self) -> Kpoints:\n \"\"\"\n :return: Kpoints\n \"\"\"\n kpts = [] # type: List[Union[int, float, None]]\n weights = [] # type: List[Union[float, None]]\n all_labels = [] # type: List[Union[str, None]]\n structure = self.structure\n\n # for both modes, include the Uniform mesh w/standard weights\n grid = Kpoints.automatic_density_by_vol(structure, self.reciprocal_density).kpts\n ir_kpts = SpacegroupAnalyzer(structure, symprec=0.1).get_ir_reciprocal_mesh(\n grid[0]\n )\n for k in ir_kpts:\n kpts.append(k[0])\n weights.append(int(k[1]))\n all_labels.append(None)\n\n # for both modes, include any user-added kpoints w/zero weight\n for k in self.added_kpoints:\n kpts.append(k)\n weights.append(0.0)\n all_labels.append(\"user-defined\")\n\n # for line mode only, add the symmetry lines w/zero weight\n if self.mode.lower() == \"line\":\n kpath = HighSymmKpath(structure)\n frac_k_points, labels = kpath.get_kpoints(\n line_density=self.kpoints_line_density, coords_are_cartesian=False\n )\n\n for k, f in enumerate(frac_k_points):\n kpts.append(f)\n weights.append(0.0)\n all_labels.append(labels[k])\n\n comment = (\n \"HSE run along symmetry lines\"\n if self.mode.lower() == \"line\"\n else \"HSE run on uniform grid\"\n )\n\n return Kpoints(\n comment=comment,\n style=Kpoints.supported_modes.Reciprocal,\n num_kpts=len(kpts),\n kpts=kpts,\n kpts_weights=weights,\n labels=all_labels,\n )\n\n def override_from_prev_calc(self, prev_calc_dir=\".\"):\n \"\"\"\n Update the input set to include settings from a previous calculation.\n\n Args:\n prev_calc_dir (str): The path to the previous calculation directory.\n\n Returns:\n The input set with the settings (structure, k-points, incar, etc)\n updated using the previous VASP run.\n \"\"\"\n vasprun, outcar = get_vasprun_outcar(prev_calc_dir)\n\n self._structure = get_structure_from_prev_run(vasprun, outcar)\n\n # note: recommend not standardizing the cell because we want to retain\n # k-points\n if self.standardize:\n warnings.warn(\n \"Use of standardize=True with from_prev_calc is not \"\n \"recommended as there is no guarantee the copied \"\n \"files will be appropriate for the standardized \"\n \"structure.\"\n )\n\n if self.mode.lower() == \"gap\":\n added_kpoints = []\n\n bs = vasprun.get_band_structure()\n vbm, cbm = bs.get_vbm()[\"kpoint\"], bs.get_cbm()[\"kpoint\"]\n if vbm:\n added_kpoints.append(vbm.frac_coords)\n if cbm:\n added_kpoints.append(cbm.frac_coords)\n\n self.added_kpoints.extend(added_kpoints)\n\n files_to_transfer = {}\n if 
self.copy_chgcar:\n chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / \"CHGCAR*\")))\n if chgcars:\n files_to_transfer[\"CHGCAR\"] = str(chgcars[-1])\n\n self.files_to_transfer.update(files_to_transfer)\n\n return self\n\n @classmethod\n def from_prev_calc(cls, prev_calc_dir, **kwargs):\n \"\"\"\n Generate a set of Vasp input files for HSE calculations from a\n directory of previous Vasp run.\n\n Args:\n prev_calc_dir (str): Directory containing the outputs\n (vasprun.xml and OUTCAR) of previous vasp run.\n **kwargs: All kwargs supported by MPHSEBSStaticSet, other than\n prev_structure which is determined from the previous calc dir.\n \"\"\"\n input_set = cls(_dummy_structure, **kwargs)\n return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)\n\n\nclass MPNonSCFSet(MPRelaxSet):\n \"\"\"\n Init a MPNonSCFSet. Typically, you would use the classmethod\n from_prev_calc to initialize from a previous SCF run.\n \"\"\"\n\n def __init__(\n self,\n structure,\n prev_incar=None,\n mode=\"line\",\n nedos=2001,\n dedos=0.005,\n reciprocal_density=100,\n sym_prec=0.1,\n kpoints_line_density=20,\n optics=False,\n copy_chgcar=True,\n nbands_factor=1.2,\n small_gap_multiply=None,\n **kwargs\n ):\n \"\"\"\n Args:\n structure (Structure): Structure to compute\n prev_incar (Incar/string): Incar file from previous run.\n mode (str): Line, Uniform or Boltztrap mode supported.\n nedos (int): nedos parameter. Default to 2001.\n dedos (float): setting nedos=0 and uniform mode in from_prev_calc,\n an automatic nedos will be calculated using the total energy range\n divided by the energy step dedos\n reciprocal_density (int): density of k-mesh by reciprocal\n volume (defaults to 100)\n sym_prec (float): Symmetry precision (for Uniform mode).\n kpoints_line_density (int): Line density for Line mode.\n optics (bool): whether to add dielectric function\n copy_chgcar: Whether to copy the old CHGCAR when starting from a\n previous calculation.\n nbands_factor (float): Multiplicative factor for NBANDS when starting\n from a previous calculation. 
Choose a higher number if you are\n doing an LOPTICS calculation.\n small_gap_multiply ([float, float]): When starting from a previous\n calculation, if the gap is less than 1st index, multiply the default\n reciprocal_density by the 2nd index.\n **kwargs: kwargs supported by MPRelaxSet.\n \"\"\"\n super().__init__(structure, **kwargs)\n if isinstance(prev_incar, str):\n prev_incar = Incar.from_file(prev_incar)\n self.prev_incar = prev_incar\n self.kwargs = kwargs\n self.nedos = nedos\n self.dedos = dedos\n self.reciprocal_density = reciprocal_density\n self.sym_prec = sym_prec\n self.kpoints_line_density = kpoints_line_density\n self.optics = optics\n self.mode = mode.lower()\n self.copy_chgcar = copy_chgcar\n self.nbands_factor = nbands_factor\n self.small_gap_multiply = small_gap_multiply\n\n if self.mode.lower() not in [\"line\", \"uniform\", \"boltztrap\"]:\n raise ValueError(\n \"Supported modes for NonSCF runs are 'Line', \"\n \"'Uniform' and 'Boltztrap!\"\n )\n\n if (self.mode.lower() != \"uniform\" or nedos < 2000) and optics:\n warnings.warn(\n \"It is recommended to use Uniform mode with a high \"\n \"NEDOS for optics calculations.\"\n )\n\n @property\n def incar(self) -> Incar:\n \"\"\"\n :return: Incar\n \"\"\"\n incar = super().incar\n if self.prev_incar is not None:\n incar.update(self.prev_incar.items())\n\n # Overwrite necessary INCAR parameters from previous runs\n incar.update(\n {\n \"IBRION\": -1,\n \"LCHARG\": False,\n \"LORBIT\": 11,\n \"LWAVE\": False,\n \"NSW\": 0,\n \"ISYM\": 0,\n \"ICHARG\": 11,\n }\n )\n\n if self.mode.lower() == \"uniform\":\n # use tetrahedron method for DOS and optics calculations\n incar.update({\"ISMEAR\": -5, \"ISYM\": 2})\n else:\n # if line mode, can't use ISMEAR=-5; also use small sigma to avoid\n # partial occupancies for small band gap materials.\n # finally, explicit k-point generation (needed for bolztrap mode)\n # is incompatible with ISMEAR = -5.\n incar.update({\"ISMEAR\": 0, \"SIGMA\": 0.01})\n\n incar.update(self.user_incar_settings)\n\n if self.mode.lower() in \"uniform\":\n # Set smaller steps for DOS and optics output\n incar[\"NEDOS\"] = self.nedos\n\n if self.optics:\n incar[\"LOPTICS\"] = True\n\n incar.pop(\"MAGMOM\", None)\n\n return incar\n\n @property\n def kpoints(self) -> Optional[Kpoints]:\n \"\"\"\n :return: Kpoints\n \"\"\"\n # override pymatgen kpoints if provided\n user_kpoints = self.user_kpoints_settings\n if isinstance(user_kpoints, Kpoints):\n return user_kpoints\n\n if self.mode.lower() == \"line\":\n kpath = HighSymmKpath(self.structure)\n frac_k_points, k_points_labels = kpath.get_kpoints(\n line_density=self.kpoints_line_density, coords_are_cartesian=False\n )\n kpoints = Kpoints(\n comment=\"Non SCF run along symmetry lines\",\n style=Kpoints.supported_modes.Reciprocal,\n num_kpts=len(frac_k_points),\n kpts=frac_k_points,\n labels=k_points_labels,\n kpts_weights=[1] * len(frac_k_points),\n )\n elif self.mode.lower() == \"boltztrap\":\n kpoints = Kpoints.automatic_density_by_vol(\n self.structure, self.reciprocal_density\n )\n mesh = kpoints.kpts[0]\n ir_kpts = SpacegroupAnalyzer(\n self.structure, symprec=self.sym_prec\n ).get_ir_reciprocal_mesh(mesh)\n kpts = []\n weights = []\n for k in ir_kpts:\n kpts.append(k[0])\n weights.append(int(k[1]))\n kpoints = Kpoints(\n comment=\"Non SCF run on uniform grid\",\n style=Kpoints.supported_modes.Reciprocal,\n num_kpts=len(ir_kpts),\n kpts=kpts,\n kpts_weights=weights,\n )\n else:\n self._config_dict[\"KPOINTS\"][\"reciprocal_density\"] = 
self.reciprocal_density\n return super().kpoints\n\n return kpoints\n\n def override_from_prev_calc(self, prev_calc_dir=\".\"):\n \"\"\"\n Update the input set to include settings from a previous calculation.\n\n Args:\n prev_calc_dir (str): The path to the previous calculation directory.\n\n Returns:\n The input set with the settings (structure, k-points, incar, etc)\n updated using the previous VASP run.\n \"\"\"\n vasprun, outcar = get_vasprun_outcar(prev_calc_dir)\n\n self.prev_incar = vasprun.incar\n\n # Get a Magmom-decorated structure\n self._structure = get_structure_from_prev_run(vasprun, outcar)\n\n if self.standardize:\n warnings.warn(\n \"Use of standardize=True with from_prev_run is not \"\n \"recommended as there is no guarantee the copied \"\n \"files will be appropriate for the standardized\"\n \" structure. copy_chgcar is enforced to be false.\"\n )\n self.copy_chgcar = False\n\n # Turn off spin when magmom for every site is smaller than 0.02.\n if outcar and outcar.magnetization:\n site_magmom = np.array([i[\"tot\"] for i in outcar.magnetization])\n ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1\n\n elif vasprun.is_spin:\n ispin = 2\n\n else:\n ispin = 1\n\n nbands = int(np.ceil(vasprun.parameters[\"NBANDS\"] * self.nbands_factor))\n self.prev_incar.update({\"ISPIN\": ispin, \"NBANDS\": nbands})\n\n files_to_transfer = {}\n\n if self.copy_chgcar:\n chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / \"CHGCAR*\")))\n if chgcars:\n files_to_transfer[\"CHGCAR\"] = str(chgcars[-1])\n\n self.files_to_transfer.update(files_to_transfer)\n\n # multiply the reciprocal density if needed:\n if self.small_gap_multiply:\n gap = vasprun.eigenvalue_band_properties[0]\n if gap <= self.small_gap_multiply[0]:\n self.reciprocal_density = (\n self.reciprocal_density * self.small_gap_multiply[1]\n )\n self.kpoints_line_density = (\n self.kpoints_line_density * self.small_gap_multiply[1]\n )\n\n # automatic setting of nedos using the energy range and the energy step dedos\n if self.nedos == 0:\n emax = max([eigs.max() for eigs in vasprun.eigenvalues.values()])\n emin = min([eigs.min() for eigs in vasprun.eigenvalues.values()])\n self.nedos = int((emax - emin) / self.dedos)\n\n return self\n\n @classmethod\n def from_prev_calc(cls, prev_calc_dir, **kwargs):\n \"\"\"\n Generate a set of Vasp input files for NonSCF calculations from a\n directory of previous static Vasp run.\n\n Args:\n prev_calc_dir (str): The directory contains the outputs(\n vasprun.xml and OUTCAR) of previous vasp run.\n **kwargs: All kwargs supported by MPNonSCFSet, other than structure,\n prev_incar and prev_chgcar which are determined from the\n prev_calc_dir.\n \"\"\"\n input_set = cls(_dummy_structure, **kwargs)\n return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)\n\n\nclass MPSOCSet(MPStaticSet):\n \"\"\"\n An input set for running spin-orbit coupling (SOC) calculations.\n \"\"\"\n\n def __init__(\n self,\n structure,\n saxis=(0, 0, 1),\n copy_chgcar=True,\n nbands_factor=1.2,\n reciprocal_density=100,\n small_gap_multiply=None,\n magmom=None,\n **kwargs\n ):\n \"\"\"\n Args:\n structure (Structure): the structure must have the 'magmom' site\n property and each magnetic moment value must have 3\n components. eg: ``magmom = [[0,0,2], ...]``\n saxis (tuple): magnetic moment orientation\n copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.\n nbands_factor (float): Multiplicative factor for NBANDS. 
Choose a\n higher number if you are doing an LOPTICS calculation.\n reciprocal_density (int): density of k-mesh by reciprocal volume.\n small_gap_multiply ([float, float]): If the gap is less than\n 1st index, multiply the default reciprocal_density by the 2nd\n index.\n magmom (list[list[float]]): Override for the structure magmoms.\n **kwargs: kwargs supported by MPStaticSet.\n \"\"\"\n\n if not hasattr(structure[0], \"magmom\") and not isinstance(\n structure[0].magmom, list\n ):\n raise ValueError(\n \"The structure must have the 'magmom' site \"\n \"property and each magnetic moment value must have 3 \"\n \"components. eg:- magmom = [0,0,2]\"\n )\n\n super().__init__(structure, reciprocal_density=reciprocal_density, **kwargs)\n self.saxis = saxis\n self.copy_chgcar = copy_chgcar\n self.nbands_factor = nbands_factor\n self.small_gap_multiply = small_gap_multiply\n self.magmom = magmom\n\n @property\n def incar(self) -> Incar:\n \"\"\"\n :return: Incar\n \"\"\"\n incar = super().incar\n if self.prev_incar is not None:\n incar.update(self.prev_incar.items())\n\n # Overwrite necessary INCAR parameters from previous runs\n incar.update(\n {\"ISYM\": -1, \"LSORBIT\": \"T\", \"ICHARG\": 11, \"SAXIS\": list(self.saxis)}\n )\n incar.update(self.user_incar_settings)\n\n return incar\n\n def override_from_prev_calc(self, prev_calc_dir=\".\"):\n \"\"\"\n Update the input set to include settings from a previous calculation.\n\n Args:\n prev_calc_dir (str): The path to the previous calculation directory.\n\n Returns:\n The input set with the settings (structure, k-points, incar, etc)\n updated using the previous VASP run.\n \"\"\"\n vasprun, outcar = get_vasprun_outcar(prev_calc_dir)\n\n self.prev_incar = vasprun.incar\n\n # Remove magmoms from previous INCAR, since we will prefer\n # the final calculated magmoms\n # TODO: revisit in context of MPStaticSet incar logic\n if \"MAGMOM\" in self.prev_incar:\n del self.prev_incar[\"magmom\"]\n\n # Get a magmom-decorated structure\n self._structure = get_structure_from_prev_run(vasprun, outcar)\n if self.standardize:\n warnings.warn(\n \"Use of standardize=True with from_prev_run is not \"\n \"recommended as there is no guarantee the copied \"\n \"files will be appropriate for the standardized\"\n \" structure. 
copy_chgcar is enforced to be false.\"\n )\n self.copy_chgcar = False\n\n # override magmom if provided\n if self.magmom:\n self._structure = self._structure.copy(\n site_properties={\"magmom\": self.magmom}\n )\n\n # magmom has to be 3D for SOC calculation.\n if hasattr(self._structure[0], \"magmom\"):\n if not isinstance(self._structure[0].magmom, list):\n self._structure = self._structure.copy(\n site_properties={\n \"magmom\": [[0, 0, site.magmom] for site in self._structure]\n }\n )\n else:\n raise ValueError(\n \"Neither the previous structure has magmom \"\n \"property nor magmom provided\"\n )\n\n nbands = int(np.ceil(vasprun.parameters[\"NBANDS\"] * self.nbands_factor))\n self.prev_incar.update({\"NBANDS\": nbands})\n\n files_to_transfer = {}\n if self.copy_chgcar:\n chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / \"CHGCAR*\")))\n if chgcars:\n files_to_transfer[\"CHGCAR\"] = str(chgcars[-1])\n\n self.files_to_transfer.update(files_to_transfer)\n\n # multiply the reciprocal density if needed:\n if self.small_gap_multiply:\n gap = vasprun.eigenvalue_band_properties[0]\n if gap <= self.small_gap_multiply[0]:\n self.reciprocal_density = (\n self.reciprocal_density * self.small_gap_multiply[1]\n )\n\n return self\n\n @classmethod\n def from_prev_calc(cls, prev_calc_dir, **kwargs):\n \"\"\"\n Generate a set of Vasp input files for SOC calculations from a\n directory of previous static Vasp run. SOC calc requires all 3\n components for MAGMOM for each atom in the structure.\n\n Args:\n prev_calc_dir (str): The directory contains the outputs(\n vasprun.xml and OUTCAR) of previous vasp run.\n **kwargs: All kwargs supported by MPSOCSet, other than structure,\n prev_incar and prev_chgcar which are determined from the\n prev_calc_dir.\n \"\"\"\n input_set = cls(_dummy_structure, **kwargs)\n return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)\n\n\nclass MPNMRSet(MPStaticSet):\n \"\"\"\n Init a MPNMRSet.\n \"\"\"\n\n def __init__(\n self,\n structure,\n mode=\"cs\",\n isotopes=None,\n prev_incar=None,\n reciprocal_density=100,\n **kwargs\n ):\n \"\"\"\n Args:\n structure (Structure): Structure to compute\n mode (str): The NMR calculation to run\n \"cs\": for Chemical Shift\n \"efg\" for Electric Field Gradient\n isotopes (list): list of Isotopes for quadrupole moments\n prev_incar (Incar): Incar file from previous run.\n reciprocal_density (int): density of k-mesh by reciprocal\n volume (defaults to 100)\n **kwargs: kwargs supported by MPStaticSet.\n \"\"\"\n self.mode = mode\n self.isotopes = isotopes if isotopes else []\n super().__init__(\n structure,\n prev_incar=prev_incar,\n reciprocal_density=reciprocal_density,\n **kwargs\n )\n\n @property\n def incar(self):\n \"\"\"\n :return: Incar\n \"\"\"\n incar = super().incar\n\n if self.mode.lower() == \"cs\":\n incar.update(\n {\n \"LCHIMAG\": True,\n \"EDIFF\": -1.0e-10,\n \"ISYM\": 0,\n \"LCHARG\": False,\n \"LNMR_SYM_RED\": True,\n \"NELMIN\": 10,\n \"NSLPLINE\": True,\n \"PREC\": \"ACCURATE\",\n \"SIGMA\": 0.01,\n }\n )\n elif self.mode.lower() == \"efg\":\n\n isotopes = {ist.split(\"-\")[0]: ist for ist in self.isotopes}\n\n quad_efg = [\n Species(p).get_nmr_quadrupole_moment(isotopes.get(p, None))\n for p in self.poscar.site_symbols\n ]\n\n incar.update(\n {\n \"ALGO\": \"FAST\",\n \"EDIFF\": -1.0e-10,\n \"ISYM\": 0,\n \"LCHARG\": False,\n \"LEFG\": True,\n \"QUAD_EFG\": quad_efg,\n \"NELMIN\": 10,\n \"PREC\": \"ACCURATE\",\n \"SIGMA\": 0.01,\n }\n )\n incar.update(self.user_incar_settings)\n\n return 
incar\n\n\nclass MVLElasticSet(MPRelaxSet):\n \"\"\"\n MVL denotes VASP input sets that are implemented by the Materials Virtual\n Lab (http://www.materialsvirtuallab.org) for various research.\n\n This input set is used to calculate elastic constants in VASP. It is used\n in the following work::\n\n Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.\n “Elastic Properties of Alkali Superionic Conductor Electrolytes\n from First Principles Calculations”, J. Electrochem. Soc.\n 2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes\n\n To read the elastic constants, you may use the Outcar class which parses the\n elastic constants.\n \"\"\"\n\n def __init__(self, structure, potim=0.015, **kwargs):\n \"\"\"\n Args:\n scale (float): POTIM parameter. The default of 0.015 is usually fine,\n but some structures may require a smaller step.\n user_incar_settings (dict): A dict specifying additional incar\n settings.\n kwargs:\n Parameters supported by MPRelaxSet.\n \"\"\"\n super().__init__(structure, **kwargs)\n self._config_dict[\"INCAR\"].update({\"IBRION\": 6, \"NFREE\": 2, \"POTIM\": potim})\n self._config_dict[\"INCAR\"].pop(\"NPAR\", None)\n\n\nclass MVLGWSet(DictSet):\n \"\"\"\n MVL denotes VASP input sets that are implemented by the Materials Virtual\n Lab (http://www.materialsvirtuallab.org) for various research. This is a\n flexible input set for GW calculations.\n\n Note that unlike all other input sets in this module, the PBE_54 series of\n functional is set as the default. These have much improved performance for\n GW calculations.\n\n A typical sequence is mode=\"STATIC\" -> mode=\"DIAG\" -> mode=\"GW\" ->\n mode=\"BSE\". For all steps other than the first one (static), the\n recommendation is to use from_prev_calculation on the preceding run in\n the series.\n \"\"\"\n\n CONFIG = _load_yaml_config(\"MVLGWSet\")\n\n SUPPORTED_MODES = (\"DIAG\", \"GW\", \"STATIC\", \"BSE\")\n\n def __init__(\n self,\n structure,\n prev_incar=None,\n nbands=None,\n reciprocal_density=100,\n mode=\"STATIC\",\n copy_wavecar=True,\n nbands_factor=5,\n ncores=16,\n **kwargs\n ):\n r\"\"\"\n Args:\n structure (Structure): Input structure.\n prev_incar (Incar/string): Incar file from previous run.\n mode (str): Supported modes are \"STATIC\" (default), \"DIAG\", \"GW\",\n and \"BSE\".\n nbands (int): For subsequent calculations, it is generally\n recommended to perform NBANDS convergence starting from the\n NBANDS of the previous run for DIAG, and to use the exact same\n NBANDS for GW and BSE. This parameter is used by\n from_previous_calculation to set nband.\n copy_wavecar: Whether to copy the old WAVECAR, WAVEDER and associated\n files when starting from a previous calculation.\n nbands_factor (int): Multiplicative factor for NBANDS when starting\n from a previous calculation. Only applies if mode==\"DIAG\".\n Need to be tested for convergence.\n ncores (int): Numbers of cores used for the calculation. VASP will alter\n NBANDS if it was not dividable by ncores. Only applies if\n mode==\"DIAG\".\n **kwargs: All kwargs supported by DictSet. 
Typically,\n user_incar_settings is a commonly used option.\n \"\"\"\n super().__init__(structure, MVLGWSet.CONFIG, **kwargs)\n self.prev_incar = prev_incar\n self.nbands = nbands\n self.reciprocal_density = reciprocal_density\n self.mode = mode.upper()\n if self.mode not in MVLGWSet.SUPPORTED_MODES:\n raise ValueError(\n \"%s not one of the support modes : %s\"\n % (self.mode, MVLGWSet.SUPPORTED_MODES)\n )\n self.kwargs = kwargs\n self.copy_wavecar = copy_wavecar\n self.nbands_factor = nbands_factor\n self.ncores = ncores\n\n @property\n def kpoints(self):\n \"\"\"\n Generate gamma center k-points mesh grid for GW calc,\n which is requested by GW calculation.\n \"\"\"\n return Kpoints.automatic_density_by_vol(\n self.structure, self.reciprocal_density, force_gamma=True\n )\n\n @property\n def incar(self):\n \"\"\"\n :return: Incar\n \"\"\"\n parent_incar = super().incar\n incar = (\n Incar(self.prev_incar)\n if self.prev_incar is not None\n else Incar(parent_incar)\n )\n\n if self.mode == \"DIAG\":\n # Default parameters for diagonalization calculation.\n incar.update({\"ALGO\": \"Exact\", \"NELM\": 1, \"LOPTICS\": True, \"LPEAD\": True})\n elif self.mode == \"GW\":\n # Default parameters for GW calculation.\n incar.update({\"ALGO\": \"GW0\", \"NELM\": 1, \"NOMEGA\": 80, \"ENCUTGW\": 250})\n incar.pop(\"EDIFF\", None)\n incar.pop(\"LOPTICS\", None)\n incar.pop(\"LPEAD\", None)\n elif self.mode == \"BSE\":\n # Default parameters for BSE calculation.\n incar.update({\"ALGO\": \"BSE\", \"ANTIRES\": 0, \"NBANDSO\": 20, \"NBANDSV\": 20})\n\n if self.nbands:\n incar[\"NBANDS\"] = self.nbands\n\n # Respect user set INCAR.\n incar.update(self.kwargs.get(\"user_incar_settings\", {}))\n\n return incar\n\n def override_from_prev_calc(self, prev_calc_dir=\".\"):\n \"\"\"\n Update the input set to include settings from a previous calculation.\n\n Args:\n prev_calc_dir (str): The path to the previous calculation directory.\n\n Returns:\n The input set with the settings (structure, k-points, incar, etc)\n updated using the previous VASP run.\n \"\"\"\n vasprun, outcar = get_vasprun_outcar(prev_calc_dir)\n self.prev_incar = vasprun.incar\n self._structure = vasprun.final_structure\n\n if self.standardize:\n warnings.warn(\n \"Use of standardize=True with from_prev_run is not \"\n \"recommended as there is no guarantee the copied \"\n \"files will be appropriate for the standardized \"\n \"structure.\"\n )\n\n self.nbands = int(vasprun.parameters[\"NBANDS\"])\n if self.mode.upper() == \"DIAG\":\n self.nbands = int(\n np.ceil(self.nbands * self.nbands_factor / self.ncores) * self.ncores\n )\n\n # copy WAVECAR, WAVEDER (derivatives)\n files_to_transfer = {}\n if self.copy_wavecar:\n for fname in (\"WAVECAR\", \"WAVEDER\", \"WFULL\"):\n w = sorted(glob.glob(str(Path(prev_calc_dir) / (fname + \"*\"))))\n if w:\n if fname == \"WFULL\":\n for f in w:\n fname = Path(f).name\n fname = fname.split(\".\")[0]\n files_to_transfer[fname] = f\n else:\n files_to_transfer[fname] = str(w[-1])\n\n self.files_to_transfer.update(files_to_transfer)\n\n return self\n\n @classmethod\n def from_prev_calc(cls, prev_calc_dir, mode=\"DIAG\", **kwargs):\n \"\"\"\n Generate a set of Vasp input files for GW or BSE calculations from a\n directory of previous Exact Diag Vasp run.\n\n Args:\n prev_calc_dir (str): The directory contains the outputs(\n vasprun.xml of previous vasp run.\n mode (str): Supported modes are \"STATIC\", \"DIAG\" (default), \"GW\",\n and \"BSE\".\n **kwargs: All kwargs supported by MVLGWSet, other than 
structure,\n prev_incar and mode, which are determined from the\n prev_calc_dir.\n \"\"\"\n input_set = cls(_dummy_structure, mode=mode, **kwargs)\n return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)\n\n\nclass MVLSlabSet(MPRelaxSet):\n \"\"\"\n Class for writing a set of slab vasp runs,\n including both slabs (along the c direction) and orient unit cells (bulk),\n to ensure the same KPOINTS, POTCAR and INCAR criterion.\n \"\"\"\n\n def __init__(\n self,\n structure,\n k_product=50,\n bulk=False,\n auto_dipole=False,\n set_mix=True,\n sort_structure=True,\n **kwargs\n ):\n \"\"\"\n :param structure: Structure\n :param k_product: default to 50, kpoint number * length for a & b\n directions, also for c direction in bulk calculations\n :param bulk:\n :param auto_dipole:\n :param set_mix:\n :param sort_structure:\n :param kwargs: Other kwargs supported by :class:`DictSet`.\n \"\"\"\n super().__init__(structure, **kwargs)\n\n if sort_structure:\n structure = structure.get_sorted_structure()\n\n self.k_product = k_product\n self.bulk = bulk\n self.auto_dipole = auto_dipole\n self.kwargs = kwargs\n self.set_mix = set_mix\n self.kpt_calc = None\n\n slab_incar = {\n \"EDIFF\": 1e-4,\n \"EDIFFG\": -0.02,\n \"ENCUT\": 400,\n \"ISMEAR\": 0,\n \"SIGMA\": 0.05,\n \"ISIF\": 3,\n }\n if not self.bulk:\n slab_incar[\"ISIF\"] = 2\n slab_incar[\"LVTOT\"] = True\n if self.set_mix:\n slab_incar[\"AMIN\"] = 0.01\n slab_incar[\"AMIX\"] = 0.2\n slab_incar[\"BMIX\"] = 0.001\n slab_incar[\"NELMIN\"] = 8\n if self.auto_dipole:\n weights = [s.species.weight for s in structure]\n center_of_mass = np.average(\n structure.frac_coords, weights=weights, axis=0\n )\n\n slab_incar[\"IDIPOL\"] = 3\n slab_incar[\"LDIPOL\"] = True\n slab_incar[\"DIPOL\"] = center_of_mass\n\n self._config_dict[\"INCAR\"].update(slab_incar)\n\n @property\n def kpoints(self):\n \"\"\"\n k_product, default to 50, is kpoint number * length for a & b\n directions, also for c direction in bulk calculations\n Automatic mesh & Gamma is the default setting.\n \"\"\"\n\n # To get input sets, the input structure has to has the same number\n # of required parameters as a Structure object (ie. 4). Slab\n # attributes aren't going to affect the VASP inputs anyways so\n # converting the slab into a structure should not matter\n\n kpt = super().kpoints\n kpt.comment = \"Automatic mesh\"\n kpt.style = \"Gamma\"\n\n # use k_product to calculate kpoints, k_product = kpts[0][0] * a\n lattice_abc = self.structure.lattice.abc\n kpt_calc = [\n int(self.k_product / lattice_abc[0] + 0.5),\n int(self.k_product / lattice_abc[1] + 0.5),\n 1,\n ]\n\n self.kpt_calc = kpt_calc\n # calculate kpts (c direction) for bulk. (for slab, set to 1)\n if self.bulk:\n kpt_calc[2] = int(self.k_product / lattice_abc[2] + 0.5)\n\n kpt.kpts[0] = kpt_calc\n\n return kpt\n\n def as_dict(self, verbosity=2):\n \"\"\"\n :param verbosity: Verbosity of dict. E.g., whether to include Structure.\n :return: MSONAble dict\n \"\"\"\n d = MSONable.as_dict(self)\n if verbosity == 1:\n d.pop(\"structure\", None)\n return d\n\n\nclass MVLGBSet(MPRelaxSet):\n \"\"\"\n Class for writing a vasp input files for grain boundary calculations, slab\n or bulk.\n \"\"\"\n\n def __init__(\n self, structure, k_product=40, slab_mode=False, is_metal=True, **kwargs\n ):\n r\"\"\"\n\n Args:\n structure(Structure): provide the structure\n k_product: Kpoint number * length for a & b directions, also for c\n direction in bulk calculations. Default to 40.\n slab_mode (bool): Defaults to False. 
Use default (False) for a\n bulk supercell. Use True if you are performing calculations on a\n slab-like (i.e., surface) of the GB, for example, when you are\n calculating the work of separation.\n is_metal (bool): Defaults to True. This determines whether an ISMEAR of\n 1 is used (for metals) or not (for insulators and semiconductors)\n by default. Note that it does *not* override user_incar_settings,\n which can be set by the user to be anything desired.\n **kwargs:\n Other kwargs supported by :class:`MPRelaxSet`.\n \"\"\"\n super().__init__(structure, **kwargs)\n self.k_product = k_product\n self.slab_mode = slab_mode\n self.is_metal = is_metal\n\n @property\n def kpoints(self):\n \"\"\"\n k_product, default to 40, is kpoint number * length for a & b\n directions, also for c direction in bulk calculations\n Automatic mesh & Gamma is the default setting.\n \"\"\"\n\n # To get input sets, the input structure has to has the same number\n # of required parameters as a Structure object.\n\n kpt = super().kpoints\n kpt.comment = \"Generated by pymatgen's MVLGBSet\"\n kpt.style = \"Gamma\"\n\n # use k_product to calculate kpoints, k_product = kpts[0][0] * a\n lengths = self.structure.lattice.abc\n kpt_calc = [\n int(self.k_product / lengths[0] + 0.5),\n int(self.k_product / lengths[1] + 0.5),\n int(self.k_product / lengths[2] + 0.5),\n ]\n\n if self.slab_mode:\n kpt_calc[2] = 1\n\n kpt.kpts[0] = kpt_calc\n\n return kpt\n\n @property\n def incar(self):\n \"\"\"\n :return: Incar\n \"\"\"\n incar = super().incar\n\n # The default incar setting is used for metallic system, for\n # insulator or semiconductor, ISMEAR need to be changed.\n incar.update(\n {\n \"LCHARG\": False,\n \"NELM\": 60,\n \"PREC\": \"Normal\",\n \"EDIFFG\": -0.02,\n \"ICHARG\": 0,\n \"NSW\": 200,\n \"EDIFF\": 0.0001,\n }\n )\n\n if self.is_metal:\n incar[\"ISMEAR\"] = 1\n incar[\"LDAU\"] = False\n\n if self.slab_mode:\n # for clean grain boundary and bulk relaxation, full optimization\n # relaxation (ISIF=3) is used. For slab relaxation (ISIF=2) is used.\n incar[\"ISIF\"] = 2\n incar[\"NELMIN\"] = 8\n\n incar.update(self.user_incar_settings)\n\n return incar\n\n\nclass MVLRelax52Set(DictSet):\n \"\"\"\n Implementation of VaspInputSet utilizing the public Materials Project\n parameters for INCAR & KPOINTS and VASP's recommended PAW potentials for\n POTCAR.\n\n Keynotes from VASP manual:\n 1. Recommended potentials for calculations using vasp.5.2+\n 2. If dimers with short bonds are present in the compound (O2, CO,\n N2, F2, P2, S2, Cl2), it is recommended to use the h potentials.\n Specifically, C_h, O_h, N_h, F_h, P_h, S_h, Cl_h\n 3. Released on Oct 28, 2018 by VASP. Please refer to VASP\n Manual 1.2, 1.3 & 10.2.1 for more details.\n \"\"\"\n\n CONFIG = _load_yaml_config(\"MVLRelax52Set\")\n\n def __init__(self, structure, **kwargs):\n \"\"\"\n Args:\n structure (Structure): input structure.\n potcar_functional (str): choose from \"PBE_52\" and \"PBE_54\".\n **kwargs: Other kwargs supported by :class:`DictSet`.\n \"\"\"\n if kwargs.get(\"potcar_functional\") or kwargs.get(\"user_potcar_functional\"):\n super().__init__(structure, MVLRelax52Set.CONFIG, **kwargs)\n else:\n super().__init__(\n structure,\n MVLRelax52Set.CONFIG,\n user_potcar_functional=\"PBE_52\",\n **kwargs\n )\n if self.potcar_functional not in [\"PBE_52\", \"PBE_54\"]:\n raise ValueError(\"Please select from PBE_52 and PBE_54!\")\n\n self.kwargs = kwargs\n\n\nclass MITNEBSet(MITRelaxSet):\n \"\"\"\n Class for writing NEB inputs. 
Note that EDIFF is not on a per atom\n basis for this input set.\n \"\"\"\n\n def __init__(self, structures, unset_encut=False, **kwargs):\n \"\"\"\n Args:\n structures: List of Structure objects.\n unset_encut (bool): Whether to unset ENCUT.\n **kwargs: Other kwargs supported by :class:`DictSet`.\n \"\"\"\n if len(structures) < 3:\n raise ValueError(\"You need at least 3 structures for an NEB.\")\n kwargs[\"sort_structure\"] = False\n super().__init__(structures[0], **kwargs)\n self.structures = self._process_structures(structures)\n\n self.unset_encut = False\n if unset_encut:\n self._config_dict[\"INCAR\"].pop(\"ENCUT\", None)\n\n if \"EDIFF\" not in self._config_dict[\"INCAR\"]:\n self._config_dict[\"INCAR\"][\"EDIFF\"] = self._config_dict[\"INCAR\"].pop(\n \"EDIFF_PER_ATOM\"\n )\n\n # NEB specific defaults\n defaults = {\n \"IMAGES\": len(structures) - 2,\n \"IBRION\": 1,\n \"ISYM\": 0,\n \"LCHARG\": False,\n \"LDAU\": False,\n }\n self._config_dict[\"INCAR\"].update(defaults)\n\n @property\n def poscar(self):\n \"\"\"\n :return: Poscar for structure of first end point.\n \"\"\"\n return Poscar(self.structures[0])\n\n @property\n def poscars(self):\n \"\"\"\n :return: List of Poscars.\n \"\"\"\n return [Poscar(s) for s in self.structures]\n\n @staticmethod\n def _process_structures(structures):\n \"\"\"\n Remove any atom jumps across the cell\n \"\"\"\n input_structures = structures\n structures = [input_structures[0]]\n for s in input_structures[1:]:\n prev = structures[-1]\n for i, site in enumerate(s):\n t = np.round(prev[i].frac_coords - site.frac_coords)\n if np.any(np.abs(t) > 0.5):\n s.translate_sites([i], t, to_unit_cell=False)\n structures.append(s)\n return structures\n\n def write_input(\n self,\n output_dir,\n make_dir_if_not_present=True,\n write_cif=False,\n write_path_cif=False,\n write_endpoint_inputs=False,\n ):\n \"\"\"\n NEB inputs has a special directory structure where inputs are in 00,\n 01, 02, ....\n\n Args:\n output_dir (str): Directory to output the VASP input files\n make_dir_if_not_present (bool): Set to True if you want the\n directory (and the whole path) to be created if it is not\n present.\n write_cif (bool): If true, writes a cif along with each POSCAR.\n write_path_cif (bool): If true, writes a cif for each image.\n write_endpoint_inputs (bool): If true, writes input files for\n running endpoint calculations.\n \"\"\"\n output_dir = Path(output_dir)\n if make_dir_if_not_present and not output_dir.exists():\n output_dir.mkdir(parents=True)\n self.incar.write_file(str(output_dir / \"INCAR\"))\n self.kpoints.write_file(str(output_dir / \"KPOINTS\"))\n self.potcar.write_file(str(output_dir / \"POTCAR\"))\n\n for i, p in enumerate(self.poscars):\n d = output_dir / str(i).zfill(2)\n if not d.exists():\n d.mkdir(parents=True)\n p.write_file(str(d / \"POSCAR\"))\n if write_cif:\n p.structure.to(filename=str(d / \"{}.cif\".format(i)))\n if write_endpoint_inputs:\n end_point_param = MITRelaxSet(\n self.structures[0], user_incar_settings=self.user_incar_settings\n )\n\n for image in [\"00\", str(len(self.structures) - 1).zfill(2)]:\n end_point_param.incar.write_file(str(output_dir / image / \"INCAR\"))\n end_point_param.kpoints.write_file(str(output_dir / image / \"KPOINTS\"))\n end_point_param.potcar.write_file(str(output_dir / image / \"POTCAR\"))\n if write_path_cif:\n sites = set()\n lat = self.structures[0].lattice\n for site in chain(*(s.sites for s in self.structures)):\n sites.add(PeriodicSite(site.species, site.frac_coords, lat))\n nebpath = 
Structure.from_sites(sorted(sites))\n nebpath.to(filename=str(output_dir / \"path.cif\"))\n\n\nclass MITMDSet(MITRelaxSet):\n \"\"\"\n Class for writing a vasp md run. This DOES NOT do multiple stage\n runs.\n \"\"\"\n\n def __init__(\n self,\n structure,\n start_temp,\n end_temp,\n nsteps,\n time_step=2,\n spin_polarized=False,\n **kwargs\n ):\n r\"\"\"\n\n Args:\n structure (Structure): Input structure.\n start_temp (int): Starting temperature.\n end_temp (int): Final temperature.\n nsteps (int): Number of time steps for simulations. NSW parameter.\n time_step (int): The time step for the simulation. The POTIM\n parameter. Defaults to 2fs.\n spin_polarized (bool): Whether to do spin polarized calculations.\n The ISPIN parameter. Defaults to False.\n **kwargs: Other kwargs supported by :class:`DictSet`.\n \"\"\"\n # MD default settings\n defaults = {\n \"TEBEG\": start_temp,\n \"TEEND\": end_temp,\n \"NSW\": nsteps,\n \"EDIFF_PER_ATOM\": 0.000001,\n \"LSCALU\": False,\n \"LCHARG\": False,\n \"LPLANE\": False,\n \"LWAVE\": True,\n \"ISMEAR\": 0,\n \"NELMIN\": 4,\n \"LREAL\": True,\n \"BMIX\": 1,\n \"MAXMIX\": 20,\n \"NELM\": 500,\n \"NSIM\": 4,\n \"ISYM\": 0,\n \"ISIF\": 0,\n \"IBRION\": 0,\n \"NBLOCK\": 1,\n \"KBLOCK\": 100,\n \"SMASS\": 0,\n \"POTIM\": time_step,\n \"PREC\": \"Low\",\n \"ISPIN\": 2 if spin_polarized else 1,\n \"LDAU\": False,\n }\n\n super().__init__(structure, **kwargs)\n\n self.start_temp = start_temp\n self.end_temp = end_temp\n self.nsteps = nsteps\n self.time_step = time_step\n self.spin_polarized = spin_polarized\n self.kwargs = kwargs\n\n # use VASP default ENCUT\n self._config_dict[\"INCAR\"].pop(\"ENCUT\", None)\n\n if defaults[\"ISPIN\"] == 1:\n self._config_dict[\"INCAR\"].pop(\"MAGMOM\", None)\n self._config_dict[\"INCAR\"].update(defaults)\n\n @property\n def kpoints(self):\n \"\"\"\n :return: Kpoints\n \"\"\"\n return Kpoints.gamma_automatic()\n\n\nclass MPMDSet(MPRelaxSet):\n \"\"\"\n This a modified version of the old MITMDSet pre 2018/03/12.\n\n This set serves as the basis for the amorphous skyline paper.\n\n (1) Aykol, M.; Dwaraknath, S. S.; Sun, W.; Persson, K. A. Thermodynamic\n Limit for Synthesis of Metastable Inorganic Materials. Sci. Adv. 2018,\n 4 (4).\n\n Class for writing a vasp md run. This DOES NOT do multiple stage runs.\n Precision remains normal, to increase accuracy of stress tensor.\n \"\"\"\n\n def __init__(\n self, structure, start_temp, end_temp, nsteps, spin_polarized=False, **kwargs\n ):\n r\"\"\"\n Args:\n structure (Structure): Input structure.\n start_temp (int): Starting temperature.\n end_temp (int): Final temperature.\n nsteps (int): Number of time steps for simulations. NSW parameter.\n time_step (int): The time step for the simulation. The POTIM\n parameter. Defaults to 2fs.\n spin_polarized (bool): Whether to do spin polarized calculations.\n The ISPIN parameter. 
Defaults to False.\n **kwargs: Other kwargs supported by :class:`DictSet`.\n \"\"\"\n\n # MD default settings\n defaults = {\n \"TEBEG\": start_temp,\n \"TEEND\": end_temp,\n \"NSW\": nsteps,\n \"EDIFF_PER_ATOM\": 0.00001,\n \"LSCALU\": False,\n \"LCHARG\": False,\n \"LPLANE\": False,\n \"LWAVE\": True,\n \"ISMEAR\": 0,\n \"NELMIN\": 4,\n \"LREAL\": True,\n \"BMIX\": 1,\n \"MAXMIX\": 20,\n \"NELM\": 500,\n \"NSIM\": 4,\n \"ISYM\": 0,\n \"ISIF\": 0,\n \"IBRION\": 0,\n \"NBLOCK\": 1,\n \"KBLOCK\": 100,\n \"SMASS\": 0,\n \"POTIM\": 2,\n \"PREC\": \"Normal\",\n \"ISPIN\": 2 if spin_polarized else 1,\n \"LDAU\": False,\n \"ADDGRID\": True,\n }\n\n if Element(\"H\") in structure.species:\n defaults[\"POTIM\"] = 0.5\n defaults[\"NSW\"] = defaults[\"NSW\"] * 4\n\n super().__init__(structure, **kwargs)\n\n self.start_temp = start_temp\n self.end_temp = end_temp\n self.nsteps = nsteps\n self.spin_polarized = spin_polarized\n self.kwargs = kwargs\n\n # use VASP default ENCUT\n self._config_dict[\"INCAR\"].pop(\"ENCUT\", None)\n\n if defaults[\"ISPIN\"] == 1:\n self._config_dict[\"INCAR\"].pop(\"MAGMOM\", None)\n self._config_dict[\"INCAR\"].update(defaults)\n\n @property\n def kpoints(self):\n \"\"\"\n :return: Kpoints\n \"\"\"\n return Kpoints.gamma_automatic()\n\n\nclass MVLNPTMDSet(MITMDSet):\n \"\"\"\n Class for writing a vasp md run in NPT ensemble.\n\n Notes:\n To eliminate Pulay stress, the default ENCUT is set to a rather large\n value of ENCUT, which is 1.5 * ENMAX.\n \"\"\"\n\n def __init__(\n self,\n structure,\n start_temp,\n end_temp,\n nsteps,\n time_step=2,\n spin_polarized=False,\n **kwargs\n ):\n r\"\"\"\n Args:\n structure (Structure): input structure.\n start_temp (int): Starting temperature.\n end_temp (int): Final temperature.\n nsteps(int): Number of time steps for simulations. NSW parameter.\n time_step (int): The time step for the simulation. The POTIM\n parameter. Defaults to 2fs.\n spin_polarized (bool): Whether to do spin polarized calculations.\n The ISPIN parameter. Defaults to False.\n **kwargs: Other kwargs supported by :class:`DictSet`.\n \"\"\"\n user_incar_settings = kwargs.get(\"user_incar_settings\", {})\n\n # NPT-AIMD default settings\n defaults = {\n \"IALGO\": 48,\n \"ISIF\": 3,\n \"LANGEVIN_GAMMA\": [10] * structure.ntypesp,\n \"LANGEVIN_GAMMA_L\": 1,\n \"MDALGO\": 3,\n \"PMASS\": 10,\n \"PSTRESS\": 0,\n \"SMASS\": 0,\n }\n\n defaults.update(user_incar_settings)\n kwargs[\"user_incar_settings\"] = defaults\n\n super().__init__(\n structure, start_temp, end_temp, nsteps, time_step, spin_polarized, **kwargs\n )\n\n # Set NPT-AIMD ENCUT = 1.5 * VASP_default\n enmax = [self.potcar[i].keywords[\"ENMAX\"] for i in range(structure.ntypesp)]\n encut = max(enmax) * 1.5\n self._config_dict[\"INCAR\"][\"ENCUT\"] = encut\n\n\nclass MVLScanRelaxSet(MPRelaxSet):\n \"\"\"\n Class for writing a relax input set using Strongly Constrained and\n Appropriately Normed (SCAN) semilocal density functional.\n\n Notes:\n 1. This functional is only available from VASP.5.4.3 upwards.\n\n 2. Meta-GGA calculations require POTCAR files that include\n information on the kinetic energy density of the core-electrons,\n i.e. \"PBE_52\" or \"PBE_54\". 
Make sure the POTCAR including the\n following lines (see VASP wiki for more details):\n\n $ grep kinetic POTCAR\n kinetic energy-density\n mkinetic energy-density pseudized\n kinetic energy density (partial)\n \"\"\"\n\n def __init__(self, structure, **kwargs):\n r\"\"\"\n Args:\n structure (Structure): input structure.\n vdw (str): set \"rVV10\" to enable SCAN+rVV10, which is a versatile\n van der Waals density functional by combing the SCAN functional\n with the rVV10 non-local correlation functional.\n **kwargs: Other kwargs supported by :class:`DictSet`.\n \"\"\"\n # choose PBE_52 unless the user specifies something else\n if kwargs.get(\"potcar_functional\") or kwargs.get(\"user_potcar_functional\"):\n super().__init__(structure, **kwargs)\n else:\n super().__init__(structure, user_potcar_functional=\"PBE_52\", **kwargs)\n\n if self.potcar_functional not in [\"PBE_52\", \"PBE_54\"]:\n raise ValueError(\"SCAN calculations required PBE_52 or PBE_54!\")\n\n updates = {\n \"ADDGRID\": True,\n \"EDIFF\": 1e-05,\n \"EDIFFG\": -0.05,\n \"LASPH\": True,\n \"LDAU\": False,\n \"METAGGA\": \"SCAN\",\n \"NELM\": 200,\n }\n\n if kwargs.get(\"vdw\", \"\").lower() == \"rvv10\":\n updates[\"BPARAM\"] = 15.7 # This is the correct BPARAM for SCAN+rVV10\n\n self._config_dict[\"INCAR\"].update(updates)\n\n\nclass LobsterSet(MPRelaxSet):\n \"\"\"\n Input set to prepare VASP runs that can be digested by Lobster (See cohp.de)\n \"\"\"\n\n CONFIG = _load_yaml_config(\"MPRelaxSet\")\n\n def __init__(\n self,\n structure: Structure,\n isym: int = 0,\n ismear: int = -5,\n reciprocal_density: int = None,\n address_basis_file: str = None,\n user_supplied_basis: dict = None,\n user_potcar_settings: dict = {\"W\": \"W_sv\"},\n **kwargs\n ):\n \"\"\"\n Args:\n structure (Structure): input structure.\n isym (int): ISYM entry for INCAR, only isym=-1 and isym=0 are allowed\n ismear (int): ISMEAR entry for INCAR, only ismear=-5 and ismear=0 are allowed\n reciprocal_density (int): density of k-mesh by reciprocal volume\n user_supplied_basis (dict): dict including basis functions for all elements in structure,\n e.g. {\"Fe\": \"3d 3p 4s\", \"O\": \"2s 2p\"}; if not supplied, a standard basis is used\n address_basis_file (str): address to a file similar to \"BASIS_PBE_54_standaard.yaml\"\n in pymatgen.io.lobster.lobster_basis\n **kwargs: Other kwargs supported by :class:`DictSet`.\n \"\"\"\n warnings.warn(\n \"Make sure that all parameters are okay! 
This is a brand new implementation.\"\n )\n\n if isym not in (-1, 0):\n raise ValueError(\"Lobster cannot digest WAVEFUNCTIONS with symmetry\")\n if ismear not in (-5, 0):\n raise ValueError(\"Lobster usually works with ismear=-5 or ismear=0\")\n\n # newest potcars are preferred\n # Choose PBE_54 unless the user specifies a different potcar_functional\n if kwargs.get(\"potcar_functional\") or kwargs.get(\"user_potcar_functional\"):\n super().__init__(structure, **kwargs)\n else:\n super().__init__(structure, user_potcar_functional=\"PBE_54\", **kwargs)\n\n # reciprocal density\n if self.user_kpoints_settings is not None:\n if (\n not reciprocal_density\n or \"reciprocal_density\" not in self.user_kpoints_settings\n ):\n # test, if this is okay\n self.reciprocal_density = 310\n else:\n self.reciprocal_density = (\n reciprocal_density\n or self.user_kpoints_settings[\"reciprocal_density\"]\n )\n else:\n if not reciprocal_density:\n # test, if this is okay\n self.reciprocal_density = 310\n else:\n self.reciprocal_density = reciprocal_density\n\n self.isym = isym\n self.ismear = ismear\n self.user_supplied_basis = user_supplied_basis\n self.address_basis_file = address_basis_file\n # predefined basis! Check if the basis is okay! (charge spilling and bandoverlaps!)\n if user_supplied_basis is None and address_basis_file is None:\n basis = Lobsterin.get_basis(\n structure=structure, potcar_symbols=self.potcar_symbols\n )\n elif address_basis_file is not None:\n basis = Lobsterin.get_basis(\n structure=structure,\n potcar_symbols=self.potcar_symbols,\n address_basis_file=address_basis_file,\n )\n elif user_supplied_basis is not None:\n # test if all elements from structure are in user_supplied_basis\n for atomtype in structure.symbol_set:\n if atomtype not in user_supplied_basis:\n raise ValueError(\n \"There are no basis functions for the atom type \"\n + str(atomtype)\n )\n basis = [key + \" \" + value for key, value in user_supplied_basis.items()]\n\n lobsterin = Lobsterin(settingsdict={\"basisfunctions\": basis})\n nbands = lobsterin._get_nbands(structure=structure)\n\n update_dict = {\n \"EDIFF\": 1e-6,\n \"NSW\": 0,\n \"LWAVE\": True,\n \"ISYM\": isym,\n \"NBANDS\": nbands,\n \"IBRION\": -1,\n \"ISMEAR\": ismear,\n \"LORBIT\": 11,\n \"ICHARG\": 0,\n \"ALGO\": \"Normal\",\n }\n\n self._config_dict[\"INCAR\"].update(update_dict)\n self._config_dict[\"KPOINTS\"].update(\n {\"reciprocal_density\": self.reciprocal_density}\n )\n\n\ndef get_vasprun_outcar(path, parse_dos=True, parse_eigen=True):\n \"\"\"\n :param path: Path to get the vasprun.xml and OUTCAR.\n :param parse_dos: Whether to parse dos. Defaults to True.\n :param parse_eigen: Whether to parse eigenvalue. 
Defaults to True.\n :return:\n \"\"\"\n path = Path(path)\n vruns = list(glob.glob(str(path / \"vasprun.xml*\")))\n outcars = list(glob.glob(str(path / \"OUTCAR*\")))\n\n if len(vruns) == 0 or len(outcars) == 0:\n raise ValueError(\n \"Unable to get vasprun.xml/OUTCAR from prev calculation in %s\" % path\n )\n vsfile_fullpath = str(path / \"vasprun.xml\")\n outcarfile_fullpath = str(path / \"OUTCAR\")\n vsfile = vsfile_fullpath if vsfile_fullpath in vruns else sorted(vruns)[-1]\n outcarfile = (\n outcarfile_fullpath if outcarfile_fullpath in outcars else sorted(outcars)[-1]\n )\n return (\n Vasprun(vsfile, parse_dos=parse_dos, parse_eigen=parse_eigen),\n Outcar(outcarfile),\n )\n\n\ndef get_structure_from_prev_run(vasprun, outcar=None):\n \"\"\"\n Process structure from previous run.\n\n Args:\n vasprun (Vasprun): Vasprun that contains the final structure\n from previous run.\n outcar (Outcar): Outcar that contains the magnetization info from\n previous run.\n\n Returns:\n Returns the magmom-decorated structure that can be passed to get\n Vasp input files, e.g. get_kpoints.\n \"\"\"\n structure = vasprun.final_structure\n\n site_properties = {}\n # magmom\n if vasprun.is_spin:\n if outcar and outcar.magnetization:\n site_properties.update({\"magmom\": [i[\"tot\"] for i in outcar.magnetization]})\n else:\n site_properties.update({\"magmom\": vasprun.parameters[\"MAGMOM\"]})\n # ldau\n if vasprun.parameters.get(\"LDAU\", False):\n for k in (\"LDAUU\", \"LDAUJ\", \"LDAUL\"):\n vals = vasprun.incar[k]\n m = {}\n l_val = []\n s = 0\n for site in structure:\n if site.specie.symbol not in m:\n m[site.specie.symbol] = vals[s]\n s += 1\n l_val.append(m[site.specie.symbol])\n if len(l_val) == len(structure):\n site_properties.update({k.lower(): l_val})\n else:\n raise ValueError(\n \"length of list {} not the same as\" \"structure\".format(l_val)\n )\n\n return structure.copy(site_properties=site_properties)\n\n\ndef standardize_structure(structure, sym_prec=0.1, international_monoclinic=True):\n \"\"\"\n Get the symmetrically standardized structure.\n\n Args:\n structure (Structure): The structure.\n sym_prec (float): Tolerance for symmetry finding for standardization.\n international_monoclinic (bool): Whether to use international\n convention (vs Curtarolo) for monoclinic. Defaults True.\n\n Returns:\n The symmetrized structure.\n \"\"\"\n sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)\n new_structure = sym_finder.get_primitive_standard_structure(\n international_monoclinic=international_monoclinic\n )\n\n # the primitive structure finding has had several bugs in the past\n # defend through validation\n vpa_old = structure.volume / structure.num_sites\n vpa_new = new_structure.volume / new_structure.num_sites\n\n if abs(vpa_old - vpa_new) / vpa_old > 0.02:\n raise ValueError(\n \"Standardizing cell failed! VPA old: {}, VPA new: {}\".format(\n vpa_old, vpa_new\n )\n )\n\n sm = StructureMatcher()\n if not sm.fit(structure, new_structure):\n raise ValueError(\"Standardizing cell failed! 
Old structure doesn't match new.\")\n\n return new_structure\n\n\nclass BadInputSetWarning(UserWarning):\n \"\"\"\n Warning class for bad but legal inputs.\n \"\"\"\n\n pass\n\n\ndef batch_write_input(\n structures,\n vasp_input_set=MPRelaxSet,\n output_dir=\".\",\n make_dir_if_not_present=True,\n subfolder=None,\n sanitize=False,\n include_cif=False,\n potcar_spec=False,\n zip_output=False,\n **kwargs\n):\n \"\"\"\n Batch write vasp input for a sequence of structures to\n output_dir, following the format output_dir/{group}/{formula}_{number}.\n\n Args:\n structures ([Structure]): Sequence of Structures.\n vasp_input_set (VaspInputSet): VaspInputSet class that creates\n vasp input files from structures. Note that a class should be\n supplied. Defaults to MPRelaxSet.\n output_dir (str): Directory to output files. Defaults to current\n directory \".\".\n make_dir_if_not_present (bool): Create the directory if not present.\n Defaults to True.\n subfolder (callable): Function to create subdirectory name from\n structure. Defaults to simply \"formula_count\".\n sanitize (bool): Boolean indicating whether to sanitize the\n structure before writing the VASP input files. Sanitized output\n are generally easier for viewing and certain forms of analysis.\n Defaults to False.\n include_cif (bool): Whether to output a CIF as well. CIF files are\n generally better supported in visualization programs.\n potcar_spec (bool): Instead of writing the POTCAR, write a \"POTCAR.spec\".\n This is intended to help sharing an input set with people who might\n not have a license to specific Potcar files. Given a \"POTCAR.spec\",\n the specific POTCAR file can be re-generated using pymatgen with the\n \"generate_potcar\" function in the pymatgen CLI.\n zip_output (bool): If True, output will be zipped into a file with the\n same name as the InputSet (e.g., MPStaticSet.zip)\n **kwargs: Additional kwargs are passed to the vasp_input_set class\n in addition to structure.\n \"\"\"\n output_dir = Path(output_dir)\n for i, s in enumerate(structures):\n formula = re.sub(r\"\\s+\", \"\", s.formula)\n if subfolder is not None:\n subdir = subfolder(s)\n d = output_dir / subdir\n else:\n d = output_dir / \"{}_{}\".format(formula, i)\n if sanitize:\n s = s.copy(sanitize=True)\n v = vasp_input_set(s, **kwargs)\n v.write_input(\n str(d),\n make_dir_if_not_present=make_dir_if_not_present,\n include_cif=include_cif,\n potcar_spec=potcar_spec,\n zip_output=zip_output,\n )\n\n\n_dummy_structure = Structure(\n [1, 0, 0, 0, 1, 0, 0, 0, 1],\n [\"I\"],\n [[0, 0, 0]],\n site_properties={\"magmom\": [[0, 0, 1]]},\n)\n\n\ndef get_valid_magmom_struct(structure, inplace=True, spin_mode=\"auto\"):\n \"\"\"\n Make sure that the structure is valid magmoms based on the kind of caculation\n Fill in missing Magmom values\n\n Args:\n structure: The input structure\n inplace: True - edit the magmom of the input structurel; False - return new structure\n spin_mode: \"scalar\"/\"vector\"/\"none\"/\"auto\" only first letter (s/v/n) is needed.\n dictates how the spin configuration will be determined.\n\n - auto: read the existing magmom values and decide\n - scalar: use a single scalar value (for spin up/down)\n - vector: use a vector value for spin-orbit systems\n - none: Remove all the magmom information\n\n Returns:\n New structure if inplace == False\n \"\"\"\n default_values = {\"s\": 1.0, \"v\": [1.0, 1.0, 1.0], \"n\": None}\n if spin_mode[0].lower() == \"a\":\n mode = \"n\"\n for isite in structure.sites:\n if \"magmom\" not in isite.properties or 
isite.properties[\"magmom\"] is None:\n pass\n elif isinstance(isite.properties[\"magmom\"], float):\n if mode == \"v\":\n raise TypeError(\"Magmom type conflict\")\n mode = \"s\"\n elif len(isite.properties[\"magmom\"]) == 3:\n if mode == \"s\":\n raise TypeError(\"Magmom type conflict\")\n mode = \"v\"\n else:\n raise TypeError(\"Unrecognized Magmom Value\")\n else:\n mode = spin_mode[0].lower()\n\n if not inplace:\n new_struct = structure.copy()\n else:\n new_struct = structure\n for isite in new_struct.sites:\n if mode == \"n\":\n if \"magmom\" in isite.properties:\n isite.properties.pop(\"magmom\")\n elif \"magmom\" not in isite.properties or isite.properties[\"magmom\"] is None:\n isite.properties[\"magmom\"] = default_values[mode]\n\n if not inplace:\n return new_struct\n return None\n"
] |
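For reference, the code cell that closes above is pymatgen's VASP input-set module, and it ends with the standalone helper `get_valid_magmom_struct`. The sketch below is illustrative only and not part of the dataset row: it exercises that helper on a dummy iodine structure like the one the module itself constructs, assuming pymatgen is installed and the module is importable as `pymatgen.io.vasp.sets`.

```python
# Minimal sketch, assuming the module shown above is available as pymatgen.io.vasp.sets.
from pymatgen.core import Structure
from pymatgen.io.vasp.sets import get_valid_magmom_struct

# Cubic lattice with a single I site, mirroring the module's own dummy structure.
struct = Structure(
    [1, 0, 0, 0, 1, 0, 0, 0, 1],  # 3x3 lattice matrix given as a flat list
    ["I"],
    [[0, 0, 0]],
)

# Scalar spin mode: every site lacking a magmom gets the default value 1.0.
get_valid_magmom_struct(struct, inplace=True, spin_mode="s")
print(struct[0].properties["magmom"])  # -> 1.0
```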
[
[
"numpy.product",
"numpy.sqrt",
"numpy.allclose",
"numpy.abs",
"numpy.round",
"numpy.ceil",
"numpy.average",
"numpy.array"
]
] |
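The `apis` cell above lists the numpy entry points detected in this row's code cell. As a hedged illustration (made-up numbers, not taken from the dataset), the sketch below reproduces two of those call patterns as they appear in that code: the NBANDS rounding in `MVLGWSet.override_from_prev_calc` and the mass-weighted center of mass used for the DIPOL tag in `MVLSlabSet`.

```python
import numpy as np

# MVLGWSet.override_from_prev_calc (DIAG mode): bump NBANDS up to a multiple of ncores.
nbands, nbands_factor, ncores = 96, 5, 16
nbands = int(np.ceil(nbands * nbands_factor / ncores) * ncores)
print(nbands)  # 480 -- always a multiple of ncores

# MVLSlabSet with auto_dipole=True: weighted center of mass fed into INCAR's DIPOL.
frac_coords = np.array([[0.0, 0.0, 0.40], [0.5, 0.5, 0.60]])
weights = [55.85, 16.00]  # example atomic weights (roughly Fe and O)
center_of_mass = np.average(frac_coords, weights=weights, axis=0)
print(center_of_mass)
```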
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WiFisunset/qiskit-terra
|
[
"6a2602a9ecf9b1a3345de1516b873ac7b3da587f",
"6a2602a9ecf9b1a3345de1516b873ac7b3da587f",
"6a2602a9ecf9b1a3345de1516b873ac7b3da587f",
"6a2602a9ecf9b1a3345de1516b873ac7b3da587f"
] |
[
"qiskit/opflow/gradients/natural_gradient.py",
"test/python/circuit/library/test_nlocal.py",
"qiskit/algorithms/optimizers/gsls.py",
"qiskit/pulse/library/waveform.py"
] |
[
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" Natural Gradient. \"\"\"\n\nfrom collections.abc import Iterable\nfrom typing import List, Tuple, Callable, Optional, Union\nimport functools\nimport numpy as np\n\nfrom qiskit.circuit.quantumcircuit import _compare_parameters\nfrom qiskit.circuit import ParameterVector, ParameterExpression\nfrom qiskit.exceptions import MissingOptionalLibraryError\nfrom ..operator_base import OperatorBase\nfrom ..list_ops.list_op import ListOp\nfrom ..list_ops.composed_op import ComposedOp\nfrom ..state_fns.circuit_state_fn import CircuitStateFn\nfrom .circuit_gradients import CircuitGradient\nfrom .circuit_qfis import CircuitQFI\nfrom .gradient import Gradient\nfrom .gradient_base import GradientBase\nfrom .qfi import QFI\n\n\nclass NaturalGradient(GradientBase):\n r\"\"\"Convert an operator expression to the first-order gradient.\n\n Given an ill-posed inverse problem\n\n x = arg min{||Ax-C||^2} (1)\n\n one can use regularization schemes can be used to stabilize the system and find a numerical\n solution\n\n x_lambda = arg min{||Ax-C||^2 + lambda*R(x)} (2)\n\n where R(x) represents the penalization term.\n \"\"\"\n\n def __init__(\n self,\n grad_method: Union[str, CircuitGradient] = \"lin_comb\",\n qfi_method: Union[str, CircuitQFI] = \"lin_comb_full\",\n regularization: Optional[str] = None,\n **kwargs,\n ):\n r\"\"\"\n Args:\n grad_method: The method used to compute the state gradient. Can be either\n ``'param_shift'`` or ``'lin_comb'`` or ``'fin_diff'``.\n qfi_method: The method used to compute the QFI. Can be either\n ``'lin_comb_full'`` or ``'overlap_block_diag'`` or ``'overlap_diag'``.\n regularization: Use the following regularization with a least square method to solve the\n underlying system of linear equations\n Can be either None or ``'ridge'`` or ``'lasso'`` or ``'perturb_diag'``\n ``'ridge'`` and ``'lasso'`` use an automatic optimal parameter search\n If regularization is None but the metric is ill-conditioned or singular then\n a least square solver is used without regularization\n kwargs (dict): Optional parameters for a CircuitGradient\n \"\"\"\n super().__init__(grad_method)\n\n self._qfi_method = QFI(qfi_method)\n self._regularization = regularization\n self._epsilon = kwargs.get(\"epsilon\", 1e-6)\n\n # pylint: disable=signature-differs\n def convert(\n self,\n operator: OperatorBase,\n params: Optional[\n Union[ParameterVector, ParameterExpression, List[ParameterExpression]]\n ] = None,\n ) -> OperatorBase:\n r\"\"\"\n Args:\n operator: The operator we are taking the gradient of.\n params: The parameters we are taking the gradient with respect to. 
If not explicitly\n passed, they are inferred from the operator and sorted by name.\n\n Returns:\n An operator whose evaluation yields the NaturalGradient.\n\n Raises:\n TypeError: If ``operator`` does not represent an expectation value or the quantum\n state is not ``CircuitStateFn``.\n ValueError: If ``params`` contains a parameter not present in ``operator``.\n ValueError: If ``operator`` is not parameterized.\n \"\"\"\n if not isinstance(operator, ComposedOp):\n if not (isinstance(operator, ListOp) and len(operator.oplist) == 1):\n raise TypeError(\n \"Please provide the operator either as ComposedOp or as ListOp of \"\n \"a CircuitStateFn potentially with a combo function.\"\n )\n\n if not isinstance(operator[-1], CircuitStateFn):\n raise TypeError(\n \"Please make sure that the operator for which you want to compute \"\n \"Quantum Fisher Information represents an expectation value or a \"\n \"loss function and that the quantum state is given as \"\n \"CircuitStateFn.\"\n )\n if len(operator.parameters) == 0:\n raise ValueError(\"The operator we are taking the gradient of is not parameterized!\")\n if params is None:\n params = sorted(operator.parameters, key=functools.cmp_to_key(_compare_parameters))\n if not isinstance(params, Iterable):\n params = [params]\n # Instantiate the gradient\n grad = Gradient(self._grad_method, epsilon=self._epsilon).convert(operator, params)\n # Instantiate the QFI metric which is used to re-scale the gradient\n metric = self._qfi_method.convert(operator[-1], params) * 0.25\n\n # Define the function which compute the natural gradient from the gradient and the QFI.\n def combo_fn(x):\n c = np.real(x[0])\n a = np.real(x[1])\n if self.regularization:\n # If a regularization method is chosen then use a regularized solver to\n # construct the natural gradient.\n nat_grad = NaturalGradient._regularized_sle_solver(\n a, c, regularization=self.regularization\n )\n else:\n try:\n # Try to solve the system of linear equations Ax = C.\n nat_grad = np.linalg.solve(a, c)\n except np.linalg.LinAlgError: # singular matrix\n nat_grad = np.linalg.lstsq(a, c)[0]\n return np.real(nat_grad)\n\n # Define the ListOp which combines the gradient and the QFI according to the combination\n # function defined above.\n return ListOp([grad, metric], combo_fn=combo_fn)\n\n @property\n def qfi_method(self) -> CircuitQFI:\n \"\"\"Returns ``CircuitQFI``.\n\n Returns: ``CircuitQFI``\n\n \"\"\"\n return self._qfi_method.qfi_method\n\n @property\n def regularization(self) -> Optional[str]:\n \"\"\"Returns the regularization option.\n\n Returns: the regularization option.\n\n \"\"\"\n return self._regularization\n\n @staticmethod\n def _reg_term_search(\n a: np.ndarray,\n c: np.ndarray,\n reg_method: Callable[[np.ndarray, np.ndarray, float], float],\n lambda1: float = 1e-3,\n lambda4: float = 1.0,\n tol: float = 1e-8,\n ) -> Tuple[float, np.ndarray]:\n \"\"\"\n This method implements a search for a regularization parameter lambda by finding for the\n corner of the L-curve\n More explicitly, one has to evaluate a suitable lambda by finding a compromise between\n the error in the solution and the norm of the regularization.\n This function implements a method presented in\n `A simple algorithm to find the L-curve corner in the regularization of inverse problems\n <https://arxiv.org/pdf/1608.04571.pdf>`\n Args:\n a: see (1) and (2)\n c: see (1) and (2)\n reg_method: Given A, C and lambda the regularization method must return x_lambda\n - see (2)\n lambda1: left starting point for L-curve 
corner search\n lambda4: right starting point for L-curve corner search\n tol: termination threshold\n\n Returns:\n regularization coefficient, solution to the regularization inverse problem\n \"\"\"\n\n def _get_curvature(x_lambda: List) -> Union[int, float]:\n \"\"\"Calculate Menger curvature\n\n Menger, K. (1930). Untersuchungen ̈uber Allgemeine Metrik. Math. Ann.,103(1), 466–501\n\n Args:\n x_lambda: [[x_lambdaj], [x_lambdak], [x_lambdal]]\n lambdaj < lambdak < lambdal\n\n Returns:\n Menger Curvature\n\n \"\"\"\n eps = []\n eta = []\n for x in x_lambda:\n try:\n eps.append(np.log(np.linalg.norm(np.matmul(a, x) - c) ** 2))\n except ValueError:\n eps.append(np.log(np.linalg.norm(np.matmul(a, np.transpose(x)) - c) ** 2))\n eta.append(np.log(max(np.linalg.norm(x) ** 2, 1e-6)))\n p_temp = 1\n c_k = 0\n for i in range(3):\n p_temp *= (eps[np.mod(i + 1, 3)] - eps[i]) ** 2 + (\n eta[np.mod(i + 1, 3)] - eta[i]\n ) ** 2\n c_k += eps[i] * eta[np.mod(i + 1, 3)] - eps[np.mod(i + 1, 3)] * eta[i]\n c_k = 2 * c_k / max(1e-4, np.sqrt(p_temp))\n return c_k\n\n def get_lambda2_lambda3(lambda1, lambda4):\n gold_sec = (1 + np.sqrt(5)) / 2.0\n lambda2 = 10 ** ((np.log10(lambda4) + np.log10(lambda1) * gold_sec) / (1 + gold_sec))\n lambda3 = 10 ** (np.log10(lambda1) + np.log10(lambda4) - np.log10(lambda2))\n return lambda2, lambda3\n\n lambda2, lambda3 = get_lambda2_lambda3(lambda1, lambda4)\n lambda_ = [lambda1, lambda2, lambda3, lambda4]\n x_lambda = []\n for lam in lambda_:\n x_lambda.append(reg_method(a, c, lam))\n counter = 0\n while (lambda_[3] - lambda_[0]) / lambda_[3] >= tol:\n counter += 1\n c_2 = _get_curvature(x_lambda[:-1])\n c_3 = _get_curvature(x_lambda[1:])\n while c_3 < 0:\n lambda_[3] = lambda_[2]\n x_lambda[3] = x_lambda[2]\n lambda_[2] = lambda_[1]\n x_lambda[2] = x_lambda[1]\n lambda2, _ = get_lambda2_lambda3(lambda_[0], lambda_[3])\n lambda_[1] = lambda2\n x_lambda[1] = reg_method(a, c, lambda_[1])\n c_3 = _get_curvature(x_lambda[1:])\n\n if c_2 > c_3:\n lambda_mc = lambda_[1]\n x_mc = x_lambda[1]\n lambda_[3] = lambda_[2]\n x_lambda[3] = x_lambda[2]\n lambda_[2] = lambda_[1]\n x_lambda[2] = x_lambda[1]\n lambda2, _ = get_lambda2_lambda3(lambda_[0], lambda_[3])\n lambda_[1] = lambda2\n x_lambda[1] = reg_method(a, c, lambda_[1])\n else:\n lambda_mc = lambda_[2]\n x_mc = x_lambda[2]\n lambda_[0] = lambda_[1]\n x_lambda[0] = x_lambda[1]\n lambda_[1] = lambda_[2]\n x_lambda[1] = x_lambda[2]\n _, lambda3 = get_lambda2_lambda3(lambda_[0], lambda_[3])\n lambda_[2] = lambda3\n x_lambda[2] = reg_method(a, c, lambda_[2])\n return lambda_mc, x_mc\n\n @staticmethod\n def _ridge(\n a: np.ndarray,\n c: np.ndarray,\n lambda_: float = 1.0,\n lambda1: float = 1e-4,\n lambda4: float = 1e-1,\n tol_search: float = 1e-8,\n fit_intercept: bool = True,\n normalize: bool = False,\n copy_a: bool = True,\n max_iter: int = 1000,\n tol: float = 0.0001,\n solver: str = \"auto\",\n random_state: Optional[int] = None,\n ) -> Tuple[float, np.ndarray]:\n \"\"\"\n Ridge Regression with automatic search for a good regularization term lambda\n x_lambda = arg min{||Ax-C||^2 + lambda*||x||_2^2} (3)\n `Scikit Learn Ridge Regression\n <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html>`\n Args:\n a: see (1) and (2)\n c: see (1) and (2)\n lambda_ : regularization parameter used if auto_search = False\n lambda1: left starting point for L-curve corner search\n lambda4: right starting point for L-curve corner search\n tol_search: termination threshold for regularization parameter search\n 
fit_intercept: if True calculate intercept\n normalize: deprecated if fit_intercept=False, if True normalize A for regression\n copy_a: if True A is copied, else overwritten\n max_iter: max. number of iterations if solver is CG\n tol: precision of the regression solution\n solver: solver {‘auto’, ‘svd’, ‘cholesky’, ‘lsqr’, ‘sparse_cg’, ‘sag’, ‘saga’}\n random_state: seed for the pseudo random number generator used when data is shuffled\n\n Returns:\n regularization coefficient, solution to the regularization inverse problem\n\n Raises:\n MissingOptionalLibraryError: scikit-learn not installed\n\n \"\"\"\n try:\n from sklearn.linear_model import Ridge\n except ImportError as ex:\n raise MissingOptionalLibraryError(\n libname=\"scikit-learn\", name=\"_ridge\", pip_install=\"pip install scikit-learn\"\n ) from ex\n\n reg = Ridge(\n alpha=lambda_,\n fit_intercept=fit_intercept,\n normalize=normalize,\n copy_X=copy_a,\n max_iter=max_iter,\n tol=tol,\n solver=solver,\n random_state=random_state,\n )\n\n def reg_method(a, c, alpha):\n reg.set_params(alpha=alpha)\n reg.fit(a, c)\n return reg.coef_\n\n lambda_mc, x_mc = NaturalGradient._reg_term_search(\n a, c, reg_method, lambda1=lambda1, lambda4=lambda4, tol=tol_search\n )\n return lambda_mc, np.transpose(x_mc)\n\n @staticmethod\n def _lasso(\n a: np.ndarray,\n c: np.ndarray,\n lambda_: float = 1.0,\n lambda1: float = 1e-4,\n lambda4: float = 1e-1,\n tol_search: float = 1e-8,\n fit_intercept: bool = True,\n normalize: bool = False,\n precompute: Union[bool, Iterable] = False,\n copy_a: bool = True,\n max_iter: int = 1000,\n tol: float = 0.0001,\n warm_start: bool = False,\n positive: bool = False,\n random_state: Optional[int] = None,\n selection: str = \"random\",\n ) -> Tuple[float, np.ndarray]:\n \"\"\"\n Lasso Regression with automatic search for a good regularization term lambda\n x_lambda = arg min{||Ax-C||^2/(2*n_samples) + lambda*||x||_1} (4)\n `Scikit Learn Lasso Regression\n <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html>`\n\n Args:\n a: mxn matrix\n c: m vector\n lambda_ : regularization parameter used if auto_search = False\n lambda1: left starting point for L-curve corner search\n lambda4: right starting point for L-curve corner search\n tol_search: termination threshold for regularization parameter search\n fit_intercept: if True calculate intercept\n normalize: deprecated if fit_intercept=False, if True normalize A for regression\n precompute: If True compute and use Gram matrix to speed up calculations.\n Gram matrix can also be given explicitly\n copy_a: if True A is copied, else overwritten\n max_iter: max. 
number of iterations if solver is CG\n tol: precision of the regression solution\n warm_start: if True reuse solution from previous fit as initialization\n positive: if True force positive coefficients\n random_state: seed for the pseudo random number generator used when data is shuffled\n selection: {'cyclic', 'random'}\n\n Returns:\n regularization coefficient, solution to the regularization inverse problem\n\n Raises:\n MissingOptionalLibraryError: scikit-learn not installed\n\n \"\"\"\n try:\n from sklearn.linear_model import Lasso\n except ImportError as ex:\n raise MissingOptionalLibraryError(\n libname=\"scikit-learn\", name=\"_lasso\", pip_install=\"pip install scikit-learn\"\n ) from ex\n\n reg = Lasso(\n alpha=lambda_,\n fit_intercept=fit_intercept,\n normalize=normalize,\n precompute=precompute,\n copy_X=copy_a,\n max_iter=max_iter,\n tol=tol,\n warm_start=warm_start,\n positive=positive,\n random_state=random_state,\n selection=selection,\n )\n\n def reg_method(a, c, alpha):\n reg.set_params(alpha=alpha)\n reg.fit(a, c)\n return reg.coef_\n\n lambda_mc, x_mc = NaturalGradient._reg_term_search(\n a, c, reg_method, lambda1=lambda1, lambda4=lambda4, tol=tol_search\n )\n\n return lambda_mc, x_mc\n\n @staticmethod\n def _regularized_sle_solver(\n a: np.ndarray,\n c: np.ndarray,\n regularization: str = \"perturb_diag\",\n lambda1: float = 1e-3,\n lambda4: float = 1.0,\n alpha: float = 0.0,\n tol_norm_x: Tuple[float, float] = (1e-8, 5.0),\n tol_cond_a: float = 1000.0,\n ) -> np.ndarray:\n \"\"\"\n Solve a linear system of equations with a regularization method and automatic lambda fitting\n Args:\n a: mxn matrix\n c: m vector\n regularization: Regularization scheme to be used: 'ridge', 'lasso',\n 'perturb_diag_elements' or 'perturb_diag'\n lambda1: left starting point for L-curve corner search (for 'ridge' and 'lasso')\n lambda4: right starting point for L-curve corner search (for 'ridge' and 'lasso')\n alpha: perturbation coefficient for 'perturb_diag_elements' and 'perturb_diag'\n tol_norm_x: tolerance for the norm of x\n tol_cond_a: tolerance for the condition number of A\n\n Returns:\n solution to the regularized system of linear equations\n\n \"\"\"\n if regularization == \"ridge\":\n _, x = NaturalGradient._ridge(a, c, lambda1=lambda1)\n elif regularization == \"lasso\":\n _, x = NaturalGradient._lasso(a, c, lambda1=lambda1)\n elif regularization == \"perturb_diag_elements\":\n alpha = 1e-7\n while np.linalg.cond(a + alpha * np.diag(a)) > tol_cond_a:\n alpha *= 10\n # include perturbation in A to avoid singularity\n x, _, _, _ = np.linalg.lstsq(a + alpha * np.diag(a), c, rcond=None)\n elif regularization == \"perturb_diag\":\n alpha = 1e-7\n while np.linalg.cond(a + alpha * np.eye(len(c))) > tol_cond_a:\n alpha *= 10\n # include perturbation in A to avoid singularity\n x, _, _, _ = np.linalg.lstsq(a + alpha * np.eye(len(c)), c, rcond=None)\n else:\n # include perturbation in A to avoid singularity\n x, _, _, _ = np.linalg.lstsq(a, c, rcond=None)\n\n if np.linalg.norm(x) > tol_norm_x[1] or np.linalg.norm(x) < tol_norm_x[0]:\n if regularization == \"ridge\":\n lambda1 = lambda1 / 10.0\n _, x = NaturalGradient._ridge(a, c, lambda1=lambda1, lambda4=lambda4)\n elif regularization == \"lasso\":\n lambda1 = lambda1 / 10.0\n _, x = NaturalGradient._lasso(a, c, lambda1=lambda1)\n elif regularization == \"perturb_diag_elements\":\n while np.linalg.cond(a + alpha * np.diag(a)) > tol_cond_a:\n if alpha == 0:\n alpha = 1e-7\n else:\n alpha *= 10\n # include perturbation in A to avoid 
singularity\n x, _, _, _ = np.linalg.lstsq(a + alpha * np.diag(a), c, rcond=None)\n else:\n if alpha == 0:\n alpha = 1e-7\n else:\n alpha *= 10\n while np.linalg.cond(a + alpha * np.eye(len(c))) > tol_cond_a:\n # include perturbation in A to avoid singularity\n x, _, _, _ = np.linalg.lstsq(a + alpha * np.eye(len(c)), c, rcond=None)\n alpha *= 10\n return x\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test library of n-local circuits.\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom ddt import ddt, data, unpack\n\nfrom qiskit.test.base import QiskitTestCase\nfrom qiskit import transpile\nfrom qiskit.circuit import QuantumCircuit, Parameter, ParameterVector, ParameterExpression\nfrom qiskit.circuit.library import (\n NLocal,\n TwoLocal,\n RealAmplitudes,\n ExcitationPreserving,\n XGate,\n CRXGate,\n CCXGate,\n SwapGate,\n RXGate,\n RYGate,\n EfficientSU2,\n RZGate,\n RXXGate,\n RYYGate,\n CXGate,\n)\nfrom qiskit.circuit.random.utils import random_circuit\nfrom qiskit.converters.circuit_to_dag import circuit_to_dag\n\n\n@ddt\nclass TestNLocal(QiskitTestCase):\n \"\"\"Test the n-local circuit class.\"\"\"\n\n def test_if_reps_is_negative(self):\n \"\"\"Test to check if error is raised for negative value of reps\"\"\"\n with self.assertRaises(ValueError):\n _ = NLocal(reps=-1)\n\n def test_reps_setter_when_negative(self):\n \"\"\"Test to check if setter raises error for reps < 0\"\"\"\n nlocal = NLocal(reps=1)\n with self.assertRaises(ValueError):\n nlocal.reps = -1\n\n def assertCircuitEqual(self, qc1, qc2, visual=False, transpiled=True):\n \"\"\"An equality test specialized to circuits.\"\"\"\n if transpiled:\n basis_gates = [\"id\", \"u1\", \"u3\", \"cx\"]\n qc1_transpiled = transpile(qc1, basis_gates=basis_gates, optimization_level=0)\n qc2_transpiled = transpile(qc2, basis_gates=basis_gates, optimization_level=0)\n qc1, qc2 = qc1_transpiled, qc2_transpiled\n\n if visual:\n self.assertEqual(qc1.draw(), qc2.draw())\n else:\n self.assertEqual(qc1, qc2)\n\n def test_empty_nlocal(self):\n \"\"\"Test the creation of an empty NLocal.\"\"\"\n nlocal = NLocal()\n self.assertEqual(nlocal.num_qubits, 0)\n self.assertEqual(nlocal.num_parameters_settable, 0)\n self.assertEqual(nlocal.reps, 1)\n\n self.assertEqual(nlocal, QuantumCircuit())\n\n for attribute in [nlocal.rotation_blocks, nlocal.entanglement_blocks]:\n self.assertEqual(len(attribute), 0)\n\n @data(\n (XGate(), [[0], [2], [1]]),\n (XGate(), [[0]]),\n (CRXGate(-0.2), [[2, 0], [1, 3]]),\n )\n @unpack\n def test_add_layer_to_empty_nlocal(self, block, entangler_map):\n \"\"\"Test appending gates to an empty nlocal.\"\"\"\n nlocal = NLocal()\n nlocal.add_layer(block, entangler_map)\n\n max_num_qubits = max(max(indices) for indices in entangler_map)\n reference = QuantumCircuit(max_num_qubits + 1)\n for indices in entangler_map:\n reference.append(block, indices)\n\n self.assertCircuitEqual(nlocal, reference)\n\n @data([5, 3], [1, 5], [1, 1], [1, 2, 3, 10])\n def test_append_circuit(self, num_qubits):\n \"\"\"Test appending circuits to an nlocal works normally.\"\"\"\n # fixed depth of 3 gates per circuit\n depth = 3\n\n # keep track of a reference circuit\n reference = QuantumCircuit(max(num_qubits))\n\n # construct the NLocal from the first circuit\n first_circuit = random_circuit(num_qubits[0], depth, seed=4200)\n # TODO Terra bug: if this is to_gate it fails, since the QC adds an instruction not gate\n nlocal = NLocal(max(num_qubits), 
entanglement_blocks=first_circuit.to_instruction(), reps=1)\n reference.append(first_circuit, list(range(num_qubits[0])))\n\n # append the rest\n for num in num_qubits[1:]:\n circuit = random_circuit(num, depth, seed=4200)\n nlocal.append(circuit, list(range(num)))\n reference.append(circuit, list(range(num)))\n\n self.assertCircuitEqual(nlocal, reference)\n\n @data([5, 3], [1, 5], [1, 1], [1, 2, 3, 10])\n def test_add_nlocal(self, num_qubits):\n \"\"\"Test adding an nlocal to an nlocal (using add_layer).\"\"\"\n # fixed depth of 3 gates per circuit\n depth = 3\n\n # keep track of a reference circuit\n reference = QuantumCircuit(max(num_qubits))\n\n # construct the NLocal from the first circuit\n first_circuit = random_circuit(num_qubits[0], depth, seed=4220)\n # TODO Terra bug: if this is to_gate it fails, since the QC adds an instruction not gate\n nlocal = NLocal(max(num_qubits), entanglement_blocks=first_circuit.to_instruction(), reps=1)\n reference.append(first_circuit, list(range(num_qubits[0])))\n\n # append the rest\n for num in num_qubits[1:]:\n circuit = random_circuit(num, depth, seed=4220)\n nlocal.add_layer(NLocal(num, entanglement_blocks=circuit, reps=1))\n reference.append(circuit, list(range(num)))\n\n self.assertCircuitEqual(nlocal, reference)\n\n @unittest.skip(\"Feature missing\")\n def test_iadd_overload(self):\n \"\"\"Test the overloaded + operator.\"\"\"\n num_qubits, depth = 2, 2\n\n # construct two circuits for adding\n first_circuit = random_circuit(num_qubits, depth, seed=4242)\n circuit = random_circuit(num_qubits, depth, seed=4242)\n\n # get a reference\n reference = first_circuit + circuit\n\n # convert the object to be appended to different types\n others = [circuit, circuit.to_instruction(), circuit.to_gate(), NLocal(circuit)]\n\n # try adding each type\n for other in others:\n nlocal = NLocal(num_qubits, entanglement_blocks=first_circuit, reps=1)\n nlocal += other\n with self.subTest(msg=\"type: {}\".format(type(other))):\n self.assertCircuitEqual(nlocal, reference)\n\n def test_parameter_getter_from_automatic_repetition(self):\n \"\"\"Test getting and setting of the nlocal parameters.\"\"\"\n circuit = QuantumCircuit(2)\n circuit.ry(Parameter(\"a\"), 0)\n circuit.crx(Parameter(\"b\"), 0, 1)\n\n # repeat circuit and check that parameters are duplicated\n reps = 3\n nlocal = NLocal(2, entanglement_blocks=circuit, reps=reps)\n self.assertTrue(nlocal.num_parameters, 6)\n self.assertTrue(len(nlocal.parameters), 6)\n\n @data(list(range(6)), ParameterVector(\"θ\", length=6), [0, 1, Parameter(\"theta\"), 3, 4, 5])\n def test_parameter_setter_from_automatic_repetition(self, params):\n \"\"\"Test getting and setting of the nlocal parameters.\"\"\"\n circuit = QuantumCircuit(2)\n circuit.ry(Parameter(\"a\"), 0)\n circuit.crx(Parameter(\"b\"), 0, 1)\n\n # repeat circuit and check that parameters are duplicated\n reps = 3\n nlocal = NLocal(2, entanglement_blocks=circuit, reps=reps)\n nlocal.assign_parameters(params, inplace=True)\n\n param_set = set(p for p in params if isinstance(p, ParameterExpression))\n with self.subTest(msg=\"Test the parameters of the non-transpiled circuit\"):\n # check the parameters of the final circuit\n self.assertEqual(nlocal.parameters, param_set)\n\n with self.subTest(msg=\"Test the parameters of the transpiled circuit\"):\n basis_gates = [\"id\", \"u1\", \"u2\", \"u3\", \"cx\"]\n transpiled_circuit = transpile(nlocal, basis_gates=basis_gates)\n self.assertEqual(transpiled_circuit.parameters, param_set)\n\n @data(list(range(6)), 
ParameterVector(\"θ\", length=6), [0, 1, Parameter(\"theta\"), 3, 4, 5])\n def test_parameters_setter(self, params):\n \"\"\"Test setting the parameters via list.\"\"\"\n # construct circuit with some parameters\n initial_params = ParameterVector(\"p\", length=6)\n circuit = QuantumCircuit(1)\n for i, initial_param in enumerate(initial_params):\n circuit.ry(i * initial_param, 0)\n\n # create an NLocal from the circuit and set the new parameters\n nlocal = NLocal(1, entanglement_blocks=circuit, reps=1)\n nlocal.assign_parameters(params, inplace=True)\n\n param_set = set(p for p in params if isinstance(p, ParameterExpression))\n with self.subTest(msg=\"Test the parameters of the non-transpiled circuit\"):\n # check the parameters of the final circuit\n self.assertEqual(nlocal.parameters, param_set)\n\n with self.subTest(msg=\"Test the parameters of the transpiled circuit\"):\n basis_gates = [\"id\", \"u1\", \"u2\", \"u3\", \"cx\"]\n transpiled_circuit = transpile(nlocal, basis_gates=basis_gates)\n self.assertEqual(transpiled_circuit.parameters, param_set)\n\n def test_repetetive_parameter_setting(self):\n \"\"\"Test alternate setting of parameters and circuit construction.\"\"\"\n x = Parameter(\"x\")\n circuit = QuantumCircuit(1)\n circuit.rx(x, 0)\n\n nlocal = NLocal(1, entanglement_blocks=circuit, reps=3, insert_barriers=True)\n with self.subTest(msg=\"immediately after initialization\"):\n self.assertEqual(len(nlocal.parameters), 3)\n\n with self.subTest(msg=\"after circuit construction\"):\n self.assertEqual(len(nlocal.parameters), 3)\n\n q = Parameter(\"q\")\n nlocal.assign_parameters([x, q, q], inplace=True)\n with self.subTest(msg=\"setting parameter to Parameter objects\"):\n self.assertEqual(nlocal.parameters, set({x, q}))\n\n nlocal.assign_parameters([0, -1], inplace=True)\n with self.subTest(msg=\"setting parameter to numbers\"):\n self.assertEqual(nlocal.parameters, set())\n\n def test_skip_unentangled_qubits(self):\n \"\"\"Test skipping the unentangled qubits.\"\"\"\n num_qubits = 6\n entanglement_1 = [[0, 1, 3], [1, 3, 5], [0, 1, 5]]\n skipped_1 = [2, 4]\n\n entanglement_2 = [entanglement_1, [[0, 1, 2], [2, 3, 5]]]\n skipped_2 = [4]\n\n for entanglement, skipped in zip([entanglement_1, entanglement_2], [skipped_1, skipped_2]):\n with self.subTest(entanglement=entanglement, skipped=skipped):\n nlocal = NLocal(\n num_qubits,\n rotation_blocks=XGate(),\n entanglement_blocks=CCXGate(),\n entanglement=entanglement,\n reps=3,\n skip_unentangled_qubits=True,\n )\n\n skipped_set = set(nlocal.qubits[i] for i in skipped)\n dag = circuit_to_dag(nlocal)\n idle = set(dag.idle_wires())\n self.assertEqual(skipped_set, idle)\n\n @data(\"linear\", \"full\", \"circular\", \"sca\", [\"linear\", \"full\"], [\"circular\", \"linear\", \"sca\"])\n def test_entanglement_by_str(self, entanglement):\n \"\"\"Test setting the entanglement of the layers by str.\"\"\"\n reps = 3\n nlocal = NLocal(\n 5,\n rotation_blocks=XGate(),\n entanglement_blocks=CCXGate(),\n entanglement=entanglement,\n reps=reps,\n )\n\n def get_expected_entangler_map(rep_num, mode):\n if mode == \"linear\":\n return [(0, 1, 2), (1, 2, 3), (2, 3, 4)]\n elif mode == \"full\":\n return [\n (0, 1, 2),\n (0, 1, 3),\n (0, 1, 4),\n (0, 2, 3),\n (0, 2, 4),\n (0, 3, 4),\n (1, 2, 3),\n (1, 2, 4),\n (1, 3, 4),\n (2, 3, 4),\n ]\n else:\n circular = [(3, 4, 0), (0, 1, 2), (1, 2, 3), (2, 3, 4)]\n if mode == \"circular\":\n return circular\n sca = circular[-rep_num:] + circular[:-rep_num]\n if rep_num % 2 == 1:\n sca = [tuple(reversed(indices)) 
for indices in sca]\n return sca\n\n for rep_num in range(reps):\n entangler_map = nlocal.get_entangler_map(rep_num, 0, 3)\n if isinstance(entanglement, list):\n mode = entanglement[rep_num % len(entanglement)]\n else:\n mode = entanglement\n expected = get_expected_entangler_map(rep_num, mode)\n\n with self.subTest(rep_num=rep_num):\n # using a set here since the order does not matter\n self.assertEqual(set(entangler_map), set(expected))\n\n def test_pairwise_entanglement(self):\n \"\"\"Test pairwise entanglement.\"\"\"\n nlocal = NLocal(\n 5,\n rotation_blocks=XGate(),\n entanglement_blocks=CXGate(),\n entanglement=\"pairwise\",\n reps=1,\n )\n entangler_map = nlocal.get_entangler_map(0, 0, 2)\n pairwise = [(0, 1), (2, 3), (1, 2), (3, 4)]\n\n self.assertEqual(pairwise, entangler_map)\n\n def test_pairwise_entanglement_raises(self):\n \"\"\"Test choosing pairwise entanglement raises an error for too large blocks.\"\"\"\n nlocal = NLocal(3, XGate(), CCXGate(), entanglement=\"pairwise\", reps=1)\n\n # pairwise entanglement is only defined if the entangling gate has 2 qubits\n with self.assertRaises(ValueError):\n print(nlocal.draw())\n\n def test_entanglement_by_list(self):\n \"\"\"Test setting the entanglement by list.\n\n This is the circuit we test (times 2, with final X layer)\n ┌───┐ ┌───┐┌───┐ ┌───┐\n q_0: |0>┤ X ├──■────■───X────┤ X ├┤ X ├──■───X─────── .. ┤ X ├\n ├───┤ │ │ │ ├───┤└─┬─┘ │ │ ├───┤\n q_1: |0>┤ X ├──■────┼───┼──X─┤ X ├──■────┼───X──X──── .. ┤ X ├\n ├───┤┌─┴─┐ │ │ │ ├───┤ │ │ │ x2 ├───┤\n q_2: |0>┤ X ├┤ X ├──■───┼──X─┤ X ├──■────■──────X──X─ .. ┤ X ├\n ├───┤└───┘┌─┴─┐ │ ├───┤ ┌─┴─┐ │ ├───┤\n q_3: |0>┤ X ├─────┤ X ├─X────┤ X ├─────┤ X ├───────X─ .. ┤ X ├\n └───┘ └───┘ └───┘ └───┘ └───┘\n \"\"\"\n circuit = QuantumCircuit(4)\n for _ in range(2):\n circuit.x([0, 1, 2, 3])\n circuit.barrier()\n circuit.ccx(0, 1, 2)\n circuit.ccx(0, 2, 3)\n circuit.swap(0, 3)\n circuit.swap(1, 2)\n circuit.barrier()\n circuit.x([0, 1, 2, 3])\n circuit.barrier()\n circuit.ccx(2, 1, 0)\n circuit.ccx(0, 2, 3)\n circuit.swap(0, 1)\n circuit.swap(1, 2)\n circuit.swap(2, 3)\n circuit.barrier()\n circuit.x([0, 1, 2, 3])\n\n layer_1_ccx = [(0, 1, 2), (0, 2, 3)]\n layer_1_swap = [(0, 3), (1, 2)]\n layer_1 = [layer_1_ccx, layer_1_swap]\n\n layer_2_ccx = [(2, 1, 0), (0, 2, 3)]\n layer_2_swap = [(0, 1), (1, 2), (2, 3)]\n layer_2 = [layer_2_ccx, layer_2_swap]\n\n entanglement = [layer_1, layer_2]\n\n nlocal = NLocal(\n 4,\n rotation_blocks=XGate(),\n entanglement_blocks=[CCXGate(), SwapGate()],\n reps=4,\n entanglement=entanglement,\n insert_barriers=True,\n )\n\n self.assertCircuitEqual(nlocal, circuit)\n\n\n@ddt\nclass TestTwoLocal(QiskitTestCase):\n \"\"\"Tests for the TwoLocal circuit.\"\"\"\n\n def assertCircuitEqual(self, qc1, qc2, visual=False, transpiled=True):\n \"\"\"An equality test specialized to circuits.\"\"\"\n if transpiled:\n basis_gates = [\"id\", \"u1\", \"u3\", \"cx\"]\n qc1_transpiled = transpile(qc1, basis_gates=basis_gates, optimization_level=0)\n qc2_transpiled = transpile(qc2, basis_gates=basis_gates, optimization_level=0)\n qc1, qc2 = qc1_transpiled, qc2_transpiled\n\n if visual:\n self.assertEqual(qc1.draw(), qc2.draw())\n else:\n self.assertEqual(qc1, qc2)\n\n def test_skip_final_rotation_layer(self):\n \"\"\"Test skipping the final rotation layer works.\"\"\"\n two = TwoLocal(3, [\"ry\", \"h\"], [\"cz\", \"cx\"], reps=2, skip_final_rotation_layer=True)\n self.assertEqual(two.num_parameters, 6) # would be 9 with a final rotation layer\n\n @data(\n (5, \"rx\", \"cx\", \"full\", 
2, 15),\n (3, \"x\", \"z\", \"linear\", 1, 0),\n (3, \"rx\", \"cz\", \"linear\", 0, 3),\n (3, [\"rx\", \"ry\"], [\"cry\", \"cx\"], \"circular\", 2, 24),\n )\n @unpack\n def test_num_parameters(self, num_qubits, rot, ent, ent_mode, reps, expected):\n \"\"\"Test the number of parameters.\"\"\"\n two = TwoLocal(\n num_qubits,\n rotation_blocks=rot,\n entanglement_blocks=ent,\n entanglement=ent_mode,\n reps=reps,\n )\n\n with self.subTest(msg=\"num_parameters_settable\"):\n self.assertEqual(two.num_parameters_settable, expected)\n\n with self.subTest(msg=\"num_parameters\"):\n self.assertEqual(two.num_parameters, expected)\n\n def test_empty_two_local(self):\n \"\"\"Test the setup of an empty two-local circuit.\"\"\"\n two = TwoLocal()\n\n with self.subTest(msg=\"0 qubits\"):\n self.assertEqual(two.num_qubits, 0)\n\n with self.subTest(msg=\"no blocks are set\"):\n self.assertListEqual(two.rotation_blocks, [])\n self.assertListEqual(two.entanglement_blocks, [])\n\n with self.subTest(msg=\"equal to empty circuit\"):\n self.assertEqual(two, QuantumCircuit())\n\n @data(\"rx\", RXGate(Parameter(\"p\")), RXGate, \"circuit\")\n def test_various_block_types(self, rot):\n \"\"\"Test setting the rotation blocks to various type and assert the output type is RX.\"\"\"\n if rot == \"circuit\":\n rot = QuantumCircuit(1)\n rot.rx(Parameter(\"angle\"), 0)\n\n two = TwoLocal(3, rot, reps=0)\n self.assertEqual(len(two.rotation_blocks), 1)\n rotation = two.rotation_blocks[0]\n\n # decompose\n self.assertIsInstance(rotation.data[0][0], RXGate)\n\n def test_parameter_setters(self):\n \"\"\"Test different possibilities to set parameters.\"\"\"\n two = TwoLocal(3, rotation_blocks=\"rx\", entanglement=\"cz\", reps=2)\n params = [0, 1, 2, Parameter(\"x\"), Parameter(\"y\"), Parameter(\"z\"), 6, 7, 0]\n params_set = set(param for param in params if isinstance(param, Parameter))\n\n with self.subTest(msg=\"dict assign and copy\"):\n ordered = two.ordered_parameters\n bound = two.assign_parameters(dict(zip(ordered, params)), inplace=False)\n self.assertEqual(bound.parameters, params_set)\n self.assertEqual(two.num_parameters, 9)\n\n with self.subTest(msg=\"list assign and copy\"):\n ordered = two.ordered_parameters\n bound = two.assign_parameters(params, inplace=False)\n self.assertEqual(bound.parameters, params_set)\n self.assertEqual(two.num_parameters, 9)\n\n with self.subTest(msg=\"list assign inplace\"):\n ordered = two.ordered_parameters\n two.assign_parameters(params, inplace=True)\n self.assertEqual(two.parameters, params_set)\n self.assertEqual(two.num_parameters, 3)\n self.assertEqual(two.num_parameters_settable, 9)\n\n def test_parameters_settable_is_constant(self):\n \"\"\"Test the attribute num_parameters_settable does not change on parameter change.\"\"\"\n two = TwoLocal(3, rotation_blocks=\"rx\", entanglement=\"cz\", reps=2)\n ordered_params = two.ordered_parameters\n\n x = Parameter(\"x\")\n two.assign_parameters(dict(zip(ordered_params, [x] * two.num_parameters)), inplace=True)\n\n with self.subTest(msg=\"num_parameters collapsed to 1\"):\n self.assertEqual(two.num_parameters, 1)\n\n with self.subTest(msg=\"num_parameters_settable remained constant\"):\n self.assertEqual(two.num_parameters_settable, len(ordered_params))\n\n def test_compose_inplace_to_circuit(self):\n \"\"\"Test adding a two-local to an existing circuit.\"\"\"\n two = TwoLocal(3, [\"ry\", \"rz\"], \"cz\", \"full\", reps=1, insert_barriers=True)\n circuit = QuantumCircuit(3)\n circuit.compose(two, inplace=True)\n\n reference = 
QuantumCircuit(3)\n param_iter = iter(two.ordered_parameters)\n for i in range(3):\n reference.ry(next(param_iter), i)\n for i in range(3):\n reference.rz(next(param_iter), i)\n reference.barrier()\n reference.cz(0, 1)\n reference.cz(0, 2)\n reference.cz(1, 2)\n reference.barrier()\n for i in range(3):\n reference.ry(next(param_iter), i)\n for i in range(3):\n reference.rz(next(param_iter), i)\n\n self.assertCircuitEqual(circuit, reference)\n\n def test_composing_two(self):\n \"\"\"Test adding two two-local circuits.\"\"\"\n entangler_map = [[0, 3], [0, 2]]\n two = TwoLocal(4, [], \"cry\", entangler_map, reps=1)\n circuit = two.compose(two)\n\n reference = QuantumCircuit(4)\n params = two.ordered_parameters\n for _ in range(2):\n reference.cry(params[0], 0, 3)\n reference.cry(params[1], 0, 2)\n\n self.assertCircuitEqual(reference, circuit)\n\n def test_ry_blocks(self):\n \"\"\"Test that the RealAmplitudes circuit is instantiated correctly.\"\"\"\n two = RealAmplitudes(4)\n with self.subTest(msg=\"test rotation gate\"):\n self.assertEqual(len(two.rotation_blocks), 1)\n self.assertIsInstance(two.rotation_blocks[0].data[0][0], RYGate)\n\n with self.subTest(msg=\"test parameter bounds\"):\n expected = [(-np.pi, np.pi)] * two.num_parameters\n np.testing.assert_almost_equal(two.parameter_bounds, expected)\n\n def test_ry_circuit(self):\n \"\"\"Test an RealAmplitudes circuit.\"\"\"\n num_qubits = 3\n reps = 2\n entanglement = \"full\"\n parameters = ParameterVector(\"theta\", num_qubits * (reps + 1))\n param_iter = iter(parameters)\n\n expected = QuantumCircuit(3)\n for _ in range(reps):\n for i in range(num_qubits):\n expected.ry(next(param_iter), i)\n expected.cx(0, 1)\n expected.cx(0, 2)\n expected.cx(1, 2)\n for i in range(num_qubits):\n expected.ry(next(param_iter), i)\n\n library = RealAmplitudes(\n num_qubits, reps=reps, entanglement=entanglement\n ).assign_parameters(parameters)\n\n self.assertCircuitEqual(library, expected)\n\n def test_ryrz_blocks(self):\n \"\"\"Test that the EfficientSU2 circuit is instantiated correctly.\"\"\"\n two = EfficientSU2(3)\n with self.subTest(msg=\"test rotation gate\"):\n self.assertEqual(len(two.rotation_blocks), 2)\n self.assertIsInstance(two.rotation_blocks[0].data[0][0], RYGate)\n self.assertIsInstance(two.rotation_blocks[1].data[0][0], RZGate)\n\n with self.subTest(msg=\"test parameter bounds\"):\n expected = [(-np.pi, np.pi)] * two.num_parameters\n np.testing.assert_almost_equal(two.parameter_bounds, expected)\n\n def test_ryrz_circuit(self):\n \"\"\"Test an EfficientSU2 circuit.\"\"\"\n num_qubits = 3\n reps = 2\n entanglement = \"circular\"\n parameters = ParameterVector(\"theta\", 2 * num_qubits * (reps + 1))\n param_iter = iter(parameters)\n\n expected = QuantumCircuit(3)\n for _ in range(reps):\n for i in range(num_qubits):\n expected.ry(next(param_iter), i)\n for i in range(num_qubits):\n expected.rz(next(param_iter), i)\n expected.cx(2, 0)\n expected.cx(0, 1)\n expected.cx(1, 2)\n for i in range(num_qubits):\n expected.ry(next(param_iter), i)\n for i in range(num_qubits):\n expected.rz(next(param_iter), i)\n\n library = EfficientSU2(num_qubits, reps=reps, entanglement=entanglement).assign_parameters(\n parameters\n )\n\n self.assertCircuitEqual(library, expected)\n\n def test_swaprz_blocks(self):\n \"\"\"Test that the ExcitationPreserving circuit is instantiated correctly.\"\"\"\n two = ExcitationPreserving(5)\n with self.subTest(msg=\"test rotation gate\"):\n self.assertEqual(len(two.rotation_blocks), 1)\n 
self.assertIsInstance(two.rotation_blocks[0].data[0][0], RZGate)\n\n with self.subTest(msg=\"test entanglement gate\"):\n self.assertEqual(len(two.entanglement_blocks), 1)\n block = two.entanglement_blocks[0]\n self.assertEqual(len(block.data), 2)\n self.assertIsInstance(block.data[0][0], RXXGate)\n self.assertIsInstance(block.data[1][0], RYYGate)\n\n with self.subTest(msg=\"test parameter bounds\"):\n expected = [(-np.pi, np.pi)] * two.num_parameters\n np.testing.assert_almost_equal(two.parameter_bounds, expected)\n\n def test_swaprz_circuit(self):\n \"\"\"Test a ExcitationPreserving circuit in iswap mode.\"\"\"\n num_qubits = 3\n reps = 2\n entanglement = \"linear\"\n parameters = ParameterVector(\"theta\", num_qubits * (reps + 1) + reps * (num_qubits - 1))\n param_iter = iter(parameters)\n\n expected = QuantumCircuit(3)\n for _ in range(reps):\n for i in range(num_qubits):\n expected.rz(next(param_iter), i)\n shared_param = next(param_iter)\n expected.rxx(shared_param, 0, 1)\n expected.ryy(shared_param, 0, 1)\n shared_param = next(param_iter)\n expected.rxx(shared_param, 1, 2)\n expected.ryy(shared_param, 1, 2)\n for i in range(num_qubits):\n expected.rz(next(param_iter), i)\n\n library = ExcitationPreserving(\n num_qubits, reps=reps, entanglement=entanglement\n ).assign_parameters(parameters)\n\n self.assertCircuitEqual(library, expected)\n\n def test_fsim_circuit(self):\n \"\"\"Test a ExcitationPreserving circuit in fsim mode.\"\"\"\n num_qubits = 3\n reps = 2\n entanglement = \"linear\"\n # need the parameters in the entanglement blocks to be the same because the order\n # can get mixed up in ExcitationPreserving (since parameters are not ordered in circuits)\n parameters = [1] * (num_qubits * (reps + 1) + reps * (1 + num_qubits))\n param_iter = iter(parameters)\n\n expected = QuantumCircuit(3)\n for _ in range(reps):\n for i in range(num_qubits):\n expected.rz(next(param_iter), i)\n shared_param = next(param_iter)\n expected.rxx(shared_param, 0, 1)\n expected.ryy(shared_param, 0, 1)\n expected.cp(next(param_iter), 0, 1)\n shared_param = next(param_iter)\n expected.rxx(shared_param, 1, 2)\n expected.ryy(shared_param, 1, 2)\n expected.cp(next(param_iter), 1, 2)\n for i in range(num_qubits):\n expected.rz(next(param_iter), i)\n\n library = ExcitationPreserving(\n num_qubits, reps=reps, mode=\"fsim\", entanglement=entanglement\n ).assign_parameters(parameters)\n\n self.assertCircuitEqual(library, expected)\n\n def test_circular_on_same_block_and_circuit_size(self):\n \"\"\"Test circular entanglement works correctly if the circuit and block sizes match.\"\"\"\n\n two = TwoLocal(2, \"ry\", \"cx\", entanglement=\"circular\", reps=1)\n parameters = np.arange(two.num_parameters)\n\n ref = QuantumCircuit(2)\n ref.ry(parameters[0], 0)\n ref.ry(parameters[1], 1)\n ref.cx(0, 1)\n ref.ry(parameters[2], 0)\n ref.ry(parameters[3], 1)\n\n self.assertCircuitEqual(two.assign_parameters(parameters), ref)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Line search with Gaussian-smoothed samples on a sphere.\"\"\"\n\nfrom typing import Dict, Optional, Tuple, List, Callable\nimport numpy as np\n\nfrom qiskit.utils import algorithm_globals\nfrom .optimizer import Optimizer, OptimizerSupportLevel\n\n\nclass GSLS(Optimizer):\n \"\"\"Gaussian-smoothed Line Search.\n\n An implementation of the line search algorithm described in\n https://arxiv.org/pdf/1905.01332.pdf, using gradient approximation\n based on Gaussian-smoothed samples on a sphere.\n \"\"\"\n\n _OPTIONS = [\n \"maxiter\",\n \"max_eval\",\n \"disp\",\n \"sampling_radius\",\n \"sample_size_factor\",\n \"initial_step_size\",\n \"min_step_size\",\n \"step_size_multiplier\",\n \"armijo_parameter\",\n \"min_gradient_norm\",\n \"max_failed_rejection_sampling\",\n ]\n\n # pylint: disable=unused-argument\n def __init__(\n self,\n maxiter: int = 10000,\n max_eval: int = 10000,\n disp: bool = False,\n sampling_radius: float = 1.0e-6,\n sample_size_factor: int = 1,\n initial_step_size: float = 1.0e-2,\n min_step_size: float = 1.0e-10,\n step_size_multiplier: float = 0.4,\n armijo_parameter: float = 1.0e-1,\n min_gradient_norm: float = 1e-8,\n max_failed_rejection_sampling: int = 50,\n ) -> None:\n \"\"\"\n Args:\n maxiter: Maximum number of iterations.\n max_eval: Maximum number of evaluations.\n disp: Set to True to display convergence messages.\n sampling_radius: Sampling radius to determine gradient estimate.\n sample_size_factor: The size of the sample set at each iteration is this number\n multiplied by the dimension of the problem, rounded to the nearest integer.\n initial_step_size: Initial step size for the descent algorithm.\n min_step_size: Minimum step size for the descent algorithm.\n step_size_multiplier: Step size reduction after unsuccessful steps, in the\n interval (0, 1).\n armijo_parameter: Armijo parameter for sufficient decrease criterion, in the\n interval (0, 1).\n min_gradient_norm: If the gradient norm is below this threshold, the algorithm stops.\n max_failed_rejection_sampling: Maximum number of attempts to sample points within\n bounds.\n \"\"\"\n super().__init__()\n for k, v in list(locals().items()):\n if k in self._OPTIONS:\n self._options[k] = v\n\n def get_support_level(self) -> Dict[str, int]:\n \"\"\"Return support level dictionary.\n\n Returns:\n A dictionary containing the support levels for different options.\n \"\"\"\n return {\n \"gradient\": OptimizerSupportLevel.ignored,\n \"bounds\": OptimizerSupportLevel.supported,\n \"initial_point\": OptimizerSupportLevel.required,\n }\n\n def optimize(\n self,\n num_vars: int,\n objective_function: Callable,\n gradient_function: Optional[Callable] = None,\n variable_bounds: Optional[List[Tuple[float, float]]] = None,\n initial_point: Optional[np.ndarray] = None,\n ) -> Tuple[np.ndarray, float, int]:\n super().optimize(\n num_vars, objective_function, gradient_function, variable_bounds, initial_point\n )\n\n if initial_point is None:\n initial_point = algorithm_globals.random.normal(size=num_vars)\n else:\n initial_point 
= np.array(initial_point)\n\n if variable_bounds is None:\n var_lb = np.array([-np.inf] * num_vars)\n var_ub = np.array([np.inf] * num_vars)\n else:\n var_lb = np.array([l for (l, _) in variable_bounds])\n var_ub = np.array([u for (_, u) in variable_bounds])\n\n x, x_value, n_evals, _ = self.ls_optimize(\n num_vars, objective_function, initial_point, var_lb, var_ub\n )\n\n return x, x_value, n_evals\n\n def ls_optimize(\n self,\n n: int,\n obj_fun: Callable,\n initial_point: np.ndarray,\n var_lb: np.ndarray,\n var_ub: np.ndarray,\n ) -> Tuple[np.ndarray, float, int, float]:\n \"\"\"Run the line search optimization.\n\n Args:\n n: Dimension of the problem.\n obj_fun: Objective function.\n initial_point: Initial point.\n var_lb: Vector of lower bounds on the decision variables. Vector elements can be -np.inf\n if the corresponding variable is unbounded from below.\n var_ub: Vector of upper bounds on the decision variables. Vector elements can be np.inf\n if the corresponding variable is unbounded from below.\n\n Returns:\n Final iterate as a vector, corresponding objective function value,\n number of evaluations, and norm of the gradient estimate.\n\n Raises:\n ValueError: If the number of dimensions mismatches the size of the initial point or\n the length of the lower or upper bound.\n \"\"\"\n if len(initial_point) != n:\n raise ValueError(\"Size of the initial point mismatches the number of dimensions.\")\n if len(var_lb) != n:\n raise ValueError(\"Length of the lower bound mismatches the number of dimensions.\")\n if len(var_ub) != n:\n raise ValueError(\"Length of the upper bound mismatches the number of dimensions.\")\n\n # Initialize counters and data\n iter_count = 0\n n_evals = 0\n prev_iter_successful = True\n prev_directions, prev_sample_set_x, prev_sample_set_y = None, None, None\n consecutive_fail_iter = 0\n alpha = self._options[\"initial_step_size\"]\n grad_norm = np.inf\n sample_set_size = int(round(self._options[\"sample_size_factor\"] * n))\n\n # Initial point\n x = initial_point\n x_value = obj_fun(x)\n n_evals += 1\n while iter_count < self._options[\"maxiter\"] and n_evals < self._options[\"max_eval\"]:\n\n # Determine set of sample points\n directions, sample_set_x = self.sample_set(n, x, var_lb, var_ub, sample_set_size)\n\n if n_evals + len(sample_set_x) + 1 >= self._options[\"max_eval\"]:\n # The evaluation budget is too small to allow for\n # another full iteration; we therefore exit now\n break\n\n sample_set_y = np.array([obj_fun(point) for point in sample_set_x])\n n_evals += len(sample_set_x)\n\n # Expand sample set if we could not improve\n if not prev_iter_successful:\n directions = np.vstack((prev_directions, directions))\n sample_set_x = np.vstack((prev_sample_set_x, sample_set_x))\n sample_set_y = np.hstack((prev_sample_set_y, sample_set_y))\n\n # Find gradient approximation and candidate point\n grad = self.gradient_approximation(\n n, x, x_value, directions, sample_set_x, sample_set_y\n )\n grad_norm = np.linalg.norm(grad)\n new_x = np.clip(x - alpha * grad, var_lb, var_ub)\n new_x_value = obj_fun(new_x)\n n_evals += 1\n\n # Print information\n if self._options[\"disp\"]:\n print(\"Iter {:d}\".format(iter_count))\n print(\"Point {} obj {}\".format(x, x_value))\n print(\"Gradient {}\".format(grad))\n print(\n \"Grad norm {} new_x_value {} step_size {}\".format(grad_norm, new_x_value, alpha)\n )\n print(\"Direction {}\".format(directions))\n\n # Test Armijo condition for sufficient decrease\n if new_x_value <= x_value - self._options[\"armijo_parameter\"] * 
alpha * grad_norm:\n # Accept point\n x, x_value = new_x, new_x_value\n alpha /= 2 * self._options[\"step_size_multiplier\"]\n prev_iter_successful = True\n consecutive_fail_iter = 0\n\n # Reset sample set\n prev_directions = None\n prev_sample_set_x = None\n prev_sample_set_y = None\n else:\n # Do not accept point\n alpha *= self._options[\"step_size_multiplier\"]\n prev_iter_successful = False\n consecutive_fail_iter += 1\n\n # Store sample set to enlarge it\n prev_directions = directions\n prev_sample_set_x, prev_sample_set_y = sample_set_x, sample_set_y\n\n iter_count += 1\n\n # Check termination criterion\n if (\n grad_norm <= self._options[\"min_gradient_norm\"]\n or alpha <= self._options[\"min_step_size\"]\n ):\n break\n\n return x, x_value, n_evals, grad_norm\n\n def sample_points(\n self, n: int, x: np.ndarray, num_points: int\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Sample ``num_points`` points around ``x`` on the ``n``-sphere of specified radius.\n\n The radius of the sphere is ``self._options['sampling_radius']``.\n\n Args:\n n: Dimension of the problem.\n x: Point around which the sample set is constructed.\n num_points: Number of points in the sample set.\n\n Returns:\n A tuple containing the sampling points and the directions.\n \"\"\"\n normal_samples = algorithm_globals.random.normal(size=(num_points, n))\n row_norms = np.linalg.norm(normal_samples, axis=1, keepdims=True)\n directions = normal_samples / row_norms\n points = x + self._options[\"sampling_radius\"] * directions\n\n return points, directions\n\n def sample_set(\n self, n: int, x: np.ndarray, var_lb: np.ndarray, var_ub: np.ndarray, num_points: int\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Construct sample set of given size.\n\n Args:\n n: Dimension of the problem.\n x: Point around which the sample set is constructed.\n var_lb: Vector of lower bounds on the decision variables. Vector elements can be -np.inf\n if the corresponding variable is unbounded from below.\n var_ub: Vector of lower bounds on the decision variables. Vector elements can be np.inf\n if the corresponding variable is unbounded from above.\n num_points: Number of points in the sample set.\n\n Returns:\n Matrices of (unit-norm) sample directions and sample points, one per row.\n Both matrices are 2D arrays of floats.\n\n Raises:\n RuntimeError: If not enough samples could be generated within the bounds.\n \"\"\"\n # Generate points uniformly on the sphere\n points, directions = self.sample_points(n, x, num_points)\n\n # Check bounds\n if (points >= var_lb).all() and (points <= var_ub).all():\n # If all points are within bounds, return them\n return directions, (x + self._options[\"sampling_radius\"] * directions)\n else:\n # Otherwise we perform rejection sampling until we have\n # enough points that satisfy the bounds\n indices = np.where((points >= var_lb).all(axis=1) & (points <= var_ub).all(axis=1))[0]\n accepted = directions[indices]\n num_trials = 0\n\n while (\n len(accepted) < num_points\n and num_trials < self._options[\"max_failed_rejection_sampling\"]\n ):\n # Generate points uniformly on the sphere\n points, directions = self.sample_points(n, x, num_points)\n indices = np.where((points >= var_lb).all(axis=1) & (points <= var_ub).all(axis=1))[\n 0\n ]\n accepted = np.vstack((accepted, directions[indices]))\n num_trials += 1\n\n # When we are at a corner point, the expected fraction of acceptable points may be\n # exponential small in the dimension of the problem. 
Thus, if we keep failing and\n # do not have enough points by now, we switch to a different method that guarantees\n # finding enough points, but they may not be uniformly distributed.\n if len(accepted) < num_points:\n points, directions = self.sample_points(n, x, num_points)\n to_be_flipped = (points < var_lb) | (points > var_ub)\n directions *= np.where(to_be_flipped, -1, 1)\n points = x + self._options[\"sampling_radius\"] * directions\n indices = np.where((points >= var_lb).all(axis=1) & (points <= var_ub).all(axis=1))[\n 0\n ]\n accepted = np.vstack((accepted, directions[indices]))\n\n # If we still do not have enough sampling points, we have failed.\n if len(accepted) < num_points:\n raise RuntimeError(\n \"Could not generate enough samples \" \"within bounds; try smaller radius.\"\n )\n\n return (\n accepted[:num_points],\n x + self._options[\"sampling_radius\"] * accepted[:num_points],\n )\n\n def gradient_approximation(\n self,\n n: int,\n x: np.ndarray,\n x_value: float,\n directions: np.ndarray,\n sample_set_x: np.ndarray,\n sample_set_y: np.ndarray,\n ) -> np.ndarray:\n \"\"\"Construct gradient approximation from given sample.\n\n Args:\n n: Dimension of the problem.\n x: Point around which the sample set was constructed.\n x_value: Objective function value at x.\n directions: Directions of the sample points wrt the central point x, as a 2D array.\n sample_set_x: x-coordinates of the sample set, one point per row, as a 2D array.\n sample_set_y: Objective function values of the points in sample_set_x, as a 1D array.\n\n Returns:\n Gradient approximation at x, as a 1D array.\n \"\"\"\n ffd = sample_set_y - x_value\n gradient = (\n float(n)\n / len(sample_set_y)\n * np.sum(\n ffd.reshape(len(sample_set_y), 1) / self._options[\"sampling_radius\"] * directions, 0\n )\n )\n return gradient\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"A pulse that is described by complex-valued sample points.\"\"\"\nfrom typing import Dict, List, Optional, Union, Any\n\nimport numpy as np\n\nfrom qiskit.circuit.parameterexpression import ParameterExpression, ParameterValueType\nfrom qiskit.pulse.exceptions import PulseError\nfrom qiskit.pulse.library.pulse import Pulse\nfrom qiskit.pulse.utils import deprecated_functionality\n\n\nclass Waveform(Pulse):\n \"\"\"A pulse specified completely by complex-valued samples; each sample is played for the\n duration of the backend cycle-time, dt.\n \"\"\"\n\n def __init__(\n self,\n samples: Union[np.ndarray, List[complex]],\n name: Optional[str] = None,\n epsilon: float = 1e-7,\n ):\n \"\"\"Create new sample pulse command.\n\n Args:\n samples: Complex array of the samples in the pulse envelope.\n name: Unique name to identify the pulse.\n epsilon: Pulse sample norm tolerance for clipping.\n If any sample's norm exceeds unity by less than or equal to epsilon\n it will be clipped to unit norm. If the sample\n norm is greater than 1+epsilon an error will be raised.\n \"\"\"\n\n samples = np.asarray(samples, dtype=np.complex_)\n self.epsilon = epsilon\n self._samples = self._clip(samples, epsilon=epsilon)\n super().__init__(duration=len(samples), name=name)\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Return sample values.\"\"\"\n return self._samples\n\n def _clip(self, samples: np.ndarray, epsilon: float = 1e-7) -> np.ndarray:\n \"\"\"If samples are within epsilon of unit norm, clip sample by reducing norm by (1-epsilon).\n\n If difference is greater than epsilon error is raised.\n\n Args:\n samples: Complex array of the samples in the pulse envelope.\n epsilon: Pulse sample norm tolerance for clipping.\n If any sample's norm exceeds unity by less than or equal to epsilon\n it will be clipped to unit norm. 
If the sample\n norm is greater than 1+epsilon an error will be raised.\n\n Returns:\n Clipped pulse samples.\n\n Raises:\n PulseError: If there exists a pulse sample with a norm greater than 1+epsilon.\n \"\"\"\n samples_norm = np.abs(samples)\n to_clip = (samples_norm > 1.0) & (samples_norm <= 1.0 + epsilon)\n\n if np.any(to_clip):\n # first try normalizing by the abs value\n clip_where = np.argwhere(to_clip)\n clip_angle = np.angle(samples[clip_where])\n clipped_samples = np.exp(1j * clip_angle, dtype=np.complex_)\n\n # if norm still exceed one subtract epsilon\n # required for some platforms\n clipped_sample_norms = np.abs(clipped_samples)\n to_clip_epsilon = clipped_sample_norms > 1.0\n if np.any(to_clip_epsilon):\n clip_where_epsilon = np.argwhere(to_clip_epsilon)\n clipped_samples_epsilon = (1 - epsilon) * np.exp(\n 1j * clip_angle[clip_where_epsilon], dtype=np.complex_\n )\n clipped_samples[clip_where_epsilon] = clipped_samples_epsilon\n\n # update samples with clipped values\n samples[clip_where] = clipped_samples\n samples_norm[clip_where] = np.abs(clipped_samples)\n\n if np.any(samples_norm > 1.0):\n raise PulseError(\"Pulse contains sample with norm greater than 1+epsilon.\")\n\n return samples\n\n def is_parameterized(self) -> bool:\n \"\"\"Return True iff the instruction is parameterized.\"\"\"\n return False\n\n @property\n def parameters(self) -> Dict[str, Any]:\n \"\"\"Return a dictionary containing the pulse's parameters.\"\"\"\n return dict()\n\n @deprecated_functionality\n def assign_parameters(\n self, value_dict: Dict[ParameterExpression, ParameterValueType]\n ) -> \"Waveform\":\n # Waveforms don't accept parameters\n return self\n\n def __eq__(self, other: Pulse) -> bool:\n return (\n super().__eq__(other)\n and self.samples.shape == other.samples.shape\n and np.allclose(self.samples, other.samples, rtol=0, atol=self.epsilon)\n )\n\n def __hash__(self) -> int:\n return hash(self.samples.tostring())\n\n def __repr__(self) -> str:\n opt = np.get_printoptions()\n np.set_printoptions(threshold=50)\n np.set_printoptions(**opt)\n return \"{}({}{})\".format(\n self.__class__.__name__,\n repr(self.samples),\n \", name='{}'\".format(self.name) if self.name is not None else \"\",\n )\n"
] |
[
[
"numpy.diag",
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.matmul",
"numpy.linalg.norm",
"sklearn.linear_model.Lasso",
"numpy.linalg.lstsq",
"sklearn.linear_model.Ridge",
"numpy.real",
"numpy.log10",
"numpy.transpose",
"numpy.mod"
],
[
"numpy.arange",
"numpy.testing.assert_almost_equal"
],
[
"numpy.hstack",
"numpy.clip",
"numpy.linalg.norm",
"numpy.array",
"numpy.where",
"numpy.vstack"
],
[
"numpy.abs",
"numpy.get_printoptions",
"numpy.allclose",
"numpy.asarray",
"numpy.set_printoptions",
"numpy.argwhere",
"numpy.any",
"numpy.angle",
"numpy.exp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sgherbst/msdsl
|
[
"e38d5ecdb88b3574bda62f22a4f91ce3e4173d12",
"e38d5ecdb88b3574bda62f22a4f91ce3e4173d12"
] |
[
"tests/lowlevel/test_ctle2.py",
"tests/func_sim/test_func_sim.py"
] |
[
"import pytest\nimport numpy as np\nfrom pathlib import Path\nfrom scipy.interpolate import interp1d\nfrom scipy.signal import lfilter\nfrom scipy.signal import cont2discrete\nfrom msdsl.interp.interp import calc_interp_w\nfrom msdsl.interp.lds import SplineLDS\nfrom msdsl.interp.ctle import calc_ctle_abcd, calc_ctle_num_den\nfrom msdsl.interp.interp import calc_piecewise_poly, eval_piecewise_poly\n\nTHIS_DIR = Path(__file__).resolve().parent\nTOP_DIR = THIS_DIR.parent.parent\nDATA_FILE = TOP_DIR / 'channel_resp_mar11.csv'\n\ndef interp_emu_res(tvec, vvec, dtmax, tsim, order, npts):\n # hold the previous stop index\n stop_idx = -1\n # calculate spacing between \"hidden\" timesteps\n th = dtmax/(npts-1)\n # build up partial results\n results = []\n for k in range(len(tvec)):\n # find start index\n start_idx = stop_idx+1\n # find stop index\n if k == len(tvec)-1:\n stop_idx = len(tsim)-1\n else:\n stop_idx = np.searchsorted(tsim, tvec[k+1], side='left')\n # find vector of times for interpolation\n t_interp = tsim[start_idx:(stop_idx+1)] - tvec[k]\n # calculate piecewise polynomial representation\n U = calc_piecewise_poly(vvec[k], order=order)\n results.append(eval_piecewise_poly(t_interp, th, U))\n # return array of the results\n return np.concatenate(results)\n\[email protected]('fz,fp1,npts,order,gbw,dtmax,err_lim', [\n (0.8e9, 1.6e9, 4, 3, 40e9, 31.25e-12, 5e-3),\n])\ndef test_ctle2(fz, fp1, npts, order, gbw, dtmax, err_lim):\n # normalize frequencies\n fz = fz*dtmax\n fp1 = fp1*dtmax\n gbw = gbw*dtmax\n\n # read in data\n my_data = np.genfromtxt(DATA_FILE, delimiter=',', skip_header=1)\n t_resp = my_data[:, 1] - my_data[0, 1]\n v_resp = my_data[:, 2]\n\n # find timestep of oversampled data\n tover = np.median(np.diff(t_resp))\n assert np.all(np.isclose(np.diff(t_resp), tover))\n print(f'tover: {tover*1e12:0.3f} ps')\n\n # build interpolator for data\n my_interp = interp1d(t_resp, v_resp)\n svec = np.linspace(0, 1, npts)\n\n # find state-space representation of the CTLE\n A, B, C, D = calc_ctle_abcd(fz=fz, fp1=fp1, gbw=gbw)\n\n # calculate response using spline method\n # the list of timesteps used is one that was found to be particularly bad for the emulator\n W = calc_interp_w(npts=npts, order=order)\n ctle = SplineLDS(A=A, B=B, C=C, D=D, W=W)\n x = np.zeros((A.shape[0],), dtype=float)\n t = 0\n dtlist = [0.960, 0.080, 0.960, 0.960, 0.080, 0.960, 0.960, 0.080, 0.960, 0.960,\n 0.080, 0.960, 0.960, 0.080, 0.960, 0.960, 0.080, 0.960, 0.960, 0.080,\n 0.960, 0.960, 0.080, 0.960, 0.960, 0.080, 0.960, 0.960, 0.080, 0.960,\n 0.960, 0.080, 0.960, 0.960, 0.080, 0.960, 0.960, 0.080, 0.960, 0.960,\n 0.080, 0.960, 0.960, 0.080, 0.960, 0.960, 0.080]\n tlist = []\n ylist = []\n for dt in dtlist:\n tlist.append(t)\n x, y = ctle.calc_update(xo=x, inpt=my_interp(t+svec*dtmax), dt=dt)\n ylist.append(y)\n t += dt*dtmax\n\n # calculate measured values\n y_meas = interp_emu_res(tlist, ylist, dtmax, t_resp, order, npts)\n\n # find expected response of the CTLE\n num, den = calc_ctle_num_den(fz=fz, fp1=fp1, gbw=gbw)\n b, a, _ = cont2discrete((num, den), dt=tover/dtmax)\n y_expt = lfilter(b[0], a, v_resp)\n\n # uncomment to plot results\n # import matplotlib.pyplot as plt\n # plt.plot(t_resp, y_expt)\n # plt.plot(t_resp, y_meas)\n # plt.show()\n\n # calculate error\n rms_err = np.sqrt(np.mean((y_expt-y_meas)**2))\n print('rms_err:', rms_err)\n assert rms_err < err_lim\n",
"# general imports\nfrom pathlib import Path\nimport numpy as np\nimport importlib\n\n# AHA imports\nimport magma as m\n\n# msdsl imports\nfrom ..common import *\nfrom msdsl import MixedSignalModel, VerilogGenerator\n\nBUILD_DIR = Path(__file__).resolve().parent / 'build'\nDOMAIN = np.pi\nRANGE = 1.0\n\ndef pytest_generate_tests(metafunc):\n pytest_sim_params(metafunc)\n pytest_real_type_params(metafunc)\n pytest_func_mode_params(metafunc)\n tests = [(0, 0.0105, 512),\n (1, 0.000318, 128)]\n if importlib.util.find_spec('cvxpy'):\n tests.append((2, 0.000232, 32))\n metafunc.parametrize('order,err_lim,numel', tests)\n metafunc.parametrize('func', [np.sin, np.cos])\n\ndef gen_model(myfunc, order=0, numel=512, real_type=RealType.FixedPoint, func_mode='sync'):\n # create mixed-signal model\n model = MixedSignalModel('model', build_dir=BUILD_DIR,\n real_type=real_type)\n model.add_analog_input('in_')\n model.add_analog_output('out')\n model.add_digital_input('clk')\n model.add_digital_input('rst')\n\n # create function\n write_tables = (func_mode in {'sync'})\n real_func = model.make_function(\n myfunc, domain=[-DOMAIN, +DOMAIN], order=order, numel=numel, write_tables=write_tables)\n\n # apply function\n model.set_from_func(model.out, real_func, model.in_, clk=model.clk,\n rst=model.rst, func_mode=func_mode)\n\n # write the model\n return model.compile_to_file(VerilogGenerator())\n\ndef test_func_sim(func, simulator, order, err_lim, numel, real_type, func_mode):\n # set the random seed for repeatable results\n np.random.seed(0)\n\n # create clipped version of function\n myfunc = lambda x: func(np.clip(x, -DOMAIN, +DOMAIN))\n\n # generate model\n model_file = gen_model(\n myfunc=myfunc, order=order, numel=numel, real_type=real_type, func_mode=func_mode)\n\n # declare circuit\n class dut(m.Circuit):\n name = 'test_func_sim'\n io = m.IO(\n in_=fault.RealIn,\n out=fault.RealOut,\n clk=m.In(m.Clock),\n rst=m.BitIn\n )\n\n # create the tester\n tester = MsdslTester(dut, dut.clk)\n\n # initialize\n tester.poke(dut.in_, 0)\n tester.poke(dut.clk, 0)\n tester.poke(dut.rst, 1)\n tester.eval()\n\n # apply reset\n tester.step(2)\n\n # clear reset\n tester.poke(dut.rst, 0)\n tester.step(2)\n\n # save the outputs\n inpts = np.random.uniform(-1.2*DOMAIN, +1.2*DOMAIN, 100)\n apprx = []\n for in_ in inpts:\n tester.poke(dut.in_, in_)\n if func_mode in {'sync'}:\n tester.step(2)\n else:\n tester.eval()\n apprx.append(tester.get_value(dut.out))\n\n # run the simulation\n parameters = {\n 'in_range': 2*DOMAIN,\n 'out_range': 2*RANGE\n }\n tester.compile_and_run(\n directory=BUILD_DIR,\n simulator=simulator,\n ext_srcs=[model_file, get_file('func_sim/test_func_sim.sv')],\n parameters=parameters,\n real_type=real_type\n )\n\n # evaluate the outputs\n apprx = np.array([elem.value for elem in apprx], dtype=float)\n\n # compute the exact response to inputs\n exact = myfunc(inpts)\n\n # uncomment to plot results\n # import matplotlib.pyplot as plt\n # plt.plot(inpts, apprx, '*')\n # plt.show()\n\n # check the result\n err = np.sqrt(np.mean((exact-apprx)**2))\n print(f'RMS error: {err}')\n assert err <= err_lim\n"
] |
[
[
"numpy.linspace",
"numpy.genfromtxt",
"numpy.concatenate",
"scipy.interpolate.interp1d",
"scipy.signal.cont2discrete",
"numpy.diff",
"numpy.mean",
"numpy.searchsorted",
"scipy.signal.lfilter",
"numpy.zeros"
],
[
"numpy.random.seed",
"numpy.clip",
"numpy.mean",
"numpy.random.uniform",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
astonzhang/Parameterization-of-Hypercomplex-Multiplications
|
[
"0e3f1ceebccce9f14ce629356733c07602eb351c"
] |
[
"layers/qlib.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nfrom numpy.random import RandomState\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import random_ops\nimport random\n\n\"\"\" Quarternion layers\n\nReferences:\n\nhttps://arxiv.org/pdf/1806.04418.pdf\nhttps://arxiv.org/pdf/1806.07789.pdf\n\nhttps://github.com/Orkis-Research/light-Recurrent-Neural-Networks\nhttps://github.com/Orkis-Research/light-Convolutional-Neural-Networks-for-End-to-End-Automatic-Speech-Recognition\n\nSome functions are direct ports from the Pytorch library.\n\n\"\"\"\n\ndef make_quarternion_mul(kernel, concat_dim=0):\n\tr, i, j, k = tf.split(kernel, 4, axis=-1)\n\tr2 = tf.concat([r, -i, -j, -k], axis=-1)\t# 0, 1, 2, 3\n\ti2 = tf.concat([i, r, -k, j], axis=-1)\t# 1, 0, 3, 2\n\tj2 = tf.concat([j, k, r, -i], axis=-1)\t# 2, 3, 0, 1\n\tk2 = tf.concat([k, -j, i, r],axis=-1)\t# 3, 2, 1, 0\n\thamilton = tf.concat([r2, i2, j2, k2], axis=concat_dim)\n\treturn hamilton\n\n\ndef get_r(x, a=1):\n\treturn tf.split(x, 4, axis=a)[0]\n\ndef get_i(x, a=1):\n\treturn tf.split(x, 4, axis=a)[1]\n\ndef get_j(x, a=1):\n\treturn tf.split(x, 4, axis=a)[2]\n\ndef get_k(x, a=1):\n\treturn tf.split(x, 4, axis=a)[3]\n\ndef quarternion_attention(a, b):\n\t\"\"\" Performs dot product attention between two quarternion sequences.\n\n\ta = bsz x al x dim\n\tb = bsz x bl x dim\n\n\tfollowing:\n\t(rr' - xx' - yy' - zz') +\n\t\t(rx' + xr' + yz' - zy')i +\n\t\t(ry' - xz' + yr' + zx')j +\n\t\t(rz' + xy' - yx' + zr')k +\n\n\tthe output should be one attention matrix for each component (r,i,j,k)\n\t\"\"\"\n\tprint(\"light Attention!\")\n\tprint(a)\n\tprint(b)\n\tal, bl = tf.shape(a)[2], tf.shape(b)[2]\n\n\tar, ax, ay, az = tf.split(a, 4, axis=-1)\n\tbr, bx, by, bz = tf.split(b, 4, axis=-1)\n\tr = tf.matmul(ar, br, transpose_b=True) - tf.matmul(ax, bx, transpose_b=True) - tf.matmul(ay, by, transpose_b=True) - tf.matmul(az, bz, transpose_b=True)\n\ti = tf.matmul(ar, bx, transpose_b=True) + tf.matmul(ax, br, transpose_b=True) + tf.matmul(ay, bz, transpose_b=True) - tf.matmul(az, by, transpose_b=True)\n\tj = tf.matmul(ar, by, transpose_b=True) - tf.matmul(ax, bz, transpose_b=True) + tf.matmul(ay, br, transpose_b=True) + tf.matmul(az, bx, transpose_b=True)\n\tk = tf.matmul(ar, bz, transpose_b=True) + tf.matmul(ax, by, transpose_b=True) - tf.matmul(ay, bx, transpose_b=True) + tf.matmul(az, br, transpose_b=True)\n\treturn [r, i, j, k]\n\ndef quarternion_dot_product_att(a, b):\n\t\"\"\" Wrapper for two sequences\n\t\"\"\"\n\tal = tf.shape(a)[1]\n\tbl = tf.shape(b)[1]\n\t# print(a)\n\td = a.get_shape().as_list()[2]\n\tbsz = tf.shape(b)[0]\n\ta = tf.reshape(a, [-1, d])\n\ta = tf.tile(a, [bl, 1])\n\tb = tf.reshape(b, [-1, d])\n\tb = tf.tile(b, [al, 1])\n\tatt = quarternion_dot(a, b)\n\tatt = tf.reshape(att, [bsz, -1, al * bl])\n\tatt = tf.reduce_sum(att, 1)\n\treturn tf.reshape(att, [-1, al * bl])\n\ndef quarternion_dot_3d(q0, q1):\n\td = q0.get_shape().as_list()[2]\n\tsq = tf.shape(q0)[1]\n\tq0 = tf.reshape(q0, [-1, d])\n\tq1 = tf.reshape(q1, [-1, d])\n\tout = quarternion_dot(q0, q1)\n\treturn tf.reshape(out, [-1, sq, d])\n\ndef quarternion_dot(q0, q1):\n\t\"\"\" Quarternion product between 2 quarternions\n\n\treturns same shape and acts like element-wise quarternion mul\n\t\"\"\"\n\tq1_r = get_r(q1)\n\tq1_i = get_i(q1)\n\tq1_j = get_j(q1)\n\tq1_k = get_k(q1)\n\n\tr_base = tf.multiply(q0, q1)\n\tr = get_r(r_base) - get_i(r_base) - 
get_j(r_base) - get_k(r_base)\n\n\ti_base = tf.multiply(q0, tf.concat([q1_i, q1_r, q1_k, q1_j], 1))\n\ti = get_r(i_base) + get_i(i_base) + get_j(i_base) - get_k(i_base)\n\n\tj_base = tf.multiply(q0, tf.concat([q1_j, q1_k, q1_r, q1_i], 1))\n\tj = get_r(j_base) - get_i(j_base) + get_j(j_base) + get_k(j_base)\n\n\tk_base = tf.multiply(q0, tf.concat([q1_k, q1_j, q1_i, q1_r], 1))\n\tk = get_r(k_base) + get_i(k_base) - get_j(k_base) + get_k(k_base)\n\n\treturn tf.concat([r, i, j, k], 1)\n\ndef quarternion_concat(x, axis):\n\t\"\"\" Helpful if we have 2 quarternions in [r,i,j,k].\n\tWe can't simply concat them as it would mess the components.\n\tSo in this case, we extract each component and concat them individually.\n\t\"\"\"\n\toutput = [[] for i in range(4)]\n\tfor _x in x:\n\t\tsp = tf.split(_x, 4, axis=axis)\n\t\tfor i in range(4):\n\t\t\toutput[i].append(sp[i])\n\n\tfinal = []\n\tfor o in output:\n\t\to = tf.concat(o, axis)\n\t\tfinal.append(o)\n\n\treturn tf.concat(final, axis)\n\ndef quarternion_ffn_3d(x, dim, name='', init=None,\n\t\t\t\tnum_layers=1, activation=None, reuse=None):\n\t\"\"\" Quarternion Feed-forward layers to 3D input [bsz x seq_len x dim]\n\treturns same shape tensor with new projected dimension.\n\t\"\"\"\n\tprint(\"QFFN layer..\")\n\t_d = x.get_shape().as_list()[2]\n\tsq = tf.shape(x)[1]\n\tx = tf.reshape(x, [-1, _d])\n\tx = quarternion_ffn(x, dim, name=name, init=init,\n\t\t\t\t\t\tnum_layers=num_layers,\n\t\t\t\t\t\tactivation=activation,reuse=reuse)\n\tx = tf.reshape(x, [-1, sq, dim])\n\treturn x\n\ndef factorized_ffn_3d(x, dim, name='', init=None,\n\t\t\t\tnum_layers=1, activation=None, reuse=None):\n\t\"\"\" 3D factorized FFN layer\n\t\"\"\"\n\tprint(\"Factor Layer\")\n\t_d = x.get_shape().as_list()[2]\n\tsq = tf.shape(x)[1]\n\tx = tf.reshape(x, [-1, _d])\n\tx = factorized_ffn(x, dim, name=name, init=init,\n\t\t\t\t\t\tnum_layers=num_layers,\n\t\t\t\t\t\tactivation=activation,reuse=reuse)\n\tx = tf.reshape(x, [-1, sq, dim])\n\treturn x\n\n\ndef factorized_ffn(x, dim, name='', init=None,\n\t\t\t\tnum_layers=1, activation=None, reuse=None):\n\t\"\"\" Factorized FFN\n\t\"\"\"\n\tif(init is None):\n\t\tinit = tf.contrib.layers.xavier_initializer()\n\tinput_dim=x.get_shape().as_list()[2]\n\tk1 = tf.get_variable('factork1{}'.format(name), [input_dim], initializer=init)\n\tk2 = tf.get_variable('factork2{}'.format(name), [dim], initializer=init)\n\tW = tf.tensordot(k1, k2, axes=0)\n\toutput = tf.matmul(x, W)\n\tif(activation):\n\t\toutput = activation(output)\n\treturn output\n\ndef quarternion_ffn(x, dim, name='', init=None,\n\t\t\t\tnum_layers=1, activation=None, reuse=None):\n\t\"\"\" Implements quarternion feed-forward layer\n\n\tx is [bsz x features] tensor\n\t\"\"\"\n\tif(init is None):\n\t\tinit = tf.contrib.layers.xavier_initializer()\n\t\t# init = q_xavier_initializer()\n\tinput_dim = x.get_shape().as_list()[1] // 4\n\twith tf.variable_scope('Q{}'.format(name), reuse=reuse) as scope:\n\t\tkernel = tf.get_variable('quarternion', [input_dim, dim], initializer=init)\n\t\thamilton = make_quarternion_mul(kernel)\n\t\toutput = tf.matmul(x, hamilton)\n\t\tif(activation):\n\t\t\toutput = activation(output)\n\t\treturn output\n\ndef make_random_mul(kernel, n=4, concat_dim=0, dual=False):\n\t\"\"\" input is dim/n x dim\n\toutput is dim x dim\n\n\tgeneralization and parameterized hypercomplex product\n\t\"\"\"\n\tdim = kernel.get_shape().as_list()[1]\n\tdim2 = kernel.get_shape().as_list()[0]\n\tkernel = tf.reshape(kernel, [dim2, 1, 1, dim])\n\tmix = tf.split(kernel, n, 
axis=-1)\n\tsdim = mix[0].get_shape().as_list()[-1]\t# dim//n x 1 x 1 x dim//n\n\n\tAM = tf.get_variable('A', [n, 1, n, n])\n\n\tcat = tf.concat(mix, axis=1) # dim/n x n x 1 x dim/n\n\tcat = tf.tile(cat, [1, 1, n, 1])\t# dim/n x n x n x dim/n\n\tcat = tf.transpose(cat, [1, 0, 2, 3])\t# n x dim/n x n x dim/n\n\n\tif(dual==1):\n\t\tprint(\"Using Dual..\")\n\t\tBM = tf.get_variable('B', [n, 1, n, n])\n\t\tAM *= tf.nn.sigmoid(BM)\n\n\tAM = tf.tile(AM, [1, dim2, 1, 1])\t# n x dim/n x n x n\n\tcat = tf.matmul(AM, cat)\t# n x dim/n x n x dim/n\n\toutput = tf.reshape(cat, [dim2 *n, dim])\n\treturn output\n\n\ndef random_ffn_3d(x, dim, n=16, name='', init=None,\n\t\t\t\tnum_layers=1, activation=None, reuse=None, dual=False):\n\t\"\"\" Implements random feed-forward layer\n\n\tx is [bsz x features] tensor\n\t\"\"\"\n\tprint(\"R-FFN layer..n={} dual={}\".format(n, dual))\n\t_d = x.get_shape().as_list()[2]\n\tsq = tf.shape(x)[1]\n\tx = tf.reshape(x, [-1, _d])\n\tprint(x)\n\tx = random_ffn(x, dim, n=n, name=name, init=init,\n\t\t\t\t\t\tnum_layers=num_layers,\n\t\t\t\t\t\tactivation=activation, reuse=reuse, dual=dual)\n\tx = tf.reshape(x, [-1, sq, dim])\n\treturn x\n\n\ndef random_ffn(x, dim, n=4, name='', init=None,\n\t\t\t\tnum_layers=1, activation=None, reuse=None, dual=0):\n\t\"\"\" Implements random feed-forward layer\n\n\tx is [bsz x features] tensor\n\t\"\"\"\n\tif(init is None):\n\t\tinit = tf.contrib.layers.xavier_initializer()\n\t\t# init = q_xavier_initializer()\n\tinput_dim = x.get_shape().as_list()[1] // n\n\twith tf.variable_scope('R{}'.format(name), reuse=reuse) as scope:\n\t\tkernel = tf.get_variable('random', [input_dim, dim], initializer=init)\n\t\thamilton = make_random_mul(kernel, n=n, dual=dual)\n\t\toutput = tf.matmul(x, hamilton)\n\t\tif(activation):\n\t\t\toutput = activation(output)\n\t\treturn output\n\n\ndef octonion_ffn_3d(x, dim, name='', init=None,\n\t\t\t\tnum_layers=1, activation=None, reuse=None):\n\t\"\"\" Quarternion Feed-forward layers to 3D input [bsz x seq_len x dim]\n\treturns same shape tensor with new projected dimension.\n\t\"\"\"\n\tprint(\"OFFN layer..\")\n\t_d = x.get_shape().as_list()[2]\n\tsq = tf.shape(x)[1]\n\tx = tf.reshape(x, [-1, _d])\n\tx = octonion_ffn(x, dim, name=name, init=init,\n\t\t\t\t\t\tnum_layers=num_layers,\n\t\t\t\t\t\tactivation=activation,reuse=reuse)\n\tx = tf.reshape(x, [-1, sq, dim])\n\treturn x\n\ndef octonion_ffn(x, dim, name='', init=None,\n\t\t\t\tnum_layers=1, activation=None, reuse=None):\n\tif(init is None):\n\t\tinit = tf.contrib.layers.xavier_initializer()\n\tinput_dim = x.get_shape().as_list()[1] // 8\n\twith tf.variable_scope('OCT{}'.format(name), reuse=reuse) as scope:\n\t\tkernel = tf.get_variable('octonion', [input_dim, dim], initializer=init)\n\t\toutput = octonion_mul(x, kernel)\n\t\treturn output\n\n\ndef hamilton_product(x, kernel):\n\th = make_quarternion_mul(kernel)\n\toutput = tf.matmul(x, h)\n\treturn output\n\ndef qstar(x):\n\tx = tf.split(x, 4, axis=-1)\n\tx1 = -x[1]\n\tx2 = -x[2]\n\treturn tf.concat([x[0],x1,x2,x[3]], axis=-1)\n\ndef octonion_mul(x, kernel):\n\tx1, x2 = tf.split(x, 2, axis=-1)\n\tk1, k2 = tf.split(kernel, 2, axis=-1)\n\tprint(x1)\n\tprint(k1)\n\to1 = hamilton_product(x1, k1)\n\to2 = hamilton_product(k2, x1)\n\to1 -= hamilton_product(qstar(k2), x2)\n\to2 += hamilton_product(x2, qstar(k1))\n\toutput = tf.concat([o1, o2], axis=-1)\n\treturn output\n\nclass QuarternionRNN(tf.nn.rnn_cell.RNNCell):\n\n\tdef __init__(self, input_dim, output_dim,\n\t\t\t\t\tinitializer=None, name='', 
reuse=None):\n\t\t\"\"\" Rough implementation (need double-check)\n\t\tfrom the Quarternion RNN paper. For now, works decently.\n\t\t\"\"\"\n\t\tself.dim = output_dim\n\t\twith tf.variable_scope(\"QuartRNN{}\".format(name), reuse=reuse) as scope:\n\t\t\tif(initializer is None):\n\t\t\t\t# initializer = tf.contrib.layers.xavier_initializer()\n\t\t\t\tinitialzier = tf.orthogonal_initializer()\n\t\t\tinput_dim = input_dim // 4\n\t\t\tself.Wh = tf.get_variable(\"Wh\", [input_dim, output_dim],\n\t\t\t\t\t\t\t\t\tinitializer=initializer)\n\t\t\tself.Wx = tf.get_variable(\"Wx\", [input_dim, output_dim],\n\t\t\t\t\t\t\t\t\tinitializer=initializer)\n\t\t\tself.Wy = tf.get_variable(\"Wy\", [input_dim, output_dim],\n\t\t\t\t\t\t\t\t\tinitializer=initializer)\n\t\t\tself.Wh = make_quarternion_mul(self.Wh)\n\t\t\tself.Wx = make_quarternion_mul(self.Wx)\n\t\t\tself.Wy = make_quarternion_mul(self.Wy)\n\n\t@property\n\tdef state_size(self):\n\t\treturn self.dim\n\n\t@property\n\tdef output_size(self):\n\t\treturn self.dim\n\n\n\tdef __call__(self, inputs, state, scope=None):\n\t\t\"\"\"\n\t\tinputs: 2-D tensor of shape [batch_size, feats + [gates]]\n\t\t\"\"\"\n\t\tnew_state = tf.matmul(state, self.Wh) + tf.matmul(inputs, self.Wx)\n\t\tnew_state = tf.nn.sigmoid(new_state)\n\t\toutput = tf.nn.tanh(tf.matmul(inputs, self.Wy))\n\t\treturn output, new_state\n"
] |
[
[
"tensorflow.matmul",
"tensorflow.multiply",
"tensorflow.concat",
"tensorflow.get_variable",
"tensorflow.transpose",
"tensorflow.shape",
"tensorflow.nn.sigmoid",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.orthogonal_initializer",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.tensordot",
"tensorflow.split",
"tensorflow.tile"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
pietrobarbiero/logic_explained_networks
|
[
"238f2a220ae8fc4f31ab0cf12649603aba0285d5",
"238f2a220ae8fc4f31ab0cf12649603aba0285d5"
] |
[
"data/load_structured_datasets.py",
"tests/test_models.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport torch\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import MinMaxScaler, KBinsDiscretizer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom torch.nn.functional import one_hot\nfrom torch.utils.data import TensorDataset\n\n\ndef load_mimic(base_dir='./data'):\n data = pd.read_csv(f'{base_dir}/mimic-ii/full_cohort_data.csv')\n # data.drop('hgb_first')\n fs = [\n 'aline_flg',\n 'gender_num',\n # 'hosp_exp_flg',\n # 'icu_exp_flg',\n # 'day_28_flg',\n # 'censor_flg',\n 'sepsis_flg', 'chf_flg', 'afib_flg',\n 'renal_flg', 'liver_flg', 'copd_flg', 'cad_flg', 'stroke_flg',\n 'mal_flg', 'resp_flg',\n ]\n features_name = fs\n data1 = data[fs].values\n imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')\n data1 = imp_mean.fit_transform(data1)\n\n f2 = fs.copy()\n f2.append('day_icu_intime')\n f2.append('service_unit')\n f2.append('day_28_flg')\n f2.append('hospital_los_day')\n f2.append('icu_exp_flg')\n f2.append('hosp_exp_flg')\n f2.append('censor_flg')\n f2.append('mort_day_censored')\n f2 = data.columns.difference(f2)\n data2 = data[f2].values\n imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')\n data2 = imp_mean.fit_transform(data2)\n scaler = MinMaxScaler((0, 1))\n data2 = scaler.fit_transform(data2)\n features_name = features_name + list(f2)\n est = KBinsDiscretizer(n_bins=3, encode='onehot-dense', strategy='uniform')\n data2d = est.fit_transform(data2)\n f2d = []\n for feature in f2:\n # f2d.append(feature + '_VLOW')\n f2d.append(feature + '_LOW')\n f2d.append(feature + '_NORMAL')\n f2d.append(feature + '_HIGH')\n # f2d.append(feature + '_VHIGH')\n features_name = fs + f2d\n\n datax = np.hstack((data1, data2d))\n # datay = data['day_28_flg'].values\n # datay = (data['hospital_los_day']>6).values\n # datay = data['hosp_exp_flg'].values\n\n datay = (data['day_28_flg'].values + data['hosp_exp_flg'].values + data['icu_exp_flg'].values + (1-data['censor_flg'].values)) > 0\n\n # model = DecisionTreeClassifier()\n # # model = RandomForestClassifier()\n # scores = cross_val_score(model, datax, datay, cv=10)\n # print(scores.mean())\n\n x = torch.FloatTensor(datax)\n y = one_hot(torch.tensor(datay).to(torch.long)).to(torch.float)\n class_names = ['death_before_28_days', 'survived_28_days']\n return x, y, features_name, class_names\n\n\ndef load_celldiff(base_dir='./data'):\n gene_expression_matrix = pd.read_csv(f'{base_dir}/celldiff/data_matrix.csv', index_col=0)\n clustering_labels = pd.read_csv(f'{base_dir}/celldiff/cluster_labels.csv', index_col=0)\n biomarkers = pd.read_csv(f'{base_dir}/celldiff/markers.csv', index_col=0)\n\n labels = clustering_labels.values.squeeze()\n\n scaler = MinMaxScaler((0, 1))\n scaler.fit(gene_expression_matrix.values)\n data_normalized = scaler.transform(gene_expression_matrix.values)\n\n x = torch.FloatTensor(data_normalized)\n y = torch.FloatTensor(labels).to(torch.long).squeeze()\n\n # model = DecisionTreeClassifier()\n # model = RandomForestClassifier()\n # scores = cross_val_score(model, data_normalized, labels, cv=10)\n # print(scores.mean())\n\n feature_names = gene_expression_matrix.columns\n class_names = [f\"Cluster {i}\" for i in np.unique(labels)]\n\n x = torch.FloatTensor(data_normalized)\n y = one_hot(torch.tensor(labels).to(torch.long)).to(torch.float)\n return x, y, feature_names, 
class_names\n\n\ndef load_vDem(base_dir='./data'):\n data = pd.read_csv(f'{base_dir}/vdem/V-Dem-CY-Core-v10.csv')\n data['country_name_year'] = data['country_name'] + '_' + data['year'].astype(str)\n data_2000 = data[data['year'] > 2000].iloc[:, 12:-1].dropna(axis=1)\n\n high_level_indicators = [\n 'v2x_polyarchy',\n # 'v2x_libdem',\n # 'v2x_partipdem',\n 'v2x_delibdem',\n 'v2x_egaldem'\n ]\n mid_level_indicators = [\n 'v2x_api',\n 'v2x_mpi',\n 'v2x_freexp_altinf',\n 'v2x_frassoc_thick',\n 'v2x_suffr',\n 'v2xel_frefair',\n 'v2x_elecoff',\n # 'v2x_liberal',\n 'v2xcl_rol',\n # 'v2x_jucon',\n # 'v2xlg_legcon',\n # 'v2x_partip',\n 'v2x_cspart',\n # 'v2xdd_dd',\n # 'v2xel_locelec',\n # 'v2xel_regelec',\n 'v2xdl_delib',\n 'v2x_egal',\n 'v2xeg_eqprotec',\n 'v2xeg_eqaccess',\n 'v2xeg_eqdr',\n ]\n\n # drop_list = ['codelow', 'codehigh', 'sd', 'osp', 'nr', 'mean']\n low_level_indicators = []\n for f in data_2000.columns:\n if f.endswith('_ord') and f not in high_level_indicators and f not in mid_level_indicators:\n low_level_indicators.append(f)\n\n low_level_indicators_continuous = []\n for f in data_2000.columns:\n if f.endswith('_codehigh') or f.endswith('_codelow') and \\\n f not in high_level_indicators and f not in mid_level_indicators:\n low_level_indicators_continuous.append(f)\n\n print(f'Main {len(high_level_indicators)} - Area {len(mid_level_indicators)} - Raw {len(low_level_indicators)}')\n\n data_low_continuous = data_2000[low_level_indicators_continuous]\n\n data_low_raw = data_2000[low_level_indicators]\n one_hots = []\n for indicator in low_level_indicators:\n c = data_low_raw[indicator].values\n n_bins = int(c.max())\n kbin = KBinsDiscretizer(n_bins=n_bins, encode='onehot-dense', strategy='uniform')\n c1h = kbin.fit_transform(c.reshape(-1, 1))\n one_hots.append(c1h)\n\n new_indicator_names = []\n for clist, cname in zip(one_hots, low_level_indicators):\n if clist.shape[1] > 1:\n for i in range(clist.shape[1]):\n new_indicator_names.append(f'{cname}_{i}')\n else:\n new_indicator_names.append(f'{cname}')\n\n data_low = pd.DataFrame(np.hstack(one_hots), columns=new_indicator_names)\n data_mid = data_2000[mid_level_indicators] > 0.5\n data_high = data_2000[high_level_indicators].iloc[:, 0] > 0.5\n\n # scores = cross_val_score(LogisticRegression(), data_mid.values, data_high.values, cv=10)\n # print(scores.mean())\n # scores = cross_val_score(DecisionTreeClassifier(), data_mid.values, data_high.values, cv=10)\n # print(scores.mean())\n # scores = cross_val_score(RandomForestClassifier(), data_mid.values, data_high.values, cv=10)\n # print(scores.mean())\n feature_names = data_low.columns\n concept_names = data_mid.columns\n class_names = [\"Electoral democracy\", \"Non electoral democracy\"]\n\n x = torch.FloatTensor(data_low.values)\n c = torch.FloatTensor(data_mid.values)\n y = one_hot(torch.tensor(data_high.values).to(torch.long)).to(torch.float)\n return x, c, y, feature_names, concept_names, class_names\n\n\nif __name__ == '__main__':\n c0, x, y, f, conc, clas = load_vDem('.')\n # x, y, f, c = load_celldiff('.')\n",
"import unittest\n\nimport torch\nfrom torch.utils.data import TensorDataset\nfrom sklearn import datasets\nfrom sklearn.preprocessing import LabelBinarizer\n\nfrom lens.models.relu_nn import XReluNN\nfrom lens.models.psi_nn import PsiNetwork\nfrom lens.models.general_nn import XGeneralNN\nfrom lens.models.tree import XDecisionTreeClassifier\nfrom lens.models.brl import XBRLClassifier\nfrom lens.utils.base import set_seed\nfrom lens.utils.metrics import Accuracy\n\n\n# Create data\n\nx = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=torch.float).cpu()\ny = torch.tensor([0, 1, 1, 0], dtype=torch.float).unsqueeze(1).cpu()\ny_multi = torch.tensor([[0, 1], [1, 0], [1, 0], [0, 1]], dtype=torch.float).cpu()\ntrain_data = TensorDataset(x, y)\ntrain_data_multi = TensorDataset(x, y_multi)\nx_sample = x[1]\ny_sample = y[1]\ny_sample_multi = y_multi[1].argmax()\n\n# Define loss and metrics\nloss = torch.nn.BCEWithLogitsLoss()\nmetric = Accuracy()\n\n# Define epochs and learning rate\nepochs = 1000\nl_r = 0.1\n\n# Network structures\nn_features = 2\nhidden_neurons = [10, 4]\n\n\nclass TestModels(unittest.TestCase):\n def test_1_relu_nn(self):\n set_seed(0)\n l1_weight_relu = 1e-3\n\n model = XReluNN(n_classes=1, n_features=n_features, hidden_neurons=hidden_neurons, loss=loss,\n l1_weight=l1_weight_relu)\n\n results = model.fit(train_data, train_data, epochs=epochs, l_r=l_r, metric=metric, save=False)\n assert results.shape == (epochs, 4)\n\n accuracy = model.evaluate(train_data, metric=metric)\n assert accuracy == 100.0\n\n local_explanation = model.get_local_explanation(x, y, x_sample, target_class=y_sample)\n print(local_explanation)\n assert local_explanation == '~feature0000000000 & feature0000000001'\n\n global_explanation = model.get_global_explanation(x, y, target_class=y_sample)\n print(global_explanation)\n assert global_explanation == '(feature0000000000 & ~feature0000000001) | ' \\\n '(feature0000000001 & ~feature0000000000)'\n\n # Test with multiple targets\n model = XReluNN(n_classes=2, n_features=n_features, hidden_neurons=hidden_neurons, loss=loss,\n l1_weight=l1_weight_relu)\n\n results = model.fit(train_data_multi, train_data_multi, epochs=epochs, l_r=l_r, metric=metric, save=False)\n assert results.shape == (epochs, 4)\n\n accuracy = model.evaluate(train_data_multi, metric=metric)\n assert accuracy == 100.0\n print(accuracy)\n\n local_explanation = model.get_local_explanation(x, y_multi, x_sample, target_class=y_sample_multi)\n print(local_explanation)\n assert local_explanation == '~feature0000000000 & feature0000000001'\n\n global_explanation = model.get_global_explanation(x, y_multi, target_class=y_sample_multi)\n print(global_explanation)\n assert global_explanation == '(feature0000000000 & ~feature0000000001) | ' \\\n '(feature0000000001 & ~feature0000000000)'\n\n return\n\n def test_2_psi_nn(self):\n set_seed(0)\n l1_weight_psi = 1e-4\n\n model = PsiNetwork(n_classes=1, n_features=n_features, hidden_neurons=hidden_neurons, loss=loss,\n l1_weight=l1_weight_psi, fan_in=2)\n\n results = model.fit(train_data, train_data, epochs=epochs, l_r=l_r, metric=metric, save=False)\n assert results.shape == (epochs, 4)\n\n accuracy = model.evaluate(train_data, metric=metric)\n assert accuracy == 100.0\n\n explanation = model.get_global_explanation(target_class=y_sample)\n print(explanation)\n assert explanation == '((feature0000000000 & ~feature0000000001) | ' \\\n '(feature0000000001 & ~feature0000000000))'\n\n set_seed(0)\n model = PsiNetwork(n_classes=2, n_features=n_features, 
hidden_neurons=hidden_neurons, loss=loss,\n l1_weight=l1_weight_psi)\n\n results = model.fit(train_data_multi, train_data_multi, epochs=epochs, l_r=l_r, metric=metric,\n save=False)\n assert results.shape == (epochs, 4)\n\n accuracy = model.evaluate(train_data_multi, metric=metric)\n print(accuracy)\n assert accuracy == 100.0\n\n explanation = model.get_global_explanation(target_class=y_sample_multi)\n print(explanation)\n assert explanation == '((feature0000000000 & ~feature0000000001) | ' \\\n '(feature0000000001 & ~feature0000000000))'\n return\n\n def test_3_general_nn(self):\n set_seed(0)\n l1_weight_general = 1e-3\n\n model = XGeneralNN(n_classes=1, n_features=n_features, hidden_neurons=hidden_neurons, loss=loss,\n l1_weight=l1_weight_general)\n\n results = model.fit(train_data, train_data, epochs=epochs, l_r=l_r, metric=metric, save=False,\n early_stopping=False)\n assert results.shape == (epochs, 4)\n\n accuracy = model.evaluate(train_data, metric=metric)\n assert accuracy == 100.0\n\n local_explanation = model.get_local_explanation(x, y, x_sample, target_class=y_sample)\n print(local_explanation)\n assert local_explanation == '~feature0000000000 & feature0000000001'\n\n global_explanation = model.get_global_explanation(x, y, target_class=y_sample)\n print(global_explanation)\n assert global_explanation == '(feature0000000000 & ~feature0000000001) | ' \\\n '(feature0000000001 & ~feature0000000000)'\n\n # Test with multiple targets\n set_seed(0)\n model = XGeneralNN(n_classes=2, n_features=n_features, hidden_neurons=hidden_neurons, loss=loss,\n l1_weight=l1_weight_general)\n\n results = model.fit(train_data_multi, train_data_multi, epochs=epochs, l_r=l_r, metric=metric, save=False)\n assert results.shape == (epochs, 4)\n\n accuracy = model.evaluate(train_data_multi, metric=metric)\n assert accuracy == 100.0\n print(accuracy)\n\n local_explanation = model.get_local_explanation(x, y_multi, x_sample, target_class=y_sample_multi)\n print(local_explanation)\n assert local_explanation == '~feature0000000000 & feature0000000001'\n\n global_explanation = model.get_global_explanation(x, y_multi, target_class=y_sample_multi)\n print(global_explanation)\n assert global_explanation == '(feature0000000000 & ~feature0000000001) | ' \\\n '(feature0000000001 & ~feature0000000000)'\n return\n\n def test_4_tree(self):\n set_seed(0)\n\n model = XDecisionTreeClassifier(n_classes=1, n_features=n_features)\n\n results = model.fit(train_data, train_data, metric=metric, save=False)\n\n assert results.shape == (1, 4)\n\n accuracy = model.evaluate(train_data)\n\n assert accuracy == 100.0\n\n formula = model.get_global_explanation(class_to_explain=y_sample)\n print(formula)\n\n model = XDecisionTreeClassifier(n_classes=2, n_features=n_features)\n\n results = model.fit(train_data_multi, train_data_multi, metric=metric, save=False)\n\n assert results.shape == (1, 4)\n\n accuracy = model.evaluate(train_data_multi)\n\n assert accuracy == 100.0\n\n formula = model.get_global_explanation(class_to_explain=y_sample_multi)\n print(formula)\n return\n\n def test_5_brl(self):\n from sklearn.preprocessing import MinMaxScaler\n from lens.logic import test_explanation\n from lens.utils.data import clean_names\n\n set_seed(0)\n\n iris = datasets.load_iris()\n x_brl = MinMaxScaler().fit_transform(iris.data)\n x_brl = torch.FloatTensor(x_brl)\n\n y_brl = torch.FloatTensor(iris.target == 2)\n y_multi_brl = LabelBinarizer().fit_transform(iris.target)\n y_multi_brl = torch.FloatTensor(y_multi_brl)\n train_data_brl = 
TensorDataset(x_brl, y_brl)\n train_data_multi_brl = TensorDataset(x_brl, y_multi_brl)\n y_sample_multi_brl = y_multi_brl[100].argmax()\n feature_names = iris.feature_names\n feature_names = clean_names(feature_names)\n class_names = iris.target_names\n\n model = XBRLClassifier(n_classes=1, n_features=n_features, feature_names=feature_names,\n class_names=class_names, discretize=True, name=\"brl_single\")\n\n results = model.fit(train_data_brl, metric=metric, save=False)\n assert results.shape == (1, 4)\n\n model.save()\n model.load()\n\n accuracy = model.evaluate(train_data_brl)\n\n assert accuracy >= 70.0\n\n formula = model.get_global_explanation(class_to_explain=0)\n print(f\"{class_names[0]} <-> {formula}\")\n\n exp_accuracy, _ = test_explanation(formula, target_class=1, x=x_brl, y=y_brl, concept_names=feature_names)\n print(\"Formula accuracy\", exp_accuracy)\n\n model = XBRLClassifier(n_classes=len(class_names), n_features=n_features, feature_names=feature_names,\n class_names=class_names, discretize=True, name=\"brl_multi\")\n\n results = model.fit(train_data_multi_brl, metric=metric, save=False)\n assert results.shape == (1, 4)\n\n model.save()\n model.load()\n\n accuracy = model.evaluate(train_data_multi_brl)\n print(\"\")\n assert accuracy >= 70.0\n\n formula = model.get_global_explanation(class_to_explain=y_sample_multi_brl)\n print(formula)\n\n exp_accuracy, _ = test_explanation(formula, target_class=y_sample_multi_brl, x=x_brl, y=y_multi_brl, concept_names=feature_names)\n print(\"Formula accuracy\", exp_accuracy)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.hstack",
"pandas.read_csv",
"numpy.unique",
"sklearn.impute.SimpleImputer",
"sklearn.preprocessing.KBinsDiscretizer",
"torch.tensor",
"torch.FloatTensor",
"sklearn.preprocessing.MinMaxScaler"
],
[
"torch.utils.data.TensorDataset",
"sklearn.datasets.load_iris",
"torch.tensor",
"torch.nn.BCEWithLogitsLoss",
"torch.FloatTensor",
"sklearn.preprocessing.LabelBinarizer",
"sklearn.preprocessing.MinMaxScaler"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
carlobar/PYPOWER-Dynamics
|
[
"bda06f027fa2b2b184d5abed5bfaf00ce730b85c"
] |
[
"pydyn/events.py"
] |
[
"#!python3\n#\n# Copyright (C) 2014-2015 Julius Susanto. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n\"\"\"\nPYPOWER-Dynamics\nEvents Class\nSets up and handles events in the simulation\n\"\"\"\nfrom pdb import set_trace as bp\nimport numpy as np\nfrom pypower.idx_bus import BUS_I, BUS_TYPE, PD, QD, GS, BS, BUS_AREA, \\\n VM, VA, VMAX, VMIN, LAM_P, LAM_Q, MU_VMAX, MU_VMIN, REF\n\n\n\nclass events:\n def __init__(self, filename):\n self.event_stack = []\n self.parser(filename) \n \n def parser(self, filename):\n \"\"\"\n Parse an event file (*.evnt) and populate event stack\n \"\"\"\n f = open(filename, 'r')\n \n for line in f:\n if line[0] != '#' and line.strip() != '': # Ignore comments and blank lines\n tokens = line.strip().split(',')\n \n # Parse signal events\n if tokens[1].strip() in ['SIGNAL', 'FAULT', 'LOAD', 'STATE']:\n self.event_stack.append([float(tokens[0].strip()), tokens[1].strip(), tokens[2].strip(), tokens[3].strip(), tokens[4].strip()])\n \n elif tokens[1].strip() in ['CLEAR_FAULT', 'TRIP_BRANCH', 'DISABLE_BRANCH', 'ENABLE_BRANCH']:\n self.event_stack.append([float(tokens[0].strip()), tokens[1].strip(), tokens[2].strip()])\n\n elif tokens[1].strip() in ['PAUSE']:\n self.event_stack.append( [float(tokens[0].strip()), tokens[1].strip()] )\n \n\n elif tokens[1].strip() in ['DEBUG_C']:\n self.event_stack.append([float(tokens[0].strip()), tokens[1].strip(), tokens[2].strip()])\n\n\n f.close()\n \n def handle_events(self, t, elements, ppc, baseMVA):\n \"\"\"\n Checks and handles the event stack during a simulation time step\n \"\"\"\n refactorise = False\n \n if self.event_stack:\n if self.event_stack[0][0] < t:\n print('Event missed at t=' + str(self.event_stack[0][0]) + 's... Check simulation time step!')\n del self.event_stack[0]\n \n # Event exists at time step\n while self.event_stack and self.event_stack[0][0] == t:\n event_type = self.event_stack[0][1]\n \n # Handle signal events\n if event_type == 'SIGNAL':\n obj_id = self.event_stack[0][2]\n sig_id = self.event_stack[0][3]\n value = float(self.event_stack[0][4])\n elements[obj_id].signals[sig_id] = value\n \n print('SIGNAL event at t=' + str(t) + 's on element \"' + obj_id + '\". ' + sig_id + ' = ' + str(value) + '.')\n \n if event_type == 'STATE':\n obj_id = self.event_stack[0][2]\n sig_id = self.event_stack[0][3]\n value = float(self.event_stack[0][4])\n elements[obj_id].states[sig_id] = value\n \n print('STATE event at t=' + str(t) + 's on element \"' + obj_id + '\". 
' + sig_id + ' = ' + str(value) + '.')\n \n if event_type == 'FAULT':\n bus_id = int(self.event_stack[0][2])\n Rf = float(self.event_stack[0][3])\n Xf = float(self.event_stack[0][4])\n \n if Rf == 0:\n ppc[\"bus\"][bus_id, GS] = 1e6\n elif Rf < 0:\n ppc[\"bus\"][bus_id, GS] = 0\n Rf = 'Inf'\n else:\n ppc[\"bus\"][bus_id, GS] = 1 / Rf * baseMVA\n \n if Xf == 0:\n ppc[\"bus\"][bus_id, BS] = -1e6\n elif Xf < 0:\n ppc[\"bus\"][bus_id, BS] = 0\n Xf = 'Inf'\n else:\n ppc[\"bus\"][bus_id, BS] = -1 / Xf * baseMVA\n \n refactorise = True\n \n print('FAULT event at t=' + str(t) + 's on bus at row \"' + str(bus_id) + '\" with fault impedance Zf = ' + str(Rf) + ' + j' + str(Xf) + ' pu.')\n \n if event_type == 'CLEAR_FAULT':\n bus_id = int(self.event_stack[0][2])\n ppc[\"bus\"][bus_id, BS] = 0\n ppc[\"bus\"][bus_id, GS] = 0\n refactorise = True\n \n print('CLEAR_FAULT event at t=' + str(t) + 's on bus at row \"' + str(bus_id) + '\".')\n \n if event_type == 'TRIP_BRANCH':\n branch_id = int(self.event_stack[0][2])\n ppc[\"branch\"] = np.delete(ppc[\"branch\"],branch_id, 0)\n refactorise = True\n \n print('TRIP_BRANCH event at t=' + str(t) + 's on branch \"' + str(branch_id) + '\".')\n \n if event_type == 'DISABLE_BRANCH':\n branch_id = int(self.event_stack[0][2])\n ppc[\"branch\"][branch_id, 10] = 0\n refactorise = True\n \n print('DISABLE_BRANCH event at t=' + str(t) + 's on branch \"' + str(branch_id) + '\"...')\n print('... from node ' + str(ppc[\"branch\"][branch_id, 0]) + ' to node ' + str(ppc[\"branch\"][branch_id, 1]) +'.')\n #bp()\n \n if event_type == 'ENABLE_BRANCH':\n branch_id = int(self.event_stack[0][2])\n ppc[\"branch\"][branch_id, 10] = 1\n refactorise = True\n \n print('ENABLE_BRANCH event at t=' + str(t) + 's on branch \"' + str(branch_id) + '\".')\n \n if event_type == 'LOAD':\n bus_id = int(self.event_stack[0][2])\n Pl = float(self.event_stack[0][3])\n Ql = float(self.event_stack[0][4])\n\n print('LOAD event at t=' + str(t) + 's on bus at row \"' + str(bus_id) + '\".')\n print('\\tCurrent load: S = ' + str(ppc[\"bus\"][bus_id, PD]) + ' MW + j' + str(ppc[\"bus\"][bus_id, QD]) + ' MVAr.')\n \n ppc[\"bus\"][bus_id, PD] = Pl\n ppc[\"bus\"][bus_id, QD] = Ql\n \n refactorise = True\n \n print('\\tNew load: S = ' + str(Pl) + ' MW + j' + str(Ql) + ' MVAr.')\n \n if event_type == 'PAUSE':\n print('PAUSE event at t=' + str(t) + 's' )\n bp()\n\n if event_type == 'DEBUG_C':\n c_name = self.event_stack[0][2]\n print('DEBUG_C event at t=' + str(t) + 's on element ' + c_name )\n try:\n element = elements[c_name]\n except:\n print('Element '+c_name+\" doesn't exists\")\n bp()\n element.solve_step(0.001,0) \n\n\n del self.event_stack[0]\n \n return ppc, refactorise\n"
] |
[
[
"numpy.delete"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yaswanthpalaghat/Movie-Recommendation-System-using-Machine-Learning
|
[
"546914401c4fd2cbcf1d32de69e3f64995d95c25"
] |
[
"SentimentAnalysis.py"
] |
[
"__author__ = \"Yaswanth Sai Palaghat\"\r\n\r\nimport nltk\r\nimport sklearn\r\nfrom sklearn.datasets import load_files\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndef getSentiment(user_review):\r\n movie_train = load_files(r'F:\\movierec\\movie_reviews', shuffle=True)\r\n movie_vec = CountVectorizer(min_df=2, tokenizer=nltk.word_tokenize)\r\n movie_counts = movie_vec.fit_transform(movie_train.data)\r\n tfidf_transformer = TfidfTransformer()\r\n movie_tfidf = tfidf_transformer.fit_transform(movie_counts)\r\n docs_train, docs_test, y_train, y_test = train_test_split(movie_tfidf, movie_train.target, test_size = 0.2, random_state = 12)\r\n clf = MultinomialNB().fit(docs_train, y_train)\r\n reviews_new = [user_review]\r\n reviews_new_counts = movie_vec.transform(reviews_new)\r\n reviews_new_tfidf = tfidf_transformer.transform(reviews_new_counts)\r\n pred = clf.predict(reviews_new_tfidf)\r\n for review, category in zip(reviews_new, pred):\r\n result = movie_train.target_names[category]\r\n if result == 'positive':\r\n return 1\r\n elif result == 'negative':\r\n return 0\r\n else:\r\n return -1\r\n"
] |
[
[
"sklearn.naive_bayes.MultinomialNB",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_files",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.feature_extraction.text.TfidfTransformer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wdwang/pyABC
|
[
"65f85d97f20ac47df6a6d95cb373adba35738f57",
"65f85d97f20ac47df6a6d95cb373adba35738f57",
"65f85d97f20ac47df6a6d95cb373adba35738f57"
] |
[
"pyabc/transition/multivariatenormal.py",
"pyabc/populationstrategy.py",
"pyabc/visualization.py"
] |
[
"from typing import Union\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as st\nfrom .exceptions import NotEnoughParticles\nfrom .base import Transition\nfrom .util import smart_cov\n\n\ndef scott_rule_of_thumb(n_samples, dimension):\n \"\"\"\n Scott's rule of thumb.\n\n .. math::\n\n \\\\left ( \\\\frac{1}{n} \\\\right ) ^{\\\\frac{1}{d+4}}\n\n (see also scipy.stats.kde.gaussian_kde.scotts_factor)\n \"\"\"\n return n_samples ** (-1. / (dimension + 4))\n\n\ndef silverman_rule_of_thumb(n_samples, dimension):\n \"\"\"\n Silverman's rule of thumb.\n\n .. math::\n\n \\\\left ( \\\\frac{4}{n (d+2)} \\\\right ) ^ {\\\\frac{1}{d + 4}}\n\n (see also scipy.stats.kde.gaussian_kde.silverman_factor)\n \"\"\"\n return (4 / n_samples / (dimension + 2)) ** (1 / (dimension + 4))\n\n\nclass MultivariateNormalTransition(Transition):\n \"\"\"\n Transition via a multivariate Gaussian KDE estimate.\n\n Parameters\n ----------\n\n scaling: float\n Scaling is a factor which additionally multiplies the\n covariance with. Since Silverman and Scott usually have too large\n bandwidths, it should make most sense to have 0 < scaling <= 1\n\n bandwidth_selector: optional\n Defaults to `silverman_rule_of_thumb`.\n The bandwidth selector is a function of the form\n f(n_samples: float, dimension: int),\n where n_samples denotes the (effective) samples size (and is therefore)\n a float and dimension is the parameter dimension.\n\n \"\"\"\n def __init__(self, scaling=1, bandwidth_selector=silverman_rule_of_thumb):\n self.scaling = scaling\n self.bandwidth_selector = bandwidth_selector\n\n def fit(self, X: pd.DataFrame, w: np.ndarray):\n if len(X) == 0:\n raise NotEnoughParticles(\"Fitting not possible.\")\n self._X_arr = X.as_matrix()\n sample_cov = smart_cov(self._X_arr, w)\n dim = sample_cov.shape[0]\n eff_sample_size = 1 / (w**2).sum()\n bw_factor = self.bandwidth_selector(eff_sample_size, dim)\n self.cov = sample_cov * bw_factor**2 * self.scaling\n self.normal = st.multivariate_normal(cov=self.cov, allow_singular=True)\n\n def rvs_single(self):\n sample = self.X.sample(weights=self.w).iloc[0]\n perturbed = (sample +\n np.random.multivariate_normal(\n np.zeros(self.cov.shape[0]), self.cov))\n return perturbed\n\n def pdf(self, x: Union[pd.Series, pd.DataFrame]):\n x = x[self.X.columns]\n x = np.array(x)\n if len(x.shape) == 1:\n x = x[None, :]\n dens = np.array([(self.normal.pdf(xs - self._X_arr) * self.w).sum()\n for xs in x])\n return dens if dens.size != 1 else float(dens)\n",
"\"\"\"\nPopulation strategy\n===================\n\nStrategies to choose the population size.\n\nThe population size can be constant or can change over the course\nof the generations.\n\"\"\"\n\nimport json\nimport logging\n\nimport numpy as np\nfrom typing import List\n\nfrom pyabc.cv.bootstrap import calc_cv\nfrom .transition import Transition\nfrom .transition.predict_population_size import predict_population_size\n\nadaptation_logger = logging.getLogger(\"Adaptation\")\n\n\nclass PopulationStrategy:\n \"\"\"\n Strategy to select the sizes of the populations.\n\n This is a non-functional abstract base implementation. Do not use this\n class directly. Subclasses must override the `adapt_population_size`\n method.\n\n Parameters\n ----------\n\n nr_particles: int\n Number of particles per populations\n\n nr_samples_per_parameter: int, optional\n Number of samples to draw for a proposed parameter.\n Default is 1.\n \"\"\"\n\n def __init__(self, nr_particles: int, *, nr_samples_per_parameter: int=1):\n self.nr_particles = nr_particles\n self.nr_samples_per_parameter = nr_samples_per_parameter\n\n def adapt_population_size(self, transitions: List[Transition],\n model_weights: np.ndarray):\n \"\"\"\n Select the population size for the next population.\n\n Parameters\n ----------\n transitions: List of Transitions\n model_weights: array of model weights\n\n Returns\n -------\n n: int\n The new population size\n \"\"\"\n raise NotImplementedError\n\n def get_config(self):\n \"\"\"\n Get the configuration of this object.\n\n Returns\n -------\n dict\n Configuration of the class as dictionary\n \"\"\"\n return {\"name\": self.__class__.__name__,\n \"nr_particles\": self.nr_particles}\n\n def to_json(self):\n \"\"\"\n Return the configuration as json string.\n Per default, this converts the dictionary returned\n by get_config to json.\n\n Returns\n -------\n\n str\n Configuration of the class as json string.\n \"\"\"\n return json.dumps(self.get_config())\n\n\nclass ConstantPopulationSize(PopulationStrategy):\n \"\"\"\n Constant size of the different populations\n\n Parameters\n ----------\n\n nr_particles: int\n Number of particles per populations\n\n nr_samples_per_parameter: int\n Number of samples to draw for a proposed parameter\n \"\"\"\n\n def adapt_population_size(self, transitions, model_weights):\n pass\n\n\nclass AdaptivePopulationSize(PopulationStrategy):\n \"\"\"\n Adapt the population size according to the mean coefficient of variation\n error criterion, as detailed in [#klingerhasenaueradaptive]_.\n This strategy tries to respond to the shape of the\n current posterior approximation by selecting the population size such\n that the variation of the density estimates matches the target\n variation given via the mean_cv argument.\n\n Parameters\n ----------\n\n start_nr_particles: int\n Number of particles in the first populations\n\n mean_cv: float, optional\n The error criterion. Defaults to 0.05.\n A smaller value leads generally to larger populations.\n The error criterion is the mean coefficient of variation of\n the estimated KDE.\n\n max_population_size: int, optional\n Max nr of allowed particles in a population.\n Defaults to infinity.\n\n min_population_size: int, optional\n Min number of particles allowed in a population.\n Defaults to 10\n\n nr_samples_per_parameter: int, optional\n Defaults to 1.\n\n n_bootstrap: int, optional\n Number of bootstrapped populations to use to estimate the CV.\n Defaults to 10.\n\n\n\n .. 
[#klingerhasenaueradaptive] Klinger, Emmanuel, and Jan Hasenauer.\n “A Scheme for Adaptive Selection of Population Sizes in\n Approximate Bayesian Computation - Sequential Monte Carlo.”\n Computational Methods in Systems Biology, 128–44.\n Lecture Notes in Computer Science.\n Springer, Cham, 2017.\n https://doi.org/10.1007/978-3-319-67471-1_8.\n \"\"\"\n\n def __init__(self, start_nr_particles, mean_cv=0.05,\n *,\n max_population_size=float(\"inf\"),\n min_population_size=10,\n nr_samples_per_parameter=1,\n n_bootstrap=10):\n super().__init__(start_nr_particles,\n nr_samples_per_parameter=nr_samples_per_parameter)\n self.max_population_size = max_population_size\n self.min_population_size = min_population_size\n self.mean_cv = mean_cv\n self.n_bootstrap = n_bootstrap\n\n def get_config(self):\n return {\"name\": self.__class__.__name__,\n \"max_population_size\": self.max_population_size,\n \"mean_cv\": self.mean_cv}\n\n def adapt_population_size(self, transitions: List[Transition],\n model_weights: np.ndarray):\n test_X = [trans.X for trans in transitions]\n test_w = [trans.w for trans in transitions]\n\n reference_nr_part = self.nr_particles\n target_cv = self.mean_cv\n cv_estimate = predict_population_size(\n reference_nr_part, target_cv,\n lambda nr_particles: calc_cv(nr_particles, model_weights,\n self.n_bootstrap, test_w, transitions,\n test_X)[0])\n\n if not np.isnan(cv_estimate.n_estimated):\n self.nr_particles = max(min(int(cv_estimate.n_estimated),\n self.max_population_size),\n self.min_population_size)\n\n adaptation_logger.info(\"Change nr particles {} -> {}\"\n .format(reference_nr_part, self.nr_particles))\n",
"\"\"\"\nVisualizations\n--------------\n\nHelper functions to visualize results of ABCSMC runs.\n\"\"\"\nimport numpy as np\nfrom .transition import MultivariateNormalTransition, silverman_rule_of_thumb\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\n\ndef kde_1d(df, w, x, xmin=None, xmax=None, numx=50):\n \"\"\"\n Calculates a 1 dimensional histogram from a Dataframe and weights.\n\n For example, a results distribution might be obtained from the history\n class and plotted as follows::\n\n df, w = history.get_distribution(0)\n x, pdf = hist_2d(df, w, \"x\")\n plt.plot(x, pdf)\n\n\n Parameters\n ----------\n df: Pandas Dataframe\n The rows are the observations, the columns the variables\n w: np.ndarray\n The corresponding weights\n x: str\n The variable for the x-axis\n xmin: float, optional\n The lower limit in x for the histogram.\n If left empty, it is set to the minimum of the ovbservations of the\n variable to be plotted as x.\n xmax: float, optional\n The upper limit in x for the histogram.\n If left empty, it is set to the maximum of the ovbservations of the\n variable to be plotted as x.\n numx: int, optional\n The number of bins in x direction.\n Defaults to 50.\n\n Returns\n -------\n\n x, pdf: (np.ndarray, np.ndarray)\n The x and the densities at these points.\n These can be passed for plotting, for example as\n plt.plot(x, pdf)\n\n \"\"\"\n kde = MultivariateNormalTransition(\n scaling=1,\n bandwidth_selector=silverman_rule_of_thumb)\n kde.fit(df[[x]], w)\n if xmin is None:\n xmin = df[x].min()\n if xmax is None:\n xmax = df[x].max()\n x_vals = np.linspace(xmin, xmax, num=numx)\n test = pd.DataFrame({x: x_vals})\n pdf = kde.pdf(test)\n return x_vals, pdf\n\n\ndef plot_kde_1d(df, w, x, xmin=None, xmax=None,\n numx=50, ax=None,\n refval=None, **kwargs):\n \"\"\"\n Plots a 1d histogram.\n\n Parameters\n ----------\n df: Pandas Dataframe\n The rows are the observations, the columns the variables\n w: The corresponding weights\n x: str\n The variable for the x-axis\n\n xmin: float, optional\n The lower limit in x for the histogram.\n If left empty, it is set to the minimum of the ovbservations of the\n variable to be plotted as x.\n xmax: float, optional\n The upper limit in x for the histogram.\n If left empty, it is set to the maximum of the ovbservations of the\n variable to be plotted as x.\n numx: int, optional\n The number of bins in x direction.\n Defaults tp 50.\n refval: dict, optional\n A reference value for x (as refval[x]: float).\n If not None, the value will be highlighted in the plot.\n Default: None.\n\n Returns\n -------\n\n ax: matplotlib axis\n axis of the plot\n\n \"\"\"\n x_vals, pdf = kde_1d(df, w, x, xmin=xmin, xmax=xmax, numx=numx)\n if ax is None:\n fig, ax = plt.subplots()\n ax.plot(x_vals, pdf, **kwargs)\n ax.set_xlabel(x)\n ax.set_ylabel(\"Posterior\")\n if refval is not None:\n ax.axvline(refval[x], color='C1', linestyle='dashed')\n return ax\n\n\ndef kde_2d(df, w, x, y, xmin=None, xmax=None, ymin=None, ymax=None,\n numx=50, numy=50):\n \"\"\"\n Calculates a 2 dimensional histogram from a Dataframe and weights.\n\n For example, a results distribution might be obtained from the history\n class and plotted as follows::\n\n df, w = history.get_distribution(0)\n X, Y, PDF = hist_2d(df, w, \"x\", \"y\")\n plt.pcolormesh(X, Y, PDF)\n\n\n Parameters\n ----------\n df: Pandas Dataframe\n The rows are the observations, the columns the variables\n w: The corresponding weights\n x: str\n The variable for the x-axis\n y: str\n The 
variable for the y-axis\n xmin: float, optional\n The lower limit in x for the histogram.\n If left empty, it is set to the minimum of the ovbservations of the\n variable to be plotted as x.\n xmax: float, optional\n The upper limit in x for the histogram.\n If left empty, it is set to the maximum of the ovbservations of the\n variable to be plotted as x.\n ymin: float, optional\n The lower limit in y for the histogram.\n If left empty, it is set to the minimum of the ovbservations of the\n variable to be plotted as y\n ymax: float, optional\n The upper limit in y for the histogram.\n If left empty, it is set to the maximum of the ovbservations of the\n variable to be plotted as y.\n numx: int, optional\n The number of bins in x direction.\n Defaults tp 50.\n numy int, optional\n The number of bins in y direction.\n Defaults tp 50.\n\n Returns\n -------\n\n X, Y, PDF: (np.ndarray, np.ndarray, np.ndarray)\n The X, the Y and the densities at these points.\n These can be passed for plotting, for example as\n plt.pcolormesh(X, Y, PDF)\n\n \"\"\"\n kde = MultivariateNormalTransition(\n scaling=1,\n bandwidth_selector=silverman_rule_of_thumb)\n kde.fit(df[[x, y]], w)\n if xmin is None:\n xmin = df[x].min()\n if xmax is None:\n xmax = df[x].max()\n if ymin is None:\n ymin = df[y].min()\n if ymax is None:\n ymax = df[y].max()\n X, Y = np.meshgrid(np.linspace(xmin, xmax, num=numx),\n np.linspace(ymin, ymax, num=numy))\n test = pd.DataFrame({x: X.flatten(), y: Y.flatten()})\n pdf = kde.pdf(test)\n PDF = pdf.reshape(X.shape)\n return X, Y, PDF\n\n\ndef plot_kde_2d(df, w, x, y, xmin=None, xmax=None, ymin=None, ymax=None,\n numx=50, numy=50, ax=None, colorbar=True,\n title=True, refval=None, **kwargs):\n \"\"\"\n Plots a 2d histogram.\n\n Parameters\n ----------\n df: Pandas Dataframe\n The rows are the observations, the columns the variables\n w: The corresponding weights.\n x: str\n The variable for the x-axis.\n y: str\n The variable for the y-axis.\n xmin: float, optional\n The lower limit in x for the histogram.\n If left empty, it is set to the minimum of the ovbservations of the\n variable to be plotted as x.\n xmax: float, optional\n The upper limit in x for the histogram.\n If left empty, it is set to the maximum of the ovbservations of the\n variable to be plotted as x.\n ymin: float, optional\n The lower limit in y for the histogram.\n If left empty, it is set to the minimum of the ovbservations of the\n variable to be plotted as y.\n ymax: float, optional\n The upper limit in y for the histogram.\n If left empty, it is set to the maximum of the ovbservations of the\n variable to be plotted as y.\n numx: int, optional\n The number of bins in x direction.\n Defaults to 50.\n numy int, optional\n The number of bins in y direction.\n Defaults tp 50.\n colorbar: bool, optional\n Whether to plot a colorbar. Defaults to True.\n title: bool, optional\n Whether to put a title on the plot. Defaults to True.\n refval: dict, optional\n A reference parameter to be shown in the plots. 
Default: None.\n\n Returns\n -------\n\n ax: matplotlib axis\n axis of the plot\n\n \"\"\"\n X, Y, PDF = kde_2d(df, w, x, y,\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax, numx=numx, numy=numy)\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.figure\n mesh = ax.pcolormesh(X, Y, PDF, **kwargs)\n ax.set_xlabel(x)\n ax.set_ylabel(y)\n if title:\n ax.set_title(\"Posterior\")\n if colorbar:\n cbar = fig.colorbar(mesh)\n cbar.set_label(\"PDF\")\n if refval is not None:\n ax.scatter([refval[x]], [refval[y]], color='C1')\n return ax\n\n\ndef plot_kde_matrix(df, w, limits=None, colorbar=True, refval=None):\n \"\"\"\n Plot a KDE matrix.\n\n Parameters\n ----------\n\n df: Pandas Dataframe\n The rows are the observations, the columns the variables.\n w: np.narray\n The corresponding weights.\n colorbar: bool\n Whether to plot the colorbars or not.\n limits: dictionary, optional\n Dictionary of the form ``{\"name\": (lower_limit, upper_limit)}``.\n refval: dict, optional\n A reference parameter to be shown in the plots (e.g. the\n underlying ground truth parameter used to simulate the data\n for testing purposes). Default: None.\n \"\"\"\n grid = sns.PairGrid(df, diag_sharey=False)\n if limits is None:\n limits = {}\n\n default = (None, None)\n\n def off_diagonal(x, y, **kwargs):\n df = pd.concat((x, y), axis=1)\n plot_kde_2d(df, w,\n x.name, y.name,\n xmin=limits.get(x.name, default)[0],\n xmax=limits.get(x.name, default)[1],\n ymin=limits.get(y.name, default)[0],\n ymax=limits.get(y.name, default)[1],\n ax=plt.gca(), title=False, colorbar=colorbar,\n refval=refval)\n\n def scatter(x, y, **kwargs):\n alpha = w / w.max()\n colors = np.zeros((alpha.size, 4))\n colors[:, 3] = alpha\n plt.gca().scatter(x, y, color=\"k\")\n if refval is not None:\n plt.gca().scatter([refval[x.name]], [refval[y.name]], color='C1')\n plt.gca().set_xlim(*limits.get(x.name, default))\n plt.gca().set_ylim(*limits.get(y.name, default))\n\n def diagonal(x, **kwargs):\n df = pd.concat((x,), axis=1)\n plot_kde_1d(df, w, x.name,\n xmin=limits.get(x.name, default)[0],\n xmax=limits.get(x.name, default)[1],\n ax=plt.gca(), refval=refval)\n\n grid.map_diag(diagonal)\n grid.map_upper(scatter)\n grid.map_lower(off_diagonal)\n return grid\n"
] |
[
[
"numpy.array",
"scipy.stats.multivariate_normal",
"numpy.zeros"
],
[
"numpy.isnan"
],
[
"matplotlib.pyplot.gca",
"pandas.concat",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
XGX-CURRY/LDP-Bandit
|
[
"b04f84418d39fa9e0308f6fbb42a4ec236118061"
] |
[
"Scheme1/plotters/plotters.py"
] |
[
"import glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport tikzplotlib\n\nsns.set_style(\"ticks\")\nplt.rc('font', size=6)\nplt.rc('text', usetex=False)\nplt.rc('font', family='serif')\n\ndef smoother(x, a=0.9, w=10, mode=\"moving\"):\n if mode == \"moving\":\n y = [x[0]]\n for i in range(1, len(x)):\n y.append((1 - a) * x[i] + a * y[i - 1])\n elif mode == \"window\":\n y = []\n for i in range(len(x)):\n y.append(np.mean(x[max(i - w, 0):i + 1]))\n return y\n\ndef plot_curve(\n ax,\n datas,\n freq=1000,\n label=None,\n feature=None,\n color=\"black\",\n smooth_coef=0.95,\n shaded_err=False,\n shaded_std=True,\n shared_area=0.5,\n **plot_kwargs,\n):\n x = datas[0]['time'].tolist()[::freq]\n ys = [np.asarray(data[feature].tolist()[::freq]) for data in datas]\n y_mean = np.mean(ys, axis=0)\n if label is None:\n lin = ax.plot(x, y_mean, color=color, **plot_kwargs)\n else:\n lin = ax.plot(x, y_mean, label=label, color=color, **plot_kwargs)\n if len(ys) > 1:\n y_std = np.std(ys, axis=0) * shared_area\n y_stderr = y_std / np.sqrt(len(ys))\n if shaded_err:\n ax.fill_between(x, y_mean - y_stderr, y_mean + y_stderr, color=color, alpha=.4)\n if shaded_std:\n ax.fill_between(x, y_mean - y_std, y_mean + y_std, color=color, alpha=.2)\n return lin\n\ndef parse_dir(repo):\n n = pd.read_csv(list(glob.glob(repo+'/*.csv'))[0]).shape[0]\n settings_dict = dict()\n settings_dict['exp'] = list()\n for data in glob.glob(repo+'/*.csv'):\n settings_dict['exp'].append(data.split('|')[0])\n for item in data.split('|')[1:-1]:\n key, value = item.split('=')\n if not (key in settings_dict):\n settings_dict[key] = list()\n if key == 'reward':\n settings_dict[key].append(value)\n else:\n settings_dict[key].append(int(value))\n for key in settings_dict:\n settings_dict[key] = sorted(list(set(settings_dict[key])))\n\n return n, settings_dict\n\ndef single_exp_plot(repo):\n COLORS = sns.color_palette(\"tab10\")\n\n label = 'cum_regrets' #'cum_regrets' #'estimation_error'\n\n fig = plt.figure(dpi=300)\n fig.set_size_inches(8, 1.5)\n fig.subplots_adjust(hspace=0.4)\n n, settings_dict = parse_dir(repo)\n freq = int(n/20)\n for j, eps in enumerate(settings_dict['eps']):\n ax = fig.add_subplot(1, len(settings_dict['eps']), j + 1)\n for i, exp in enumerate(settings_dict['exp']): \n datas = [pd.read_csv(data) for data in glob.glob(exp + '|*='+str(eps)+'|*=*'+'.csv')]\n plot_curve(\n ax, \n datas,\n color = COLORS[i],\n label = exp.split('/')[-1], \n feature = label, \n freq = freq,\n markersize = 1\n )\n ax.set_title(f\"Privacy Epsilon={eps}\")\n ax.set_xlabel(\"Timestep\")\n if j == 0:\n ax.set_ylabel(\"Cumulative Regret\")\n if j == len(settings_dict['eps']) - 1:\n ax.legend(loc=\"best\", frameon=False)\n sns.despine()\n # plt.show()\n tikzplotlib.save(repo + \"/SingleRegret.tex\")\n fig.savefig(repo + '/SingleRegret.pdf', dpi=300, bbox_inches='tight')\n\ndef glm_exp_plot(repo):\n COLORS = sns.color_palette(\"tab10\")\n\n label = 'cum_regrets' #'cum_regrets' #'estimation_error'\n\n fig = plt.figure(dpi=300)\n fig.set_size_inches(8, 3)\n fig.subplots_adjust(hspace=0.4)\n n, settings_dict = parse_dir(repo)\n freq = int(n/20)\n rewards = settings_dict['reward']\n epss = settings_dict['eps']\n print(epss)\n for k, reward in enumerate(rewards):\n for j, eps in enumerate(epss):\n ax = fig.add_subplot(2, len(epss), k*len(epss) + (j + 1))\n for i, exp in enumerate(settings_dict['exp']): \n datas = [pd.read_csv(data) for data in glob.glob(exp + '|*='+str(eps)+ '|*='+str(reward) + 
'|*=*'+'.csv')]\n plot_curve(\n ax, \n datas,\n color = COLORS[i],\n label = exp.split('/')[-1], \n feature = label,\n freq = freq,\n markersize = 1\n )\n ax.set_title(f\"Reward={reward}/Epsilon={eps}\")\n ax.set_xlabel(\"Timestep\")\n if j == 0:\n ax.set_ylabel(\"Cumulative Regret\")\n if j == len(settings_dict['eps']) - 1:\n ax.legend(loc=\"best\", frameon=False)\n plt.subplots_adjust(wspace = 0.3, hspace = 0.8)\n sns.despine()\n # plt.show()\n tikzplotlib.save(repo + \"/GeneralizedLinearRegret.tex\")\n fig.savefig(repo + '/GeneralizedLinearRegret.pdf', dpi=300, bbox_inches='tight')\n \ndef crpm_exp_plot(repo):\n COLORS = sns.color_palette(\"tab10\")\n\n label = 'cum_regrets' #'cum_regrets' #'estimation_error'\n\n fig = plt.figure(dpi=300)\n fig.set_size_inches(8, 1.5)\n fig.subplots_adjust(hspace=0.4)\n n, settings_dict = parse_dir(repo)\n freq = int(n/20)\n for j, eps in enumerate(settings_dict['eps']):\n ax = fig.add_subplot(1, len(settings_dict['eps']), j + 1)\n for i, exp in enumerate(settings_dict['exp']): \n datas = [pd.read_csv(data) for data in glob.glob(exp + '|*='+str(eps)+'|*=*'+'.csv')]\n plot_curve(\n ax, \n datas,\n color = COLORS[i],\n label = exp.split('/')[-1], \n feature = label, \n freq = freq,\n markersize = 1\n )\n ax.set_title(f\"Privacy Epsilon={eps}\")\n ax.set_xlabel(\"Timestep\")\n if j == 0:\n ax.set_ylabel(\"Cumulative Regret\")\n if j == len(settings_dict['eps']) - 1:\n ax.legend(loc=\"best\", frameon=False)\n sns.despine()\n # plt.show()\n tikzplotlib.save(repo + \"/CrpmRegret.tex\")\n fig.savefig(repo + '/CrpmRegret.pdf', dpi=300, bbox_inches='tight')\n\ndef multiparam_exp_plot(repo, title):\n COLORS = sns.color_palette(\"tab10\")\n\n label = 'cum_regrets' #'cum_regrets' #'estimation_error'\n\n fig = plt.figure(dpi=300)\n fig.set_size_inches(8, 3)\n fig.subplots_adjust(hspace=0.4)\n n, settings_dict = parse_dir(repo)\n freq = int(n/20)\n n_actions = settings_dict['n_action']\n epss = settings_dict['eps']\n for k, n_action in enumerate(n_actions):\n for j, eps in enumerate(epss):\n ax = fig.add_subplot(2, len(epss), k*(len(n_actions) + 1) + (j + 1))\n for i, exp in enumerate(settings_dict['exp']): \n datas = [pd.read_csv(data) for data in glob.glob(exp + '|*='+str(eps)+ '|*='+str(n_action) + '|*=*'+'.csv')]\n plot_curve(\n ax, \n datas,\n color = COLORS[i],\n label = 'LDP-SGD', \n feature = 'sgdr', \n freq = freq,\n markersize = 1\n )\n plot_curve(\n ax, \n datas,\n color = COLORS[i+1],\n label = 'LDP-OLS', \n feature = 'olsr', \n freq = freq,\n markersize = 1\n )\n plot_curve(\n ax, \n datas,\n color = COLORS[i+2],\n label = 'LDP-UCB', \n feature = 'ucbr', \n freq = freq,\n markersize = 1\n )\n plot_curve(\n ax, \n datas,\n color = COLORS[i+3],\n label = 'LDP-GLOC', \n feature = 'glocr', \n freq = freq,\n markersize = 1\n )\n ax.set_title(f\"Epsilon={eps}\")\n ax.set_xlabel(\"Timestep\")\n if j == 0:\n ax.set_ylabel(\"Cumulative Regret\")\n if j == len(settings_dict['eps']) - 1:\n ax.legend(loc=\"best\", frameon=False)\n plt.subplots_adjust(wspace = 0.3, hspace = 0.8)\n sns.despine()\n # plt.show()\n tikzplotlib.save(repo + \"/\" + title + \".tex\")\n fig.savefig(repo + \"/\" + title + \".pdf\", dpi=300, bbox_inches='tight')\n\ndef ees_exp_plot(repo, title):\n COLORS = sns.color_palette(\"tab10\")\n\n label = 'cum_regrets' #'cum_regrets' #'estimation_error'\n\n fig = plt.figure(dpi=300)\n fig.set_size_inches(8, 3)\n fig.subplots_adjust(hspace=0.4)\n n, settings_dict = parse_dir(repo)\n freq = int(n/20)\n n_actions = settings_dict['n_action']\n epss = 
settings_dict['eps']\n for k, n_action in enumerate(n_actions):\n for j, eps in enumerate(epss):\n ax = fig.add_subplot(2, len(epss), k*(len(n_actions) + 1) + (j + 1))\n for i, exp in enumerate(settings_dict['exp']): \n datas = [pd.read_csv(data) for data in glob.glob(exp + '|*='+str(eps)+ '|*='+str(n_action) + '|*=*'+'.csv')]\n plot_curve(\n ax, \n datas,\n color = COLORS[i],\n label = 'LDP-SGD', \n feature = 'sgdr', \n freq = freq,\n markersize = 1\n )\n plot_curve(\n ax, \n datas,\n color = COLORS[i+1],\n label = 'LDP-OLS', \n feature = 'olsr', \n freq = freq,\n markersize = 1\n )\n plot_curve(\n ax, \n datas,\n color = COLORS[i+2],\n label = 'LDP-UCB', \n feature = 'ucbr', \n freq = freq,\n markersize = 1\n )\n ax.set_title(f\"Epsilon={eps}\")\n ax.set_xlabel(\"Timestep\")\n if j == 0:\n ax.set_ylabel(\"Cumulative Regret\")\n if j == len(settings_dict['eps']) - 1:\n ax.legend(loc=\"best\", frameon=False)\n plt.subplots_adjust(wspace = 0.3, hspace = 0.8)\n sns.despine()\n # plt.show()\n tikzplotlib.save(repo + \"/\" + title + \".tex\")\n fig.savefig(repo + \"/\" + title + \".pdf\", dpi=300, bbox_inches='tight')"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.rc",
"numpy.std",
"numpy.mean",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
dstallenberg/In-Phase
|
[
"21ddfd089b6e4111cc54ae00607aea01fdc94329"
] |
[
"src/quantum_phase_estimation/plot_results.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 10 13:54:18 2020\n\n@author: Mio\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import OrderedDict\n\n\ndef plot_results(result, nancillas, qubits, p_succes):\n\tif result['raw_text'] and len(result['raw_text']) > 1:\n\t\traise Exception(result['raw_text'])\n\t\n\tprint(\"Given the desired bit accuracy and probability of succes, {0} ancillas will be used resulting in a {1:0.3f} probability of succes.\".format(nancillas, p_succes))\n\tbinary_keys = np.array([str(bin(int(k)))[2::].rjust(nancillas, '0')[-nancillas::][::-1] for k in result['histogram'].keys() ])\n\t\n\tfig = plt.figure(figsize=(8,3))\n\tax = plt.gca()\n\t\n\tax.set_title(\"Phase estimation with {0}-bit accuracy and probability {1:0.3f}\".format(nancillas, p_succes))\n\t\n\tax.bar(binary_keys, result['histogram'].values())\n\n\tax. set_ylabel(r\"$P_{|\\psi\\rangle}$\")\n\tplt.xticks(rotation = 'vertical')\n\tplt.tight_layout()\n\tplt.show()\n\n\ndef plot_results_projectq(result, nancillas, qubits, p_succes):\n\tprint(\n\t\t\"Given the desired bit accuracy and probability of succes, {0} ancillas will be used resulting in a {1:0.3f} probability of succes.\".format(\n\t\t\tnancillas, p_succes))\n\tbinary_keys = [(str(bin(int(k)))[-nancillas::]).ljust(nancillas, '0') for k in result.keys()]\n\n\tfig = plt.figure(figsize=(8, 3))\n\tax = plt.gca()\n\n\tax.set_title(\"Phase estimation with {0}-bit accuracy and probability {1:0.3f}\".format(nancillas, p_succes))\n\n\tax.bar(binary_keys, result.values())\n\tplt.xticks(rotation='vertical')\n\tplt.show()\n\t\n\nresult = OrderedDict([('id', 6747326), ('url', 'https://api.quantum-inspire.com/results/6747326/'), ('job', 'https://api.quantum-inspire.com/jobs/6749903/'), ('created_at', '2020-01-10T13:02:43.137646Z'), ('number_of_qubits', 8), ('execution_time_in_seconds', 0.01200270652771), ('raw_text', ''), ('raw_data_url', 'https://api.quantum-inspire.com/results/6747326/raw-data/b2bcf5e9e6874e1b5dd25b4368d958fdd73e069fbc0eced8f4e8057dfb224418/'), ('histogram', OrderedDict([('197', 0.001953125), ('207', 0.029296875), ('223', 0.12890625), ('195', 0.015625), ('222', 0.103515625), ('228', 0.01953125), ('215', 0.017578125), ('254', 0.03125), ('240', 0.0234375), ('251', 0.005859375), ('232', 0.02734375), ('249', 0.001953125), ('226', 0.015625), ('224', 0.05859375), ('239', 0.015625), ('206', 0.021484375), ('231', 0.015625), ('244', 0.01171875), ('194', 0.013671875), ('220', 0.03125), ('253', 0.009765625), ('204', 0.0078125), ('252', 0.017578125), ('242', 0.009765625), ('192', 0.013671875), ('255', 0.09375), ('219', 0.021484375), ('205', 0.001953125), ('199', 0.01171875), ('211', 0.013671875), ('225', 0.013671875), ('216', 0.015625), ('250', 0.013671875), ('237', 0.009765625), ('221', 0.021484375), ('230', 0.015625), ('247', 0.01171875), ('209', 0.005859375), ('234', 0.013671875), ('210', 0.00390625), ('246', 0.001953125), ('241', 0.01171875), ('238', 0.0078125), ('229', 0.009765625), ('248', 0.015625), ('217', 0.0078125), ('200', 0.009765625), ('214', 0.001953125), ('233', 0.005859375), ('236', 0.001953125), ('218', 0.001953125), ('245', 0.001953125), ('243', 0.001953125), ('235', 0.00390625)])), ('histogram_url', 'https://api.quantum-inspire.com/results/6747326/histogram/b2bcf5e9e6874e1b5dd25b4368d958fdd73e069fbc0eced8f4e8057dfb224418/'), ('measurement_mask', 0), ('quantum_states_url', 
'https://api.quantum-inspire.com/results/6747326/quantum-states/b2bcf5e9e6874e1b5dd25b4368d958fdd73e069fbc0eced8f4e8057dfb224418/'), ('measurement_register_url', 'https://api.quantum-inspire.com/results/6747326/measurement-register/b2bcf5e9e6874e1b5dd25b4368d958fdd73e069fbc0eced8f4e8057dfb224418/'), ('calibration', None)])\n\nif __name__ == '__main__':\n\tplot_results(result, 6, 2, 0.9)"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
data-RanDan/pandas
|
[
"b58ee5779583b31de513b3d5c12f5c69c035e920",
"b58ee5779583b31de513b3d5c12f5c69c035e920"
] |
[
"pandas/tests/io/test_parquet.py",
"pandas/tests/series/indexing/test_datetime.py"
] |
[
"\"\"\" test parquet compat \"\"\"\nimport datetime\nfrom distutils.version import LooseVersion\nfrom io import BytesIO\nimport os\nimport pathlib\nfrom warnings import catch_warnings\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import PY38\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nimport pandas._testing as tm\n\nfrom pandas.io.parquet import (\n FastParquetImpl,\n PyArrowImpl,\n get_engine,\n read_parquet,\n to_parquet,\n)\n\ntry:\n import pyarrow\n\n _HAVE_PYARROW = True\nexcept ImportError:\n _HAVE_PYARROW = False\n\ntry:\n import fastparquet\n\n _HAVE_FASTPARQUET = True\nexcept ImportError:\n _HAVE_FASTPARQUET = False\n\n\npytestmark = pytest.mark.filterwarnings(\n \"ignore:RangeIndex.* is deprecated:DeprecationWarning\"\n)\n\n\n# setup engines & skips\[email protected](\n params=[\n pytest.param(\n \"fastparquet\",\n marks=pytest.mark.skipif(\n not _HAVE_FASTPARQUET, reason=\"fastparquet is not installed\"\n ),\n ),\n pytest.param(\n \"pyarrow\",\n marks=pytest.mark.skipif(\n not _HAVE_PYARROW, reason=\"pyarrow is not installed\"\n ),\n ),\n ]\n)\ndef engine(request):\n return request.param\n\n\[email protected]\ndef pa():\n if not _HAVE_PYARROW:\n pytest.skip(\"pyarrow is not installed\")\n return \"pyarrow\"\n\n\[email protected]\ndef fp():\n if not _HAVE_FASTPARQUET:\n pytest.skip(\"fastparquet is not installed\")\n return \"fastparquet\"\n\n\[email protected]\ndef df_compat():\n return pd.DataFrame({\"A\": [1, 2, 3], \"B\": \"foo\"})\n\n\[email protected]\ndef df_cross_compat():\n df = pd.DataFrame(\n {\n \"a\": list(\"abc\"),\n \"b\": list(range(1, 4)),\n # 'c': np.arange(3, 6).astype('u1'),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.date_range(\"20130101\", periods=3),\n # 'g': pd.date_range('20130101', periods=3,\n # tz='US/Eastern'),\n # 'h': pd.date_range('20130101', periods=3, freq='ns')\n }\n )\n return df\n\n\[email protected]\ndef df_full():\n return pd.DataFrame(\n {\n \"string\": list(\"abc\"),\n \"string_with_nan\": [\"a\", np.nan, \"c\"],\n \"string_with_none\": [\"a\", None, \"c\"],\n \"bytes\": [b\"foo\", b\"bar\", b\"baz\"],\n \"unicode\": [\"foo\", \"bar\", \"baz\"],\n \"int\": list(range(1, 4)),\n \"uint\": np.arange(3, 6).astype(\"u1\"),\n \"float\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"float_with_nan\": [2.0, np.nan, 3.0],\n \"bool\": [True, False, True],\n \"datetime\": pd.date_range(\"20130101\", periods=3),\n \"datetime_with_nat\": [\n pd.Timestamp(\"20130101\"),\n pd.NaT,\n pd.Timestamp(\"20130103\"),\n ],\n }\n )\n\n\ndef check_round_trip(\n df,\n engine=None,\n path=None,\n write_kwargs=None,\n read_kwargs=None,\n expected=None,\n check_names=True,\n check_like=False,\n repeat=2,\n):\n \"\"\"Verify parquet serializer and deserializer produce the same results.\n\n Performs a pandas to disk and disk to pandas round trip,\n then compares the 2 resulting DataFrames to verify equality.\n\n Parameters\n ----------\n df: Dataframe\n engine: str, optional\n 'pyarrow' or 'fastparquet'\n path: str, optional\n write_kwargs: dict of str:str, optional\n read_kwargs: dict of str:str, optional\n expected: DataFrame, optional\n Expected deserialization result, otherwise will be equal to `df`\n check_names: list of str, optional\n Closed set of column names to be compared\n check_like: bool, optional\n If True, ignore the order of index & columns.\n repeat: int, optional\n How many times to repeat the test\n \"\"\"\n write_kwargs = write_kwargs or {\"compression\": None}\n 
read_kwargs = read_kwargs or {}\n\n if expected is None:\n expected = df\n\n if engine:\n write_kwargs[\"engine\"] = engine\n read_kwargs[\"engine\"] = engine\n\n def compare(repeat):\n for _ in range(repeat):\n df.to_parquet(path, **write_kwargs)\n with catch_warnings(record=True):\n actual = read_parquet(path, **read_kwargs)\n\n tm.assert_frame_equal(\n expected, actual, check_names=check_names, check_like=check_like\n )\n\n if path is None:\n with tm.ensure_clean() as path:\n compare(repeat)\n else:\n compare(repeat)\n\n\ndef test_invalid_engine(df_compat):\n with pytest.raises(ValueError):\n check_round_trip(df_compat, \"foo\", \"bar\")\n\n\ndef test_options_py(df_compat, pa):\n # use the set option\n\n with pd.option_context(\"io.parquet.engine\", \"pyarrow\"):\n check_round_trip(df_compat)\n\n\ndef test_options_fp(df_compat, fp):\n # use the set option\n\n with pd.option_context(\"io.parquet.engine\", \"fastparquet\"):\n check_round_trip(df_compat)\n\n\ndef test_options_auto(df_compat, fp, pa):\n # use the set option\n\n with pd.option_context(\"io.parquet.engine\", \"auto\"):\n check_round_trip(df_compat)\n\n\ndef test_options_get_engine(fp, pa):\n assert isinstance(get_engine(\"pyarrow\"), PyArrowImpl)\n assert isinstance(get_engine(\"fastparquet\"), FastParquetImpl)\n\n with pd.option_context(\"io.parquet.engine\", \"pyarrow\"):\n assert isinstance(get_engine(\"auto\"), PyArrowImpl)\n assert isinstance(get_engine(\"pyarrow\"), PyArrowImpl)\n assert isinstance(get_engine(\"fastparquet\"), FastParquetImpl)\n\n with pd.option_context(\"io.parquet.engine\", \"fastparquet\"):\n assert isinstance(get_engine(\"auto\"), FastParquetImpl)\n assert isinstance(get_engine(\"pyarrow\"), PyArrowImpl)\n assert isinstance(get_engine(\"fastparquet\"), FastParquetImpl)\n\n with pd.option_context(\"io.parquet.engine\", \"auto\"):\n assert isinstance(get_engine(\"auto\"), PyArrowImpl)\n assert isinstance(get_engine(\"pyarrow\"), PyArrowImpl)\n assert isinstance(get_engine(\"fastparquet\"), FastParquetImpl)\n\n\ndef test_get_engine_auto_error_message():\n # Expect different error messages from get_engine(engine=\"auto\")\n # if engines aren't installed vs. are installed but bad version\n from pandas.compat._optional import VERSIONS\n\n # Do we have engines installed, but a bad version of them?\n pa_min_ver = VERSIONS.get(\"pyarrow\")\n fp_min_ver = VERSIONS.get(\"fastparquet\")\n have_pa_bad_version = (\n False\n if not _HAVE_PYARROW\n else LooseVersion(pyarrow.__version__) < LooseVersion(pa_min_ver)\n )\n have_fp_bad_version = (\n False\n if not _HAVE_FASTPARQUET\n else LooseVersion(fastparquet.__version__) < LooseVersion(fp_min_ver)\n )\n # Do we have usable engines installed?\n have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version\n have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version\n\n if not have_usable_pa and not have_usable_fp:\n # No usable engines found.\n if have_pa_bad_version:\n match = f\"Pandas requires version .{pa_min_ver}. or newer of .pyarrow.\"\n with pytest.raises(ImportError, match=match):\n get_engine(\"auto\")\n else:\n match = \"Missing optional dependency .pyarrow.\"\n with pytest.raises(ImportError, match=match):\n get_engine(\"auto\")\n\n if have_fp_bad_version:\n match = f\"Pandas requires version .{fp_min_ver}. 
or newer of .fastparquet.\"\n with pytest.raises(ImportError, match=match):\n get_engine(\"auto\")\n else:\n match = \"Missing optional dependency .fastparquet.\"\n with pytest.raises(ImportError, match=match):\n get_engine(\"auto\")\n\n\ndef test_cross_engine_pa_fp(df_cross_compat, pa, fp):\n # cross-compat with differing reading/writing engines\n\n df = df_cross_compat\n with tm.ensure_clean() as path:\n df.to_parquet(path, engine=pa, compression=None)\n\n result = read_parquet(path, engine=fp)\n tm.assert_frame_equal(result, df)\n\n result = read_parquet(path, engine=fp, columns=[\"a\", \"d\"])\n tm.assert_frame_equal(result, df[[\"a\", \"d\"]])\n\n\ndef test_cross_engine_fp_pa(df_cross_compat, pa, fp):\n # cross-compat with differing reading/writing engines\n\n if (\n LooseVersion(pyarrow.__version__) < \"0.15\"\n and LooseVersion(pyarrow.__version__) >= \"0.13\"\n ):\n pytest.xfail(\n \"Reading fastparquet with pyarrow in 0.14 fails: \"\n \"https://issues.apache.org/jira/browse/ARROW-6492\"\n )\n\n df = df_cross_compat\n with tm.ensure_clean() as path:\n df.to_parquet(path, engine=fp, compression=None)\n\n with catch_warnings(record=True):\n result = read_parquet(path, engine=pa)\n tm.assert_frame_equal(result, df)\n\n result = read_parquet(path, engine=pa, columns=[\"a\", \"d\"])\n tm.assert_frame_equal(result, df[[\"a\", \"d\"]])\n\n\nclass Base:\n def check_error_on_write(self, df, engine, exc):\n # check that we are raising the exception on writing\n with tm.ensure_clean() as path:\n with pytest.raises(exc):\n to_parquet(df, path, engine, compression=None)\n\n\nclass TestBasic(Base):\n def test_error(self, engine):\n for obj in [\n pd.Series([1, 2, 3]),\n 1,\n \"foo\",\n pd.Timestamp(\"20130101\"),\n np.array([1, 2, 3]),\n ]:\n self.check_error_on_write(obj, engine, ValueError)\n\n def test_columns_dtypes(self, engine):\n df = pd.DataFrame({\"string\": list(\"abc\"), \"int\": list(range(1, 4))})\n\n # unicode\n df.columns = [\"foo\", \"bar\"]\n check_round_trip(df, engine)\n\n def test_columns_dtypes_invalid(self, engine):\n df = pd.DataFrame({\"string\": list(\"abc\"), \"int\": list(range(1, 4))})\n\n # numeric\n df.columns = [0, 1]\n self.check_error_on_write(df, engine, ValueError)\n\n # bytes\n df.columns = [b\"foo\", b\"bar\"]\n self.check_error_on_write(df, engine, ValueError)\n\n # python object\n df.columns = [\n datetime.datetime(2011, 1, 1, 0, 0),\n datetime.datetime(2011, 1, 1, 1, 1),\n ]\n self.check_error_on_write(df, engine, ValueError)\n\n @pytest.mark.parametrize(\"compression\", [None, \"gzip\", \"snappy\", \"brotli\"])\n def test_compression(self, engine, compression):\n\n if compression == \"snappy\":\n pytest.importorskip(\"snappy\")\n\n elif compression == \"brotli\":\n pytest.importorskip(\"brotli\")\n\n df = pd.DataFrame({\"A\": [1, 2, 3]})\n check_round_trip(df, engine, write_kwargs={\"compression\": compression})\n\n def test_read_columns(self, engine):\n # GH18154\n df = pd.DataFrame({\"string\": list(\"abc\"), \"int\": list(range(1, 4))})\n\n expected = pd.DataFrame({\"string\": list(\"abc\")})\n check_round_trip(\n df, engine, expected=expected, read_kwargs={\"columns\": [\"string\"]}\n )\n\n def test_write_index(self, engine):\n check_names = engine != \"fastparquet\"\n\n df = pd.DataFrame({\"A\": [1, 2, 3]})\n check_round_trip(df, engine)\n\n indexes = [\n [2, 3, 4],\n pd.date_range(\"20130101\", periods=3),\n list(\"abc\"),\n [1, 3, 4],\n ]\n # non-default index\n for index in indexes:\n df.index = index\n if isinstance(index, pd.DatetimeIndex):\n 
df.index = df.index._with_freq(None) # freq doesnt round-trip\n check_round_trip(df, engine, check_names=check_names)\n\n # index with meta-data\n df.index = [0, 1, 2]\n df.index.name = \"foo\"\n check_round_trip(df, engine)\n\n def test_write_multiindex(self, pa):\n # Not supported in fastparquet as of 0.1.3 or older pyarrow version\n engine = pa\n\n df = pd.DataFrame({\"A\": [1, 2, 3]})\n index = pd.MultiIndex.from_tuples([(\"a\", 1), (\"a\", 2), (\"b\", 1)])\n df.index = index\n check_round_trip(df, engine)\n\n def test_write_column_multiindex(self, engine):\n # column multi-index\n mi_columns = pd.MultiIndex.from_tuples([(\"a\", 1), (\"a\", 2), (\"b\", 1)])\n df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)\n self.check_error_on_write(df, engine, ValueError)\n\n def test_multiindex_with_columns(self, pa):\n engine = pa\n dates = pd.date_range(\"01-Jan-2018\", \"01-Dec-2018\", freq=\"MS\")\n df = pd.DataFrame(np.random.randn(2 * len(dates), 3), columns=list(\"ABC\"))\n index1 = pd.MultiIndex.from_product(\n [[\"Level1\", \"Level2\"], dates], names=[\"level\", \"date\"]\n )\n index2 = index1.copy(names=None)\n for index in [index1, index2]:\n df.index = index\n\n check_round_trip(df, engine)\n check_round_trip(\n df, engine, read_kwargs={\"columns\": [\"A\", \"B\"]}, expected=df[[\"A\", \"B\"]]\n )\n\n def test_write_ignoring_index(self, engine):\n # ENH 20768\n # Ensure index=False omits the index from the written Parquet file.\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [\"q\", \"r\", \"s\"]})\n\n write_kwargs = {\"compression\": None, \"index\": False}\n\n # Because we're dropping the index, we expect the loaded dataframe to\n # have the default integer index.\n expected = df.reset_index(drop=True)\n\n check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)\n\n # Ignore custom index\n df = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [\"q\", \"r\", \"s\"]}, index=[\"zyx\", \"wvu\", \"tsr\"]\n )\n\n check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)\n\n # Ignore multi-indexes as well.\n arrays = [\n [\"bar\", \"bar\", \"baz\", \"baz\", \"foo\", \"foo\", \"qux\", \"qux\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ]\n df = pd.DataFrame(\n {\"one\": list(range(8)), \"two\": [-i for i in range(8)]}, index=arrays\n )\n\n expected = df.reset_index(drop=True)\n check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)\n\n\nclass TestParquetPyArrow(Base):\n def test_basic(self, pa, df_full):\n\n df = df_full\n\n # additional supported types for pyarrow\n dti = pd.date_range(\"20130101\", periods=3, tz=\"Europe/Brussels\")\n dti = dti._with_freq(None) # freq doesnt round-trip\n df[\"datetime_tz\"] = dti\n df[\"bool_with_none\"] = [True, None, True]\n\n check_round_trip(df, pa)\n\n def test_basic_subset_columns(self, pa, df_full):\n # GH18628\n\n df = df_full\n # additional supported types for pyarrow\n df[\"datetime_tz\"] = pd.date_range(\"20130101\", periods=3, tz=\"Europe/Brussels\")\n\n check_round_trip(\n df,\n pa,\n expected=df[[\"string\", \"int\"]],\n read_kwargs={\"columns\": [\"string\", \"int\"]},\n )\n\n def test_duplicate_columns(self, pa):\n # not currently able to handle duplicate columns\n df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list(\"aaa\")).copy()\n self.check_error_on_write(df, pa, ValueError)\n\n def test_unsupported(self, pa):\n if LooseVersion(pyarrow.__version__) < LooseVersion(\"0.15.1.dev\"):\n # period - will be supported using an extension type 
with pyarrow 1.0\n df = pd.DataFrame({\"a\": pd.period_range(\"2013\", freq=\"M\", periods=3)})\n # pyarrow 0.11 raises ArrowTypeError\n # older pyarrows raise ArrowInvalid\n self.check_error_on_write(df, pa, Exception)\n\n # timedelta\n df = pd.DataFrame({\"a\": pd.timedelta_range(\"1 day\", periods=3)})\n self.check_error_on_write(df, pa, NotImplementedError)\n\n # mixed python objects\n df = pd.DataFrame({\"a\": [\"a\", 1, 2.0]})\n # pyarrow 0.11 raises ArrowTypeError\n # older pyarrows raise ArrowInvalid\n self.check_error_on_write(df, pa, Exception)\n\n def test_categorical(self, pa):\n\n # supported in >= 0.7.0\n df = pd.DataFrame()\n df[\"a\"] = pd.Categorical(list(\"abcdef\"))\n\n # test for null, out-of-order values, and unobserved category\n df[\"b\"] = pd.Categorical(\n [\"bar\", \"foo\", \"foo\", \"bar\", None, \"bar\"],\n dtype=pd.CategoricalDtype([\"foo\", \"bar\", \"baz\"]),\n )\n\n # test for ordered flag\n df[\"c\"] = pd.Categorical(\n [\"a\", \"b\", \"c\", \"a\", \"c\", \"b\"], categories=[\"b\", \"c\", \"d\"], ordered=True\n )\n\n if LooseVersion(pyarrow.__version__) >= LooseVersion(\"0.15.0\"):\n check_round_trip(df, pa)\n else:\n # de-serialized as object for pyarrow < 0.15\n expected = df.astype(object)\n check_round_trip(df, pa, expected=expected)\n\n def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so):\n s3fs = pytest.importorskip(\"s3fs\")\n if LooseVersion(pyarrow.__version__) <= LooseVersion(\"0.17.0\"):\n pytest.skip()\n s3 = s3fs.S3FileSystem(**s3so)\n kw = dict(filesystem=s3)\n check_round_trip(\n df_compat,\n pa,\n path=\"pandas-test/pyarrow.parquet\",\n read_kwargs=kw,\n write_kwargs=kw,\n )\n\n def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so):\n if LooseVersion(pyarrow.__version__) <= LooseVersion(\"0.17.0\"):\n pytest.skip()\n # GH #19134\n s3so = dict(storage_options=s3so)\n check_round_trip(\n df_compat,\n pa,\n path=\"s3://pandas-test/pyarrow.parquet\",\n read_kwargs=s3so,\n write_kwargs=s3so,\n )\n\n @td.skip_if_no(\"s3fs\") # also requires flask\n @pytest.mark.parametrize(\n \"partition_col\",\n [\n pytest.param(\n [\"A\"],\n marks=pytest.mark.xfail(\n PY38, reason=\"Getting back empty DataFrame\", raises=AssertionError\n ),\n ),\n [],\n ],\n )\n def test_s3_roundtrip_for_dir(\n self, df_compat, s3_resource, pa, partition_col, s3so\n ):\n # GH #26388\n expected_df = df_compat.copy()\n\n # GH #35791\n # read_table uses the new Arrow Datasets API since pyarrow 1.0.0\n # Previous behaviour was pyarrow partitioned columns become 'category' dtypes\n # These are added to back of dataframe on read. 
In new API category dtype is\n # only used if partition field is string.\n legacy_read_table = LooseVersion(pyarrow.__version__) < LooseVersion(\"1.0.0\")\n if partition_col and legacy_read_table:\n partition_col_type = \"category\"\n else:\n partition_col_type = \"int32\"\n\n expected_df[partition_col] = expected_df[partition_col].astype(\n partition_col_type\n )\n\n check_round_trip(\n df_compat,\n pa,\n expected=expected_df,\n path=\"s3://pandas-test/parquet_dir\",\n read_kwargs=dict(storage_options=s3so),\n write_kwargs=dict(\n partition_cols=partition_col, compression=None, storage_options=s3so\n ),\n check_like=True,\n repeat=1,\n )\n\n @tm.network\n @td.skip_if_no(\"pyarrow\")\n def test_parquet_read_from_url(self, df_compat):\n url = (\n \"https://raw.githubusercontent.com/pandas-dev/pandas/\"\n \"master/pandas/tests/io/data/parquet/simple.parquet\"\n )\n df = pd.read_parquet(url)\n tm.assert_frame_equal(df, df_compat)\n\n @td.skip_if_no(\"pyarrow\")\n def test_read_file_like_obj_support(self, df_compat):\n buffer = BytesIO()\n df_compat.to_parquet(buffer)\n df_from_buf = pd.read_parquet(buffer)\n tm.assert_frame_equal(df_compat, df_from_buf)\n\n @td.skip_if_no(\"pyarrow\")\n def test_expand_user(self, df_compat, monkeypatch):\n monkeypatch.setenv(\"HOME\", \"TestingUser\")\n monkeypatch.setenv(\"USERPROFILE\", \"TestingUser\")\n with pytest.raises(OSError, match=r\".*TestingUser.*\"):\n pd.read_parquet(\"~/file.parquet\")\n with pytest.raises(OSError, match=r\".*TestingUser.*\"):\n df_compat.to_parquet(\"~/file.parquet\")\n\n def test_partition_cols_supported(self, pa, df_full):\n # GH #23283\n partition_cols = [\"bool\", \"int\"]\n df = df_full\n with tm.ensure_clean_dir() as path:\n df.to_parquet(path, partition_cols=partition_cols, compression=None)\n import pyarrow.parquet as pq\n\n dataset = pq.ParquetDataset(path, validate_schema=False)\n assert len(dataset.partitions.partition_names) == 2\n assert dataset.partitions.partition_names == set(partition_cols)\n\n def test_partition_cols_string(self, pa, df_full):\n # GH #27117\n partition_cols = \"bool\"\n partition_cols_list = [partition_cols]\n df = df_full\n with tm.ensure_clean_dir() as path:\n df.to_parquet(path, partition_cols=partition_cols, compression=None)\n import pyarrow.parquet as pq\n\n dataset = pq.ParquetDataset(path, validate_schema=False)\n assert len(dataset.partitions.partition_names) == 1\n assert dataset.partitions.partition_names == set(partition_cols_list)\n\n @pytest.mark.parametrize(\n \"path_type\", [lambda path: path, lambda path: pathlib.Path(path)]\n )\n def test_partition_cols_pathlib(self, pa, df_compat, path_type):\n # GH 35902\n\n partition_cols = \"B\"\n partition_cols_list = [partition_cols]\n df = df_compat\n\n with tm.ensure_clean_dir() as path_str:\n path = path_type(path_str)\n df.to_parquet(path, partition_cols=partition_cols_list)\n\n def test_empty_dataframe(self, pa):\n # GH #27339\n df = pd.DataFrame()\n check_round_trip(df, pa)\n\n def test_write_with_schema(self, pa):\n import pyarrow\n\n df = pd.DataFrame({\"x\": [0, 1]})\n schema = pyarrow.schema([pyarrow.field(\"x\", type=pyarrow.bool_())])\n out_df = df.astype(bool)\n check_round_trip(df, pa, write_kwargs={\"schema\": schema}, expected=out_df)\n\n @td.skip_if_no(\"pyarrow\", min_version=\"0.15.0\")\n def test_additional_extension_arrays(self, pa):\n # test additional ExtensionArrays that are supported through the\n # __arrow_array__ protocol\n df = pd.DataFrame(\n {\n \"a\": pd.Series([1, 2, 3], dtype=\"Int64\"),\n \"b\": 
pd.Series([1, 2, 3], dtype=\"UInt32\"),\n \"c\": pd.Series([\"a\", None, \"c\"], dtype=\"string\"),\n }\n )\n if LooseVersion(pyarrow.__version__) >= LooseVersion(\"0.16.0\"):\n expected = df\n else:\n # de-serialized as plain int / object\n expected = df.assign(\n a=df.a.astype(\"int64\"), b=df.b.astype(\"int64\"), c=df.c.astype(\"object\")\n )\n check_round_trip(df, pa, expected=expected)\n\n df = pd.DataFrame({\"a\": pd.Series([1, 2, 3, None], dtype=\"Int64\")})\n if LooseVersion(pyarrow.__version__) >= LooseVersion(\"0.16.0\"):\n expected = df\n else:\n # if missing values in integer, currently de-serialized as float\n expected = df.assign(a=df.a.astype(\"float64\"))\n check_round_trip(df, pa, expected=expected)\n\n @td.skip_if_no(\"pyarrow\", min_version=\"0.16.0\")\n def test_additional_extension_types(self, pa):\n # test additional ExtensionArrays that are supported through the\n # __arrow_array__ protocol + by defining a custom ExtensionType\n df = pd.DataFrame(\n {\n # Arrow does not yet support struct in writing to Parquet (ARROW-1644)\n # \"c\": pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2), (3, 4)]),\n \"d\": pd.period_range(\"2012-01-01\", periods=3, freq=\"D\"),\n }\n )\n check_round_trip(df, pa)\n\n @td.skip_if_no(\"pyarrow\", min_version=\"0.14\")\n def test_timestamp_nanoseconds(self, pa):\n # with version 2.0, pyarrow defaults to writing the nanoseconds, so\n # this should work without error\n df = pd.DataFrame({\"a\": pd.date_range(\"2017-01-01\", freq=\"1n\", periods=10)})\n check_round_trip(df, pa, write_kwargs={\"version\": \"2.0\"})\n\n @td.skip_if_no(\"pyarrow\", min_version=\"0.17\")\n def test_filter_row_groups(self, pa):\n # https://github.com/pandas-dev/pandas/issues/26551\n df = pd.DataFrame({\"a\": list(range(0, 3))})\n with tm.ensure_clean() as path:\n df.to_parquet(path, pa)\n result = read_parquet(\n path, pa, filters=[(\"a\", \"==\", 0)], use_legacy_dataset=False\n )\n assert len(result) == 1\n\n\nclass TestParquetFastParquet(Base):\n @td.skip_if_no(\"fastparquet\", min_version=\"0.3.2\")\n def test_basic(self, fp, df_full):\n df = df_full\n\n dti = pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\")\n dti = dti._with_freq(None) # freq doesnt round-trip\n df[\"datetime_tz\"] = dti\n df[\"timedelta\"] = pd.timedelta_range(\"1 day\", periods=3)\n check_round_trip(df, fp)\n\n @pytest.mark.skip(reason=\"not supported\")\n def test_duplicate_columns(self, fp):\n\n # not currently able to handle duplicate columns\n df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list(\"aaa\")).copy()\n self.check_error_on_write(df, fp, ValueError)\n\n def test_bool_with_none(self, fp):\n df = pd.DataFrame({\"a\": [True, None, False]})\n expected = pd.DataFrame({\"a\": [1.0, np.nan, 0.0]}, dtype=\"float16\")\n check_round_trip(df, fp, expected=expected)\n\n def test_unsupported(self, fp):\n\n # period\n df = pd.DataFrame({\"a\": pd.period_range(\"2013\", freq=\"M\", periods=3)})\n self.check_error_on_write(df, fp, ValueError)\n\n # mixed\n df = pd.DataFrame({\"a\": [\"a\", 1, 2.0]})\n self.check_error_on_write(df, fp, ValueError)\n\n def test_categorical(self, fp):\n df = pd.DataFrame({\"a\": pd.Categorical(list(\"abc\"))})\n check_round_trip(df, fp)\n\n def test_filter_row_groups(self, fp):\n d = {\"a\": list(range(0, 3))}\n df = pd.DataFrame(d)\n with tm.ensure_clean() as path:\n df.to_parquet(path, fp, compression=None, row_group_offsets=1)\n result = read_parquet(path, fp, filters=[(\"a\", \"==\", 0)])\n assert len(result) == 1\n\n def 
test_s3_roundtrip(self, df_compat, s3_resource, fp, s3so):\n # GH #19134\n check_round_trip(\n df_compat,\n fp,\n path=\"s3://pandas-test/fastparquet.parquet\",\n read_kwargs=dict(storage_options=s3so),\n write_kwargs=dict(compression=None, storage_options=s3so),\n )\n\n def test_partition_cols_supported(self, fp, df_full):\n # GH #23283\n partition_cols = [\"bool\", \"int\"]\n df = df_full\n with tm.ensure_clean_dir() as path:\n df.to_parquet(\n path,\n engine=\"fastparquet\",\n partition_cols=partition_cols,\n compression=None,\n )\n assert os.path.exists(path)\n import fastparquet\n\n actual_partition_cols = fastparquet.ParquetFile(path, False).cats\n assert len(actual_partition_cols) == 2\n\n def test_partition_cols_string(self, fp, df_full):\n # GH #27117\n partition_cols = \"bool\"\n df = df_full\n with tm.ensure_clean_dir() as path:\n df.to_parquet(\n path,\n engine=\"fastparquet\",\n partition_cols=partition_cols,\n compression=None,\n )\n assert os.path.exists(path)\n import fastparquet\n\n actual_partition_cols = fastparquet.ParquetFile(path, False).cats\n assert len(actual_partition_cols) == 1\n\n def test_partition_on_supported(self, fp, df_full):\n # GH #23283\n partition_cols = [\"bool\", \"int\"]\n df = df_full\n with tm.ensure_clean_dir() as path:\n df.to_parquet(\n path,\n engine=\"fastparquet\",\n compression=None,\n partition_on=partition_cols,\n )\n assert os.path.exists(path)\n import fastparquet\n\n actual_partition_cols = fastparquet.ParquetFile(path, False).cats\n assert len(actual_partition_cols) == 2\n\n def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):\n # GH #23283\n partition_cols = [\"bool\", \"int\"]\n df = df_full\n with pytest.raises(ValueError):\n with tm.ensure_clean_dir() as path:\n df.to_parquet(\n path,\n engine=\"fastparquet\",\n compression=None,\n partition_on=partition_cols,\n partition_cols=partition_cols,\n )\n\n def test_empty_dataframe(self, fp):\n # GH #27339\n df = pd.DataFrame()\n expected = df.copy()\n expected.index.name = \"index\"\n check_round_trip(df, fp, expected=expected)\n",
"\"\"\"\nAlso test support for datetime64[ns] in Series / DataFrame\n\"\"\"\nfrom datetime import datetime, timedelta\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import iNaT\nimport pandas._libs.index as _index\n\nimport pandas as pd\nfrom pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range\nimport pandas._testing as tm\n\n\ndef test_fancy_getitem():\n dti = date_range(\n freq=\"WOM-1FRI\", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)\n )\n\n s = Series(np.arange(len(dti)), index=dti)\n\n assert s[48] == 48\n assert s[\"1/2/2009\"] == 48\n assert s[\"2009-1-2\"] == 48\n assert s[datetime(2009, 1, 2)] == 48\n assert s[Timestamp(datetime(2009, 1, 2))] == 48\n with pytest.raises(KeyError, match=r\"^'2009-1-3'$\"):\n s[\"2009-1-3\"]\n tm.assert_series_equal(\n s[\"3/6/2009\":\"2009-06-05\"], s[datetime(2009, 3, 6) : datetime(2009, 6, 5)]\n )\n\n\ndef test_fancy_setitem():\n dti = date_range(\n freq=\"WOM-1FRI\", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)\n )\n\n s = Series(np.arange(len(dti)), index=dti)\n s[48] = -1\n assert s[48] == -1\n s[\"1/2/2009\"] = -2\n assert s[48] == -2\n s[\"1/2/2009\":\"2009-06-05\"] = -3\n assert (s[48:54] == -3).all()\n\n\ndef test_dti_reset_index_round_trip():\n dti = date_range(start=\"1/1/2001\", end=\"6/1/2001\", freq=\"D\")._with_freq(None)\n d1 = DataFrame({\"v\": np.random.rand(len(dti))}, index=dti)\n d2 = d1.reset_index()\n assert d2.dtypes[0] == np.dtype(\"M8[ns]\")\n d3 = d2.set_index(\"index\")\n tm.assert_frame_equal(d1, d3, check_names=False)\n\n # #2329\n stamp = datetime(2012, 11, 22)\n df = DataFrame([[stamp, 12.1]], columns=[\"Date\", \"Value\"])\n df = df.set_index(\"Date\")\n\n assert df.index[0] == stamp\n assert df.reset_index()[\"Date\"][0] == stamp\n\n\ndef test_series_set_value():\n # #1561\n\n dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]\n index = DatetimeIndex(dates)\n\n s = Series(dtype=object)\n s._set_value(dates[0], 1.0)\n s._set_value(dates[1], np.nan)\n\n expected = Series([1.0, np.nan], index=index)\n\n tm.assert_series_equal(s, expected)\n\n\[email protected]\ndef test_slice_locs_indexerror():\n times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) for i in range(100000)]\n s = Series(range(100000), times)\n s.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]\n\n\ndef test_slicing_datetimes():\n # GH 7523\n\n # unique\n df = DataFrame(\n np.arange(4.0, dtype=\"float64\"),\n index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 3, 4]],\n )\n result = df.loc[datetime(2001, 1, 1, 10) :]\n tm.assert_frame_equal(result, df)\n result = df.loc[: datetime(2001, 1, 4, 10)]\n tm.assert_frame_equal(result, df)\n result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]\n tm.assert_frame_equal(result, df)\n\n result = df.loc[datetime(2001, 1, 1, 11) :]\n expected = df.iloc[1:]\n tm.assert_frame_equal(result, expected)\n result = df.loc[\"20010101 11\":]\n tm.assert_frame_equal(result, expected)\n\n # duplicates\n df = pd.DataFrame(\n np.arange(5.0, dtype=\"float64\"),\n index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]],\n )\n\n result = df.loc[datetime(2001, 1, 1, 10) :]\n tm.assert_frame_equal(result, df)\n result = df.loc[: datetime(2001, 1, 4, 10)]\n tm.assert_frame_equal(result, df)\n result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]\n tm.assert_frame_equal(result, df)\n\n result = df.loc[datetime(2001, 1, 1, 11) :]\n expected = df.iloc[1:]\n tm.assert_frame_equal(result, expected)\n result = df.loc[\"20010101 11\":]\n 
tm.assert_frame_equal(result, expected)\n\n\ndef test_getitem_setitem_datetime_tz_pytz():\n from pytz import timezone as tz\n\n N = 50\n # testing with timezone, GH #2785\n rng = date_range(\"1/1/1990\", periods=N, freq=\"H\", tz=\"US/Eastern\")\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz(\"UTC\"))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz(\"UTC\"))] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n\n # comparison dates with datetime MUST be localized!\n date = tz(\"US/Central\").localize(datetime(1990, 1, 1, 3))\n result[date] = 0\n result[date] = ts[4]\n tm.assert_series_equal(result, ts)\n\n\ndef test_getitem_setitem_datetime_tz_dateutil():\n from dateutil.tz import tzutc\n\n from pandas._libs.tslibs.timezones import dateutil_gettz as gettz\n\n tz = (\n lambda x: tzutc() if x == \"UTC\" else gettz(x)\n ) # handle special case for utc in dateutil\n\n N = 50\n\n # testing with timezone, GH #2785\n rng = date_range(\"1/1/1990\", periods=N, freq=\"H\", tz=\"America/New_York\")\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz(\"UTC\"))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz(\"UTC\"))] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 3, tzinfo=tz(\"America/Chicago\"))] = 0\n result[datetime(1990, 1, 1, 3, tzinfo=tz(\"America/Chicago\"))] = ts[4]\n tm.assert_series_equal(result, ts)\n\n\ndef test_getitem_setitem_datetimeindex():\n N = 50\n # testing with timezone, GH #2785\n rng = date_range(\"1/1/1990\", periods=N, freq=\"H\", tz=\"US/Eastern\")\n ts = Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04:00:00\"]\n expected = ts[4]\n assert result == expected\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\"] = 0\n result[\"1990-01-01 04:00:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = 0\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = ts[4:8]\n tm.assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04:00:00\"\n rb = \"1990-01-01 07:00:00\"\n # GH#18435 strings get a pass from tzawareness compat\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n lb = \"1990-01-01 04:00:00-0500\"\n rb = \"1990-01-01 07:00:00-0500\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n # repeat all the above with naive datetimes\n result = ts[datetime(1990, 1, 1, 4)]\n expected = 
ts[4]\n assert result == expected\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 4)] = 0\n result[datetime(1990, 1, 1, 4)] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = 0\n result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = ts[4:8]\n tm.assert_series_equal(result, ts)\n\n lb = datetime(1990, 1, 1, 4)\n rb = datetime(1990, 1, 1, 7)\n msg = r\"Invalid comparison between dtype=datetime64\\[ns, US/Eastern\\] and datetime\"\n with pytest.raises(TypeError, match=msg):\n # tznaive vs tzaware comparison is invalid\n # see GH#18376, GH#18162\n ts[(ts.index >= lb) & (ts.index <= rb)]\n\n lb = pd.Timestamp(datetime(1990, 1, 1, 4)).tz_localize(rng.tzinfo)\n rb = pd.Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo)\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts[ts.index[4]]\n expected = ts[4]\n assert result == expected\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result.iloc[4:8] = ts.iloc[4:8]\n tm.assert_series_equal(result, ts)\n\n # also test partial date slicing\n result = ts[\"1990-01-02\"]\n expected = ts[24:48]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-02\"] = 0\n result[\"1990-01-02\"] = ts[24:48]\n tm.assert_series_equal(result, ts)\n\n\ndef test_getitem_setitem_periodindex():\n from pandas import period_range\n\n N = 50\n rng = period_range(\"1/1/1990\", periods=N, freq=\"H\")\n ts = Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04\"]\n expected = ts[4]\n assert result == expected\n\n result = ts.copy()\n result[\"1990-01-01 04\"] = 0\n result[\"1990-01-01 04\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04\":\"1990-01-01 07\"]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = 0\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = ts[4:8]\n tm.assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04\"\n rb = \"1990-01-01 07\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n # GH 2782\n result = ts[ts.index[4]]\n expected = ts[4]\n assert result == expected\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result.iloc[4:8] = ts.iloc[4:8]\n tm.assert_series_equal(result, ts)\n\n\ndef test_datetime_indexing():\n\n index = date_range(\"1/1/2000\", \"1/7/2000\")\n index = index.repeat(3)\n\n s = Series(len(index), index=index)\n stamp = Timestamp(\"1/8/2000\")\n\n with pytest.raises(KeyError, match=re.escape(repr(stamp))):\n s[stamp]\n s[stamp] = 0\n assert s[stamp] == 0\n\n # not monotonic\n s = Series(len(index), index=index)\n s = s[::-1]\n\n with pytest.raises(KeyError, match=re.escape(repr(stamp))):\n s[stamp]\n s[stamp] = 0\n assert s[stamp] == 0\n\n\n\"\"\"\ntest duplicates in time series\n\"\"\"\n\n\[email protected]\ndef dups():\n dates = [\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n 
datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n ]\n\n return Series(np.random.randn(len(dates)), index=dates)\n\n\ndef test_constructor(dups):\n assert isinstance(dups, Series)\n assert isinstance(dups.index, DatetimeIndex)\n\n\ndef test_is_unique_monotonic(dups):\n assert not dups.index.is_unique\n\n\ndef test_index_unique(dups):\n uniques = dups.index.unique()\n expected = DatetimeIndex(\n [\n datetime(2000, 1, 2),\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n ]\n )\n assert uniques.dtype == \"M8[ns]\" # sanity\n tm.assert_index_equal(uniques, expected)\n assert dups.index.nunique() == 4\n\n # #2563\n assert isinstance(uniques, DatetimeIndex)\n\n dups_local = dups.index.tz_localize(\"US/Eastern\")\n dups_local.name = \"foo\"\n result = dups_local.unique()\n expected = DatetimeIndex(expected, name=\"foo\")\n expected = expected.tz_localize(\"US/Eastern\")\n assert result.tz is not None\n assert result.name == \"foo\"\n tm.assert_index_equal(result, expected)\n\n # NaT, note this is excluded\n arr = [1370745748 + t for t in range(20)] + [iNaT]\n idx = DatetimeIndex(arr * 3)\n tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))\n assert idx.nunique() == 20\n assert idx.nunique(dropna=False) == 21\n\n arr = [\n Timestamp(\"2013-06-09 02:42:28\") + timedelta(seconds=t) for t in range(20)\n ] + [NaT]\n idx = DatetimeIndex(arr * 3)\n tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))\n assert idx.nunique() == 20\n assert idx.nunique(dropna=False) == 21\n\n\ndef test_duplicate_dates_indexing(dups):\n ts = dups\n\n uniques = ts.index.unique()\n for date in uniques:\n result = ts[date]\n\n mask = ts.index == date\n total = (ts.index == date).sum()\n expected = ts[mask]\n if total > 1:\n tm.assert_series_equal(result, expected)\n else:\n tm.assert_almost_equal(result, expected[0])\n\n cp = ts.copy()\n cp[date] = 0\n expected = Series(np.where(mask, 0, ts), index=ts.index)\n tm.assert_series_equal(cp, expected)\n\n key = datetime(2000, 1, 6)\n with pytest.raises(KeyError, match=re.escape(repr(key))):\n ts[key]\n\n # new index\n ts[datetime(2000, 1, 6)] = 0\n assert ts[datetime(2000, 1, 6)] == 0\n\n\ndef test_range_slice():\n idx = DatetimeIndex([\"1/1/2000\", \"1/2/2000\", \"1/2/2000\", \"1/3/2000\", \"1/4/2000\"])\n\n ts = Series(np.random.randn(len(idx)), index=idx)\n\n result = ts[\"1/2/2000\":]\n expected = ts[1:]\n tm.assert_series_equal(result, expected)\n\n result = ts[\"1/2/2000\":\"1/3/2000\"]\n expected = ts[1:4]\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_average_dup_values(dups):\n result = dups.groupby(level=0).mean()\n expected = dups.groupby(dups.index).mean()\n tm.assert_series_equal(result, expected)\n\n\ndef test_indexing_over_size_cutoff():\n import datetime\n\n # #1821\n\n old_cutoff = _index._SIZE_CUTOFF\n try:\n _index._SIZE_CUTOFF = 1000\n\n # create large list of non periodic datetime\n dates = []\n sec = datetime.timedelta(seconds=1)\n half_sec = datetime.timedelta(microseconds=500000)\n d = datetime.datetime(2011, 12, 5, 20, 30)\n n = 1100\n for i in range(n):\n dates.append(d)\n dates.append(d + sec)\n dates.append(d + sec + half_sec)\n dates.append(d + sec + sec + half_sec)\n d += 3 * sec\n\n # duplicate some values in the list\n duplicate_positions = np.random.randint(0, len(dates) - 1, 20)\n for p in duplicate_positions:\n dates[p + 1] = dates[p]\n\n df = DataFrame(\n np.random.randn(len(dates), 4), index=dates, columns=list(\"ABCD\")\n )\n\n pos = n * 3\n timestamp = df.index[pos]\n assert timestamp in 
df.index\n\n # it works!\n df.loc[timestamp]\n assert len(df.loc[[timestamp]]) > 0\n finally:\n _index._SIZE_CUTOFF = old_cutoff\n\n\ndef test_indexing_over_size_cutoff_period_index(monkeypatch):\n # GH 27136\n\n monkeypatch.setattr(_index, \"_SIZE_CUTOFF\", 1000)\n\n n = 1100\n idx = pd.period_range(\"1/1/2000\", freq=\"T\", periods=n)\n assert idx._engine.over_size_threshold\n\n s = pd.Series(np.random.randn(len(idx)), index=idx)\n\n pos = n - 1\n timestamp = idx[pos]\n assert timestamp in s.index\n\n # it works!\n s[timestamp]\n assert len(s.loc[[timestamp]]) > 0\n\n\ndef test_indexing_unordered():\n # GH 2437\n rng = date_range(start=\"2011-01-01\", end=\"2011-01-15\")\n ts = Series(np.random.rand(len(rng)), index=rng)\n ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])\n\n for t in ts.index:\n\n expected = ts[t]\n result = ts2[t]\n assert expected == result\n\n # GH 3448 (ranges)\n def compare(slobj):\n result = ts2[slobj].copy()\n result = result.sort_index()\n expected = ts[slobj]\n expected.index = expected.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n compare(slice(\"2011-01-01\", \"2011-01-15\"))\n compare(slice(\"2010-12-30\", \"2011-01-15\"))\n compare(slice(\"2011-01-01\", \"2011-01-16\"))\n\n # partial ranges\n compare(slice(\"2011-01-01\", \"2011-01-6\"))\n compare(slice(\"2011-01-06\", \"2011-01-8\"))\n compare(slice(\"2011-01-06\", \"2011-01-12\"))\n\n # single values\n result = ts2[\"2011\"].sort_index()\n expected = ts[\"2011\"]\n expected.index = expected.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n # diff freq\n rng = date_range(datetime(2005, 1, 1), periods=20, freq=\"M\")\n ts = Series(np.arange(len(rng)), index=rng)\n ts = ts.take(np.random.permutation(20))\n\n result = ts[\"2005\"]\n for t in result.index:\n assert t.year == 2005\n\n\ndef test_indexing():\n idx = date_range(\"2001-1-1\", periods=20, freq=\"M\")\n ts = Series(np.random.rand(len(idx)), index=idx)\n\n # getting\n\n # GH 3070, make sure semantics work on Series/Frame\n expected = ts[\"2001\"]\n expected.name = \"A\"\n\n df = DataFrame(dict(A=ts))\n with tm.assert_produces_warning(FutureWarning):\n # GH#36179 string indexing on rows for DataFrame deprecated\n result = df[\"2001\"][\"A\"]\n tm.assert_series_equal(expected, result)\n\n # setting\n ts[\"2001\"] = 1\n expected = ts[\"2001\"]\n expected.name = \"A\"\n\n df.loc[\"2001\", \"A\"] = 1\n\n with tm.assert_produces_warning(FutureWarning):\n # GH#36179 string indexing on rows for DataFrame deprecated\n result = df[\"2001\"][\"A\"]\n tm.assert_series_equal(expected, result)\n\n # GH3546 (not including times on the last day)\n idx = date_range(start=\"2013-05-31 00:00\", end=\"2013-05-31 23:00\", freq=\"H\")\n ts = Series(range(len(idx)), index=idx)\n expected = ts[\"2013-05\"]\n tm.assert_series_equal(expected, ts)\n\n idx = date_range(start=\"2013-05-31 00:00\", end=\"2013-05-31 23:59\", freq=\"S\")\n ts = Series(range(len(idx)), index=idx)\n expected = ts[\"2013-05\"]\n tm.assert_series_equal(expected, ts)\n\n idx = [\n Timestamp(\"2013-05-31 00:00\"),\n Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999)),\n ]\n ts = Series(range(len(idx)), index=idx)\n expected = ts[\"2013\"]\n tm.assert_series_equal(expected, ts)\n\n # GH14826, indexing with a seconds resolution string / datetime object\n df = DataFrame(\n np.random.rand(5, 5),\n columns=[\"open\", \"high\", \"low\", \"close\", \"volume\"],\n index=date_range(\"2012-01-02 18:01:00\", periods=5, tz=\"US/Central\", freq=\"s\"),\n )\n expected = 
df.loc[[df.index[2]]]\n\n # this is a single date, so will raise\n with pytest.raises(KeyError, match=r\"^'2012-01-02 18:01:02'$\"):\n df[\"2012-01-02 18:01:02\"]\n msg = r\"Timestamp\\('2012-01-02 18:01:02-0600', tz='US/Central', freq='S'\\)\"\n with pytest.raises(KeyError, match=msg):\n df[df.index[2]]\n\n\n\"\"\"\ntest NaT support\n\"\"\"\n\n\ndef test_setitem_tuple_with_datetimetz():\n # GH 20441\n arr = date_range(\"2017\", periods=4, tz=\"US/Eastern\")\n index = [(0, 1), (0, 2), (0, 3), (0, 4)]\n result = Series(arr, index=index)\n expected = result.copy()\n result[(0, 1)] = np.nan\n expected.iloc[0] = np.nan\n tm.assert_series_equal(result, expected)\n"
] |
[
[
"pandas.Series",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.read_parquet",
"numpy.random.randn",
"pandas._testing.ensure_clean_dir",
"pandas._testing.assert_frame_equal",
"pandas.CategoricalDtype",
"numpy.arange",
"pandas.io.parquet.to_parquet",
"pandas.compat._optional.VERSIONS.get",
"pandas.Categorical",
"pandas.option_context",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"numpy.array",
"pandas.timedelta_range",
"pandas.period_range",
"pandas.io.parquet.read_parquet",
"pandas._testing.ensure_clean",
"pandas.Timestamp",
"pandas.io.parquet.get_engine",
"pandas.util._test_decorators.skip_if_no"
],
[
"pandas._testing.assert_almost_equal",
"pandas.Series",
"pandas.DataFrame",
"numpy.dtype",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"numpy.where",
"numpy.arange",
"pandas.DatetimeIndex",
"pandas._testing.assert_series_equal",
"pandas._libs.tslibs.timezones.dateutil_gettz",
"pandas._testing.assert_index_equal",
"pandas.concat",
"pandas._testing.assert_produces_warning",
"numpy.random.rand",
"pandas.date_range",
"pandas.period_range",
"numpy.random.permutation",
"pandas.Timestamp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
johncolezhang/DeepKE
|
[
"ea4552ec42cb003a835f00fc14fb454f9a9a7183",
"ea4552ec42cb003a835f00fc14fb454f9a9a7183"
] |
[
"src/deepke/relation_extraction/few_shot/dataset/processor.py",
"example/re/document/predict.py"
] |
[
"import csv\nimport pickle \nimport os\nimport logging\nfrom tqdm import tqdm, trange\nfrom torch.utils.data import TensorDataset\nimport torch.nn.functional as F\nimport numpy as np\nimport torch\nfrom collections import OrderedDict\n\nfrom transformers.utils.dummy_tokenizers_objects import BertTokenizerFast\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', \n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n# 这就是包内引用吗\nimport json\nimport re\n\nfrom transformers import AutoTokenizer\n\nkeyword_files = [\"keyword_train.txt\", \"keyword_dev.txt\", \"keyword_test.txt\"]\n\ndef tokenize(text, tokenizer):\n # berts tokenize ways\n # tokenize the [unused12345678910]\n D = [f\"[unused{i}]\" for i in range(10)]\n textraw = [text]\n for delimiter in D:\n ntextraw = []\n for i in range(len(textraw)):\n t = textraw[i].split(delimiter)\n for j in range(len(t)):\n ntextraw += [t[j]]\n if j != len(t)-1:\n ntextraw += [delimiter]\n textraw = ntextraw\n text = []\n for t in textraw:\n if t in D:\n text += [t]\n else:\n tokens = tokenizer.tokenize(t, add_special_tokens=False)\n for tok in tokens:\n text += [tok]\n\n for idx, t in enumerate(text):\n if idx + 3 < len(text) and t == \"[\" and text[idx+1] == \"[UNK]\" and text[idx+2] == \"]\":\n text = text[:idx] + [\"[MASK]\"] + text[idx+3:]\n\n return text\n\nn_class = 1\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None, text_c=None, entity=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.text_c = text_c\n self.label = label\n self.entity = entity\n\n\nclass InputExampleSST2(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None, text_c=None, entity=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeaturesSST2(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, attention_mask, token_type_ids, label_id):\n self.input_ids = input_ids\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.label_id = label_id\n\n\nclass InputExampleWiki80(object):\n \"\"\"A single training/test example for span pair classification.\"\"\"\n\n def __init__(self, guid, sentence, span1, span2, ner1, ner2, label):\n self.guid = guid\n self.sentence = sentence\n self.span1 = span1\n self.span2 = span2\n self.ner1 = ner1\n self.ner2 = ner2\n self.label = label\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id, entity=None):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.entity = entity\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\nclass Sst2Processor(DataProcessor):\n \"\"\"Processor for the SST-2 data set (GLUE version).\"\"\"\n\n def __init__(self, data_dir, a):\n super().__init__()\n self.data_dir = data_dir\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"sentence\"].numpy().decode(\"utf-8\"),\n None,\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training, dev and test sets.\"\"\"\n examples = []\n text_index = 0\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[text_index]\n label = line[1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, text_c=None, label=label))\n return examples\n\n\n\nclass relossProcessor(DataProcessor): #bert_s\n def __init__(self, data_path=\"data\", use_prompt=False):\n def is_speaker(a):\n a = a.split()\n return len(a) == 2 and a[0] == \"speaker\" and 
a[1].isdigit()\n \n # replace the speaker with [unused] token\n def rename(d, x, y):\n d = d.replace(\"’\",\"'\")\n d = d.replace(\"im\",\"i\")\n d = d.replace(\"...\",\".\")\n unused = [\"[unused1]\", \"[unused2]\"]\n a = []\n if is_speaker(x):\n a += [x]\n else:\n a += [None]\n if x != y and is_speaker(y):\n a += [y]\n else:\n a += [None]\n for i in range(len(a)):\n if a[i] is None:\n continue\n d = d.replace(a[i] + \":\", unused[i] + \" :\")\n if x == a[i]:\n x = unused[i]\n if y == a[i]:\n y = unused[i]\n return d, x, y\n \n self.D = [[], [], []]\n for sid in range(3):\n # 分成三个数据集\n with open(data_path + \"/\"+[\"train.json\", \"dev.json\", \"test.json\"][sid], \"r\", encoding=\"utf8\") as f:\n data = json.load(f)\n for i in range(len(data)):\n for j in range(len(data[i][1])):\n rid = []\n for k in range(36):\n if k+1 in data[i][1][j][\"rid\"]:\n rid += [1]\n else:\n rid += [0]\n d, h, t = rename(' '.join(data[i][0]).lower(), data[i][1][j][\"x\"].lower(), data[i][1][j][\"y\"].lower())\n prompt = f\"what is the relation between {h} and {t} ? {t} is the [MASK] {h} .\"\n d = [\n prompt + d,\n h,\n t,\n rid,\n t\n ]\n self.D[sid] += [d]\n logger.info(str(len(self.D[0])) + \",\" + str(len(self.D[1])) + \",\" + str(len(self.D[2])))\n \n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self.D[0], \"train\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self.D[2], \"test\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self.D[1], \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [str(x) for x in range(36)]\n\n def _create_examples(self, data, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, d) in enumerate(data):\n guid = \"%s-%s\" % (set_type, i)\n examples.append(InputExample(guid=guid, text_a=data[i][0], text_b=data[i][1], label=data[i][3], text_c=data[i][2], entity=data[i][4]))\n \n return examples\n\n\nclass bertProcessor(DataProcessor): #bert_s\n def __init__(self, data_path=\"data\", use_prompt=False):\n def is_speaker(a):\n a = a.split()\n return len(a) == 2 and a[0] == \"speaker\" and a[1].isdigit()\n \n # replace the speaker with [unused] token\n def rename(d, x, y):\n d = d.replace(\"’\",\"'\")\n d = d.replace(\"im\",\"i\")\n d = d.replace(\"...\",\".\")\n unused = [\"[unused1]\", \"[unused2]\"]\n a = []\n if is_speaker(x):\n a += [x]\n else:\n a += [None]\n if x != y and is_speaker(y):\n a += [y]\n else:\n a += [None]\n for i in range(len(a)):\n if a[i] is None:\n continue\n d = d.replace(a[i] + \":\", unused[i] + \" :\")\n if x == a[i]:\n x = unused[i]\n if y == a[i]:\n y = unused[i]\n return d, x, y\n \n self.D = [[], [], []]\n for sid in range(3):\n # 分成三个数据集\n with open(data_path + \"/\"+[\"train.json\", \"dev.json\", \"test.json\"][sid], \"r\", encoding=\"utf8\") as f:\n data = json.load(f)\n sample_idx = 0\n for i in range(len(data)):\n for j in range(len(data[i][1])):\n rid = []\n for k in range(36):\n if k+1 in data[i][1][j][\"rid\"]:\n rid += [1]\n else:\n rid += [0]\n d, h, t = rename(' '.join(data[i][0]).lower(), data[i][1][j][\"x\"].lower(), data[i][1][j][\"y\"].lower())\n if use_prompt:\n prompt = f\"{h} is the [MASK] {t} .\"\n else:\n prompt = f\"what is the relation between {h} and {t} ?\"\n sample_idx += 1\n d = [\n prompt + d,\n h,\n t,\n rid,\n ]\n self.D[sid] += [d]\n logger.info(str(len(self.D[0])) + \",\" + 
str(len(self.D[1])) + \",\" + str(len(self.D[2])))\n \n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self.D[0], \"train\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self.D[2], \"test\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self.D[1], \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [str(x) for x in range(36)]\n\n def _create_examples(self, data, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, d) in enumerate(data):\n guid = \"%s-%s\" % (set_type, i)\n examples.append(InputExample(guid=guid, text_a=data[i][0], text_b=data[i][1], label=data[i][3], text_c=data[i][2]))\n \n return examples\n\n\nclass ptuneProcessor(DataProcessor): #bert_s\n def __init__(self, data_path=\"data\", use_prompt=False, ptune_k=6):\n def is_speaker(a):\n a = a.split()\n return len(a) == 2 and a[0] == \"speaker\" and a[1].isdigit()\n \n # replace the speaker with [unused] token\n def rename(d, x, y):\n d = d.replace(\"’\",\"'\")\n d = d.replace(\"im\",\"i\")\n d = d.replace(\"...\",\".\")\n unused = [\"[unused1]\", \"[unused2]\"]\n a = []\n if is_speaker(x):\n a += [x]\n else:\n a += [None]\n if x != y and is_speaker(y):\n a += [y]\n else:\n a += [None]\n for i in range(len(a)):\n if a[i] is None:\n continue\n d = d.replace(a[i] + \":\", unused[i] + \" :\")\n if x == a[i]:\n x = unused[i]\n if y == a[i]:\n y = unused[i]\n return d, x, y\n \n self.D = [[], [], []]\n \"\"\"\n TODO, add new samples, every sample if there is a trigger then mask trigger and replace the origin mask with right token,\n if no trigger in the sentence, random mask a word in the sentence and replace the origin mask with the right token.\n \n \"\"\"\n for sid in range(3):\n # 分成三个数据集\n with open(data_path + \"/\"+[\"train.json\", \"dev.json\", \"test.json\"][sid], \"r\", encoding=\"utf8\") as f:\n data = json.load(f)\n sample_idx = 0\n for i in range(len(data)):\n for j in range(len(data[i][1])):\n rid = []\n for k in range(36):\n if k+1 in data[i][1][j][\"rid\"]:\n rid += [1]\n else:\n rid += [0]\n d, h, t = rename(' '.join(data[i][0]).lower(), data[i][1][j][\"x\"].lower(), data[i][1][j][\"y\"].lower())\n unused_word = \" \".join([f\"[unused{i}]\" for i in range(3, ptune_k+3)])\n # st 3,4 ; ed 5,6\n st = [f\"[unused{i}]\" for i in range(3,5)]\n ed = [f\"[unused{i}]\" for i in range(5,7)]\n # 789 as prompt\n prompt = f\"[sub] {st[0]} {h} {st[1]} [sub] [unused7] [unused8] [MASK] [unused9] [obj] {ed[0]} {t} {ed[1]} [obj].\"\n \n # for temp_i in range(10):\n # d = d.replace(f\"speaker {temp_i}:\", f\"[speaker{temp_i}]\")\n\n sample_idx += 1\n sample = [\n prompt + d,\n h,\n t,\n rid,\n ]\n self.D[sid] += [sample]\n # multi labels, add more data in the training set\n if i == 0:\n for idx,trigger in enumerate(data[i][1][j]['t']):\n if trigger != \"\":\n label_token = f\"[class{data[i][1][j]['rid'][idx]+1}]\"\n prompt = prompt.replace(\"[MASK]\", label_token)\n # first assume the model predict the same output in the trigger, ...\n d = d.replace(trigger, \"[MASK]\", 1)\n sample = [\n prompt + d,\n h,\n t,\n rid,\n ]\n self.D[sid] += [sample]\n logger.info(str(len(self.D[0])) + \",\" + str(len(self.D[1])) + \",\" + str(len(self.D[2])))\n \n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self.D[0], \"train\")\n\n def 
get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self.D[2], \"test\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self.D[1], \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [str(x) for x in range(36)]\n\n def _create_examples(self, data, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, d) in enumerate(data):\n guid = \"%s-%s\" % (set_type, i)\n examples.append(InputExample(guid=guid, text_a=data[i][0], text_b=data[i][1], label=data[i][3], text_c=data[i][2]))\n \n return examples\n\n\nclass wiki80Processor(DataProcessor):\n \"\"\"Processor for the TACRED data set.\"\"\"\n def __init__(self, data_path, use_prompt):\n super().__init__()\n self.data_dir = data_path\n\n @classmethod\n def _read_json(cls, input_file):\n data = []\n with open(input_file, \"r\", encoding='utf-8') as reader:\n all_lines = reader.readlines()\n for line in all_lines:\n ins = eval(line)\n data.append(ins)\n return data\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.txt\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"val.txt\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.txt\")), \"test\")\n\n def get_labels(self, negative_label=\"no_relation\"):\n data_dir = self.data_dir\n \"\"\"See base class.\"\"\"\n # if 'k-shot' in self.data_dir:\n # data_dir = os.path.abspath(os.path.join(self.data_dir, \"../..\"))\n # else:\n # data_dir = self.data_dir\n with open(os.path.join(data_dir,'rel2id.json'), \"r\", encoding='utf-8') as reader:\n re2id = json.load(reader)\n return re2id\n\n\n def _create_examples(self, dataset, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for example in dataset:\n sentence = example['token']\n examples.append(InputExampleWiki80(guid=None,\n sentence=sentence,\n # maybe some bugs here, I don't -1\n span1=(example['h']['pos'][0], example['h']['pos'][1]),\n span2=(example['t']['pos'][0], example['t']['pos'][1]),\n ner1=None,\n ner2=None,\n label=example['relation']))\n return examples\n\ndef convert_examples_to_features_for_loss(examples, max_seq_length, tokenizer):\n print(\"#examples\", len(examples))\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenize(example.text_a, tokenizer)\n tokens_b = tokenize(example.text_b, tokenizer)\n tokens_c = tokenize(example.text_c, tokenizer)\n\n # t_tokens = tokenize(example.entity, tokenizer)\n t_tokens = tokenizer(example.entity, add_special_tokens=False)[\"input_ids\"]\n\n _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)\n tokens_b = tokens_b + [\"[SEP]\"] + tokens_c\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = example.label \n\n len_t = len(t_tokens)\n normal_input_ids = input_ids[:]\n for idx, input_id in enumerate(input_ids):\n if idx + len_t < len(input_ids) and input_ids[idx:idx+len_t] == t_tokens:\n # [MASK] id = 103\n for j in range(len_t):\n input_ids[j+idx] = 103\n\n # append 1 sample with 2 input\n features.append(\n [InputFeatures(\n input_ids=normal_input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n entity = t_tokens\n ),\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n entity = t_tokens\n )]\n )\n \n print('#features', len(features))\n return features\n\ndef convert_examples_to_features_normal(examples, max_seq_length, tokenizer):\n print(\"#examples\", len(examples))\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenize(example.text_a, tokenizer)\n tokens_b = tokenize(example.text_b, tokenizer)\n tokens_c = tokenize(example.text_c, tokenizer)\n\n\n _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)\n tokens_b = tokens_b + [\"[SEP]\"] + tokens_c\n \n \n inputs = tokenizer(\n example.text_a,\n example.text_b + tokenizer.sep_token + example.text_c,\n truncation=\"longest_first\",\n max_length=max_seq_length,\n padding=\"max_length\",\n add_special_tokens=True\n )\n\n # tokens = []\n # segment_ids = []\n # tokens.append(\"[CLS]\")\n # segment_ids.append(0)\n # for token in tokens_a:\n # tokens.append(token)\n # segment_ids.append(0)\n # tokens.append(\"[SEP]\")\n # segment_ids.append(0)\n\n # for token in tokens_b:\n # tokens.append(token)\n # segment_ids.append(1)\n # tokens.append(\"[SEP]\")\n # segment_ids.append(1)\n\n # input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # # tokens are attended to.\n # input_mask = [1] * len(input_ids)\n\n # # Zero-pad up to the sequence length.\n # while len(input_ids) < max_seq_length:\n # input_ids.append(0)\n # input_mask.append(0)\n # segment_ids.append(0)\n\n # assert(inputs['input_ids'] == input_ids), print(inputs['input_ids'])\n\n # assert len(input_ids) == max_seq_length\n # assert len(input_mask) == max_seq_length\n # assert len(segment_ids) == max_seq_length\n\n label_id = example.label \n\n if ex_index == 0:\n logger.info(f\"input_text : {tokens_a} {tokens_b} {tokens_c}\")\n logger.info(f\"input_ids : {inputs['input_ids']}\")\n logger.info(f\"token_type_ids : {inputs['token_type_ids']}\")\n \n # inputs = {}\n # inputs['input_ids'] = input_ids\n # inputs['attention_mask'] = input_mask\n # inputs['token_type_ids'] = segment_ids\n\n # append 1 sample with 2 input\n features.append(\n InputFeatures(\n input_ids=inputs['input_ids'],\n input_mask=inputs['attention_mask'],\n segment_ids=inputs['token_type_ids'],\n label_id=label_id,\n )\n )\n \n print('#features', len(features))\n return features\n\n\n\ndef convert_examples_to_features(examples, max_seq_length, tokenizer, args, rel2id):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n save_file = \"data/cached_wiki80.pkl\"\n mode = \"text\"\n\n num_tokens = 0\n num_fit_examples = 0\n num_shown_examples = 0\n instances = []\n \n \n use_bert = \"BertTokenizer\" in tokenizer.__class__.__name__\n use_gpt = \"GPT\" in tokenizer.__class__.__name__\n \n assert not (use_bert and use_gpt), \"model cannot be gpt and bert together\"\n\n if False:\n with open(file=save_file, mode='rb') as fr:\n instances = pickle.load(fr)\n print('load preprocessed data from {}.'.format(save_file))\n\n else:\n print('loading..')\n for (ex_index, example) in enumerate(examples):\n \n\n \"\"\"\n the relation between SUBJECT and OBJECT is .\n \n \"\"\"\n\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n tokens = []\n SUBJECT_START = \"[subject_start]\"\n SUBJECT_END = \"[subject_end]\"\n OBJECT_START = \"[object_start]\"\n OBJECT_END = \"[object_end]\"\n\n\n if mode.startswith(\"text\"):\n for i, token in enumerate(example.sentence):\n if i == example.span1[0]:\n tokens.append(SUBJECT_START)\n if i == example.span2[0]:\n tokens.append(OBJECT_START)\n # for sub_token in tokenizer.tokenize(token):\n # tokens.append(sub_token)\n if i == example.span1[1]:\n tokens.append(SUBJECT_END)\n if i == example.span2[1]:\n tokens.append(OBJECT_END)\n\n tokens.append(token)\n\n SUBJECT = \" \".join(example.sentence[example.span1[0]: example.span1[1]])\n OBJECT = \" \".join(example.sentence[example.span2[0]: example.span2[1]])\n SUBJECT_ids = tokenizer(\" \"+SUBJECT, add_special_tokens=False)['input_ids']\n OBJECT_ids = tokenizer(\" \"+OBJECT, add_special_tokens=False)['input_ids']\n \n if use_gpt:\n if args.CT_CL:\n prompt = f\"[T1] [T2] [T3] [sub] {OBJECT} [sub] [T4] [obj] {SUBJECT} [obj] [T5] {tokenizer.cls_token}\"\n else:\n prompt = f\"The relation between [sub] {SUBJECT} [sub] and [obj] {OBJECT} [obj] is {tokenizer.cls_token} .\"\n else:\n # add prompt [T_n] and entity marker [obj] to enrich the context.\n prompt = f\"[sub] {SUBJECT} [sub] {tokenizer.mask_token} [obj] {OBJECT} [obj] .\"\n \n if ex_index == 0:\n input_text = \" \".join(tokens)\n logger.info(f\"input text : {input_text}\")\n logger.info(f\"prompt : {prompt}\")\n logger.info(f\"label : {example.label}\")\n inputs = tokenizer(\n prompt,\n \" \".join(tokens),\n 
truncation=\"longest_first\",\n max_length=max_seq_length,\n padding=\"max_length\",\n add_special_tokens=True\n )\n if use_gpt: cls_token_location = inputs['input_ids'].index(tokenizer.cls_token_id) \n \n # find the subject and object tokens, choose the first ones\n sub_st = sub_ed = obj_st = obj_ed = -1\n for i in range(len(inputs['input_ids'])):\n if sub_st == -1 and inputs['input_ids'][i:i+len(SUBJECT_ids)] == SUBJECT_ids:\n sub_st = i\n sub_ed = i + len(SUBJECT_ids)\n if obj_st == -1 and inputs['input_ids'][i:i+len(OBJECT_ids)] == OBJECT_ids:\n obj_st = i\n obj_ed = i + len(OBJECT_ids)\n \n assert sub_st != -1 and obj_st != -1\n\n\n num_tokens += sum(inputs['attention_mask'])\n\n\n if sum(inputs['attention_mask']) > max_seq_length:\n pass\n # tokens = tokens[:max_seq_length]\n else:\n num_fit_examples += 1\n\n x = OrderedDict()\n x['input_ids'] = inputs['input_ids']\n if use_bert: x['token_type_ids'] = inputs['token_type_ids']\n x['attention_mask'] = inputs['attention_mask']\n x['label'] = rel2id[example.label]\n if use_gpt: x['cls_token_location'] = cls_token_location\n x['so'] =[sub_st, sub_ed, obj_st, obj_ed]\n\n instances.append(x)\n\n\n with open(file=save_file, mode='wb') as fw:\n pickle.dump(instances, fw)\n print('Finish save preprocessed data to {}.'.format( save_file))\n\n input_ids = [o['input_ids'] for o in instances]\n attention_mask = [o['attention_mask'] for o in instances]\n if use_bert: token_type_ids = [o['token_type_ids'] for o in instances]\n if use_gpt: cls_idx = [o['cls_token_location'] for o in instances]\n labels = [o['label'] for o in instances]\n so = torch.tensor([o['so'] for o in instances])\n\n\n input_ids = torch.tensor(input_ids)\n attention_mask = torch.tensor(attention_mask)\n if use_gpt: cls_idx = torch.tensor(cls_idx)\n if use_bert: token_type_ids = torch.tensor(token_type_ids)\n labels = torch.tensor(labels)\n\n logger.info(\"Average #tokens: %.2f\" % (num_tokens * 1.0 / len(examples)))\n logger.info(\"%d (%.2f %%) examples can fit max_seq_length = %d\" % (num_fit_examples,\n num_fit_examples * 100.0 / len(examples), max_seq_length))\n\n if use_gpt:\n dataset = TensorDataset(input_ids, attention_mask, cls_idx, labels)\n elif use_bert:\n dataset = TensorDataset(input_ids, attention_mask, token_type_ids, labels, so)\n else:\n dataset = TensorDataset(input_ids, attention_mask, labels)\n \n return dataset\n\n\ndef convert_examples_to_feature_sst2(examples, max_seq_length, tokenizer, args, rel2id):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n save_file = \"data/cached_wiki80.pkl\"\n mode = \"text\"\n\n num_tokens = 0\n num_fit_examples = 0\n num_shown_examples = 0\n instances = []\n\n\n if False:\n with open(file=save_file, mode='rb') as fr:\n instances = pickle.load(fr)\n print('load preprocessed data from {}.'.format(save_file))\n\n else:\n print('loading..')\n for (ex_index, example) in enumerate(examples):\n try:\n prompt = f\"[T1] [T2] {tokenizer.mask_token} .\"\n inputs = tokenizer(\n example.text_a + prompt,\n truncation=\"longest_first\",\n max_length=max_seq_length,\n padding=\"max_length\",\n add_special_tokens=True\n )\n\n x = OrderedDict()\n x['input_ids'] = inputs['input_ids']\n x['attention_mask'] = inputs['attention_mask']\n if \"roberta\" not in args.model_name_or_path:\n x['token_type_ids'] = inputs['token_type_ids']\n x['label'] = int(example.label)\n\n\n instances.append(x)\n\n except Exception as e:\n print(e)\n\n with open(file=save_file, mode='wb') as fw:\n pickle.dump(instances, fw)\n print('Finish save 
preprocessed data to {}.'.format( save_file))\n\n input_ids = [o['input_ids'] for o in instances]\n attention_mask = [o['attention_mask'] for o in instances]\n\n if \"roberta\" not in args.model_name_or_path:\n token_type_ids = [o['token_type_ids'] for o in instances]\n token_type_ids = torch.tensor(token_type_ids)\n labels = [o['label'] for o in instances]\n\n\n input_ids = torch.tensor(input_ids)\n attention_mask = torch.tensor(attention_mask)\n labels = torch.tensor(labels)\n\n logger.info(\"Average #tokens: %.2f\" % (num_tokens * 1.0 / len(examples)))\n logger.info(\"%d (%.2f %%) examples can fit max_seq_length = %d\" % (num_fit_examples,\n num_fit_examples * 100.0 / len(examples), max_seq_length))\n if \"roberta\" not in args.model_name_or_path:\n dataset = TensorDataset(input_ids, attention_mask, token_type_ids, labels)\n else:\n dataset = TensorDataset(input_ids, attention_mask, labels)\n\n return dataset\n\n\n\ndef _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_length):\n \"\"\"Truncates a sequence tuple in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b) + len(tokens_c)\n if total_length <= max_length:\n break\n if len(tokens_a) >= len(tokens_b) and len(tokens_a) >= len(tokens_c):\n tokens_a.pop()\n elif len(tokens_b) >= len(tokens_a) and len(tokens_b) >= len(tokens_c):\n tokens_b.pop()\n else:\n tokens_c.pop() \n\ndef get_dataset(mode, args, tokenizer, processor):\n\n if mode == \"train\":\n examples = processor.get_train_examples(args.data_dir)\n elif mode == \"dev\":\n examples = processor.get_dev_examples(args.data_dir)\n elif mode == \"test\":\n examples = processor.get_test_examples(args.data_dir)\n else:\n raise Exception(\"mode must be in choice [trian, dev, test]\")\n gpt_mode = \"wiki80\" in args.task_name\n \n if \"wiki80\" in args.task_name:\n # normal relation extraction task\n dataset = convert_examples_to_features(\n examples, args.max_seq_length, tokenizer, args, processor.get_labels()\n )\n return dataset\n elif \"sst\" in args.task_name:\n dataset = convert_examples_to_feature_sst2(\n examples, args.max_seq_length, tokenizer, args, None\n )\n return dataset\n else:\n train_features = convert_examples_to_features_normal(\n examples, args.max_seq_length, tokenizer\n )\n\n\n input_ids = []\n input_mask = []\n segment_ids = []\n label_id = []\n entity_id = []\n\n for f in train_features:\n input_ids.append(f.input_ids)\n input_mask.append(f.input_mask)\n segment_ids.append(f.segment_ids)\n label_id.append(f.label_id) \n\n all_input_ids = torch.tensor(input_ids, dtype=torch.long)\n all_input_mask = torch.tensor(input_mask, dtype=torch.long)\n all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)\n all_label_ids = torch.tensor(label_id, dtype=torch.float)\n \n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n \n\n return train_data\n\n\ndef collate_fn(batch):\n pass\n\n\n\n\nprocessors = {\"normal\": bertProcessor, \"reloss\": relossProcessor , \"ptune\": ptuneProcessor, \"wiki80\": wiki80Processor,\n \"sst-2\": Sst2Processor\n}",
"import os\nimport time\nimport hydra\nfrom hydra.utils import get_original_cwd\nimport numpy as np\nimport torch\n\nimport ujson as json\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer\nfrom transformers.optimization import AdamW, get_linear_schedule_with_warmup\n\nfrom deepke.relation_extraction.document import *\n\n\ndef report(args, model, features):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False)\n preds = []\n for batch in dataloader:\n model.eval()\n\n inputs = {'input_ids': batch[0].to(device),\n 'attention_mask': batch[1].to(device),\n 'entity_pos': batch[3],\n 'hts': batch[4],\n }\n\n with torch.no_grad():\n pred = model(**inputs)\n pred = pred.cpu().numpy()\n pred[np.isnan(pred)] = 0\n preds.append(pred)\n\n preds = np.concatenate(preds, axis=0).astype(np.float32)\n preds = to_official(args, preds, features)\n return preds\n\n\n\n\[email protected](config_path=\"conf/config.yaml\")\ndef main(cfg):\n cwd = get_original_cwd()\n os.chdir(cwd)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n config = AutoConfig.from_pretrained(\n cfg.config_name if cfg.config_name else cfg.model_name_or_path,\n num_labels=cfg.num_class,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n cfg.tokenizer_name if cfg.tokenizer_name else cfg.model_name_or_path,\n )\n\n Dataset = ReadDataset(cfg, cfg.dataset, tokenizer, cfg.max_seq_length)\n\n\n test_file = os.path.join(cfg.data_dir, cfg.test_file)\n\n test_features = Dataset.read(test_file)\n\n model = AutoModel.from_pretrained(\n cfg.model_name_or_path,\n from_tf=bool(\".ckpt\" in cfg.model_name_or_path),\n config=config,\n )\n\n config.cls_token_id = tokenizer.cls_token_id\n config.sep_token_id = tokenizer.sep_token_id\n config.transformer_type = cfg.transformer_type\n\n set_seed(cfg)\n model = DocREModel(config, cfg, model, num_labels=cfg.num_labels)\n\n\n model.load_state_dict(torch.load(cfg.load_path)['checkpoint'])\n model.to(device)\n T_features = test_features # Testing on the test set\n #T_score, T_output = evaluate(cfg, model, T_features, tag=\"test\")\n pred = report(cfg, model, T_features)\n with open(\"./result.json\", \"w\") as fh:\n json.dump(pred, fh)\n\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"torch.utils.data.TensorDataset",
"torch.tensor"
],
[
"torch.load",
"numpy.isnan",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aFewThings/personal-snippets
|
[
"e404bc7599f1fbd24e5c2be612ec192456989f9b",
"e404bc7599f1fbd24e5c2be612ec192456989f9b",
"e404bc7599f1fbd24e5c2be612ec192456989f9b"
] |
[
"python, pytorch/repeat, expand.py",
"python, pytorch/weight sharing (use same layer).py",
"python, pytorch/mask.py"
] |
[
"import torch\n\nx = torch.randn((1, 5, 5))\nprint(x)\n\nx = x.unsqueeze(1)\nprint(x)\n# expand와 repeat은 특정 차원을 복제해서 반복시킨다. \nx = x.expand(-1, 2, -1, -1) # expand는 데이터 복사가 없음\n#x = x.repeat(1, 2, 1, 1) repeat은 데이터를 복사함\nprint(x)",
"import torch\nimport copy\n\nclass DynamicNet(torch.nn.Module):\n def __init__(self):\n super(DynamicNet, self).__init__()\n self.lin1 = torch.nn.Linear(1, 1, bias=False)\n self.lin2 = copy.deepcopy(self.lin1)\n\n def forward(self, x):\n out = self.lin1(x)\n out = self.lin2(out) # w2 * w1x\n\n return out\n\nnet = DynamicNet()\n\nx = torch.ones((1, 1)) # 1\nprint('x:', x)\n\nfor name, param in net.named_parameters():\n print(name, param)\n\ny_hat = net(x)\nprint('y_hat:', y_hat)\n\nfor para in net.parameters():\n print(para.grad) # None\n\ny_hat.backward()\n\nfor para in net.parameters():\n print(para.grad) # w1x + w2x\n",
"import torch\n\nsrc = torch.arange(9).float().reshape((3, 3))\nmask = torch.tensor([[True, True, True],\n [False, False, True],\n [True, False, True]])\nvalue = torch.randn(3, 3)\nsrc[mask] = value[mask]\nprint(src)"
] |
[
[
"torch.randn"
],
[
"torch.nn.Linear",
"torch.ones"
],
[
"torch.randn",
"torch.arange",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ACALJJ32/MMEditing_learning
|
[
"d3e45133c4c6d42e7c88904ac1925eb51ae1a945"
] |
[
"ACALJJ32/basicvsr_v2_backup.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import load_checkpoint\nfrom torch.nn.parameter import Parameter\n\nfrom mmedit.models.common import (PixelShufflePack, ResidualBlockNoBN,\n flow_warp, make_layer)\nfrom mmedit.models.registry import BACKBONES\nfrom mmedit.utils import get_root_logger\nfrom .edvr_net import PCDAlignment, TSAFusion\nimport math\n\n\[email protected]_module()\nclass BasicVSRGaussModulationV2(nn.Module):\n \"\"\"BasicVSR network structure for video super-resolution.\n\n Support only x4 upsampling.\n Paper:\n BasicVSR: The Search for Essential Components in Video Super-Resolution\n and Beyond, CVPR, 2021\n\n Args:\n mid_channels (int): Channel number of the intermediate features.\n Default: 64.\n num_blocks (int): Number of residual blocks in each propagation branch.\n Default: 30.\n spynet_pretrained (str): Pre-trained model path of SPyNet.\n Default: None.\n \"\"\"\n\n def __init__(self,\n mid_channels=64,\n num_blocks=30,\n keyframe_stride=5,\n padding=2,\n spynet_pretrained=None,\n edvr_pretrained=None,\n with_dft=False):\n\n super().__init__()\n\n self.mid_channels = mid_channels\n self.padding = padding\n self.keyframe_stride = keyframe_stride\n\n # optical flow network for feature alignment\n self.spynet = SPyNet(pretrained=spynet_pretrained)\n\n # information-refill\n self.edvr = EDVRFeatureExtractor(\n num_frames=padding * 2 + 1,\n center_frame_idx=padding,\n pretrained=edvr_pretrained)\n self.backward_fusion = nn.Conv2d(\n 2 * mid_channels, mid_channels, 3, 1, 1, bias=True)\n self.forward_fusion = nn.Conv2d(\n 2 * mid_channels, mid_channels, 3, 1, 1, bias=True)\n\n # propagation branches\n self.backward_resblocks = ResidualBlocksWithInputConv(\n mid_channels + 3, mid_channels, num_blocks)\n self.forward_resblocks = ResidualBlocksWithInputConv(\n 2 * mid_channels + 3, mid_channels, num_blocks)\n\n # upsample\n self.fusion = nn.Conv2d(\n mid_channels * 2, mid_channels, 1, 1, 0, bias=True)\n self.upsample1 = PixelShufflePack(\n mid_channels, mid_channels, 2, upsample_kernel=3)\n self.upsample2 = PixelShufflePack(\n mid_channels, 64, 2, upsample_kernel=3)\n self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)\n self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)\n self.img_upsample = nn.Upsample(\n scale_factor=4, mode='bilinear', align_corners=False)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n # DFT feature extractor\n self.with_dft_feature_extractor = with_dft\n self.dft_feature_extractor = DftFeatureExtractor(mid_channels, num_blocks=10, with_gauss=True)\n\n self.crac_module = CRACV2(in_channels=mid_channels, mid_channels = mid_channels)\n\n self.dft_fusion_backward = nn.Conv2d(2 * mid_channels + 3, mid_channels + 3, 3, 1, 1, bias=True)\n self.dft_fusion_forward = nn.Conv2d(3 * mid_channels + 3, 2 * mid_channels + 3, 3, 1, 1, bias=True)\n \n # pooling layer\n self.max_pool = nn.MaxPool2d(3, stride=2, padding=1)\n self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)\n\n def spatial_padding(self, lrs):\n \"\"\" Apply pdding spatially.\n\n Since the PCD module in EDVR requires that the resolution is a multiple\n of 4, we apply padding to the input LR images if their resolution is\n not divisible by 4.\n\n Args:\n lrs (Tensor): Input LR sequence with shape (n, t, c, h, w).\n\n Returns:\n Tensor: Padded LR sequence with shape (n, t, c, h_pad, w_pad).\n\n \"\"\"\n\n n, t, c, h, w = lrs.size()\n\n pad_h = (4 - h % 4) % 4\n pad_w = (4 - w % 4) % 4\n\n # padding\n 
lrs = lrs.view(-1, c, h, w)\n lrs = F.pad(lrs, [0, pad_w, 0, pad_h], mode='reflect')\n\n return lrs.view(n, t, c, h + pad_h, w + pad_w)\n\n def check_if_mirror_extended(self, lrs):\n \"\"\"Check whether the input is a mirror-extended sequence.\n\n If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the\n (t-1-i)-th frame.\n\n Args:\n lrs (tensor): Input LR images with shape (n, t, c, h, w)\n \"\"\"\n\n self.is_mirror_extended = False\n if lrs.size(1) % 2 == 0:\n lrs_1, lrs_2 = torch.chunk(lrs, 2, dim=1)\n if torch.norm(lrs_1 - lrs_2.flip(1)) == 0:\n self.is_mirror_extended = True\n\n def compute_refill_features(self, lrs, keyframe_idx):\n \"\"\" Compute keyframe features for information-refill.\n Since EDVR-M is used, padding is performed before feature computation.\n Args:\n lrs (Tensor): Input LR images with shape (n, t, c, h, w)\n keyframe_idx (list(int)): The indices specifying the keyframes.\n Return:\n dict(Tensor): The keyframe features. Each key corresponds to the\n indices in keyframe_idx.\n \"\"\"\n\n if self.padding == 2:\n lrs = [lrs[:, [4, 3]], lrs, lrs[:, [-4, -5]]] # padding\n elif self.padding == 3:\n lrs = [lrs[:, [6, 5, 4]], lrs, lrs[:, [-5, -6, -7]]] # padding\n lrs = torch.cat(lrs, dim=1)\n\n num_frames = 2 * self.padding + 1\n feats_refill = {}\n for i in keyframe_idx:\n feats_refill[i] = self.edvr(lrs[:, i:i + num_frames].contiguous())\n return feats_refill\n \n def compute_flow(self, lrs):\n \"\"\"Compute optical flow using SPyNet for feature warping.\n\n Note that if the input is an mirror-extended sequence, 'flows_forward'\n is not needed, since it is equal to 'flows_backward.flip(1)'.\n\n Args:\n lrs (tensor): Input LR images with shape (n, t, c, h, w)\n\n Return:\n tuple(Tensor): Optical flow. 'flows_forward' corresponds to the\n flows used for forward-time propagation (current to previous).\n 'flows_backward' corresponds to the flows used for\n backward-time propagation (current to next).\n \"\"\"\n\n n, t, c, h, w = lrs.size()\n lrs_1 = lrs[:, :-1, :, :, :].reshape(-1, c, h, w)\n lrs_2 = lrs[:, 1:, :, :, :].reshape(-1, c, h, w)\n\n flows_backward = self.spynet(lrs_1, lrs_2).view(n, t - 1, 2, h, w)\n\n if self.is_mirror_extended: # flows_forward = flows_backward.flip(1)\n flows_forward = None\n else:\n flows_forward = self.spynet(lrs_2, lrs_1).view(n, t - 1, 2, h, w)\n\n return flows_forward, flows_backward\n\n def forward(self, lrs, gts):\n \"\"\"Forward function for BasicVSR.\n\n Args:\n lrs (Tensor): Input LR sequence with shape (n, t, c, h, w).\n\n Returns:\n Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).\n \"\"\"\n\n n, t, c, h_input, w_input = lrs.size()\n\n assert h_input >= 64 and w_input >= 64, (\n 'The height and width of inputs should be at least 64, '\n f'but got {h_input} and {w_input}.')\n\n # check whether the input is an extended sequence\n self.check_if_mirror_extended(lrs)\n\n lrs = self.spatial_padding(lrs)\n h, w = lrs.size(3), lrs.size(4)\n\n # get the keyframe indices for information-refill\n keyframe_idx = list(range(0, t, self.keyframe_stride))\n if keyframe_idx[-1] != t - 1:\n keyframe_idx.append(t - 1) # the last frame must be a keyframe\n\n # compute optical flow and compute features for information-refill \n flows_forward, flows_backward = self.compute_flow(lrs) \n feats_refill = self.compute_refill_features(lrs, keyframe_idx) # dict; feats_refill[0] shape: [b, mid_channels, h, w]\n\n # backward-time propgation\n outputs = []\n feat_prop = lrs.new_zeros(n, self.mid_channels, h, w)\n for i in range(t - 1, -1, 
-1):\n lr_curr = lrs[:, i, :, :, :]\n if i < t - 1: # no warping for the last timestep\n flow = flows_backward[:, i, :, :, :] # [b, 2, h, w]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n\n if i in keyframe_idx:\n feat_prop = torch.cat([feat_prop, feats_refill[i]], dim=1) # [b, 2 * mid_channles, h, w]\n feat_prop = self.backward_fusion(feat_prop) # [b, mid_channels, h, w]\n\n feat_prop = torch.cat([lr_curr, feat_prop], dim=1) # [b, mid_channel + 3, h, w]\n\n # DFT feature extractor\n if self.with_dft_feature_extractor and i in keyframe_idx:\n dft_feature = self.dft_feature_extractor(feats_refill[i]) # [b, mid_channels, h, w]\n feat_prop = torch.cat((dft_feature, feat_prop), dim=1) # [b, 2 * mid_channles + 3, h, w]\n feat_prop = self.dft_fusion_backward(feat_prop) \n \n feat_prop = self.backward_resblocks(feat_prop)\n \n outputs.append(feat_prop)\n outputs = outputs[::-1]\n\n # forward-time propagation and upsampling\n feat_prop = torch.zeros_like(feat_prop)\n for i in range(0, t):\n lr_curr = lrs[:, i, :, :, :]\n if i > 0: # no warping required for the first timestep\n if flows_forward is not None:\n flow = flows_forward[:, i - 1, :, :, :]\n else:\n flow = flows_backward[:, -i, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n\n if i in keyframe_idx: # information-refill\n feat_prop = torch.cat([feat_prop, feats_refill[i]], dim=1)\n feat_prop = self.forward_fusion(feat_prop)\n\n feat_prop = torch.cat([lr_curr, outputs[i], feat_prop], dim=1) # [b, 2 * mid_channels + 3, h, w]\n\n # DFT feature extractor\n if self.with_dft_feature_extractor and i in keyframe_idx:\n dft_feature = self.dft_feature_extractor(feats_refill[i])\n feat_prop = torch.cat((dft_feature, feat_prop), dim=1)\n feat_prop = self.dft_fusion_forward(feat_prop)\n\n feat_prop = self.forward_resblocks(feat_prop) # [b, mid_channel, h, w]\n\n out = self.lrelu(self.upsample1(feat_prop))\n out = self.lrelu(self.upsample2(out))\n out = self.lrelu(self.conv_hr(out))\n out = self.conv_last(out)\n base = self.img_upsample(lr_curr)\n out += base # [b, c, h, w]\n outputs[i] = out\n\n return torch.stack(outputs, dim=1)[:, :, :, :4 * h_input, :4 * w_input], gts\n\n def init_weights(self, pretrained=None, strict=True):\n \"\"\"Init weights for models.\n\n Args:\n pretrained (str, optional): Path for pretrained weights. If given\n None, pretrained weights will not be loaded. Defaults: None.\n strict (boo, optional): Whether strictly load the pretrained model.\n Defaults to True.\n \"\"\"\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=strict, logger=logger)\n elif pretrained is not None:\n raise TypeError(f'\"pretrained\" must be a str or None. '\n f'But received {type(pretrained)}.')\n\nclass ResidualBlocksWithInputConv(nn.Module):\n \"\"\"Residual blocks with a convolution in front.\n\n Args:\n in_channels (int): Number of input channels of the first conv.\n out_channels (int): Number of channels of the residual blocks.\n Default: 64.\n num_blocks (int): Number of residual blocks. 
Default: 30.\n \"\"\"\n\n def __init__(self, in_channels, out_channels=64, num_blocks=30):\n super().__init__()\n\n main = []\n\n # a convolution used to match the channels of the residual blocks\n main.append(nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=True))\n main.append(nn.LeakyReLU(negative_slope=0.1, inplace=True))\n\n # residual blocks\n main.append(\n make_layer(\n ResidualBlockNoBN, num_blocks, mid_channels=out_channels))\n\n self.main = nn.Sequential(*main)\n\n def forward(self, feat):\n \"\"\"\n Forward function for ResidualBlocksWithInputConv.\n\n Args:\n feat (Tensor): Input feature with shape (n, in_channels, h, w)\n\n Returns:\n Tensor: Output feature with shape (n, out_channels, h, w)\n \"\"\"\n return self.main(feat)\n\nclass SPyNet(nn.Module):\n \"\"\"SPyNet network structure.\n\n The difference to the SPyNet in [tof.py] is that\n 1. more SPyNetBasicModule is used in this version, and\n 2. no batch normalization is used in this version.\n\n Paper:\n Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017\n\n Args:\n pretrained (str): path for pre-trained SPyNet. Default: None.\n \"\"\"\n\n def __init__(self, pretrained):\n super().__init__()\n\n self.basic_module = nn.ModuleList(\n [SPyNetBasicModule() for _ in range(6)])\n\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=True, logger=logger)\n elif pretrained is not None:\n raise TypeError('[pretrained] should be str or None, '\n f'but got {type(pretrained)}.')\n\n self.register_buffer(\n 'mean',\n torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))\n self.register_buffer(\n 'std',\n torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))\n\n def compute_flow(self, ref, supp):\n \"\"\"Compute flow from ref to supp.\n\n Note that in this function, the images are already resized to a\n multiple of 32.\n\n Args:\n ref (Tensor): Reference image with shape of (n, 3, h, w).\n supp (Tensor): Supporting image with shape of (n, 3, h, w).\n\n Returns:\n Tensor: Estimated optical flow: (n, 2, h, w).\n \"\"\"\n n, _, h, w = ref.size()\n\n # normalize the input images\n ref = [(ref - self.mean) / self.std]\n supp = [(supp - self.mean) / self.std]\n\n # generate downsampled frames\n for level in range(5):\n ref.append(\n F.avg_pool2d(\n input=ref[-1],\n kernel_size=2,\n stride=2,\n count_include_pad=False))\n supp.append(\n F.avg_pool2d(\n input=supp[-1],\n kernel_size=2,\n stride=2,\n count_include_pad=False))\n ref = ref[::-1]\n supp = supp[::-1]\n\n # flow computation\n flow = ref[0].new_zeros(n, 2, h // 32, w // 32)\n for level in range(len(ref)):\n if level == 0:\n flow_up = flow\n else:\n flow_up = F.interpolate(\n input=flow,\n scale_factor=2,\n mode='bilinear',\n align_corners=True) * 2.0\n\n # add the residue to the upsampled flow\n flow = flow_up + self.basic_module[level](\n torch.cat([\n ref[level],\n flow_warp(\n supp[level],\n flow_up.permute(0, 2, 3, 1),\n padding_mode='border'), flow_up\n ], 1))\n\n return flow\n\n def forward(self, ref, supp):\n \"\"\"Forward function of SPyNet.\n\n This function computes the optical flow from ref to supp.\n\n Args:\n ref (Tensor): Reference image with shape of (n, 3, h, w).\n supp (Tensor): Supporting image with shape of (n, 3, h, w).\n\n Returns:\n Tensor: Estimated optical flow: (n, 2, h, w).\n \"\"\"\n\n # upsize to a multiple of 32\n h, w = ref.shape[2:4]\n w_up = w if (w % 32) == 0 else 32 * (w // 32 + 1)\n h_up = h if (h % 32) == 0 else 32 * (h // 32 + 1)\n ref = F.interpolate(\n input=ref, size=(h_up, 
w_up), mode='bilinear', align_corners=False)\n supp = F.interpolate(\n input=supp,\n size=(h_up, w_up),\n mode='bilinear',\n align_corners=False)\n\n # compute flow, and resize back to the original resolution\n flow = F.interpolate(\n input=self.compute_flow(ref, supp),\n size=(h, w),\n mode='bilinear',\n align_corners=False)\n\n # adjust the flow values\n flow[:, 0, :, :] *= float(w) / float(w_up)\n flow[:, 1, :, :] *= float(h) / float(h_up)\n\n return flow\n\nclass SPyNetBasicModule(nn.Module):\n \"\"\"Basic Module for SPyNet.\n\n Paper:\n Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.basic_module = nn.Sequential(\n ConvModule(\n in_channels=8,\n out_channels=32,\n kernel_size=7,\n stride=1,\n padding=3,\n norm_cfg=None,\n act_cfg=dict(type='ReLU')),\n ConvModule(\n in_channels=32,\n out_channels=64,\n kernel_size=7,\n stride=1,\n padding=3,\n norm_cfg=None,\n act_cfg=dict(type='ReLU')),\n ConvModule(\n in_channels=64,\n out_channels=32,\n kernel_size=7,\n stride=1,\n padding=3,\n norm_cfg=None,\n act_cfg=dict(type='ReLU')),\n ConvModule(\n in_channels=32,\n out_channels=16,\n kernel_size=7,\n stride=1,\n padding=3,\n norm_cfg=None,\n act_cfg=dict(type='ReLU')),\n ConvModule(\n in_channels=16,\n out_channels=2,\n kernel_size=7,\n stride=1,\n padding=3,\n norm_cfg=None,\n act_cfg=None))\n\n def forward(self, tensor_input):\n \"\"\"\n Args:\n tensor_input (Tensor): Input tensor with shape (b, 8, h, w).\n 8 channels contain:\n [reference image (3), neighbor image (3), initial flow (2)].\n\n Returns:\n Tensor: Refined flow with shape (b, 2, h, w)\n \"\"\"\n return self.basic_module(tensor_input)\n\nclass EDVRFeatureExtractor(nn.Module):\n \"\"\"EDVR feature extractor for information-refill in IconVSR.\n\n We use EDVR-M in IconVSR. To adopt pretrained models, please\n specify \"pretrained\".\n\n Paper:\n EDVR: Video Restoration with Enhanced Deformable Convolutional Networks.\n Args:\n in_channels (int): Channel number of inputs.\n out_channels (int): Channel number of outputs.\n mid_channels (int): Channel number of intermediate features.\n Default: 64.\n num_frames (int): Number of input frames. Default: 5.\n deform_groups (int): Deformable groups. Defaults: 8.\n num_blocks_extraction (int): Number of blocks for feature extraction.\n Default: 5.\n num_blocks_reconstruction (int): Number of blocks for reconstruction.\n Default: 10.\n center_frame_idx (int): The index of center frame. Frame counting from\n 0. Default: 2.\n with_tsa (bool): Whether to use TSA module. Default: True.\n pretrained (str): The pretrained model path. 
Default: None.\n \"\"\"\n\n def __init__(self,\n in_channels=3,\n out_channel=3,\n mid_channels=64,\n num_frames=5,\n deform_groups=8,\n num_blocks_extraction=5,\n num_blocks_reconstruction=10,\n center_frame_idx=2,\n with_tsa=True,\n pretrained=None):\n\n super().__init__()\n\n self.center_frame_idx = center_frame_idx\n self.with_tsa = with_tsa\n act_cfg = dict(type='LeakyReLU', negative_slope=0.1)\n\n self.conv_first = nn.Conv2d(in_channels, mid_channels, 3, 1, 1)\n self.feature_extraction = make_layer(\n ResidualBlockNoBN,\n num_blocks_extraction,\n mid_channels=mid_channels)\n\n # generate pyramid features\n self.feat_l2_conv1 = ConvModule(\n mid_channels, mid_channels, 3, 2, 1, act_cfg=act_cfg)\n self.feat_l2_conv2 = ConvModule(\n mid_channels, mid_channels, 3, 1, 1, act_cfg=act_cfg)\n self.feat_l3_conv1 = ConvModule(\n mid_channels, mid_channels, 3, 2, 1, act_cfg=act_cfg)\n self.feat_l3_conv2 = ConvModule(\n mid_channels, mid_channels, 3, 1, 1, act_cfg=act_cfg)\n # pcd alignment\n self.pcd_alignment = PCDAlignment(\n mid_channels=mid_channels, deform_groups=deform_groups)\n # fusion\n if self.with_tsa:\n self.fusion = TSAFusion(\n mid_channels=mid_channels,\n num_frames=num_frames,\n center_frame_idx=self.center_frame_idx)\n else:\n self.fusion = nn.Conv2d(num_frames * mid_channels, mid_channels, 1, 1)\n\n # CRAC module\n # self.crac_module = CRACV2(in_channels=mid_channels, mid_channels=mid_channels)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=True, logger=logger)\n elif pretrained is not None:\n raise TypeError(f'\"pretrained\" must be a str or None. '\n f'But received {type(pretrained)}.')\n\n def forward(self, x):\n \"\"\"Forward function for EDVRFeatureExtractor.\n Args:\n x (Tensor): Input tensor with shape (n, t, 3, h, w).\n Returns:\n Tensor: Intermediate feature with shape (n, mid_channels, h, w).\n \"\"\"\n\n n, t, c, h, w = x.size()\n\n # extract LR features\n # L1\n l1_feat = self.lrelu(self.conv_first(x.view(-1, c, h, w)))\n # l1_feat = self.crac_module(l1_feat)\n \n l1_feat = self.feature_extraction(l1_feat)\n\n # L2\n l2_feat = self.feat_l2_conv2(self.feat_l2_conv1(l1_feat))\n\n # L3\n l3_feat = self.feat_l3_conv2(self.feat_l3_conv1(l2_feat))\n\n l1_feat = l1_feat.view(n, t, -1, h, w)\n l2_feat = l2_feat.view(n, t, -1, h // 2, w // 2)\n l3_feat = l3_feat.view(n, t, -1, h // 4, w // 4)\n\n # pcd alignment\n ref_feats = [ # reference feature list\n l1_feat[:, self.center_frame_idx, :, :, :].clone(),\n l2_feat[:, self.center_frame_idx, :, :, :].clone(),\n l3_feat[:, self.center_frame_idx, :, :, :].clone()\n ]\n aligned_feat = []\n for i in range(t):\n neighbor_feats = [\n l1_feat[:, i, :, :, :].clone(), l2_feat[:, i, :, :, :].clone(),\n l3_feat[:, i, :, :, :].clone()\n ]\n aligned_feat.append(self.pcd_alignment(neighbor_feats, ref_feats))\n aligned_feat = torch.stack(aligned_feat, dim=1) # (n, t, c, h, w)\n\n if self.with_tsa:\n feat = self.fusion(aligned_feat)\n else:\n aligned_feat = aligned_feat.view(n, -1, h, w)\n feat = self.fusion(aligned_feat)\n\n return feat\n\nclass DftFeatureExtractor(nn.Module):\n def __init__(self, mid_channels=64, num_blocks=20, with_gauss=False, guass_key = 2.0):\n super().__init__()\n self.conv_first = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n main = []\n main.append(\n make_layer(\n ResidualBlockNoBN, 6, 
mid_channels=mid_channels))\n self.main = nn.Sequential(*main)\n\n self.conv_middle = nn.Conv2d(2 * mid_channels, mid_channels, 3, 1, 1, bias=True)\n\n feature_extractor = []\n feature_extractor.append(\n make_layer(\n ResidualBlockNoBN, num_blocks, mid_channels=mid_channels))\n self.feature_extractor = nn.Sequential(*feature_extractor)\n\n self.conv_last = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)\n\n # Gauss \n self.with_gauss = with_gauss\n self.guass_key = guass_key\n\n modulations = [\n nn.Conv2d(1, mid_channels, 1,1,0, bias=False),\n nn.LeakyReLU(negative_slope=0.1, inplace=False),\n nn.Conv2d(mid_channels, mid_channels, 1,1,0, bias=False),\n nn.LeakyReLU(negative_slope=0.1, inplace=False),\n nn.Conv2d(mid_channels, mid_channels, 1,1,0, bias=False),\n nn.LeakyReLU(negative_slope=0.1, inplace=False),\n ]\n\n modulations.append(\n make_layer(\n ResidualBlockNoBN, 5, mid_channels=mid_channels))\n \n self.modulation = nn.Sequential(*modulations)\n \n def forward(self, lr):\n \"\"\"\n Args\n lr: low resolution images. \n\n Returns \n DFT feature maps of lr image. \n \"\"\"\n assert isinstance(lr, torch.Tensor), (\n print(\"lr must be Torch.Tensor!\"))\n\n b, c, h, w = lr.size()\n\n x = self.conv_first(lr) # [b, mid_channels, h, w]\n x = self.main(x) # [b, mid_channels, h, w]\n\n x_proj = (2 * math.pi * x)\n\n if self.with_gauss:\n gauss_b = torch.randn((h,h)).to(lr.device)\n\n # modulate the gauss mat\n gauss_key = torch.ones((b,1,h,h)) * self.guass_key\n gauss_key = gauss_key.to(lr.device)\n gauss_key = self.modulation(gauss_key)\n gauss_b = gauss_b * gauss_key\n\n x_proj = torch.matmul(gauss_b, x_proj)\n \n dft_feature = torch.cat((torch.sin(x_proj), torch.cos(x_proj)),dim=1) # [b, 2 * mid_channels, h, w]\n\n dft_feature = self.conv_middle(dft_feature)\n dft_feature = self.feature_extractor(dft_feature)\n\n return dft_feature\n\nclass CRACV2(nn.Conv2d):\n def __init__(self, in_channels=64, mid_channels=64, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True):\n super(CRACV2, self).__init__(in_channels, mid_channels, kernel_size, stride, padding, dilation, groups, bias)\n \n self.stride = stride\n self.padding= padding\n self.dilation = dilation\n self.groups = groups\n self.mid_channel = mid_channels\n self.kernel_size = kernel_size\n\n # weight & bias for content-gated-convolution\n self.weight_conv = Parameter(torch.zeros(mid_channels, in_channels, kernel_size, kernel_size), requires_grad=True)\n self.bias_conv = Parameter(torch.zeros(mid_channels), requires_grad=True)\n \n # init weight_conv layer\n nn.init.kaiming_normal_(self.weight_conv)\n\n # target spatial size of the pooling layer\n self.avg_pool = nn.AdaptiveAvgPool2d((kernel_size, kernel_size))\n\n # the dimension of latent representation\n self.num_latent = int((kernel_size * kernel_size) / 2 + 1)\n\n # the context encoding module\n self.context_encoding = nn.Linear(kernel_size*kernel_size, self.num_latent, False)\n self.context_encoding_bn = nn.BatchNorm1d(in_channels)\n\n # relu function\n self.relu = nn.ReLU(inplace=True)\n\n # the number of groups in the channel interaction module\n if in_channels // 16: self.g = 16\n else: self.g = in_channels\n \n # the channel interacting module\n self.channel_interact = nn.Linear(self.g, mid_channels // (in_channels // self.g), bias=False)\n self.channel_interact_bn = nn.BatchNorm1d(mid_channels)\n self.channel_interact_bn2 = nn.BatchNorm1d(in_channels)\n\n # the gate decoding module (spatial interaction)\n self.gate_decode = nn.Linear(self.num_latent, 
kernel_size * kernel_size, False)\n self.gate_decode2 = nn.Linear(self.num_latent, kernel_size * kernel_size, False)\n\n # used to prepare the input feature map to patches\n self.unfold = nn.Unfold(kernel_size, dilation, padding, stride)\n\n # sigmoid function\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n b, c, h, w = x.size() \n weight = self.weight_conv\n\n # allocate global information and context-encoding module\n out = self.context_encoding(self.avg_pool(x).view(b, c, -1)) \n\n # use different bn for following two branches\n context_encoding2 = out.clone() \n out = self.relu(self.context_encoding_bn(out)) \n\n # gate decoding branch 1 (spatial interaction)\n out = self.gate_decode(out) # out: batch x n_feat x 9 (5 --> 9 = 3x3)\n print(out.size())\n\n # channel interacting module\n oc = self.channel_interact(self.relu(self.channel_interact_bn2(context_encoding2).view(b, c//self.g, self.g, -1).transpose(2,3))).transpose(2,3).contiguous()\n oc = self.relu(self.channel_interact_bn(oc.view(b, self.mid_channel, -1))) # oc: batch x n_feat x 5 (after grouped linear layer)\n\n # gate decoding branch 2 (spatial interaction)\n oc = self.gate_decode2(oc) # oc: batch x n_feat x 9 (5 --> 9 = 3x3)\n \n # produce gate (equation (4) in the CRAN paper)\n out = self.sigmoid(out.view(b, 1, c, self.kernel_size, self.kernel_size)\n + oc.view(b, self.mid_channel, 1, self.kernel_size, self.kernel_size)) # out: batch x out_channel x in_channel x kernel_size x kernel_size (same dimension as conv2d weight)\n\n # unfolding input feature map to patches\n x_unfold = self.unfold(x)\n b, _, l = x_unfold.size()\n\n # gating\n out = (out * weight.unsqueeze(0)).view(b, self.mid_channel, -1)\n\n return torch.matmul(out, x_unfold).view(-1, c, h, w)"
] |
[
[
"torch.cat",
"torch.zeros",
"torch.sin",
"torch.nn.functional.interpolate",
"torch.ones",
"torch.randn",
"torch.nn.Sigmoid",
"torch.nn.functional.pad",
"torch.cos",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.LeakyReLU",
"torch.stack",
"torch.Tensor",
"torch.nn.MaxPool2d",
"torch.nn.Unfold",
"torch.matmul",
"torch.nn.Upsample",
"torch.nn.AdaptiveAvgPool2d",
"torch.chunk",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Janderson/pandasbt
|
[
"e3f6826d55be05330928750209297a014976a8c5",
"e3f6826d55be05330928750209297a014976a8c5"
] |
[
"tests/test_calc_signal.py",
"pandasbt/build_timeframe.py"
] |
[
"import sys\nimport os\nimport pandas as pd\nimport pytest\n\n# Avoid error module path\nsys.path.insert(0, os.path.abspath( # noqa: E402 - Avoid flake8 error 402\n os.path.join(os.path.dirname(__file__), '..'))\n)\n\nfrom pandasbt import calc_signal\nfrom pandasbt.calc_signal import calc_return\nfrom pandasbt import zscore\n\n\[email protected]\ndef dataframe_default():\n return pd.DataFrame([\n {\"close\": 25}, {\"close\": 15}, {\"close\": 41}, {\"close\": 7},\n {\"close\": 5}, {\"close\": 115}, {\"close\": 45}, {\"close\": 32},\n ])\n\n\ndef test_calc_signal_should_signal_with_zeros_and_ones(dataframe_default):\n df_test = dataframe_default\n df_test = calc_signal(zscore(df_test, period=3),\n buy_query=\"zscore > 1\",\n sell_query=\"zscore <= -0.5\")\n assert df_test.pbt_signal.iloc[2] == 1\n assert df_test.pbt_signal.iloc[3] == -1\n assert df_test.pbt_signal.iloc[6] == 0\n\n\ndef test_calc_should_calc_a_return_column(dataframe_default):\n df_test = dataframe_default\n df_test = calc_signal(zscore(df_test, period=3),\n buy_query=\"zscore > 1\",\n sell_query=\"zscore <= -0.5\")\n\n assert calc_return(df_test, \"close\").pbt_rets.iloc[1] == -0.4\n assert calc_return(df_test, \"close\").pbt_rets.iloc[5] == 22.0\n\n\ndef test_calc_signal_and_returns_at_once(dataframe_default):\n df_test = dataframe_default\n df_test = calc_signal(zscore(df_test, period=3),\n buy_query=\"zscore > 1\",\n sell_query=\"zscore <= -0.5\",\n price_column=\"close\")\n\n assert df_test.pbt_rets.iloc[1] == -0.4\n assert df_test.pbt_rets.iloc[5] == 22.0\n\n\ndef test_calc_should_not_modify_original_df(dataframe_default):\n df_test = dataframe_default\n calc_signal(zscore(df_test, period=3),\n buy_query=\"zscore > 1\",\n sell_query=\"zscore <= -0.5\",\n price_column=\"close\")\n assert \"pbt_signal\" not in list(df_test.columns)\n",
"import pandas as pd\nimport numpy as np\n\n\ndef build_timeframe(dataframe,\n timeframe=\"D1\",\n datetime_column=\"time\",\n filter_at_end=True):\n \"\"\"\n function: build_timeframe\n This function should build a highertimeframe like D1,\n using a lower timeframe like M1\n \"\"\"\n dataframe[\"time\"] = pd.to_datetime(dataframe[\"time\"])\n dataframe.sort_values([\"time\"], ascending=True, inplace=True)\n\n if timeframe==\"D1\":\n dataframe[\"start_bar_sign\"] = dataframe.time.dt.day != dataframe.time.dt.day.shift(1)\n dataframe[\"close_bar_sign\"] = (dataframe.start_bar_sign.shift(-1)) | \\\n (dataframe.index == dataframe.index[-1])\n dataframe[\"time_bar\"] = dataframe.time.dt.date\n elif timeframe in [\"M2\", \"M5\", \"M10\", \"M15\", \"M20\", \"M30\"]:\n minute = dataframe.time.dt.minute\n hour = dataframe.time.dt.hour\n timeframe_minutes = int(timeframe.upper().replace(\"M\", \"\"))\n dataframe[\"number_bar\"] = minute - np.mod(minute, timeframe_minutes) + (60*hour)\n dataframe[\"start_bar_sign\"] = dataframe.number_bar != dataframe.number_bar.shift(1)\n dataframe[\"close_bar_sign\"] = (dataframe.start_bar_sign.shift(-1)) | \\\n (dataframe.index == dataframe.index[-1])\n dataframe[\"time_bar\"] = dataframe.time - pd.TimedeltaIndex(dataframe.time.dt.minute, unit=\"m\") - pd.TimedeltaIndex(dataframe.time.dt.second, unit=\"s\") + pd.TimedeltaIndex(minute - np.mod(minute, timeframe_minutes), unit=\"m\")\n else:\n return Exception(\"Timeframe {} not implemented\".format(timeframe))\n\n dataframe[\"open_bar\"] = np.where(dataframe.start_bar_sign,\n dataframe.open, np.nan)\n dataframe[\"close_bar\"] = np.where(dataframe.close_bar_sign,\n dataframe.close, np.nan)\n dataframe[\"low_bar\"] = dataframe.groupby(\n (dataframe.start_bar_sign).cumsum()\n ).low.cummin()\n dataframe[\"high_bar\"] = dataframe.groupby(\n (dataframe.start_bar_sign).cumsum()\n ).high.cummax()\n\n dataframe.fillna(method=\"ffill\", inplace=True)\n if filter_at_end:\n return dataframe.query(\"close_bar_sign == True\")[[\"time_bar\", \"open_bar\", \"high_bar\", \"low_bar\", \"close_bar\"]]\n return dataframe\n\n\ndef end_build_timeframe(dataframe, column=\"close_bar_sign\"):\n return dataframe.query(\"close_bar_sign == True\")\n"
] |
[
[
"pandas.DataFrame"
],
[
"numpy.mod",
"pandas.to_datetime",
"numpy.where",
"pandas.TimedeltaIndex"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
CSautier/metrics
|
[
"32389fbc03b1bbbf0b15b05c56dc1db7a90bff97",
"32389fbc03b1bbbf0b15b05c56dc1db7a90bff97",
"32389fbc03b1bbbf0b15b05c56dc1db7a90bff97"
] |
[
"tests/classification/test_iou.py",
"torchmetrics/functional/regression/mean_squared_log_error.py",
"tests/retrieval/helpers.py"
] |
[
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\n\nimport numpy as np\nimport pytest\nimport torch\nfrom sklearn.metrics import jaccard_score as sk_jaccard_score\nfrom torch import Tensor, tensor\n\nfrom tests.classification.inputs import _input_binary, _input_binary_prob\nfrom tests.classification.inputs import _input_multiclass as _input_mcls\nfrom tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob\nfrom tests.classification.inputs import _input_multidim_multiclass as _input_mdmc\nfrom tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob\nfrom tests.classification.inputs import _input_multilabel as _input_mlb\nfrom tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob\nfrom tests.helpers.testers import NUM_CLASSES, THRESHOLD, MetricTester\nfrom torchmetrics.classification.iou import IoU\nfrom torchmetrics.functional import iou\n\n\ndef _sk_iou_binary_prob(preds, target, average=None):\n sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8)\n sk_target = target.view(-1).numpy()\n\n return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average)\n\n\ndef _sk_iou_binary(preds, target, average=None):\n sk_preds = preds.view(-1).numpy()\n sk_target = target.view(-1).numpy()\n\n return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average)\n\n\ndef _sk_iou_multilabel_prob(preds, target, average=None):\n sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8)\n sk_target = target.view(-1).numpy()\n\n return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average)\n\n\ndef _sk_iou_multilabel(preds, target, average=None):\n sk_preds = preds.view(-1).numpy()\n sk_target = target.view(-1).numpy()\n\n return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average)\n\n\ndef _sk_iou_multiclass_prob(preds, target, average=None):\n sk_preds = torch.argmax(preds, dim=len(preds.shape) - 1).view(-1).numpy()\n sk_target = target.view(-1).numpy()\n\n return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average)\n\n\ndef _sk_iou_multiclass(preds, target, average=None):\n sk_preds = preds.view(-1).numpy()\n sk_target = target.view(-1).numpy()\n\n return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average)\n\n\ndef _sk_iou_multidim_multiclass_prob(preds, target, average=None):\n sk_preds = torch.argmax(preds, dim=len(preds.shape) - 2).view(-1).numpy()\n sk_target = target.view(-1).numpy()\n\n return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average)\n\n\ndef _sk_iou_multidim_multiclass(preds, target, average=None):\n sk_preds = preds.view(-1).numpy()\n sk_target = target.view(-1).numpy()\n\n return sk_jaccard_score(y_true=sk_target, y_pred=sk_preds, average=average)\n\n\[email protected](\"reduction\", ['elementwise_mean', 'none'])\[email protected](\n \"preds, target, sk_metric, num_classes\",\n [(_input_binary_prob.preds, 
_input_binary_prob.target, _sk_iou_binary_prob, 2),\n (_input_binary.preds, _input_binary.target, _sk_iou_binary, 2),\n (_input_mlb_prob.preds, _input_mlb_prob.target, _sk_iou_multilabel_prob, 2),\n (_input_mlb.preds, _input_mlb.target, _sk_iou_multilabel, 2),\n (_input_mcls_prob.preds, _input_mcls_prob.target, _sk_iou_multiclass_prob, NUM_CLASSES),\n (_input_mcls.preds, _input_mcls.target, _sk_iou_multiclass, NUM_CLASSES),\n (_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_iou_multidim_multiclass_prob, NUM_CLASSES),\n (_input_mdmc.preds, _input_mdmc.target, _sk_iou_multidim_multiclass, NUM_CLASSES)]\n)\nclass TestIoU(MetricTester):\n\n @pytest.mark.parametrize(\"ddp\", [True, False])\n @pytest.mark.parametrize(\"dist_sync_on_step\", [True, False])\n def test_iou(self, reduction, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step):\n average = 'macro' if reduction == 'elementwise_mean' else None # convert tags\n self.run_class_metric_test(\n ddp=ddp,\n preds=preds,\n target=target,\n metric_class=IoU,\n sk_metric=partial(sk_metric, average=average),\n dist_sync_on_step=dist_sync_on_step,\n metric_args={\n \"num_classes\": num_classes,\n \"threshold\": THRESHOLD,\n \"reduction\": reduction\n }\n )\n\n def test_iou_functional(self, reduction, preds, target, sk_metric, num_classes):\n average = 'macro' if reduction == 'elementwise_mean' else None # convert tags\n self.run_functional_metric_test(\n preds,\n target,\n metric_functional=iou,\n sk_metric=partial(sk_metric, average=average),\n metric_args={\n \"num_classes\": num_classes,\n \"threshold\": THRESHOLD,\n \"reduction\": reduction\n }\n )\n\n def test_iou_differentiability(self, reduction, preds, target, sk_metric, num_classes):\n self.run_differentiability_test(\n preds=preds,\n target=target,\n metric_module=IoU,\n metric_functional=iou,\n metric_args={\n \"num_classes\": num_classes,\n \"threshold\": THRESHOLD,\n \"reduction\": reduction\n }\n )\n\n\[email protected](['half_ones', 'reduction', 'ignore_index', 'expected'], [\n pytest.param(False, 'none', None, Tensor([1, 1, 1])),\n pytest.param(False, 'elementwise_mean', None, Tensor([1])),\n pytest.param(False, 'none', 0, Tensor([1, 1])),\n pytest.param(True, 'none', None, Tensor([0.5, 0.5, 0.5])),\n pytest.param(True, 'elementwise_mean', None, Tensor([0.5])),\n pytest.param(True, 'none', 0, Tensor([2 / 3, 1 / 2])),\n])\ndef test_iou(half_ones, reduction, ignore_index, expected):\n preds = (torch.arange(120) % 3).view(-1, 1)\n target = (torch.arange(120) % 3).view(-1, 1)\n if half_ones:\n preds[:60] = 1\n iou_val = iou(\n preds=preds,\n target=target,\n ignore_index=ignore_index,\n reduction=reduction,\n )\n assert torch.allclose(iou_val, expected, atol=1e-9)\n\n\n# test `absent_score`\[email protected](\n ['pred', 'target', 'ignore_index', 'absent_score', 'num_classes', 'expected'],\n [\n # Note that -1 is used as the absent_score in almost all tests here to distinguish it from the range of valid\n # scores the function can return ([0., 1.] 
range, inclusive).\n # 2 classes, class 0 is correct everywhere, class 1 is absent.\n pytest.param([0], [0], None, -1., 2, [1., -1.]),\n pytest.param([0, 0], [0, 0], None, -1., 2, [1., -1.]),\n # absent_score not applied if only class 0 is present and it's the only class.\n pytest.param([0], [0], None, -1., 1, [1.]),\n # 2 classes, class 1 is correct everywhere, class 0 is absent.\n pytest.param([1], [1], None, -1., 2, [-1., 1.]),\n pytest.param([1, 1], [1, 1], None, -1., 2, [-1., 1.]),\n # When 0 index ignored, class 0 does not get a score (not even the absent_score).\n pytest.param([1], [1], 0, -1., 2, [1.0]),\n # 3 classes. Only 0 and 2 are present, and are perfectly predicted. 1 should get absent_score.\n pytest.param([0, 2], [0, 2], None, -1., 3, [1., -1., 1.]),\n pytest.param([2, 0], [2, 0], None, -1., 3, [1., -1., 1.]),\n # 3 classes. Only 0 and 1 are present, and are perfectly predicted. 2 should get absent_score.\n pytest.param([0, 1], [0, 1], None, -1., 3, [1., 1., -1.]),\n pytest.param([1, 0], [1, 0], None, -1., 3, [1., 1., -1.]),\n # 3 classes, class 0 is 0.5 IoU, class 1 is 0 IoU (in pred but not target; should not get absent_score), class\n # 2 is absent.\n pytest.param([0, 1], [0, 0], None, -1., 3, [0.5, 0., -1.]),\n # 3 classes, class 0 is 0.5 IoU, class 1 is 0 IoU (in target but not pred; should not get absent_score), class\n # 2 is absent.\n pytest.param([0, 0], [0, 1], None, -1., 3, [0.5, 0., -1.]),\n # Sanity checks with absent_score of 1.0.\n pytest.param([0, 2], [0, 2], None, 1.0, 3, [1., 1., 1.]),\n pytest.param([0, 2], [0, 2], 0, 1.0, 3, [1., 1.]),\n ]\n)\ndef test_iou_absent_score(pred, target, ignore_index, absent_score, num_classes, expected):\n iou_val = iou(\n preds=tensor(pred),\n target=tensor(target),\n ignore_index=ignore_index,\n absent_score=absent_score,\n num_classes=num_classes,\n reduction='none',\n )\n assert torch.allclose(iou_val, tensor(expected).to(iou_val))\n\n\n# example data taken from\n# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py\[email protected](\n ['pred', 'target', 'ignore_index', 'num_classes', 'reduction', 'expected'],\n [\n # Ignoring an index outside of [0, num_classes-1] should have no effect.\n pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], None, 3, 'none', [1, 1 / 2, 2 / 3]),\n pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], -1, 3, 'none', [1, 1 / 2, 2 / 3]),\n pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 255, 3, 'none', [1, 1 / 2, 2 / 3]),\n # Ignoring a valid index drops only that index from the result.\n pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 0, 3, 'none', [1 / 2, 2 / 3]),\n pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 1, 3, 'none', [1, 2 / 3]),\n pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 2, 3, 'none', [1, 1]),\n # When reducing to mean or sum, the ignored index does not contribute to the output.\n pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 0, 3, 'elementwise_mean', [7 / 12]),\n pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 0, 3, 'sum', [7 / 6]),\n ]\n)\ndef test_iou_ignore_index(pred, target, ignore_index, num_classes, reduction, expected):\n iou_val = iou(\n preds=tensor(pred),\n target=tensor(target),\n ignore_index=ignore_index,\n num_classes=num_classes,\n reduction=reduction,\n )\n assert torch.allclose(iou_val, tensor(expected).to(iou_val))\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _mean_squared_log_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:\n \"\"\"\n Returns variables required to compute Mean Squared Log Error.\n Checks for same shape of tensors.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n \"\"\"\n\n _check_same_shape(preds, target)\n sum_squared_log_error = torch.sum(torch.pow(torch.log1p(preds) - torch.log1p(target), 2))\n n_obs = target.numel()\n return sum_squared_log_error, n_obs\n\n\ndef _mean_squared_log_error_compute(sum_squared_log_error: Tensor, n_obs: int) -> Tensor:\n \"\"\"\n Computes Mean Squared Log Error.\n\n Args:\n sum_squared_log_error: Sum of square of log errors over all observations\n (log error = log(target) - log(prediction))\n n_obs: Number of predictions or observations\n\n Example:\n >>> preds = torch.tensor([0., 1, 2, 3])\n >>> target = torch.tensor([0., 1, 2, 2])\n >>> sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)\n >>> _mean_squared_log_error_compute(sum_squared_log_error, n_obs)\n tensor(0.0207)\n \"\"\"\n\n return sum_squared_log_error / n_obs\n\n\ndef mean_squared_log_error(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"\n Computes mean squared log error\n\n Args:\n preds: estimated labels\n target: ground truth labels\n\n Return:\n Tensor with RMSLE\n\n Example:\n >>> from torchmetrics.functional import mean_squared_log_error\n >>> x = torch.tensor([0., 1, 2, 3])\n >>> y = torch.tensor([0., 1, 2, 2])\n >>> mean_squared_log_error(x, y)\n tensor(0.0207)\n\n .. note::\n Half precision is only support on GPU for this metric\n\n \"\"\"\n sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)\n return _mean_squared_log_error_compute(sum_squared_log_error, n_obs)\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom typing import Callable, Dict, List, Tuple, Type, Union\n\nimport numpy as np\nimport pytest\nimport torch\nfrom numpy import array\nfrom torch import Tensor, tensor\n\nfrom tests.helpers import seed_all\nfrom tests.helpers.testers import Metric, MetricTester\nfrom tests.retrieval.inputs import _input_retrieval_scores as _irs\nfrom tests.retrieval.inputs import _input_retrieval_scores_all_target as _irs_all\nfrom tests.retrieval.inputs import _input_retrieval_scores_empty as _irs_empty\nfrom tests.retrieval.inputs import _input_retrieval_scores_extra as _irs_extra\nfrom tests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes as _irs_mis_sz\nfrom tests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes_func as _irs_mis_sz_fn\nfrom tests.retrieval.inputs import _input_retrieval_scores_no_target as _irs_no_tgt\nfrom tests.retrieval.inputs import _input_retrieval_scores_non_binary_target as _irs_non_binary\nfrom tests.retrieval.inputs import _input_retrieval_scores_wrong_targets as _irs_bad_tgt\n\nseed_all(42)\n\n# a version of get_group_indexes that depends on NumPy is here to avoid this dependency for the full library\n\n\ndef get_group_indexes(indexes: Union[Tensor, np.ndarray]) -> List[Union[Tensor, np.ndarray]]:\n \"\"\"\n Given an integer `torch.Tensor` or `np.ndarray` `indexes`, return a `torch.Tensor` or `np.ndarray` of indexes for\n each different value in `indexes`.\n\n Args:\n indexes: a `torch.Tensor` or `np.ndarray` of integers\n\n Return:\n A list of integer `torch.Tensor`s or `np.ndarray`s\n\n Example:\n >>> indexes = torch.tensor([0, 0, 0, 1, 1, 1, 1])\n >>> get_group_indexes(indexes)\n [tensor([0, 1, 2]), tensor([3, 4, 5, 6])]\n \"\"\"\n structure, dtype = (tensor, torch.long) if isinstance(indexes, Tensor) else (np.array, np.int64)\n\n res = {}\n for i, _id in enumerate(indexes):\n _id = _id.item()\n if _id in res:\n res[_id] += [i]\n else:\n res[_id] = [i]\n\n return [structure(x, dtype=dtype) for x in res.values()]\n\n\ndef _compute_sklearn_metric(\n preds: Union[Tensor, array],\n target: Union[Tensor, array],\n indexes: np.ndarray = None,\n metric: Callable = None,\n empty_target_action: str = \"skip\",\n reverse: bool = False,\n **kwargs\n) -> Tensor:\n \"\"\" Compute metric with multiple iterations over every query predictions set. 
\"\"\"\n\n if indexes is None:\n indexes = np.full_like(preds, fill_value=0, dtype=np.int64)\n if isinstance(indexes, Tensor):\n indexes = indexes.cpu().numpy()\n if isinstance(preds, Tensor):\n preds = preds.cpu().numpy()\n if isinstance(target, Tensor):\n target = target.cpu().numpy()\n\n assert isinstance(indexes, np.ndarray)\n assert isinstance(preds, np.ndarray)\n assert isinstance(target, np.ndarray)\n\n indexes = indexes.flatten()\n preds = preds.flatten()\n target = target.flatten()\n groups = get_group_indexes(indexes)\n\n sk_results = []\n for group in groups:\n trg, pds = target[group], preds[group]\n\n if ((1 - trg) if reverse else trg).sum() == 0:\n if empty_target_action == 'skip':\n pass\n elif empty_target_action == 'pos':\n sk_results.append(1.0)\n else:\n sk_results.append(0.0)\n else:\n res = metric(trg, pds, **kwargs)\n sk_results.append(res)\n\n if len(sk_results) > 0:\n return np.mean(sk_results)\n return np.array(0.0)\n\n\ndef _concat_tests(*tests: Tuple[Dict]) -> Dict:\n \"\"\"Concat tests composed by a string and a list of arguments.\"\"\"\n assert len(tests), \"`_concat_tests` expects at least an argument\"\n assert all(tests[0]['argnames'] == x['argnames'] for x in tests[1:]), \"the header must be the same for all tests\"\n return dict(argnames=tests[0]['argnames'], argvalues=sum((x['argvalues'] for x in tests), []))\n\n\n_errors_test_functional_metric_parameters_default = dict(\n argnames=\"preds,target,message,metric_args\",\n argvalues=[\n # check input shapes are consistent (func)\n (_irs_mis_sz_fn.preds, _irs_mis_sz_fn.target, \"`preds` and `target` must be of the same shape\", {}),\n # check input tensors are not empty\n (_irs_empty.preds, _irs_empty.target, \"`preds` and `target` must be non-empty and non-scalar tensors\", {}),\n # check on input dtypes\n (_irs.preds.bool(), _irs.target, \"`preds` must be a tensor of floats\", {}),\n (_irs.preds, _irs.target.float(), \"`target` must be a tensor of booleans or integers\", {}),\n # check targets are between 0 and 1\n (_irs_bad_tgt.preds, _irs_bad_tgt.target, \"`target` must contain `binary` values\", {}),\n ]\n)\n\n_errors_test_functional_metric_parameters_with_nonbinary = dict(\n argnames=\"preds,target,message,metric_args\",\n argvalues=[\n # check input shapes are consistent (func)\n (_irs_mis_sz_fn.preds, _irs_mis_sz_fn.target, \"`preds` and `target` must be of the same shape\", {}),\n # check input tensors are not empty\n (_irs_empty.preds, _irs_empty.target, \"`preds` and `target` must be non-empty and non-scalar tensors\", {}),\n # check on input dtypes\n (_irs.preds.bool(), _irs.target, \"`preds` must be a tensor of floats\", {}),\n (_irs.preds, _irs.target.float(), \"`target` must be a tensor of booleans or integers\", {}),\n ]\n)\n\n_errors_test_functional_metric_parameters_k = dict(\n argnames=\"preds,target,message,metric_args\",\n argvalues=[\n (_irs.preds, _irs.target, \"`k` has to be a positive integer or None\", dict(k=-10)),\n (_irs.preds, _irs.target, \"`k` has to be a positive integer or None\", dict(k=4.0)),\n ]\n)\n\n_errors_test_class_metric_parameters_no_pos_target = dict(\n argnames=\"indexes,preds,target,message,metric_args\",\n argvalues=[\n # check when error when there are no positive targets\n (\n _irs_no_tgt.indexes, _irs_no_tgt.preds, _irs_no_tgt.target,\n \"`compute` method was provided with a query with no positive target.\", dict(empty_target_action=\"error\")\n ),\n ]\n)\n\n_errors_test_class_metric_parameters_no_neg_target = dict(\n 
argnames=\"indexes,preds,target,message,metric_args\",\n argvalues=[\n # check when error when there are no negative targets\n (\n _irs_all.indexes, _irs_all.preds, _irs_all.target,\n \"`compute` method was provided with a query with no negative target.\", dict(empty_target_action=\"error\")\n ),\n ]\n)\n\n_errors_test_class_metric_parameters_with_nonbinary = dict(\n argnames=\"indexes,preds,target,message,metric_args\",\n argvalues=[\n (None, _irs.preds, _irs.target, \"`indexes` cannot be None\", dict(empty_target_action=\"error\")),\n # check when input arguments are invalid\n (\n _irs.indexes, _irs.preds, _irs.target, \"`empty_target_action` received a wrong value `casual_argument`.\",\n dict(empty_target_action=\"casual_argument\")\n ),\n # check input shapes are consistent\n (\n _irs_mis_sz.indexes, _irs_mis_sz.preds, _irs_mis_sz.target,\n \"`indexes`, `preds` and `target` must be of the same shape\", dict(empty_target_action=\"skip\")\n ),\n # check input tensors are not empty\n (\n _irs_empty.indexes, _irs_empty.preds,\n _irs_empty.target, \"`indexes`, `preds` and `target` must be non-empty and non-scalar tensors\",\n dict(empty_target_action=\"skip\")\n ),\n # check on input dtypes\n (\n _irs.indexes.bool(), _irs.preds, _irs.target, \"`indexes` must be a tensor of long integers\",\n dict(empty_target_action=\"skip\")\n ),\n (\n _irs.indexes, _irs.preds.bool(), _irs.target, \"`preds` must be a tensor of floats\",\n dict(empty_target_action=\"skip\")\n ),\n (\n _irs.indexes, _irs.preds, _irs.target.float(), \"`target` must be a tensor of booleans or integers\",\n dict(empty_target_action=\"skip\")\n )\n ]\n)\n\n_errors_test_class_metric_parameters_default = dict(\n argnames=\"indexes,preds,target,message,metric_args\",\n argvalues=[\n (None, _irs.preds, _irs.target, \"`indexes` cannot be None\", dict(empty_target_action=\"error\")),\n # check when input arguments are invalid\n (\n _irs.indexes, _irs.preds, _irs.target, \"`empty_target_action` received a wrong value `casual_argument`.\",\n dict(empty_target_action=\"casual_argument\")\n ),\n # check input shapes are consistent\n (\n _irs_mis_sz.indexes, _irs_mis_sz.preds, _irs_mis_sz.target,\n \"`indexes`, `preds` and `target` must be of the same shape\", dict(empty_target_action=\"skip\")\n ),\n # check input tensors are not empty\n (\n _irs_empty.indexes, _irs_empty.preds,\n _irs_empty.target, \"`indexes`, `preds` and `target` must be non-empty and non-scalar tensors\",\n dict(empty_target_action=\"skip\")\n ),\n # check on input dtypes\n (\n _irs.indexes.bool(), _irs.preds, _irs.target, \"`indexes` must be a tensor of long integers\",\n dict(empty_target_action=\"skip\")\n ),\n (\n _irs.indexes, _irs.preds.bool(), _irs.target, \"`preds` must be a tensor of floats\",\n dict(empty_target_action=\"skip\")\n ),\n (\n _irs.indexes, _irs.preds, _irs.target.float(), \"`target` must be a tensor of booleans or integers\",\n dict(empty_target_action=\"skip\")\n ),\n # check targets are between 0 and 1\n (\n _irs_bad_tgt.indexes, _irs_bad_tgt.preds, _irs_bad_tgt.target, \"`target` must contain `binary` values\",\n dict(empty_target_action=\"skip\")\n ),\n ]\n)\n\n_errors_test_class_metric_parameters_k = dict(\n argnames=\"indexes,preds,target,message,metric_args\",\n argvalues=[\n (_irs.index, _irs.preds, _irs.target, \"`k` has to be a positive integer or None\", dict(k=-10)),\n ]\n)\n\n_default_metric_class_input_arguments = dict(\n argnames=\"indexes,preds,target\",\n argvalues=[\n (_irs.indexes, _irs.preds, _irs.target),\n 
(_irs_extra.indexes, _irs_extra.preds, _irs_extra.target),\n (_irs_no_tgt.indexes, _irs_no_tgt.preds, _irs_no_tgt.target),\n ]\n)\n\n_default_metric_class_input_arguments_with_non_binary_target = dict(\n argnames=\"indexes,preds,target\",\n argvalues=[\n (_irs.indexes, _irs.preds, _irs.target),\n (_irs_extra.indexes, _irs_extra.preds, _irs_extra.target),\n (_irs_no_tgt.indexes, _irs_no_tgt.preds, _irs_no_tgt.target),\n (_irs_non_binary.indexes, _irs_non_binary.preds, _irs_non_binary.target),\n ]\n)\n\n_default_metric_functional_input_arguments = dict(\n argnames=\"preds,target\",\n argvalues=[\n (_irs.preds, _irs.target),\n (_irs_extra.preds, _irs_extra.target),\n (_irs_no_tgt.preds, _irs_no_tgt.target),\n ]\n)\n\n_default_metric_functional_input_arguments_with_non_binary_target = dict(\n argnames=\"preds,target\",\n argvalues=[\n (_irs.preds, _irs.target),\n (_irs_extra.preds, _irs_extra.target),\n (_irs_no_tgt.preds, _irs_no_tgt.target),\n (_irs_non_binary.preds, _irs_non_binary.target),\n ]\n)\n\n\ndef _errors_test_class_metric(\n indexes: Tensor,\n preds: Tensor,\n target: Tensor,\n metric_class: Metric,\n message: str = \"\",\n metric_args: dict = None,\n exception_type: Type[Exception] = ValueError,\n kwargs_update: dict = None,\n):\n \"\"\"Utility function doing checks about types, parameters and errors.\n\n Args:\n indexes: torch tensor with indexes\n preds: torch tensor with predictions\n target: torch tensor with targets\n metric_class: lightning metric class that should be tested\n message: message that exception should return\n metric_args: arguments for class initialization\n exception_type: callable function that is used for comparison\n kwargs_update: Additional keyword arguments that will be passed with indexes, preds and\n target when running update on the metric.\n \"\"\"\n metric_args = metric_args or {}\n kwargs_update = kwargs_update or {}\n with pytest.raises(exception_type, match=message):\n metric = metric_class(**metric_args)\n metric(preds, target, indexes=indexes, **kwargs_update)\n\n\ndef _errors_test_functional_metric(\n preds: Tensor,\n target: Tensor,\n metric_functional: Metric,\n message: str = \"\",\n exception_type: Type[Exception] = ValueError,\n kwargs_update: dict = None,\n):\n \"\"\"Utility function doing checks about types, parameters and errors.\n\n Args:\n preds: torch tensor with predictions\n target: torch tensor with targets\n metric_functional: lightning functional metric that should be tested\n message: message that exception should return\n exception_type: callable function that is used for comparison\n kwargs_update: Additional keyword arguments that will be passed with indexes, preds and\n target when running update on the metric.\n \"\"\"\n kwargs_update = kwargs_update or {}\n with pytest.raises(exception_type, match=message):\n metric_functional(preds, target, **kwargs_update)\n\n\nclass RetrievalMetricTester(MetricTester):\n\n def run_class_metric_test(\n self,\n ddp: bool,\n indexes: Tensor,\n preds: Tensor,\n target: Tensor,\n metric_class: Metric,\n sk_metric: Callable,\n dist_sync_on_step: bool,\n metric_args: dict,\n reverse: bool = False,\n ):\n _sk_metric_adapted = partial(_compute_sklearn_metric, metric=sk_metric, reverse=reverse, **metric_args)\n\n super().run_class_metric_test(\n ddp=ddp,\n preds=preds,\n target=target,\n metric_class=metric_class,\n sk_metric=_sk_metric_adapted,\n dist_sync_on_step=dist_sync_on_step,\n metric_args=metric_args,\n fragment_kwargs=True,\n indexes=indexes, # every additional argument will be 
passed to metric_class and _sk_metric_adapted\n )\n\n def run_functional_metric_test(\n self,\n preds: Tensor,\n target: Tensor,\n metric_functional: Callable,\n sk_metric: Callable,\n metric_args: dict,\n reverse: bool = False,\n **kwargs,\n ):\n _sk_metric_adapted = partial(_compute_sklearn_metric, metric=sk_metric, reverse=reverse, **metric_args)\n\n super().run_functional_metric_test(\n preds=preds,\n target=target,\n metric_functional=metric_functional,\n sk_metric=_sk_metric_adapted,\n metric_args=metric_args,\n fragment_kwargs=True,\n **kwargs,\n )\n\n def run_precision_test_cpu(\n self,\n indexes: Tensor,\n preds: Tensor,\n target: Tensor,\n metric_module: Metric,\n metric_functional: Callable,\n ):\n\n def metric_functional_ignore_indexes(preds, target, indexes):\n return metric_functional(preds, target)\n\n super().run_precision_test_cpu(\n preds=preds,\n target=target,\n metric_module=metric_module,\n metric_functional=metric_functional_ignore_indexes,\n metric_args={'empty_target_action': 'neg'},\n indexes=indexes, # every additional argument will be passed to RetrievalMAP and _sk_metric_adapted\n )\n\n def run_precision_test_gpu(\n self,\n indexes: Tensor,\n preds: Tensor,\n target: Tensor,\n metric_module: Metric,\n metric_functional: Callable,\n ):\n if not torch.cuda.is_available():\n pytest.skip()\n\n def metric_functional_ignore_indexes(preds, target, indexes):\n return metric_functional(preds, target)\n\n super().run_precision_test_gpu(\n preds=preds,\n target=target,\n metric_module=metric_module,\n metric_functional=metric_functional_ignore_indexes,\n metric_args={'empty_target_action': 'neg'},\n indexes=indexes, # every additional argument will be passed to RetrievalMAP and _sk_metric_adapted\n )\n\n @staticmethod\n def run_metric_class_arguments_test(\n indexes: Tensor,\n preds: Tensor,\n target: Tensor,\n metric_class: Metric,\n message: str = \"\",\n metric_args: dict = None,\n exception_type: Type[Exception] = ValueError,\n kwargs_update: dict = None,\n ):\n _errors_test_class_metric(\n indexes=indexes,\n preds=preds,\n target=target,\n metric_class=metric_class,\n message=message,\n metric_args=metric_args,\n exception_type=exception_type,\n **kwargs_update,\n )\n\n @staticmethod\n def run_functional_metric_arguments_test(\n preds: Tensor,\n target: Tensor,\n metric_functional: Callable,\n message: str = \"\",\n exception_type: Type[Exception] = ValueError,\n kwargs_update: dict = None,\n ):\n _errors_test_functional_metric(\n preds=preds,\n target=target,\n metric_functional=metric_functional,\n message=message,\n exception_type=exception_type,\n kwargs_update=kwargs_update,\n )\n"
] |
[
[
"torch.Tensor",
"sklearn.metrics.jaccard_score",
"torch.tensor",
"torch.arange",
"torch.allclose"
],
[
"torch.log1p"
],
[
"numpy.full_like",
"numpy.array",
"numpy.mean",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
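
A short aside on the record above (the torchmetrics retrieval test helpers): its core utility, get_group_indexes, splits flat indexes/preds/target tensors into one group of positions per query id so that a scikit-learn metric can be computed per query and then averaged, which is exactly what _compute_sklearn_metric does. The sketch below is not part of the dataset row; it is a minimal, self-contained restatement of that grouping step in plain PyTorch, and every name and value in it is chosen for illustration only.

# Minimal sketch (illustrative, not the dataset code): group flat positions by query id.
import torch

def group_positions_by_query(indexes: torch.Tensor) -> list:
    """Return one LongTensor of positions per distinct query id in `indexes`."""
    groups = {}
    for pos, query_id in enumerate(indexes.tolist()):
        groups.setdefault(query_id, []).append(pos)
    return [torch.tensor(p, dtype=torch.long) for p in groups.values()]

if __name__ == "__main__":
    idx = torch.tensor([0, 0, 0, 1, 1, 1, 1])
    print(group_positions_by_query(idx))
    # -> [tensor([0, 1, 2]), tensor([3, 4, 5, 6])]
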
gchhablani/financial-sentiment-analysis
|
[
"b18e9072f8edb9f09d0fef697892f2462d6d44e9"
] |
[
"src/models/lstm_model.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import PackedSequence\n\nfrom src.utils.mapper import configmapper\n\n\ndef hotfix_pack_padded_sequence(\n input, lengths, batch_first=False, enforce_sorted=False\n):\n lengths = torch.as_tensor(lengths, dtype=torch.int64)\n lengths = lengths.cpu()\n if enforce_sorted:\n sorted_indices = None\n else:\n lengths, sorted_indices = torch.sort(lengths, descending=True)\n sorted_indices = sorted_indices.to(input.device)\n batch_dim = 0 if batch_first else 1\n input = input.index_select(batch_dim, sorted_indices)\n\n data, batch_sizes = torch._C._VariableFunctions._pack_padded_sequence(\n input, lengths, batch_first\n )\n return PackedSequence(data, batch_sizes, sorted_indices)\n\n\n# class LSTM(nn.Module):\n# def __init__(\n# self,\n# input_size,\n# hidden_size,\n# batch_first=False,\n# num_layers=1,\n# bidirectional=False,\n# dropout=0.2,\n# ):\n# super(LSTM, self).__init__()\n\n# self.rnn = nn.LSTM(\n# input_size=input_size,\n# hidden_size=hidden_size,\n# num_layers=num_layers,\n# bidirectional=bidirectional,\n# batch_first=batch_first,\n# )\n# self.reset_params()\n# self.dropout = nn.Dropout(p=dropout)\n\n# def reset_params(self):\n# for i in range(self.rnn.num_layers):\n# nn.init.orthogonal_(getattr(self.rnn, f\"weight_hh_l{i}\"))\n# nn.init.kaiming_normal_(getattr(self.rnn, f\"weight_ih_l{i}\"))\n# nn.init.constant_(getattr(self.rnn, f\"bias_hh_l{i}\"), val=0)\n# nn.init.constant_(getattr(self.rnn, f\"bias_ih_l{i}\"), val=0)\n# bias = getattr(self.rnn, f\"bias_hh_l{i}\").detach()\n# bias.chunk(4)[1].fill_(1)\n# with torch.no_grad():\n# setattr(self.rnn, f\"bias_hh_l{i}\", nn.Parameter(bias))\n\n# if self.rnn.bidirectional:\n# nn.init.orthogonal_(getattr(self.rnn, f\"weight_hh_l{i}_reverse\"))\n# nn.init.kaiming_normal_(getattr(self.rnn, f\"weight_ih_l{i}_reverse\"))\n# nn.init.constant_(getattr(self.rnn, f\"bias_hh_l{i}_reverse\"), val=0)\n# nn.init.constant_(getattr(self.rnn, f\"bias_ih_l{i}_reverse\"), val=0)\n# bias = getattr(self.rnn, f\"bias_hh_l{i}_reverse\").detach()\n# bias.chunk(4)[1].fill_(1)\n# with torch.no_grad():\n# setattr(self.rnn, f\"bias_hh_l{i}_reverse\", nn.Parameter(bias))\n\n# def forward(self, x, x_len):\n# # x: [batch_size, seq_len, dim], x_len:[batch_size]\n# x_len_sorted, x_idx = torch.sort(x_len, descending=True)\n# x_sorted = torch.index_select(x, dim=0, index=x_idx)\n# sorted_x, x_ori_idx = torch.sort(x_idx)\n\n# # x_packed = nn.utils.rnn.pack_padded_sequence(\n# # x_sorted, x_len_sorted, batch_first=True\n# # )\n# x_packed = hotfix_pack_padded_sequence(x_sorted, x_len_sorted, batch_first=True)\n# x_packed, (hidden, c) = self.rnn(x_packed)\n\n# x = nn.utils.rnn.pad_packed_sequence(x_packed, batch_first=True)[0]\n# x = x.index_select(dim=0, index=x_ori_idx)\n\n# hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)\n# # hidden = hidden.permute(1, 0, 2).contiguous().view(-1,\n# # hidden.size(0) * hidden.size(2)).squeeze()\n# hidden = hidden.index_select(dim=0, index=x_ori_idx)\n\n# return hidden, x\n\n\[email protected](\"models\", \"lstm_model\")\nclass LstmModel(nn.Module):\n def __init__(\n self,\n vocab_size,\n embedding_dim,\n hidden_size,\n lstm_num_layers,\n bidirectional,\n dropout,\n num_labels,\n ):\n super().__init__()\n self.embedding = nn.Embedding(\n vocab_size, embedding_dim, padding_idx=0\n ) # from_pretrained\n self.lstm = nn.LSTM(\n embedding_dim,\n hidden_size,\n lstm_num_layers,\n batch_first=True,\n bidirectional=bidirectional,\n dropout=dropout,\n )\n self.hidden_size = 
hidden_size\n self.lstm_num_layers = lstm_num_layers\n self.dropout = nn.Dropout(dropout)\n self.mult = 2 if bidirectional else 1\n self.linear = nn.Linear(hidden_size * self.mult, num_labels)\n self.num_labels = num_labels\n\n def forward(self, inputs, lengths, labels=None):\n x = self.embedding(inputs)\n x = self.dropout(x)\n x_pack = hotfix_pack_padded_sequence(x, lengths, batch_first=True)\n out_pack, (ht, ct) = self.lstm(x_pack)\n ht = ht.view(self.lstm_num_layers, self.mult, -1, self.hidden_size)\n logits = self.linear(\n self.dropout(torch.cat([ht[-1, 0, :, :], ht[-1, 1, :, :]], dim=-1))\n )\n if labels is not None:\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss, logits\n return logits\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.nn.LSTM",
"torch.cat",
"torch.nn.Embedding",
"torch.nn.utils.rnn.PackedSequence",
"torch.nn.Linear",
"torch.sort",
"torch._C._VariableFunctions._pack_padded_sequence",
"torch.as_tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
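
The record above (gchhablani/financial-sentiment-analysis, src/models/lstm_model.py) embeds token ids, packs the padded batch with a hotfix variant of pack_padded_sequence, runs a bidirectional LSTM, and classifies from the concatenated final forward/backward hidden states. The sketch below is illustrative only and is not the repository code: it uses the standard torch.nn.utils.rnn.pack_padded_sequence with enforce_sorted=False, and all shapes and hyper-parameters are invented.

# Illustrative sketch: feeding variable-length sequences through an LSTM by packing them.
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence

batch, max_len, emb_dim, hidden = 3, 5, 8, 16
x = torch.randn(batch, max_len, emb_dim)       # already-padded embeddings
lengths = [5, 3, 2]                            # true length of each sequence

lstm = nn.LSTM(emb_dim, hidden, batch_first=True, bidirectional=True)
packed = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
_, (ht, _) = lstm(packed)                      # ht: (num_directions, batch, hidden) for one layer

# Concatenate the last forward and backward hidden states, as the record's model does.
features = torch.cat([ht[-2], ht[-1]], dim=-1)
print(features.shape)                          # torch.Size([3, 32])
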
vered1986/reporting_bias_lms
|
[
"f4e3a26f41db30939c899855b413bad1ebe14d21"
] |
[
"outcomes/src/generate_outcomes.py"
] |
[
"import tqdm\nimport json\nimport torch\nimport random\nimport argparse\n\nfrom orderedset import OrderedSet\nfrom collections import defaultdict\n\nfrom outcomes.src.common import init_model\n\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"--device\", default=\"cpu\", type=str, help=\"cpu or number for GPU device\")\n ap.add_argument(\"--copa_dir\", default=\"data/copa\", type=str, help=\"COPA data directory\")\n args = ap.parse_args()\n\n device = torch.device(f\"cuda:{args.device}\") if args.device != \"cpu\" else torch.device(\"cpu\")\n\n with open(f\"{args.copa_dir}/dev.jsonl\") as f_in:\n events = [json.loads(line.strip())[\"premise\"] for line in f_in]\n\n out = defaultdict(lambda: defaultdict(list))\n lms = [(lm, *init_model(lm, device)) for lm in [\"openai-gpt\", \"gpt2\", \"gpt2-xl\"]]\n\n for event in tqdm.tqdm(random.sample(events, 20)):\n for lm, model, tokenizer in lms:\n prefix = f\"{event} As a result,\"\n\n preds_topk = generate(\n tokenizer, model, prefix, device, num_return_sequences=10, max_length=10, k=10)\n\n preds_topp = generate(\n tokenizer, model, prefix, device, num_return_sequences=10, max_length=10, p=0.9)\n\n preds_beam = generate(\n tokenizer, model, prefix, device, num_return_sequences=5, max_length=10, beams=5)\n\n out[event][f\"{lm}_preds_top10\"] = preds_topk\n out[event][f\"{lm}_preds_top0.9\"] = preds_topp\n out[event][f\"{lm}_preds_beam5\"] = preds_beam\n\n print_latex_table(out)\n\n\ndef generate(tokenizer, model, prompt, device, num_return_sequences=1, max_length=10, beams=0, p=0, k=0):\n \"\"\"\n Generate a sequence with models like GPT, GPT2, or XLNet\n \"\"\"\n context_tokens = tokenizer.encode(prompt)\n max_length = max_length + len(context_tokens)\n input_ids = torch.tensor(context_tokens, device=device).unsqueeze(0)\n eos_token_id = tokenizer.encode(\".\", add_special_tokens=False)[-1]\n\n outputs = model.generate(\n input_ids=input_ids,\n do_sample=beams == 0,\n max_length=max_length,\n # temperature=temperature,\n top_p=p if p > 0 else None,\n top_k=k if k > 0 else None,\n eos_token_id=eos_token_id,\n num_beams=beams if beams > 0 else None,\n early_stopping=True,\n no_repeat_ngram_size=3,\n num_return_sequences=num_return_sequences\n )\n\n preds = [tokenizer.decode(output, skip_special_tokens=True)[len(prompt):].strip() for output in outputs]\n print(preds)\n preds = list(OrderedSet([pred.split(\".\")[0].strip() for pred in preds]))\n preds = [t for t in preds if len(t) > 0]\n\n return preds\n\n\ndef print_latex_table(out):\n \"\"\"\n Print the example generated outcomes\n \"\"\"\n examples = [(event, fields)\n for event, fields in out.items()\n if len(fields) > 0 and\n all([len(v) > 0 for v in fields.values()])]\n\n print(\"\"\"\\\\begin{tabular}{lllll}\"\"\")\n print(\"\"\"\\\\toprule\"\"\")\n print(\"\"\"\\\\textbf{Event} & \\\\textbf{LM} & \\\\textbf{Sampling} & \\\\textbf{Outcome} \\\\\\\\\"\"\")\n print(\"\\\\midrule\")\n\n lms = [\"openai-gpt\", \"gpt2\", \"gpt2-xl\"]\n\n for event, fields in examples:\n print(\"\\\\multirow{9}{*}{\\\\textbf{\" + event + \"}} \")\n\n by_lm = {lm: {k.replace(f\"{lm}_preds_\", \"\"): v[0] for k, v in fields.items() if lm in k} for lm in lms}\n\n for i, lm in enumerate(lms):\n for j, sampling in enumerate([\"top10\", \"top0.9\", \"beam5\"]):\n first_col = \"\\\\multirow{3}{*}{\" + lm + \"} \" if j == 0 else \"\"\n print(f\"& {first_col} & {sampling} & {by_lm[lm][sampling]} \\\\\\\\\")\n\n if sampling == \"beam5\":\n print(\"\\\\midrule\")\n\n if i == 2:\n print(\"\\\\midrule\")\n\n 
print(\"\\\\midrule\")\n\n print(\"\"\"\\\\bottomrule\"\"\")\n print(\"\"\"\\end{tabular}\"\"\")\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.device",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
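
The record above (vered1986/reporting_bias_lms, outcomes/src/generate_outcomes.py) compares three decoding strategies through the Hugging Face generate API: top-k sampling, nucleus (top-p) sampling, and beam search. The sketch below is illustrative only, not the repository script; "gpt2" is used purely as an example checkpoint and the prompt is invented, but the generate keyword arguments shown are standard transformers parameters.

# Illustrative sketch: top-k sampling, nucleus sampling and beam search with transformers.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "The man broke his toe. As a result,"
input_ids = tokenizer.encode(prompt, return_tensors="pt")
max_length = input_ids.shape[1] + 10

top_k = model.generate(input_ids, do_sample=True, top_k=10, max_length=max_length, num_return_sequences=3)
top_p = model.generate(input_ids, do_sample=True, top_p=0.9, max_length=max_length, num_return_sequences=3)
beams = model.generate(input_ids, num_beams=5, early_stopping=True, max_length=max_length, num_return_sequences=3)

for outputs in (top_k, top_p, beams):
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))
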
RichardScottOZ/map2loop-2
|
[
"0260080ad33edeb7f8184f9ccb1409626c51dc9b"
] |
[
"map2loop/m2l_export.py"
] |
[
"# from map2loop import m2l_topology\nimport networkx as nx\nimport random\nimport numpy as np\nimport pandas as pd\nimport os\nimport geopandas as gpd\nimport rasterio\nfrom rasterio import plot\nfrom rasterio.plot import show\nfrom rasterio.mask import mask\nfrom rasterio.transform import from_origin\nfrom rasterio.io import MemoryFile\nimport matplotlib\nfrom map2loop import m2l_utils\nimport LoopProjectFile\nimport re\n\n\n##########################################################################\n# Save out and compile taskfile needed to generate geomodeller model using the geomodellerbatch engine\n#\n# loop2geomodeller(test_data_path,tmp_path,output_path,save_faults,compute_etc)\n# Args:\n# test_data_path root directory of test data\n# tmp_path directory of temporary outputs\n# output_path directory of outputs\n# ave_faults flag for saving faults or not\n# compute_etc flag for actual calculations or just project output\n#\n# Creates geomodeller taskfile files from varous map2loop outputs\n##########################################################################\ndef loop2geomodeller(model_name, test_data_path, tmp_path, output_path,\n dtm_file, bbox, model_top, model_base, save_faults,\n compute_etc, workflow):\n\n f = open(os.path.join(test_data_path, model_name, 'm2l.taskfile'), 'w')\n f.write(\n '#---------------------------------------------------------------\\n')\n f.write('#-----------------------Project Header-----------------------\\n')\n f.write(\n '#---------------------------------------------------------------\\n')\n f.write('name: \"UWA_Intrepid\"\\n')\n f.write('description: \"Automate_batch_Model\"\\n')\n f.write(' GeomodellerTask {\\n')\n f.write(' CreateProject {\\n')\n f.write(' name: \"Hamersley\"\\n')\n f.write(' author: \"Mark\"\\n')\n f.write(' date: \"23/10/2019 0: 0: 0\"\\n')\n f.write(' projection { map_projection: \"GDA94 / MGA50\"}\\n')\n f.write(' version: \"2.0\"\\n')\n f.write(' units: meters\\n')\n f.write(' precision: 1.0\\n')\n f.write(' Extents {\\n')\n f.write(' xmin: ' + str(bbox[0]) + '\\n')\n f.write(' ymin: ' + str(bbox[1]) + '\\n')\n f.write(' zmin: ' + str(model_base) + '\\n')\n f.write(' xmax: ' + str(bbox[2]) + '\\n')\n f.write(' ymax: ' + str(bbox[3]) + '\\n')\n f.write(' zmax: ' + str(model_top) + '\\n')\n f.write(' }\\n')\n f.write(' deflection2d: 0.001\\n')\n f.write(' deflection3d: 0.001\\n')\n f.write(' discretisation: 10.0\\n')\n f.write(' referenceTop: false\\n')\n f.write(' CustomDTM {\\n')\n f.write(' Extents {\\n')\n f.write(' xmin: ' + str(bbox[0]) + '\\n')\n f.write(' ymin: ' + str(bbox[1]) + '\\n')\n f.write(' xmax: ' + str(bbox[2]) + '\\n')\n f.write(' ymax: ' + str(bbox[3]) + '\\n')\n f.write(' }\\n')\n f.write(' name: \"Topography\"\\n')\n f.write(' filename {\\n')\n f.write(' Grid_Name: \"' + dtm_file + '\"\\n')\n f.write(' }\\n')\n f.write(' nx: 10\\n')\n f.write(' ny: 10\\n')\n f.write(' }\\n')\n f.write(' }\\n')\n f.write('}\\n')\n\n orientations = pd.read_csv(\n os.path.join(output_path, 'orientations_clean.csv'), ',')\n contacts = pd.read_csv(os.path.join(output_path, 'contacts_clean.csv'),\n ',')\n all_sorts = pd.read_csv(os.path.join(tmp_path, 'all_sorts_clean.csv'), ',')\n print(os.getcwd())\n\n supergroups = {}\n sgi = 0\n with open(os.path.join(tmp_path, 'super_groups.csv')) as sgf:\n lines = sgf.readlines()\n for l in lines:\n for g in l.split(','):\n g = g.replace('-', '_').replace(' ', '_').rstrip()\n if (g):\n supergroups[g] = 'supergroup_{}'.format(sgi)\n sgi += 1\n sg_set = set(supergroups.values())\n\n 
empty_fm = []\n\n for indx, afm in all_sorts.iterrows():\n foundcontact = False\n for indx2, acontact in contacts.iterrows():\n if (acontact['formation'] in afm['code']):\n foundcontact = True\n break\n foundorientation = False\n for indx3, ano in orientations.iterrows():\n if (ano['formation'] in afm['code']):\n foundorientation = True\n break\n if (not foundcontact or not foundorientation):\n empty_fm.append(afm['code'])\n\n # print(empty_fm)\n asc = pd.read_csv(os.path.join(tmp_path, 'all_sorts_clean.csv'), \",\")\n\n all_sorts = np.genfromtxt(os.path.join(tmp_path, 'all_sorts_clean.csv'),\n delimiter=',',\n dtype='U100')\n nformations = len(all_sorts)\n\n f.write(\n '#---------------------------------------------------------------\\n')\n f.write(\n '#-----------------------Create Formations-----------------------\\n')\n f.write(\n '#---------------------------------------------------------------\\n')\n\n for ind, row in asc.iterrows():\n if (not row['code'] in empty_fm):\n f.write('GeomodellerTask {\\n')\n f.write('CreateFormation {\\n')\n\n ostr = ' name: \"' + row['code'].replace(\"\\n\", \"\") + '\"\\n'\n f.write(ostr)\n r, g, b = m2l_utils.hextoints(row['colour'])\n ostr = ' red: ' + str(int(r)) + '\\n'\n f.write(ostr)\n\n ostr = ' green: ' + str(int(g)) + '\\n'\n f.write(ostr)\n\n ostr = ' blue: ' + str(int(b)) + '\\n'\n f.write(ostr)\n\n f.write(' }\\n')\n f.write('}\\n')\n\n if (True): # Basal formation if only one supergroup\n f.write('GeomodellerTask {\\n')\n f.write('CreateFormation {\\n')\n\n ostr = ' name: \"Basal_Formation\"\\n'\n f.write(ostr)\n r, g, b = m2l_utils.hextoints(row['colour'])\n ostr = ' red: ' + str(int(128)) + '\\n'\n f.write(ostr)\n\n ostr = ' green: ' + str(int(128)) + '\\n'\n f.write(ostr)\n\n ostr = ' blue: ' + str(int(128)) + '\\n'\n f.write(ostr)\n\n f.write(' }\\n')\n f.write('}\\n')\n\n f.write(\n '#---------------------------------------------------------------\\n')\n f.write(\n '#-----------------------Set Stratigraphic Pile------------------\\n')\n f.write(\n '#---------------------------------------------------------------\\n')\n\n for sg in sg_set:\n # for i in range (nformations-1,0,-1):\n f.write('GeomodellerTask {\\n')\n f.write('SetSeries {\\n')\n\n ostr = ' name: \"' + sg + '\"\\n'\n f.write(ostr)\n\n ostr = ' position: 1\\n'\n f.write(ostr)\n\n ostr = ' relation: \"erode\"\\n'\n f.write(ostr)\n\n f.write(' }\\n')\n f.write('}\\n')\n\n for j in range(nformations - 1, 0, -1):\n # for j in range(1,nformations):\n if (supergroups[all_sorts[j, 5]] == sg):\n if (not all_sorts[j][4] in empty_fm):\n f.write('GeomodellerTask {\\n')\n f.write('AddFormationToSeries {\\n')\n\n ostr = ' series: \"' + sg + '\"\\n'\n f.write(ostr)\n\n ostr = ' formation: \"' + all_sorts[j][4] + '\"\\n'\n f.write(ostr)\n\n f.write(' }\\n')\n f.write('}\\n')\n\n if (True): # Basal series/formation if only one supergroup\n f.write('GeomodellerTask {\\n')\n f.write('SetSeries {\\n')\n\n ostr = ' name: \"Basal_Series\"\\n'\n f.write(ostr)\n\n ostr = ' position: 1\\n'\n f.write(ostr)\n\n ostr = ' relation: \"erode\"\\n'\n f.write(ostr)\n\n f.write(' }\\n')\n f.write('}\\n')\n\n f.write('GeomodellerTask {\\n')\n f.write('AddFormationToSeries {\\n')\n\n ostr = ' series: \"Basal_Series\"\\n'\n f.write(ostr)\n\n ostr = ' formation: \"Basal_Formation\"\\n'\n f.write(ostr)\n\n f.write(' }\\n')\n f.write('}\\n')\n\n if (save_faults):\n output_path = os.path.join(test_data_path, 'output')\n\n faults_len = pd.read_csv(\n os.path.join(output_path, 'fault_dimensions.csv'))\n\n 
n_allfaults = len(faults_len)\n\n fcount = 0\n for i in range(0, n_allfaults):\n f.write('GeomodellerTask {\\n')\n f.write('CreateFault {\\n')\n r, g, b = m2l_utils.hextoints(str(faults_len.iloc[i][\"colour\"]))\n ostr = ' name: \"' + faults_len.iloc[i][\"Fault\"] + '\"\\n'\n f.write(ostr)\n\n ostr = ' red: ' + str(r) + '\\n'\n f.write(ostr)\n\n ostr = ' green: ' + str(g) + '\\n'\n f.write(ostr)\n\n ostr = ' blue: ' + str(b) + '\\n'\n f.write(ostr)\n\n f.write(' }\\n')\n f.write('}\\n')\n fcount = fcount + 1\n\n f.write('GeomodellerTask {\\n')\n f.write(' Set3dFaultLimits {\\n')\n f.write(' Fault_name: \"' + faults_len.iloc[i][\"Fault\"] +\n '\"\\n')\n f.write(' Horizontal: ' +\n str(faults_len.iloc[i][\"HorizontalRadius\"]) + '\\n')\n f.write(' Vertical: ' +\n str(faults_len.iloc[i][\"VerticalRadius\"]) + '\\n')\n f.write(' InfluenceDistance: ' +\n str(faults_len.iloc[i][\"InfluenceDistance\"]) + '\\n')\n f.write(' }\\n')\n f.write('}\\n')\n\n f.write(\n '#---------------------------------------------------------------\\n')\n f.write(\n '#-----------------------Import 3D contact data ---Base Model----\\n')\n f.write(\n '#---------------------------------------------------------------\\n')\n\n contacts = pd.read_csv(os.path.join(output_path, 'contacts_clean.csv'),\n ',')\n all_sorts = pd.read_csv(os.path.join(tmp_path, 'all_sorts_clean.csv'), ',')\n #all_sorts.set_index('code', inplace = True)\n # display(all_sorts)\n\n for inx, afm in all_sorts.iterrows():\n # print(afm[0])\n if (not afm['code'] in empty_fm):\n f.write('GeomodellerTask {\\n')\n f.write(' Add3DInterfacesToFormation {\\n')\n f.write(' formation: \"' + str(afm['code']) + '\"\\n')\n\n for indx2, acontact in contacts.iterrows():\n if (acontact['formation'] in afm['code']):\n ostr = ' point {x:' + str(\n acontact['X']) + '; y:' + str(\n acontact['Y']) + '; z:' + str(\n acontact['Z']) + '}\\n'\n f.write(ostr)\n f.write(' }\\n')\n f.write('}\\n')\n f.write(\n '#---------------------------------------------------------------\\n')\n f.write(\n '#------------------Import 3D orientation data ---Base Model-----\\n')\n f.write(\n '#---------------------------------------------------------------\\n')\n\n orientations = pd.read_csv(\n os.path.join(output_path, 'orientations_clean.csv'), ',')\n all_sorts = pd.read_csv(os.path.join(tmp_path, 'all_sorts_clean.csv'), ',')\n #all_sorts.set_index('code', inplace = True)\n # display(all_sorts)\n\n for inx, afm in all_sorts.iterrows():\n # print(groups[agp])\n if (not afm['code'] in empty_fm):\n f.write('GeomodellerTask {\\n')\n f.write(' Add3DFoliationToFormation {\\n')\n f.write(' formation: \"' + str(afm['code']) + '\"\\n')\n for indx2, ano in orientations.iterrows():\n if (ano['formation'] in afm['code']):\n f.write(' foliation {\\n')\n ostr = ' Point3D {x:' + str(\n ano['X']) + '; y:' + str(ano['Y']) + '; z:' + str(\n ano['Z']) + '}\\n'\n f.write(ostr)\n ostr = ' direction: ' + str(\n ano['azimuth']) + '\\n'\n f.write(ostr)\n ostr = ' dip: ' + str(ano['dip']) + '\\n'\n f.write(ostr)\n if (ano['polarity'] == 1):\n ostr = ' polarity: Normal_Polarity\\n'\n else:\n ostr = ' polarity: Reverse_Polarity\\n'\n f.write(ostr)\n ostr = ' }\\n'\n f.write(ostr)\n f.write(' }\\n')\n f.write('}\\n')\n\n f.write(\n '#---------------------------------------------------------------\\n')\n f.write(\n '#-----------------------Import 3D fault data ---Base Model------\\n')\n f.write(\n '#---------------------------------------------------------------\\n')\n\n contacts = pd.read_csv(os.path.join(output_path, 
'faults.csv'), ',')\n faults = pd.read_csv(os.path.join(output_path, 'fault_dimensions.csv'),\n ',')\n\n for indx, afault in faults.iterrows():\n f.write('GeomodellerTask {\\n')\n f.write(' Add3DInterfacesToFormation {\\n')\n f.write(' formation: \"' + str(afault['Fault']) + '\"\\n')\n for indx2, acontact in contacts.iterrows():\n if (acontact['formation'] == afault['Fault']):\n ostr = ' point {x:' + str(\n acontact['X']) + '; y:' + str(\n acontact['Y']) + '; z:' + str(acontact['Z']) + '}\\n'\n f.write(ostr)\n f.write(' }\\n')\n f.write('}\\n')\n\n f.write(\n '#---------------------------------------------------------------\\n')\n f.write(\n '#------------------Import 3D fault orientation data ------------\\n')\n f.write(\n '#---------------------------------------------------------------\\n')\n\n orientations = pd.read_csv(\n os.path.join(output_path, 'fault_orientations.csv'), ',')\n faults = pd.read_csv(os.path.join(output_path, 'fault_dimensions.csv'),\n ',')\n\n for indx, afault in faults.iterrows():\n f.write('GeomodellerTask {\\n')\n f.write(' Add3DFoliationToFormation {\\n')\n f.write(' formation: \"' + str(afault['Fault']) + '\"\\n')\n for indx2, ano in orientations.iterrows():\n if (ano['formation'] == afault['Fault']):\n f.write(' foliation {\\n')\n ostr = ' Point3D {x:' + str(\n ano['X']) + '; y:' + str(ano['Y']) + '; z:' + str(\n ano['Z']) + '}\\n'\n f.write(ostr)\n ostr = ' direction: ' + str(\n ano['DipDirection']) + '\\n'\n f.write(ostr)\n if (ano['dip'] == -999):\n ostr = ' dip: ' + str(\n random.randint(60, 90)) + '\\n'\n else:\n ostr = ' dip: ' + str(ano['dip']) + '\\n'\n f.write(ostr)\n if (ano['DipPolarity'] == 1):\n ostr = ' polarity: Normal_Polarity\\n'\n else:\n ostr = ' polarity: Reverse_Polarity\\n'\n f.write(ostr)\n ostr = ' }\\n'\n f.write(ostr)\n f.write(' }\\n')\n f.write('}\\n')\n\n if (save_faults):\n G = nx.read_gml(os.path.join(tmp_path, \"fault_network.gml\"),\n label='label')\n #nx.draw(G, with_labels=True, font_weight='bold')\n edges = list(G.edges)\n # for i in range(0,len(edges)):\n # print(edges[i][0],edges[i][1])\n cycles = list(nx.simple_cycles(G))\n # display(cycles)\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write(\n '#-----------------------Link faults with faults ----------------\\n'\n )\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write('GeomodellerTask {\\n')\n f.write(' LinkFaultsWithFaults {\\n')\n\n for i in range(0, len(edges)):\n found = False\n for j in range(0, len(cycles)):\n if (edges[i][0] == cycles[j][0]\n and edges[i][1] == cycles[j][1]):\n found = True # fault pair is first two elements in a cycle list so don't save to taskfile\n if (not found):\n ostr = ' FaultStopsOnFaults{ fault: \"' + edges[i][\n 1] + '\"; stopson: \"' + edges[i][0] + '\"}\\n'\n f.write(ostr)\n\n f.write(' }\\n')\n f.write('}\\n')\n\n if (save_faults):\n all_fault_group = np.genfromtxt(os.path.join(\n output_path, 'group-fault-relationships.csv'),\n delimiter=',',\n dtype='U100')\n ngroups = len(all_fault_group)\n all_fault_group = np.transpose(all_fault_group)\n nfaults = len(all_fault_group)\n\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write(\n '#-----------------------Link series with faults ----------------\\n'\n )\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write('GeomodellerTask {\\n')\n f.write(' LinkFaultsWithSeries {\\n')\n\n for i in range(1, nfaults):\n first = 
True\n for j in range(1, ngroups):\n if (all_fault_group[i, j] == str(1)):\n if (first):\n ostr = ' FaultSeriesLinks{ fault: \"' + all_fault_group[\n i, 0] + '\"; series: ['\n f.write(ostr)\n ostr = '\"' + supergroups[all_fault_group[0, j]] + '\"'\n f.write(ostr)\n last_sg = supergroups[all_fault_group[0, j]]\n first = False\n else:\n if (not supergroups[all_fault_group[0, j]] == last_sg):\n ostr = ', \"' + supergroups[all_fault_group[\n 0, j]] + '\"'\n last_sg = supergroups[all_fault_group[0, j]]\n f.write(ostr)\n if (not first):\n ostr = ']}\\n'\n f.write(ostr)\n\n f.write(' }\\n')\n f.write('}\\n')\n\n f.write('GeomodellerTask {\\n')\n f.write(' SaveProjectAs {\\n')\n f.write(' filename: \"./' + model_name + '.xml\"\\n')\n f.write(' }\\n')\n f.write('}\\n')\n f.close()\n\n if (compute_etc):\n f = open(\n os.join.path(test_data_path, model_name, 'm2l_compute.taskfile'),\n 'w')\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write(\n '#----------------------------Load Model----------------------\\n')\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write('GeomodellerTask {\\n')\n f.write(' OpenProjectNoGUI {\\n')\n f.write(' filename: \"./' + model_name + '.xml\"\\n')\n f.write(' }\\n')\n f.write('}\\n')\n\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write(\n '#----------------------------Compute Model----------------------\\n'\n )\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write('\\n')\n f.write('GeomodellerTask {\\n')\n f.write(' ComputeModel {\\n')\n f.write(' SeriesList {\\n')\n f.write(' node: \"All\" \\n')\n f.write(' }\\n')\n f.write(' SectionList {\\n')\n f.write(' node: \"All\"\\n')\n f.write(' }\\n')\n f.write(' FaultList {\\n')\n f.write(' node: \"All\"\\n')\n f.write(' }\\n')\n f.write(' radius: 10.0\\n')\n f.write(' }\\n')\n f.write('}\\n')\n\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write(\n '#-----------------------Add geophysical Properties--------------\\n'\n )\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write('\\n')\n f.write('\\n')\n f.write('\\n')\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write(\n '#--------------------------Export Lithology Voxet---------------\\n'\n )\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write('GeomodellerTask {\\n')\n f.write(' SaveLithologyVoxet {\\n')\n f.write(' nx: 25\\n')\n f.write(' ny: 25\\n')\n f.write(' nz: 40\\n')\n f.write(\n ' LithologyVoxetFileStub: \"./Litho_Voxet/LithoVoxet.vo\"\\n')\n f.write(' }\\n')\n f.write('}\\n')\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write(\n '#--------------------------Save As Model------------------------\\n'\n )\n f.write(\n '#---------------------------------------------------------------\\n'\n )\n f.write('\\n')\n\n f.write('GeomodellerTask {\\n')\n f.write(' SaveProjectAs {\\n')\n f.write(' filename: \"/' + model_name + '.xml\"\\n')\n f.write(' }\\n')\n f.write('}\\n')\n f.write('GeomodellerTask {\\n')\n f.write(' CloseProjectNoGUI {\\n')\n f.write(' }\\n')\n f.write('}\\n')\n\n f.close()\n\n\n# same same expect it builds a list that then gets written all at once (this version is slower!)\ndef loop2geomodeller2(model_name, test_data_path, tmp_path, output_path,\n dtm_file, bbox, 
save_faults, compute_etc, workflow):\n\n f = open(os.path.join(test_data_path, model_name, 'm2l.taskfile'), 'w')\n ostr = []\n\n ostr.append(\n '#---------------------------------------------------------------\\n')\n ostr.append(\n '#-----------------------Project Header-----------------------\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n')\n ostr.append('name: \"UWA_Intrepid\"\\n')\n ostr.append('description: \"Automate_batch_Model\"\\n')\n ostr.append(' GeomodellerTask {\\n')\n ostr.append(' CreateProject {\\n')\n ostr.append(' name: \"Hamersley\"\\n')\n ostr.append(' author: \"Mark\"\\n')\n ostr.append(' date: \"23/10/2019 0: 0: 0\"\\n')\n ostr.append(' projection { map_projection: \"GDA94 / MGA50\"}\\n')\n ostr.append(' version: \"2.0\"\\n')\n ostr.append(' units: meters\\n')\n ostr.append(' precision: 1.0\\n')\n ostr.append(' Extents {\\n')\n ostr.append(' xmin: ' + str(bbox[0]) + '\\n')\n ostr.append(' ymin: ' + str(bbox[1]) + '\\n')\n ostr.append(' zmin: -7000\\n')\n ostr.append(' xmax: ' + str(bbox[2]) + '\\n')\n ostr.append(' ymax: ' + str(bbox[3]) + '\\n')\n ostr.append(' zmax: 1200\\n')\n ostr.append(' }\\n')\n ostr.append(' deflection2d: 0.001\\n')\n ostr.append(' deflection3d: 0.001\\n')\n ostr.append(' discretisation: 10.0\\n')\n ostr.append(' referenceTop: false\\n')\n ostr.append(' CustomDTM {\\n')\n ostr.append(' Extents {\\n')\n ostr.append(' xmin: ' + str(bbox[0]) + '\\n')\n ostr.append(' ymin: ' + str(bbox[1]) + '\\n')\n ostr.append(' xmax: ' + str(bbox[2]) + '\\n')\n ostr.append(' ymax: ' + str(bbox[3]) + '\\n')\n ostr.append(' }\\n')\n ostr.append(' name: \"Topography\"\\n')\n ostr.append(' filename {\\n')\n ostr.append(' Grid_Name: \"' + dtm_file + '\"\\n')\n ostr.append(' }\\n')\n ostr.append(' nx: 10\\n')\n ostr.append(' ny: 10\\n')\n ostr.append(' }\\n')\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n orientations = pd.read_csv(\n os.path.join(output_path, 'orientations_clean.csv'), ',')\n contacts = pd.read_csv(os.path.join(output_path, 'contacts_clean.csv'),\n ',')\n all_sorts = pd.read_csv(os.path.join(tmp_path, 'all_sorts_clean.csv'), ',')\n\n empty_fm = []\n\n for indx, afm in all_sorts.iterrows():\n foundcontact = False\n for indx2, acontact in contacts.iterrows():\n if (acontact['formation'] in afm['code']):\n foundcontact = True\n break\n foundorientation = False\n for indx3, ano in orientations.iterrows():\n if (ano['formation'] in afm['code']):\n foundorientation = True\n break\n if (not foundcontact or not foundorientation):\n empty_fm.append(afm['code'])\n\n # print(empty_fm)\n\n all_sorts = np.genfromtxt(os.path.join(tmp_path, 'all_sorts_clean.csv'),\n delimiter=',',\n dtype='U100')\n nformations = len(all_sorts)\n\n ostr.append(\n '#---------------------------------------------------------------\\n')\n ostr.append(\n '#-----------------------Create Formations-----------------------\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n')\n\n for i in range(1, nformations):\n if (not all_sorts[i, 4] in empty_fm):\n ostr.append('GeomodellerTask {\\n')\n ostr.append('CreateFormation {\\n')\n\n ostr2 = ' name: \"' + all_sorts[i, 4].replace(\"\\n\", \"\") + '\"\\n'\n ostr.append(ostr2)\n\n ostr2 = ' red: ' + str(random.randint(1, 256) - 1) + '\\n'\n ostr.append(ostr2)\n\n ostr2 = ' green: ' + str(random.randint(1, 256) - 1) + '\\n'\n ostr.append(ostr2)\n\n ostr2 = ' blue: ' + str(random.randint(1, 256) - 1) + '\\n'\n ostr.append(ostr2)\n\n ostr.append(' }\\n')\n 
ostr.append('}\\n')\n\n ostr.append(\n '#---------------------------------------------------------------\\n')\n ostr.append(\n '#-----------------------Set Stratigraphic Pile------------------\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n')\n\n for i in range(1, nformations):\n # for i in range (nformations-1,0,-1):\n if (all_sorts[i, 2] == str(1)):\n ostr.append('GeomodellerTask {\\n')\n ostr.append('SetSeries {\\n')\n\n ostr2 = ' name: \"' + all_sorts[i][5].replace(\"\\n\", \"\") + '\"\\n'\n ostr.append(ostr2)\n\n ostr2 = ' position: 1\\n'\n ostr.append(ostr2)\n\n ostr2 = ' relation: \"erode\"\\n'\n ostr.append(ostr2)\n\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n for j in range(nformations - 1, 0, -1):\n # for j in range(1,nformations):\n if (all_sorts[j, 1] == all_sorts[i, 1]):\n if (not all_sorts[j][4] in empty_fm):\n ostr.append('GeomodellerTask {\\n')\n ostr.append('AddFormationToSeries {\\n')\n\n ostr2 = ' series: \"' + all_sorts[j][5] + '\"\\n'\n ostr.append(ostr2)\n\n ostr2 = ' formation: \"' + all_sorts[j][4] + '\"\\n'\n ostr.append(ostr2)\n\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n if (save_faults):\n output_path = os.path.join(test_data_path, 'output')\n\n faults_len = pd.read_csv(\n os.path.join(output_path, 'fault_dimensions.csv'))\n\n n_allfaults = len(faults_len)\n\n fcount = 0\n for i in range(0, n_allfaults):\n ostr.append('GeomodellerTask {\\n')\n ostr.append('CreateFault {\\n')\n ostr2 = ' name: \"' + faults_len.iloc[i][\"Fault\"] + '\"\\n'\n ostr.append(ostr2)\n\n ostr2 = ' red: ' + str(random.randint(1, 256) - 1) + '\\n'\n ostr.append(ostr2)\n\n ostr2 = ' green: ' + str(random.randint(1, 256) - 1) + '\\n'\n ostr.append(ostr2)\n\n ostr2 = ' blue: ' + str(random.randint(1, 256) - 1) + '\\n'\n ostr.append(ostr2)\n\n ostr.append(' }\\n')\n ostr.append('}\\n')\n fcount = fcount + 1\n\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' Set3dFaultLimits {\\n')\n ostr.append(' Fault_name: \"' + faults_len.iloc[i][\"Fault\"] +\n '\"\\n')\n ostr.append(' Horizontal: ' +\n str(faults_len.iloc[i][\"HorizontalRadius\"]) + '\\n')\n ostr.append(' Vertical: ' +\n str(faults_len.iloc[i][\"VerticalRadius\"]) + '\\n')\n ostr.append(' InfluenceDistance: ' +\n str(faults_len.iloc[i][\"InfluenceDistance\"]) + '\\n')\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n ostr.append(\n '#---------------------------------------------------------------\\n')\n ostr.append(\n '#-----------------------Import 3D contact data ---Base Model----\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n')\n\n contacts = pd.read_csv(os.path.join(output_path, 'contacts_clean.csv'),\n ',')\n all_sorts = pd.read_csv(os.path.join(tmp_path, 'all_sorts_clean.csv'), ',')\n #all_sorts.set_index('code', inplace = True)\n # display(all_sorts)\n\n for inx, afm in all_sorts.iterrows():\n # print(afm[0])\n if (not afm['code'] in empty_fm):\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' Add3DInterfacesToFormation {\\n')\n ostr.append(' formation: \"' + str(afm['code']) + '\"\\n')\n\n for indx2, acontact in contacts.iterrows():\n if (acontact['formation'] in afm['code']):\n ostr2 = ' point {x:' + str(\n acontact['X']) + '; y:' + str(\n acontact['Y']) + '; z:' + str(\n acontact['Z']) + '}\\n'\n ostr.append(ostr2)\n ostr.append(' }\\n')\n ostr.append('}\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n')\n ostr.append(\n '#------------------Import 3D orientation data ---Base 
Model-----\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n')\n\n orientations = pd.read_csv(\n os.path.join(output_path, 'orientations_clean.csv'), ',')\n all_sorts = pd.read_csv(os.path.join(tmp_path, 'all_sorts_clean.csv'), ',')\n #all_sorts.set_index('code', inplace = True)\n # display(all_sorts)\n\n for inx, afm in all_sorts.iterrows():\n # print(groups[agp])\n if (not afm['code'] in empty_fm):\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' Add3DFoliationToFormation {\\n')\n ostr.append(' formation: \"' + str(afm['code']) + '\"\\n')\n for indx2, ano in orientations.iterrows():\n if (ano['formation'] in afm['code']):\n ostr.append(' foliation {\\n')\n ostr2 = ' Point3D {x:' + str(\n ano['X']) + '; y:' + str(ano['Y']) + '; z:' + str(\n ano['Z']) + '}\\n'\n ostr.append(ostr2)\n ostr2 = ' direction: ' + str(\n ano['azimuth']) + '\\n'\n ostr.append(ostr2)\n ostr2 = ' dip: ' + str(ano['dip']) + '\\n'\n ostr.append(ostr2)\n if (ano['polarity'] == 1):\n ostr2 = ' polarity: Normal_Polarity\\n'\n else:\n ostr2 = ' polarity: Reverse_Polarity\\n'\n ostr.append(ostr2)\n ostr2 = ' }\\n'\n ostr.append(ostr2)\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n ostr.append(\n '#---------------------------------------------------------------\\n')\n ostr.append(\n '#-----------------------Import 3D fault data ---Base Model------\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n')\n\n contacts = pd.read_csv(os.path.join(output_path, 'faults.csv'), ',')\n faults = pd.read_csv(os.path.join(output_path, 'fault_dimensions.csv'),\n ',')\n\n for indx, afault in faults.iterrows():\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' Add3DInterfacesToFormation {\\n')\n ostr.append(' formation: \"' + str(afault['Fault']) + '\"\\n')\n for indx2, acontact in contacts.iterrows():\n if (acontact['formation'] == afault['Fault']):\n ostr2 = ' point {x:' + str(\n acontact['X']) + '; y:' + str(\n acontact['Y']) + '; z:' + str(acontact['Z']) + '}\\n'\n ostr.append(ostr2)\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n ostr.append(\n '#---------------------------------------------------------------\\n')\n ostr.append(\n '#------------------Import 3D fault orientation data ------------\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n')\n\n orientations = pd.read_csv(\n os.path.join(output_path, 'fault_orientations.csv'), ',')\n faults = pd.read_csv(os.path.join(output_path, 'fault_dimensions.csv'),\n ',')\n\n for indx, afault in faults.iterrows():\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' Add3DFoliationToFormation {\\n')\n ostr.append(' formation: \"' + str(afault['Fault']) + '\"\\n')\n for indx2, ano in orientations.iterrows():\n if (ano['formation'] == afault['Fault']):\n ostr.append(' foliation {\\n')\n ostr2 = ' Point3D {x:' + str(\n ano['X']) + '; y:' + str(ano['Y']) + '; z:' + str(\n ano['Z']) + '}\\n'\n ostr.append(ostr2)\n ostr2 = ' direction: ' + str(\n ano['DipDirection']) + '\\n'\n ostr.append(ostr2)\n if (ano['dip'] == -999):\n ostr2 = ' dip: ' + str(\n random.randint(60, 90)) + '\\n'\n else:\n ostr2 = ' dip: ' + str(ano['dip']) + '\\n'\n ostr.append(ostr2)\n if (ano['DipPolarity'] == 1):\n ostr2 = ' polarity: Normal_Polarity\\n'\n else:\n ostr2 = ' polarity: Reverse_Polarity\\n'\n ostr.append(ostr2)\n ostr2 = ' }\\n'\n ostr.append(ostr2)\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n if (save_faults):\n G = nx.read_gml(os.path.join(tmp_path, \"fault_network.gml\"),\n 
label='label')\n #nx.draw(G, with_labels=True, font_weight='bold')\n edges = list(G.edges)\n # for i in range(0,len(edges)):\n # print(edges[i][0],edges[i][1])\n cycles = list(nx.simple_cycles(G))\n # display(cycles)\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append(\n '#-----------------------Link faults with faults ----------------\\n'\n )\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' LinkFaultsWithFaults {\\n')\n\n for i in range(0, len(edges)):\n found = False\n for j in range(0, len(cycles)):\n if (edges[i][0] == cycles[j][0]\n and edges[i][1] == cycles[j][1]):\n found = True # fault pair is first two elements in a cycle list so don't save to taskfile\n if (not found):\n ostr2 = ' FaultStopsOnFaults{ fault: \"' + edges[i][\n 1] + '\"; stopson: \"' + edges[i][0] + '\"}\\n'\n ostr.append(ostr2)\n\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n if (save_faults):\n all_fault_group = np.genfromtxt(os.path.join(\n output_path, 'group-fault-relationships.csv'),\n delimiter=',',\n dtype='U100')\n ngroups = len(all_fault_group)\n all_fault_group = np.transpose(all_fault_group)\n nfaults = len(all_fault_group)\n\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append(\n '#-----------------------Link series with faults ----------------\\n'\n )\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' LinkFaultsWithSeries {\\n')\n\n for i in range(1, nfaults):\n first = True\n for j in range(1, ngroups):\n if (all_fault_group[i, j] == str(1)):\n if (first):\n ostr2 = ' FaultSeriesLinks{ fault: \"' + all_fault_group[\n i, 0] + '\"; series: ['\n ostr.append(ostr2)\n ostr2 = '\"' + all_fault_group[0, j] + '\"'\n ostr.append(ostr2)\n first = False\n else:\n ostr2 = ', \"' + all_fault_group[0, j] + '\"'\n ostr.append(ostr2)\n if (not first):\n ostr2 = ']}\\n'\n ostr.append(ostr2)\n\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' SaveProjectAs {\\n')\n ostr.append(' filename: \"./' + model_name + '.xml\"\\n')\n ostr.append(' }\\n')\n ostr.append('}\\n')\n f.close()\n\n if (compute_etc):\n f = open(\n os.path.join(test_data_path, model_name, 'm2l_compute.taskfile'),\n 'w')\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append(\n '#----------------------------Load Model----------------------\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' OpenProjectNoGUI {\\n')\n ostr.append(' filename: \"./' + model_name + '.xml\"\\n')\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append(\n '#----------------------------Compute Model----------------------\\n'\n )\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append('\\n')\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' ComputeModel {\\n')\n ostr.append(' SeriesList {\\n')\n ostr.append(' node: \"All\" \\n')\n ostr.append(' }\\n')\n ostr.append(' SectionList {\\n')\n ostr.append(' node: \"All\"\\n')\n ostr.append(' }\\n')\n ostr.append(' FaultList {\\n')\n ostr.append(' node: \"All\"\\n')\n ostr.append(' }\\n')\n ostr.append(' 
radius: 10.0\\n')\n ostr.append(' }\\n')\n ostr.append('}\\n')\n\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append(\n '#-----------------------Add geophysical Properties--------------\\n'\n )\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append('\\n')\n ostr.append('\\n')\n ostr.append('\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append(\n '#--------------------------Export Lithology Voxet---------------\\n'\n )\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' SaveLithologyVoxet {\\n')\n ostr.append(' nx: 25\\n')\n ostr.append(' ny: 25\\n')\n ostr.append(' nz: 40\\n')\n ostr.append(\n ' LithologyVoxetFileStub: \"./Litho_Voxet/LithoVoxet.vo\"\\n')\n ostr.append(' }\\n')\n ostr.append('}\\n')\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append(\n '#--------------------------Save As Model------------------------\\n'\n )\n ostr.append(\n '#---------------------------------------------------------------\\n'\n )\n ostr.append('\\n')\n\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' SaveProjectAs {\\n')\n ostr.append(' filename: \"/' + model_name + '.xml\"\\n')\n ostr.append(' }\\n')\n ostr.append('}\\n')\n ostr.append('GeomodellerTask {\\n')\n ostr.append(' CloseProjectNoGUI {\\n')\n ostr.append(' }\\n')\n ostr.append('}\\n')\n f.writelines(ostr)\n f.close()\n\n\n##########################################################################\n# Import outputs from map2loop to LoopStructural and view with Lavavu\n#\n# loop2LoopStructural(thickness_file,orientation_file,contacts_file,bbox)\n# Args:\n# bbox model bounding box\n#\n# Calculates model and displays in LavaVu wthin notebook\n##########################################################################\ndef loop2LoopStructural(m2l_directory):\n \"\"\" create a model from a map2loop directory\n\n [extended_summary]\n\n Parameters\n ----------\n m2l_directory : string\n path to the map2loop directory\n \"\"\"\n visualise = False\n # make sure everything is installed and can be imported\n try:\n from LoopStructural import GeologicalModel\n from LoopStructural.utils import process_map2loop\n except ImportError:\n print('Loop Structural not installed')\n return\n try:\n from LoopStructural.visualisation import LavaVuModelViewer\n visualise = True\n except ImportError:\n print(\"Lavavu is not installed, try installing it with pip \\n\"\n \"Model will be built but cannot be visualised\")\n\n m2l_data = process_map2loop(m2l_directory)\n boundary_points = np.zeros((2, 3))\n boundary_points[0, 0] = m2l_data['bounding_box']['minx']\n boundary_points[0, 1] = m2l_data['bounding_box']['miny']\n boundary_points[0, 2] = m2l_data['bounding_box']['lower']\n boundary_points[1, 0] = m2l_data['bounding_box']['maxx']\n boundary_points[1, 1] = m2l_data['bounding_box']['maxy']\n boundary_points[1, 2] = m2l_data['bounding_box']['upper']\n\n model = GeologicalModel(boundary_points[0, :], boundary_points[1, :])\n model.set_model_data(m2l_data['data'])\n\n faults = []\n for f in m2l_data['max_displacement'].keys():\n if model.data[model.data['type'] == f].shape[0] == 0:\n continue\n fault_id = f[6:]\n overprints = []\n try:\n overprint_id = m2l_data['fault_fault'][\n m2l_data['fault_fault'][fault_id] == 1]['fault_id'].to_numpy()\n for i in overprint_id:\n 
overprints.append(['Fault_%i' % i])\n except:\n print('No entry for %s in fault_fault_relations' % f)\n # continue\n faults.append(\n model.create_and_add_fault(\n f,\n -m2l_data['max_displacement'][f],\n faultfunction='BaseFault',\n interpolatortype='FDI',\n nelements=1e4,\n data_region=.1,\n # regularisation=[1,1,1],\n solver='pyamg',\n # damp=True,\n # buffer=0.1,\n # steps=1,\n overprints=overprints,\n cpw=10,\n npw=10))\n\n # loop through all of the groups and add them to the model in youngest to oldest.\n group_features = []\n for i in m2l_data['groups']['group number'].unique():\n g = m2l_data['groups'].loc[m2l_data['groups']['group number'] == i,\n 'group'].unique()[0]\n group_features.append(\n model.create_and_add_foliation(\n g,\n interpolatortype=\"PLI\", # which interpolator to use\n nelements=1e5, # how many tetras/voxels\n buffer=0.5, # how much to extend nterpolation around box\n solver='pyamg',\n damp=True))\n # if the group was successfully added (not null) then lets add the base (0 to be unconformity)\n if group_features[-1]:\n model.add_unconformity(group_features[-1]['feature'], 0)\n model.set_stratigraphic_column(m2l_data['stratigraphic_column'])\n if visualise:\n viewer = LavaVuModelViewer(model)\n viewer.add_model(cmap='tab20')\n viewer.interactive()\n\n\n##########################################################################\n# Import outputs from map2loop to gempy and view with pyvtk\n# loop2gempy(test_data_name,tmp_path,vtk_pth,orientations_file,contacts_file,groups_file,dtm_reproj_file,bbox,model_base, model_top,vtk)\n# Args:\n# test_data_name root name of project\n# tmp_path path of temp files directory\n# vtk_pth path of vtk output directory\n# orientations_file path of orientations file\n# contacts_file path of contacts file\n# groups_file path of groups file\n# dtm_reproj_file path of dtm file\n# bbox model bounding box\n# model_base z value ofbase of model\n# model_top z value of top of model\n# vtk flag as to wether to save out model to vtk\n#\n# Calculates model and displays in external vtk viewer\n##########################################################################\ndef loop2gempy__(test_data_name: str,\n tmp_path: str,\n vtk_path: str,\n orientations_file: str,\n contacts_file: str,\n groups_file: str,\n bbox: tuple,\n model_base: float,\n model_top: float,\n vtk: bool,\n dtm_reproj_file: str = None,\n va=None,\n verbose: bool = False,\n compute: bool = True):\n \"\"\"\n\n :param test_data_name:\n :param tmp_path:\n :param vtk_path:\n :param orientations_file:\n :param contacts_file:\n :param groups_file:\n :param bbox:\n :param model_base:\n :param model_top:\n :param vtk:\n :param dtm_reproj_file:\n :param va: vertical anisotropy. Factor by which all Z coordinates are multiplied by\n :param verbose:\n :param compute:\n :return:\n \"\"\"\n import gempy as gp\n from gempy import plot\n\n geo_model = gp.create_model(test_data_name)\n\n # If depth coordinates are much smaller than XY the whole system of equations becomes very unstable. 
Until\n # I fix it properly in gempy this is a handcrafted hack\n if va is None:\n va = (float(bbox[0]) - float(bbox[2])) / (model_base - model_top) / 2\n\n if va < 3:\n va = 0\n else:\n print('The vertical exageration is: ', va)\n\n gp.init_data(geo_model,\n extent=[\n bbox[0], bbox[2], bbox[1], bbox[3], model_base * va,\n model_top * va\n ],\n resolution=(50, 50, 50),\n path_o=orientations_file,\n path_i=contacts_file)\n\n geo_model.modify_surface_points(geo_model.surface_points.df.index,\n Z=geo_model.surface_points.df['Z'] * va)\n\n if dtm_reproj_file is not None:\n # Load reprojected topography to model\n\n fp = dtm_reproj_file\n geo_model.set_topography(source='gdal', filepath=fp)\n\n # Rescaling topography:\n geo_model._grid.topography.values[:, 2] *= va\n geo_model._grid.update_grid_values()\n geo_model.update_from_grid()\n\n # Pile processing:\n contents = np.genfromtxt(groups_file, delimiter=',', dtype='U100')\n\n # Init dictionary Series:Surfaces\n map_series_to_surfaces = {}\n choice = 0\n for group in contents:\n # Reading surfaces groups\n surfaces_g = np.atleast_2d(\n np.genfromtxt(os.path.join(tmp_path, group + '.csv'),\n delimiter=',',\n dtype='U100'))\n\n # Check if there are several choices\n if surfaces_g.shape[1] > 1:\n surfaces_g = surfaces_g[choice]\n # Deleting the first element since it is not a surface\n surfaces_g = surfaces_g[1:]\n # Creating the mapping dictionary\n map_series_to_surfaces[group] = surfaces_g.tolist()\n\n if verbose is True:\n print(map_series_to_surfaces)\n\n # Setting pile to model\n gp.map_series_to_surfaces(geo_model,\n map_series_to_surfaces,\n remove_unused_series=False)\n\n if ('Default series' in map_series_to_surfaces):\n\n # Removing related data\n del_surfaces = geo_model.surfaces.df.groupby('series').get_group(\n 'Default series')['surface']\n geo_model.delete_surfaces(del_surfaces, remove_data=True)\n\n # Removing series that have not been mapped to any surface\n geo_model.delete_series('Default series')\n\n if compute is True:\n gp.set_interpolator(geo_model,\n theano_optimizer='fast_run',\n dtype='float64')\n gp.compute_model(geo_model)\n\n # Visualise Model\n #gp.plot.plot_3D(geo_model, render_data=False)\n p3d = gp.plot_3d(geo_model, plotter_type='background', notebook=False)\n\n p3d3 = gp.plot_3d(geo_model, notebook=True)\n\n # Save model as vtk\n if vtk:\n gp.plot.export_to_vtk(geo_model,\n path=vtk_path,\n name=test_data_name + '.vtk',\n voxels=False,\n block=None,\n surfaces=True)\n\n return geo_model\n\n\ndef loop2gempy_(test_data_name, tmp_path, vtk_path, orientations_file,\n contacts_file, groups_file, dtm_reproj_file, bbox, model_base,\n model_top, vtk):\n import gempy as gp\n from gempy import plot\n geo_model = gp.create_model(test_data_name)\n\n # If depth coordinates are much smaller than XY the whole system of equations becomes very unstable. 
Until\n # I fix it properly in gempy this is a handcrafted hack\n ve = (bbox[0] - bbox[2]) / (model_base - model_top)\n\n if ve < 3:\n ve = 0\n else:\n print('The vertical exageration is: ', ve)\n\n gp.init_data(geo_model,\n extent=[\n bbox[0], bbox[2], bbox[1], bbox[3], model_base * ve,\n model_top * ve\n ],\n resolution=(50, 50, 50),\n path_o=orientations_file,\n path_i=contacts_file,\n default_values=True)\n\n # Show example lithological points\n #gp.get_data(geo_model, 'surface_points').head()\n\n # Show example orientations\n #gp.get_data(geo_model, 'orientations').head()\n\n # Plot some of this data\n #gp.plot.plot_data(geo_model, direction='z')\n\n geo_model.modify_surface_points(geo_model.surface_points.df.index,\n Z=geo_model.surface_points.df['Z'] * ve)\n\n # Load reprojected topgraphy to model\n\n fp = dtm_reproj_file\n geo_model.set_topography(source='gdal', filepath=fp)\n\n contents = np.genfromtxt(groups_file, delimiter=',', dtype='U100')\n ngroups = len(contents)\n\n faults = gp.Faults()\n series = gp.Series(faults)\n # series.df\n\n # display(ngroups,contents)\n groups = []\n\n for i in range(0, ngroups):\n groups.append(contents[i].replace(\"\\n\", \"\"))\n series.add_series(contents[i].replace(\"\\n\", \"\"))\n print(contents[i].replace(\"\\n\", \"\"))\n\n series.delete_series('Default series')\n\n # series\n\n # Load surfaces and assign to series\n surfaces = gp.Surfaces(series)\n\n print(ngroups, groups)\n for i in range(0, ngroups):\n contents = np.genfromtxt(os.path.join(tmp_path, groups[i] + '.csv'),\n delimiter=',',\n dtype='U100')\n nformations = len(contents.shape)\n\n if (nformations == 1):\n for j in range(1, len(contents)):\n surfaces.add_surface(str(contents[j]).replace(\"\\n\", \"\"))\n d = {groups[i]: str(contents[j]).replace(\"\\n\", \"\")}\n surfaces.map_series({\n groups[i]: (str(contents[j]).replace(\"\\n\", \"\"))\n }) # working but no gps\n else:\n for j in range(1, len(contents[0])):\n surfaces.add_surface(str(contents[0][j]).replace(\"\\n\", \"\"))\n d = {groups[i]: str(contents[0][j]).replace(\"\\n\", \"\")}\n surfaces.map_series({\n groups[i]: (str(contents[0][j]).replace(\"\\n\", \"\"))\n }) # working but no gps\n\n # Set Interpolation Data\n id_only_one_bool = geo_model.surface_points.df['id'].value_counts() == 1\n id_only_one = id_only_one_bool.index[id_only_one_bool]\n single_vals = geo_model.surface_points.df[\n geo_model.surface_points.df['id'].isin(id_only_one)]\n for idx, vals in single_vals.iterrows():\n geo_model.add_surface_points(vals['X'], vals['Y'], vals['Z'],\n vals['surface'])\n\n geo_model.update_structure()\n\n gp.set_interpolation_data(geo_model,\n compile_theano=True,\n theano_optimizer='fast_compile',\n verbose=[])\n\n # Provide summary data on model\n\n # geo_model.additional_data.structure_data\n\n # Calculate Model\n gp.compute_model(geo_model)\n\n # Extract surfaces to visualize in 3D renderers\n #gp.plot.plot_section(geo_model, 49, direction='z', show_data=False)\n\n ver, sim = gp.get_surfaces(geo_model)\n\n # import winsound\n # duration = 700 # milliseconds\n # freq = 1100 # Hz\n # winsound.Beep(freq, duration)\n # winsound.Beep(freq, duration)\n # winsound.Beep(freq, duration)\n\n # Visualise Model\n gp.plot.plot_3D(geo_model, render_data=False)\n\n # Save model as vtk\n if (vtk):\n gp.plot.export_to_vtk(geo_model,\n path=vtk_path,\n name=test_data_name + '.vtk',\n voxels=False,\n block=None,\n surfaces=True)\n\n return geo_model\n\n\n# Courtesy of https://gist.github.com/delestro/54d5a34676a8cef7477e\n\n\ndef 
rand_cmap(nlabels,\n type='bright',\n first_color_black=True,\n last_color_black=False,\n verbose=True):\n \"\"\"\n Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks\n :param nlabels: Number of labels (size of colormap)\n :param type: 'bright' for strong colors, 'soft' for pastel colors\n :param first_color_black: Option to use first color as black, True or False\n :param last_color_black: Option to use last color as black, True or False\n :param verbose: Prints the number of labels and shows the colormap. True or False\n :return: colormap for matplotlib\n\n \"\"\"\n from matplotlib.colors import LinearSegmentedColormap\n import colorsys\n import numpy as np\n\n if type not in ('bright', 'soft'):\n print('Please choose \"bright\" or \"soft\" for type')\n return\n\n if verbose:\n print('Number of labels: ' + str(nlabels))\n\n # Generate color map for bright colors, based on hsv\n if type == 'bright':\n randHSVcolors = [(np.random.uniform(low=0.0, high=1),\n np.random.uniform(low=0.2, high=1),\n np.random.uniform(low=0.9, high=1))\n for i in range(nlabels)]\n\n # Convert HSV list to RGB\n randRGBcolors = []\n for HSVcolor in randHSVcolors:\n randRGBcolors.append(\n colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2]))\n\n if first_color_black:\n randRGBcolors[0] = [0, 0, 0]\n\n if last_color_black:\n randRGBcolors[-1] = [0, 0, 0]\n\n random_colormap = LinearSegmentedColormap.from_list('new_map',\n randRGBcolors,\n N=nlabels)\n\n # Generate soft pastel colors, by limiting the RGB spectrum\n if type == 'soft':\n low = 0.6\n high = 0.95\n randRGBcolors = [(np.random.uniform(low=low, high=high),\n np.random.uniform(low=low, high=high),\n np.random.uniform(low=low, high=high))\n for i in range(nlabels)]\n\n if first_color_black:\n randRGBcolors[0] = [0, 0, 0]\n\n if last_color_black:\n randRGBcolors[-1] = [0, 0, 0]\n random_colormap = LinearSegmentedColormap.from_list('new_map',\n randRGBcolors,\n N=nlabels)\n\n # Display colorbar\n if verbose:\n from matplotlib import colors, colorbar\n from matplotlib import pyplot as plt\n fig, ax = plt.subplots(1, 1, figsize=(15, 0.5))\n\n bounds = np.linspace(0, nlabels, nlabels + 1)\n norm = colors.BoundaryNorm(bounds, nlabels)\n\n cb = colorbar.ColorbarBase(ax,\n cmap=random_colormap,\n norm=norm,\n spacing='proportional',\n ticks=None,\n boundaries=bounds,\n format='%1i',\n orientation=u'horizontal')\n\n return random_colormap\n\n\ndef display_LS_map(model, dtm, geol_clip, faults_clip, dst_crs, use_cmap, cmap,\n use_topo, use_faults):\n\n if (not use_cmap):\n cmap = rand_cmap(100,\n type='soft',\n first_color_black=False,\n last_color_black=False,\n verbose=False)\n\n dtm_val = dtm.read(1)\n\n grid = np.array((dtm_val.shape[0] * dtm_val.shape[1], 3))\n scale = (dtm.bounds[2] - dtm.bounds[0]) / dtm_val.shape[1]\n x = np.linspace(dtm.bounds[0], dtm.bounds[2], dtm_val.shape[1])\n y = np.linspace(dtm.bounds[3], dtm.bounds[1], dtm_val.shape[0])\n xx, yy = np.meshgrid(x, y, indexing='ij')\n\n if (use_topo):\n zz = dtm_val.flatten()\n else:\n zz = np.zeros_like(xx)\n\n points = np.array(\n [xx.flatten(order='F'),\n yy.flatten(order='F'),\n zz.flatten(order='F')]).T\n v = model.evaluate_model(model.scale(points), scale=False)\n transform = from_origin(dtm.bounds[0], dtm.bounds[3], scale, scale)\n\n memfile = MemoryFile()\n new_dataset = memfile.open(driver='GTiff',\n height=dtm.shape[0],\n width=dtm.shape[1],\n count=1,\n dtype='float64',\n crs=dst_crs,\n transform=transform)\n new_dataset.write(\n 
v.astype('float64').reshape(dtm_val.shape[0], dtm_val.shape[1]), 1)\n\n fig, ax = matplotlib.pyplot.subplots(figsize=(15, 15))\n rasterio.plot.show(new_dataset.read(1),\n transform=new_dataset.transform,\n cmap=cmap,\n ax=ax)\n geol_clip.plot(ax=ax, facecolor='none', edgecolor='black', linewidth=0.4)\n if (use_faults):\n faults_clip.plot(ax=ax,\n facecolor='none',\n edgecolor='red',\n linewidth=0.7)\n\n\ndef export_to_projectfile(loopFilename, tmp_path, output_path, bbox, proj_crs):\n\n form2supergroup = pd.read_csv(\n os.path.join(tmp_path, 'all_sorts_clean.csv'),\n ',')[['code', 'group', 'colour']].rename(columns={\n 'code': 'formation',\n 'group': 'supergroup'\n })\n\n stratigraphicLayers = pd.read_csv(\n os.path.join(output_path, \"formation_thicknesses.csv\"))\n\n stratAges = pd.read_csv(os.path.join(\n tmp_path, 'age_sorted_groups.csv'))[['group_', 'min', 'max']]\n stratAges.rename(columns={\n 'group_': 'supergroup',\n 'min': 'minAge',\n 'max': 'maxAge'\n },\n inplace=True)\n stratLayers = pd.merge(form2supergroup,\n stratigraphicLayers,\n on=['formation'])\n stratLayers = pd.merge(stratLayers, stratAges, on=['supergroup'])\n stratLayers['colour1Red'] = [\n int(a[1:3], 16) for a in stratLayers['colour']\n ]\n stratLayers['colour1Green'] = [\n int(a[3:5], 16) for a in stratLayers['colour']\n ]\n stratLayers['colour1Blue'] = [\n int(a[5:7], 16) for a in stratLayers['colour']\n ]\n thickness = {}\n uniqueLayers = stratLayers[[\n 'formation', 'supergroup', 'colour1Red', 'colour1Green', 'colour1Blue',\n 'minAge', 'maxAge'\n ]].drop_duplicates(subset=\"formation\")\n for f in uniqueLayers['formation']:\n thickness[f] = np.mean(stratigraphicLayers[\n stratigraphicLayers['formation'] == f]['thickness'])\n stratigraphicLogData = np.zeros(uniqueLayers.shape[0],\n LoopProjectFile.stratigraphicLayerType)\n stratigraphicLogData['layerId'] = range(uniqueLayers.shape[0])\n stratigraphicLogData['layerId'] += 1\n stratigraphicLogData['minAge'] = uniqueLayers['minAge']\n stratigraphicLogData['maxAge'] = uniqueLayers['maxAge']\n stratigraphicLogData['name'] = uniqueLayers['formation']\n stratigraphicLogData['supergroup'] = uniqueLayers['supergroup']\n stratigraphicLogData['enabled'] = 1\n stratigraphicLogData['rank'] = 0\n stratigraphicLogData['type'] = 4\n stratigraphicLogData['thickness'] = list(thickness.values())\n\n # Should check format of colour first for \"#ffffff\" perhaps with regex\n stratigraphicLogData['colour1Red'] = uniqueLayers['colour1Red']\n stratigraphicLogData['colour1Green'] = uniqueLayers['colour1Green']\n stratigraphicLogData['colour1Blue'] = uniqueLayers['colour1Blue']\n stratigraphicLogData['colour2Red'] = [\n int(a * 0.95) for a in stratigraphicLogData['colour1Red']\n ]\n stratigraphicLogData['colour2Green'] = [\n int(a * 0.95) for a in stratigraphicLogData['colour1Green']\n ]\n stratigraphicLogData['colour2Blue'] = [\n int(a * 0.95) for a in stratigraphicLogData['colour1Blue']\n ]\n resp = LoopProjectFile.Set(loopFilename,\n \"stratigraphicLog\",\n data=stratigraphicLogData,\n verbose=True)\n if resp[\"errorFlag\"]:\n print(resp[\"errorString\"])\n\n faults = pd.read_csv(os.path.join(output_path, \"fault_orientations.csv\"))\n faults['formation'] = [re.sub(\"\\.0\", \"\", s) for s in faults['formation']]\n faultDims = pd.read_csv(os.path.join(output_path, \"fault_dimensions.csv\"))\n faultDims.rename(columns={'Fault': 'formation'}, inplace=True)\n faultDims['formation'] = [\n re.sub(\"\\.0\", \"\", s) for s in faultDims['formation']\n ]\n faultDisplacements = pd.read_csv(\n 
os.path.join(output_path, \"fault_displacements3.csv\"))\n faultDisplacements.rename(columns={'fname': 'formation'}, inplace=True)\n faultDisplacements['formation'] = [\n re.sub(\"\\.0\", \"\", s) for s in faultDisplacements['formation']\n ]\n faults = faults.merge(faultDims, on='formation')\n\n minStratAge = np.nanmin(uniqueLayers['minAge'])\n maxStratAge = np.nanmax(uniqueLayers['maxAge'])\n faultObs = pd.read_csv(os.path.join(output_path, \"faults.csv\"))\n faultObs['formation'] = [\n re.sub(\"\\.0\", \"\", s) for s in faultObs['formation']\n ]\n faultObs['posOnly'] = 1\n faultsJoined = pd.concat([faults, faultObs])\n if (len(faultDims) > 0):\n faultEvents = np.zeros(faultDims.shape[0],\n LoopProjectFile.faultEventType)\n # The fault eventId is called formation for some reason\n faultEvents['name'] = faultDims['formation']\n faultEvents['enabled'] = 1\n faultEvents['rank'] = 0\n faultEvents['type'] = 0\n faultEvents['minAge'] = np.arange(minStratAge, maxStratAge,\n (maxStratAge - minStratAge) /\n faultDims.shape[0])\n faultEvents['maxAge'] = faultEvents['minAge']\n avgDisplacements = []\n avgDownthrowDir = []\n for formationName in faultDims['formation'].unique():\n avgDisplacements.append(\n np.average(faultDisplacements[faultDisplacements['formation']\n == formationName]\n ['vertical_displacement']))\n avgDownthrowDir.append(\n np.average(\n faultDisplacements[faultDisplacements['formation'] ==\n formationName]['downthrow_dir']))\n faultEvents['avgDisplacement'] = avgDisplacements\n faultEvents['avgDownthrowDir'] = avgDownthrowDir\n faultEvents['influenceDistance'] = faultDims['InfluenceDistance']\n faultEvents['verticalRadius'] = faultDims['VerticalRadius']\n faultEvents['horizontalRadius'] = faultDims['HorizontalRadius']\n faultEvents['colour'] = faultDims['colour']\n\n faultEvents['eventId'] = [\n re.sub('.*_', '', s) for s in faultDims['formation']\n ]\n\n resp = LoopProjectFile.Set(loopFilename,\n \"faultLog\",\n data=faultEvents,\n verbose=False)\n if resp[\"errorFlag\"]:\n print(resp[\"errorString\"])\n\n faultsData = np.zeros(faultsJoined.shape[0],\n LoopProjectFile.faultObservationType)\n faultsData['eventId'] = [\n re.sub('.*_', '', s) for s in faultsJoined['formation']\n ]\n faultsData['easting'] = faultsJoined['X']\n faultsData['northing'] = faultsJoined['Y']\n faultsData['altitude'] = faultsJoined['Z']\n faultsData['dipDir'] = faultsJoined['DipDirection']\n faultsData['dip'] = faultsJoined['dip']\n faultsData['dipPolarity'] = faultsJoined['DipPolarity']\n faultsData['displacement'] = 0\n faultsData['posOnly'] = faultsJoined['posOnly']\n resp = LoopProjectFile.Set(loopFilename,\n \"faultObservations\",\n data=faultsData,\n verbose=True)\n if resp[\"errorFlag\"]:\n print(resp[\"errorString\"])\n\n # each contact contains a location and which formation it is on\n contacts = pd.read_csv(os.path.join(output_path, \"contacts4.csv\"))\n layerIds = []\n for form in contacts['formation']:\n a = bytes(form, 'ascii')\n if a in stratigraphicLogData['name']:\n layerIds.append(\n int(stratigraphicLogData[stratigraphicLogData['name'] == a]\n ['layerId']))\n else:\n layerIds.append(0)\n contactsData = np.zeros(contacts.shape[0],\n LoopProjectFile.contactObservationType)\n contactsData['layerId'] = layerIds\n contactsData['easting'] = contacts['X']\n contactsData['northing'] = contacts['Y']\n contactsData['altitude'] = contacts['Z']\n # contactsData['dipdir'] = contacts['']\n # contactsData['dip'] = contacts['']\n resp = LoopProjectFile.Set(loopFilename,\n \"contacts\",\n 
data=contactsData,\n verbose=True)\n if resp[\"errorFlag\"]:\n print(resp[\"errorString\"])\n\n observations = pd.read_csv(os.path.join(output_path, \"orientations.csv\"))\n layerIds = []\n for form in observations['formation']:\n a = bytes(form, 'ascii')\n if a in stratigraphicLogData['name']:\n layerIds.append(\n int(stratigraphicLogData[stratigraphicLogData['name'] == a]\n ['layerId']))\n else:\n layerIds.append(0)\n observations['layer'] = \"s0\"\n observationsData = np.zeros(observations.shape[0],\n LoopProjectFile.stratigraphicObservationType)\n observationsData['layerId'] = layerIds\n observationsData['easting'] = observations['X']\n observationsData['northing'] = observations['Y']\n observationsData['altitude'] = observations['Z']\n observationsData['dipDir'] = observations['azimuth']\n observationsData['dip'] = observations['dip']\n observationsData['dipPolarity'] = observations['polarity']\n observationsData['layer'] = observations['layer']\n resp = LoopProjectFile.Set(loopFilename,\n \"stratigraphicObservations\",\n data=observationsData,\n verbose=True)\n if resp[\"errorFlag\"]:\n print(resp[\"errorString\"])\n\n # Check created file is valid\n if LoopProjectFile.CheckFileValid(loopFilename):\n return loopFilename\n else:\n return None\n\n\n##########################################################################\n# Import outputs from map2loop to gempy and view with pyvtk\n# loop2gempy(test_data_name,tmp_path,vtk_pth,orientations_file,contacts_file,groups_file,dtm_reproj_file,bbox,model_base, model_top,vtk)\n# Args:\n# test_data_name root name of project\n# tmp_path path of temp files directory\n# vtk_pth path of vtk output directory\n# orientations_file path of orientations file\n# contacts_file path of contacts file\n# groups_file path of groups file\n# dtm_reproj_file path of dtm file\n# bbox model bounding box\n# model_base z value ofbase of model\n# model_top z value of top of model\n# vtk flag as to wether to save out model to vtk\n#\n# Calculates model and displays in external vtk viewer\n##########################################################################\ndef loop2gempy(*args, **kwargs):\n \"\"\" Calculate the model using gempy as backend.\n At the moment there is not support for finite faults since gempy does not\n accept passing the ellipsoid parameters directly.\n :param contacts_file (str): path of contacts file\n :param orientations_file: path of orientations file\n :param bbox: model bounding box\n :param groups_file: path of groups file\n :param model_base: z value ofbase of model\n :param model_top: z value of top of model\n :param dtm_reproj_file: path of dtm file\n :param faults_contact: path of contacts file with fault data\n :param faults_orientations: path of orientations file with fault data\n :param faults_rel_matrix: bool matrix describing the interaction between groups. Rows offset columns\n :param faults_groups_rel: bool matrix describing the interaction between faults and features\n :param faults_faults_rel: bool matrix describing the interaction between faults and faults\n :param model_name: name of the model\n :param compute (bool): Default True. Whether or not compute the model\n :param vtk (bool): Default False. Whether or not visualize the model\n :param vtk_path (str): Default None. Path of vtk output directory\n :param plot_3d_kwargs (dict): kwargs for `gempy.plot_3d`\n :return: gempy.Project\n \"\"\"\n from gempy.addons.map2gempy import loop2gempy\n geo_model = loop2gempy(*args, **kwargs)\n return geo_model\n"
] |
[
[
"numpy.nanmax",
"pandas.merge",
"matplotlib.colors.BoundaryNorm",
"numpy.linspace",
"numpy.nanmin",
"numpy.zeros_like",
"numpy.mean",
"numpy.arange",
"matplotlib.colorbar.ColorbarBase",
"numpy.zeros",
"pandas.concat",
"numpy.genfromtxt",
"numpy.transpose",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.array",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"numpy.random.uniform",
"numpy.average"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
likeand/ml
|
[
"fa54cebeb9998d8aa1241445b4b9492695bb4073"
] |
[
"samples/decision_tree.py"
] |
[
"# -*- coding: utf-8 -*-\n# @Date : 2020/5/26\n# @Author: Luokun\n# @Email : [email protected]\n\nimport sys\nfrom os.path import dirname, abspath\n\nimport numpy as np\n\nsys.path.append(dirname(dirname(abspath(__file__))))\n\n\ndef test_decision_tree():\n from models.decision_tree import DecisionTree\n\n X = np.array([\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 1],\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 0],\n [0, 1, 1],\n [0, 1, 1],\n\n [1, 0, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 0],\n [1, 1, 1],\n [1, 1, 1],\n ])\n Y = np.array([1 if np.sum(x) >= 2 else 0 for x in X])\n\n dec_tree = DecisionTree(rate=0.95)\n dec_tree.fit(X, Y)\n print(dec_tree.tree)\n\n print(Y)\n pred = dec_tree.predict(X)\n print(pred)\n acc = np.sum(pred == Y) / len(pred)\n print(f'Acc = {100 * acc:.2f}%')\n\n\nif __name__ == '__main__':\n test_dkeysion_tree()\n"
] |
[
[
"numpy.array",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
swagat5147/wallgen
|
[
"33f317d073a6ddb7519d8325a35c263d08305f3f"
] |
[
"tools/points.py"
] |
[
"import warnings\nimport numpy as np\nfrom scipy.spatial import Delaunay\nfrom skimage.filters import sobel\nfrom skimage import color, img_as_ubyte\nfrom PIL import Image, ImageDraw, ImageFilter\n\n\ndef distance(p1, p2):\n\t(x1, y1) = p1\n\t(x2, y2) = p2\n\n\td = int((y2-y1)**2 + (x2-x1)**2)**0.5\n\treturn d\n\ndef populate(a, b, n, width, height, ret):\n\tside = (width+height)//2\n\tradius = side // 100\n\tpoints = []\n\twhile len(points) < n:\n\t\tx = randint(a,a+width)\n\t\ty = randint(b,b+height)\n\n\t\tif len(points) == 0:\n\t\t\tpoints.append((x,y))\n\t\telse:\n\t\t\tfor p in points:\n\t\t\t\tif distance(p, (x,y)) <= radius:\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tpoints.append((x,y))\n\n\tret.extend(points)\n\ndef genPoints(qty, width, height):\n\tside = max(width, height)\n\trandPoints = np.random.choice(side, size=(qty, 2))\n\n\tog = side\n\t\n\ttri = Delaunay(randPoints) # calculate D triangulation of points\n\tpoints = tri.points[tri.simplices] # find all groups of points\n\n\treturn points\n\n\ndef calcCenter(ps):\n\t\"\"\" calculate incenter of a triangle given all vertices\"\"\"\n\tmid1 = ((ps[0][0]+ps[1][0])/2, (ps[0][1]+ps[1][1])/2)\n\tmid = ((mid1[0]+ps[2][0])/2, (mid1[1]+ps[2][1])/2)\n\treturn mid\n\ndef genSmartPoints(image):\n\twidth = image.shape[1]\n\theight = image.shape[0]\n\n\tedges = sobel(image)\n\n\t# convert to RGB compatible image\n\twith warnings.catch_warnings():\n\t\twarnings.simplefilter('ignore')\n\t\trgb_img = img_as_ubyte(color.gray2rgb(edges))\n\n\t# convert to PIL image\n\tpimg = Image.fromarray(rgb_img)\n\tidata = pimg.load()\n\n\tedges_data = []\n\n\t# get image pixel data and pass through a filter to get only prominent edges\n\n\tfor x in range(pimg.width):\n\t\tfor y in range(pimg.height):\n\t\t\tif sum(idata[x,y])/3 > 10:\n\t\t\t\tedges_data.append((x,y))\n\n\t# print(len(edges_data))\n\t\n\t# sometimes edges detected wont pass ^ this required case\n\tif len(edges_data) < 1:\n\t\traise Exception(\"EdgeDetectionError\")\n\t\tsys.exit(1)\n\n\t# get a n/5 number of points rather than all of the points\n\tsample = np.random.choice(len(edges_data), len(edges_data)//5 if len(edges_data)/5 < 50000 else 50000)\n\tedges_data = [edges_data[x] for x in sample]\n\n\t# print(len(edges_data))\n\n\tpoints = []\n\tradius = int(0.1 * (width+height)/2)\n\n\t# print(radius)\n\t\t\n\tpoints = edges_data\n\n\tws = width//50\n\ths = height//50\n\n\tfor x in range(0, width+ws, ws):\n\t\tpoints.append((x,0))\n\t\tpoints.append((x,height))\n\n\tfor y in range(0, height+hs, hs):\n\t\tpoints.append((0,y))\n\t\tpoints.append((width,y))\n\n\ttri = Delaunay(points) # calculate D triangulation of points\n\tdelaunay_points = tri.points[tri.simplices] # find all groups of points\n\n\treturn delaunay_points\n\n\n"
] |
[
[
"scipy.spatial.Delaunay",
"numpy.random.choice"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |