repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
samplics-org/samplics | [
"4a0f6ea6168afb74c2ea2c958fb76c7d27dfba83"
]
| [
"tests/sae/test_eb_unit_model.py"
]
| [
"import numpy as np\nimport pandas as pd\n\nfrom samplics.sae.eb_unit_model import EbUnitModel\n\n\nincomesample = pd.read_csv(\"./tests/sae/incomedata.csv\")\n\nareas = incomesample[\"prov\"]\nys = incomesample[\"income\"]\nXs = incomesample[\n [\"age2\", \"age3\", \"age4\", \"age5\", \"nat1\", \"educ1\", \"educ3\", \"labor1\", \"labor2\"]\n].to_numpy()\n# X_s = np.insert(X_s, 0, np.ones(X_s.shape[0]), axis=1)\n\nX_outsample = pd.read_csv(\"./tests/sae/Xoutsamp.csv\")\n\narear = X_outsample[\"domain\"]\nXr = X_outsample[\n [\"age2\", \"age3\", \"age4\", \"age5\", \"nat1\", \"educ1\", \"educ3\", \"labor1\", \"labor2\"]\n].to_numpy()\n# X_s = np.insert(X_s, 0, np.ones(X_s.shape[0]), axis=1)\n\nnp.random.seed(12345)\n\n\ndef pov_gap(y, pov_line):\n return np.mean((y < pov_line) * (pov_line - y) / pov_line)\n\n\n\"\"\"REML Method\"\"\"\n\neb_bhf_reml = EbUnitModel(method=\"REML\", boxcox=0, constant=3600.5,)\neb_bhf_reml.fit(ys, Xs, areas, intercept=True)\n\neb_bhf_reml.predict(Xr, arear, pov_gap, 10, show_progress=False, pov_line=6477.484)\n\neb_bhf_reml.bootstrap_mse(Xr, arear, pov_gap, 10, show_progress=False, pov_line=6477.484)\n\n\ndef test_eb_bhf_reml():\n assert eb_bhf_reml.method == \"REML\"\n\n\ndef test_fixed_effects_eb_bhf_reml():\n assert np.isclose(\n eb_bhf_reml.fixed_effects,\n np.array(\n [\n 9.537283,\n -0.027813,\n -0.027413,\n 0.074673,\n 0.043535,\n -0.028042,\n -0.159866,\n 0.283830,\n 0.163679,\n -0.056200,\n ]\n ),\n atol=1e-4,\n ).all()\n\n\ndef test_re_std_eb_bhf_reml():\n assert np.isclose(eb_bhf_reml.re_std ** 2, 0.009116, atol=1e-1)\n\n\ndef test_error_var_eb_bhf_reml():\n assert np.isclose(eb_bhf_reml.error_std ** 2, 0.170677, atol=1e-1)\n\n\n\"\"\"ML Method\"\"\"\n\neb_bhf_ml = EbUnitModel(method=\"ML\", boxcox=0, constant=3600.5,)\neb_bhf_ml.fit(ys, Xs, areas, intercept=True)\n\neb_bhf_ml.predict(Xr, arear, pov_gap, 10, show_progress=False, pov_line=6477.484)\n\n# eb_bhf_ml.bootstrap_mse(Xr, arear, pov_gap, 10, show_progress=False, pov_line=6477.484)\n\n\ndef test_eb_bhf_ml():\n assert eb_bhf_ml.method == \"ML\"\n\n\ndef test_fixed_effects_eb_bhf_ml():\n assert np.isclose(\n eb_bhf_ml.fixed_effects,\n np.array(\n [\n 9.537283,\n -0.027813,\n -0.027413,\n 0.074673,\n 0.043535,\n -0.028042,\n -0.159866,\n 0.283830,\n 0.163679,\n -0.056200,\n ]\n ),\n atol=1e-4,\n ).all()\n\n\ndef test_re_std_eb_bhf_ml():\n assert np.isclose(eb_bhf_ml.re_std ** 2, 0.009116, atol=1e-1)\n\n\ndef test_error_var_eb_bhf_ml():\n assert np.isclose(eb_bhf_ml.error_std ** 2, 0.170677, atol=1e-1)\n\n\nlmm_pop = pd.read_csv(\"./tests/sae/simulated_lmm_population_seed12345.csv\")\n\narea = lmm_pop[\"area\"]\nY = lmm_pop[\"Y\"]\nX = lmm_pop[[\"X1\", \"X2\", \"X3\"]]\nsample = lmm_pop[\"sample\"]\n\n# Sample data\narea_s = area[sample == 1]\ny_s = Y[sample == 1]\nX_s = X[sample == 1]\n\n\n# Out of sample data\narea_r = area[sample != 1]\nX_r = X[sample != 1]\n\n\n\"\"\"REML Method\"\"\"\n\n\neb_bhf_reml = EbUnitModel(method=\"REML\", boxcox=0, constant=10,)\neb_bhf_reml.fit(y_s, X_s, area_s, tol=1e-6, intercept=True)\n\n\ndef test_eb_bhf_reml():\n assert eb_bhf_reml.method == \"REML\"\n\n\ndef test_fixed_effects_eb_bhf_reml():\n assert np.isclose(\n eb_bhf_reml.fixed_effects,\n np.array([5.18988986, 2.89804078, -3.00472657, 2.82705747]),\n atol=1e-4,\n ).all()\n\n\ndef test_re_std_eb_bhf_reml():\n assert np.isclose(eb_bhf_reml.re_std, 0.259776, atol=1e-4)\n\n\ndef test_error_var_eb_bhf_reml():\n assert np.isclose(eb_bhf_reml.error_std, 0.957548, atol=1e-4)\n\n\n\"\"\"ML 
Method\"\"\"\n\n\neb_bhf_ml = EbUnitModel(method=\"ML\", boxcox=0, constant=10,)\neb_bhf_ml.fit(y_s, X_s, area_s, tol=1e-6, intercept=True)\n\n\ndef test_eb_bhf_ml():\n assert eb_bhf_ml.method == \"ML\"\n\n\ndef test_fixed_effects_eb_bhf_ml():\n assert np.isclose(\n eb_bhf_ml.fixed_effects, np.array([5.189840, 2.898447, -3.005277, 2.827395]), atol=1e-4,\n ).all()\n\n\ndef test_re_std_eb_bhf_ml():\n assert np.isclose(eb_bhf_ml.re_std, 0.259776, atol=1e-4)\n\n\ndef test_error_var_eb_bhf_ml():\n assert np.isclose(eb_bhf_ml.error_std, 0.957584, atol=1e-4)\n\n\n\n"
]
| [
[
"pandas.read_csv",
"numpy.random.seed",
"numpy.mean",
"numpy.array",
"numpy.isclose"
]
]
|
thtang/DLCV2018SPRING | [
"5aa6bd2921e317590cd8261fc2d9e0e2534acf37"
]
| [
"hw5/hw5_p3_inference.py"
]
| [
"from reader import readShortVideo\nfrom reader import getVideoList\nfrom os import listdir\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nimport pickle\n\nimport torchvision\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nimport skimage.io\nimport skimage\n\nimport torch.nn as nn\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\ndef normalize(image):\n '''\n normalize for pre-trained model input\n '''\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n transform_input = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Pad((0,40), fill=0, padding_mode='constant'),\n transforms.Resize(224),\n # transforms.CenterCrop(224),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ])\n return transform_input(image)\n\n# load images\nvideo_path = sys.argv[1]\ncategory_list = sorted(listdir(video_path))\n\nall_video_frame = []\ncnn_feature_extractor = torchvision.models.densenet121(pretrained=True).features.cuda() \nwith torch.no_grad():\n for category in category_list:\n print(\"category:\",category)\n image_list_per_folder = sorted(listdir(os.path.join(video_path,category)))\n category_frames = []\n for image in image_list_per_folder:\n image_rgb = skimage.io.imread(os.path.join(video_path, category,image))\n image_nor = normalize(image_rgb)\n feature = cnn_feature_extractor(image_nor.view(1,3,224,224).cuda()).cpu().view(1024*7*7)\n category_frames.append(feature)\n all_video_frame.append(torch.stack(category_frames))\n\nvideo_lengths = [len(s) for s in all_video_frame]\n# build model and loss function\nclass seq2seq(nn.Module):\n def __init__(self, input_size, hidden_size=512, n_layers=2, dropout=0.1):\n super(seq2seq, self).__init__()\n self.hidden_size = hidden_size\n self.lstm = nn.LSTM(input_size, self.hidden_size, n_layers,\n dropout=(0 if n_layers == 1 else dropout), bidirectional=False,\n batch_first=True)\n self.bn_0 = nn.BatchNorm1d(self.hidden_size)\n self.fc_1 = nn.Linear(self.hidden_size, int(self.hidden_size/2))\n self.bn_1 = nn.BatchNorm1d(int(self.hidden_size/2))\n self.fc_2 = nn.Linear(int(self.hidden_size), 11)\n self.softmax = nn.Softmax(1)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.5)\n def forward(self, padded_sequence, input_lengths, hidden=None):\n packed = torch.nn.utils.rnn.pack_padded_sequence(padded_sequence, \n input_lengths, \n batch_first=True)\n outputs, (hn,cn) = self.lstm(packed, hidden) # output: (seq_len, batch, hidden*n_dir)\n \n outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)\n\n cut_frame_prediction = []\n for i in range(outputs.size(0)):\n category = self.fc_2(outputs[i])\n cut_frame_prediction.append(category)\n\n category = torch.stack(cut_frame_prediction)\n\n return category\n\nprint(\"load model ...\")\nfeature_size = 1024*7*7\nmodel = seq2seq(feature_size,hidden_size=512,dropout=0.5, n_layers=2).cuda()\nmodel.load_state_dict(torch.load(\"./models/RNN_seq2seq_model.pkt\"))\nprint(\"model loaded\")\n# inference\nwith torch.no_grad():\n model.eval()\n valid_output = []\n valid_y_list = []\n for valid_X, length in zip(all_video_frame, video_lengths):\n input_valid_X = valid_X.unsqueeze(0)\n output = model(input_valid_X.cuda(), [length])\n prediction = torch.argmax(torch.squeeze(output.cpu()),1).data.numpy()\n valid_output.append(prediction)\n\n# store result to txt\nvalid_dir_name = sorted(listdir(video_path))\n\noutput_folder = 
sys.argv[2]\nfor i in range(len(valid_dir_name)):\n with open(os.path.join(output_folder, valid_dir_name[i]+'.txt'), \"w\") as f:\n for j, pred in enumerate(valid_output[i]):\n f.write(str(pred))\n if j != len(valid_output[i])-1:\n f.write(\"\\n\")"
]
| [
[
"torch.nn.BatchNorm1d",
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.load",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.no_grad",
"torch.stack",
"torch.nn.ReLU"
]
]
|
RichardRed0x/checkonchain | [
"2a2c1b50fb9f31c9afc01e97095ca09d62b41860"
]
| [
"dcronchain/charts/chart_dcr_mcap_powerlaws.py"
]
| [
"#Calculate the Linear Regression between Market Caps\nimport pandas as pd\nimport numpy as np\nimport datetime as date\ntoday = date.datetime.now().strftime('%Y-%m-%d')\n\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nimport plotly.io as pio\npio.renderers.default = \"browser\"\n\nfrom checkonchain.general.coinmetrics_api import *\nfrom checkonchain.btconchain.btc_add_metrics import *\nfrom checkonchain.dcronchain.dcr_add_metrics import *\n\nfrom checkonchain.general.regression_analysis import *\n\n#Pull Coinmetrics Data for Coins\nBTC = btc_add_metrics().btc_coin()\nLTC = Coinmetrics_api('ltc',\"2011-10-07\",today).convert_to_pd()\nBCH = Coinmetrics_api('bch',\"2017-08-01\",today).convert_to_pd()\nDAS = Coinmetrics_api('dash',\"2014-01-19\",today).convert_to_pd()\nDCR = dcr_add_metrics().dcr_coin()\nXMR = Coinmetrics_api('xmr',\"2014-04-18\",today).convert_to_pd()\nZEC = Coinmetrics_api('zec',\"2016-10-28\",today).convert_to_pd()\nETH = Coinmetrics_api('eth',\"2015-07-30\",today).convert_to_pd()\nXRP = Coinmetrics_api('xrp',\"2013-01-01\",today).convert_to_pd()\n\n#Reduce dataset down to date and a single metric\nmetric=\"CapMrktCurUSD\"\nBTC2 =BTC[['date',metric]]\nLTC2 =LTC[['date',metric]] \nBCH2 =BCH[['date',metric]] \nDAS2 =DAS[['date',metric]]\nDCR2 =DCR[['date',metric]] \nXMR2 =XMR[['date',metric]] \nZEC2 =ZEC[['date',metric]] \nETH2 =ETH[['date',metric]] \nXRP2 =XRP[['date',metric]] \n\n#Rename all columns\nprefix = 'Cap_'\nBTC2.columns =['date',prefix+'BTC'] \nLTC2.columns =['date',prefix+'LTC']\nBCH2.columns =['date',prefix+'BCH']\nDAS2.columns=['date',prefix+'DAS']\nDCR2.columns =['date',prefix+'DCR']\nXMR2.columns =['date',prefix+'XMR']\nZEC2.columns =['date',prefix+'ZEC']\nETH2.columns =['date',prefix+'ETH']\nXRP2.columns =['date',prefix+'XRP']\n\n#Compile into a single dataframe with all coins\nBTC_data = BTC2.dropna(axis=0)\nBTC_data = pd.merge_asof(BTC_data,LTC2,on='date')\nBTC_data = pd.merge_asof(BTC_data,BCH2,on='date')\nBTC_data = pd.merge_asof(BTC_data,DAS2,on='date')\nBTC_data = pd.merge_asof(BTC_data,DCR2,on='date')\nBTC_data = pd.merge_asof(BTC_data,XMR2,on='date')\nBTC_data = pd.merge_asof(BTC_data,ZEC2,on='date')\nBTC_data = pd.merge_asof(BTC_data,ETH2,on='date')\nBTC_data = pd.merge_asof(BTC_data,XRP2,on='date')\n\nBTC_data\n\nregression_analysis().ln_regression(BTC_data[['date',prefix+'BTC',prefix+'LTC']].dropna(axis=0),prefix+'BTC',prefix+'LTC','date')\nregression_analysis().ln_regression(BTC_data[['date',prefix+'BTC',prefix+'BCH']].dropna(axis=0),prefix+'BTC',prefix+'BCH','date')\nregression_analysis().ln_regression(BTC_data[['date',prefix+'BTC',prefix+'DAS']].dropna(axis=0),prefix+'BTC',prefix+'DAS','date')\nregression_analysis().ln_regression(BTC_data[['date',prefix+'BTC',prefix+'DCR']].dropna(axis=0),prefix+'BTC',prefix+'DCR','date')\nregression_analysis().ln_regression(BTC_data[['date',prefix+'BTC',prefix+'XMR']].dropna(axis=0),prefix+'BTC',prefix+'XMR','date')\nregression_analysis().ln_regression(BTC_data[['date',prefix+'BTC',prefix+'ZEC']].dropna(axis=0),prefix+'BTC',prefix+'ZEC','date')\nregression_analysis().ln_regression(BTC_data[['date',prefix+'BTC',prefix+'ETH']].dropna(axis=0),prefix+'BTC',prefix+'ETH','date')\nregression_analysis().ln_regression(BTC_data[['date',prefix+'BTC',prefix+'XRP']].dropna(axis=0),prefix+'BTC',prefix+'XRP','date')\n\n\n\nx_data = [\n BTC_data[prefix+'DCR'],BTC_data[prefix+'LTC'],\n BTC_data[prefix+'BCH'],BTC_data[prefix+'DAS'],\n BTC_data[prefix+'XMR'],BTC_data[prefix+'ZEC'],\n BTC_data[prefix+'ETH'],BTC_data[prefix+'XRP']\n ]\ny_data = [\n BTC_data[prefix+'BTC'],BTC_data[prefix+'BTC'],\n BTC_data[prefix+'BTC'],BTC_data[prefix+'BTC'],\n BTC_data[prefix+'BTC'],BTC_data[prefix+'BTC'],\n BTC_data[prefix+'BTC'],BTC_data[prefix+'BTC']\n ]\nname_data = [\n 'DCR','LTC',\n 'BCH','DAS',\n 'XMR','ZEC',\n 'ETH','XRP'\n]\nwidth_data = [\n 2,2,\n 2,2,\n 2,2,\n 2,2\n]\nopacity_data = [\n 1,1,\n 1,1,\n 1,1,\n 1,1\n]\ncolor_data = [\n 'rgb(255, 153, 0)','rgb(214, 214, 194)',\n 'rgb(0, 153, 51)','rgb(51, 204, 255)',\n 'rgb(255, 102, 0)','rgb(255, 255, 0)',\n 'rgb(153, 51, 255)','rgb(51, 102, 255)'\n]\ndash_data = [\n 'solid','solid',\n 'solid','solid',\n 'solid','solid',\n 'solid','solid'\n]\n\n\nfig = make_subplots(specs=[[{\"secondary_y\": False}]])\nfor i in range(0,8):\n fig.add_trace(go.Scatter(\n x=x_data[i], y=y_data[i],\n mode='markers',\n name=name_data[i],\n opacity=opacity_data[i],\n marker=dict(\n size=width_data[i], # scatter markers take 'size'; 'width' is a line property\n color=color_data[i]#,\n #dash=dash_data[i]\n )),\n secondary_y=False)\n\n\n\n\"\"\"$$$$$$$$$$$$$$$ FORMATTING $$$$$$$$$$$$$$$$\"\"\"\n# Add figure title\nfig.update_layout(title_text=\"Compare Value Metrics\")\nfig.update_xaxes(\n title_text=\"<b>Coin MCap</b>\",\n type = 'log'\n )\nfig.update_yaxes(\n title_text=\"<b>Bitcoin MCap</b>\",\n type=\"log\",\n #range=[8,12],\n secondary_y=False)\nfig.update_layout(template=\"plotly_dark\")\nfig.show()\n"
]
| [
[
"pandas.merge_asof"
]
]
|
Longqi-S/keras_cpn | [
"53d241ecde4bff5073832dfb1ea9c1e08931520e"
]
| [
"models/subnet.py"
]
| [
"import keras.layers as KL\nimport keras.backend as K\nimport tensorflow as tf\nfrom lib.nets.resnet_backbone import identity_block as bottleneck\nfrom keras.utils import conv_utils\nfrom keras.engine import InputSpec\nimport numpy as np\n\nclass UpsampleBilinear(KL.Layer):\n def call(self, inputs, **kwargs):\n source, target = inputs\n target_shape = tf.shape(target)\n return tf.image.resize_bilinear(source, (target_shape[1], target_shape[2]), align_corners=True)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0][0],) + input_shape[1][1:3] + (input_shape[0][-1],)\n\ndef _conv_bn_relu(input_tensor, kernel_size, nb_filters,\n padding=\"same\", namebase=\"res\", has_act=True, rate=1):\n output = KL.Conv2D(nb_filters, kernel_size, \\\n padding=padding,\n dilation_rate=(rate, rate),\n name=namebase+\"_conv\")(input_tensor)\n output = KL.BatchNormalization(axis=3, \\\n name=namebase+\"_bn\")(output)\n if has_act:\n output = KL.Activation('relu')(output)\n return output\n\ndef _bn_relu_conv(input_tensor, kernel_size, nb_filters,\n padding=\"same\", namebase=\"res\", has_act=True):\n x = input_tensor\n x = KL.BatchNormalization(axis=3, \\\n name=namebase+\"_bn\")(x)\n x = KL.Activation('relu')(x)\n x = KL.Conv2D(nb_filters, kernel_size, \\\n padding=padding,\n name=namebase+\"_conv\")(x)\n return x\n \ndef create_global_net(blocks, cfg, has_bn=True, bn_trainable=True):\n \"\"\" create global net in cpn\n # Inputs:\n blocks = [C2, C3, C4, C5]\n \"\"\"\n global_fms = []\n global_outs = []\n last_fm = None\n ## define pyramid feature maps\n for i, block in enumerate(reversed(blocks)):\n lateral = _conv_bn_relu(block, (1, 1), 256, \"same\", 'lateral/res{}'.format(5-i))\n if last_fm is not None:\n upsample = UpsampleBilinear(\\\n name='fpn/p{}upsampled'.format(5-i+1))([last_fm, lateral])\n upsample = KL.Conv2D(256, (1, 1), \\\n name='fpn/p{}upsampled_conv'.format(5-i))(upsample)\n if has_bn:\n upsample = KL.BatchNormalization(name='fpn/p{}upsampled_bn'.format(5-i), axis=3)(upsample)\n last_fm = KL.Add(name='fpn/p{}merge'.format(5-i))([\\\n upsample, lateral])\n else:\n last_fm = lateral\n tmp = _conv_bn_relu(last_fm, (1, 1), 256, \"SAME\", 'tmp/res{}'.format(5-i))\n out = KL.Conv2D(cfg.KEYPOINTS_NUM, (3, 3), padding=\"SAME\", \\\n name='pyramid/res{}'.format(5-i))(tmp)\n if has_bn:\n out = KL.BatchNormalization(axis=3, name='pyramid/res{}_bn'.format(5-i))(out)\n global_fms.append(last_fm)\n out = KL.Lambda(lambda t: tf.image.resize_bilinear(t, \\\n (cfg.OUTPUT_SHAPE[0], cfg.OUTPUT_SHAPE[1])), \\\n name='pyramid/res{}up'.format(5-i))(out)\n global_outs.append(out)\n global_fms.reverse()\n global_outs.reverse()\n return global_fms, global_outs\n\n\n## original cpn RefineNet version\ndef create_refine_net(blocks, cfg, use_bn=True):\n refine_fms = []\n for i, block in enumerate(blocks):\n mid_fm = block\n for j in range(i):\n mid_fm = bottleneck(mid_fm, 3, [128, 128, 256], \n stage=(2+i),\n block='refine_conv' + str(j), use_bn=use_bn)\n mid_fm = KL.Lambda(lambda t: tf.image.resize_bilinear(t, \\\n (cfg.OUTPUT_SHAPE[0], cfg.OUTPUT_SHAPE[1]), align_corners=True),\\\n name='upsample_conv/res{}'.format(2+i))(mid_fm)\n refine_fms.append(mid_fm)\n refine_fm = KL.Concatenate(axis=3)(refine_fms)\n refine_fm = KL.Conv2D(256, (1, 1), \n padding=\"SAME\", name=\"refine_shotcut\")(refine_fm)\n refine_fm = bottleneck(refine_fm, 3, [128, 128, 256], stage=0, block='final_bottleneck')\n res = KL.Conv2D(cfg.KEYPOINTS_NUM, (3, 3),\n padding='SAME', name='refine_out')(refine_fm)\n if use_bn:\n res = 
KL.BatchNormalization(name='refine_out_bn', axis=3)(res)\n return res"
]
| [
[
"tensorflow.image.resize_bilinear",
"tensorflow.shape"
]
]
|
dkaterenchuk/lstm_text_embedding | [
"25cf8c434fcf32e5d9af057acff823f191d31b0c"
]
| [
"continue_train_lstm.py"
]
| [
"#! /usr/bin/env python\n\n\"\"\"\nTrains an LSTM embedding model on Wiki articles.\n\nThe data is freelly available here: https://www.wikidata.org/wiki/Wikidata:Database_download\n\nNOTE: the data is in WIKI xml format and in order to extract text I recommend to use wikiextractor\n(https://github.com/attardi/wikiextractor) This project adds \"--json\" param to have the data in json \nformat.\n\nA sample of the data is in \"data/wiki/*\" folder.\n\nRun: python train_lstm_embedding.py <data_dir> <output_file>\n\ndata_dir - is the wiki data processed with \"wikiextractor\"\nouput_file - is trained model\n\"\"\"\n\nimport sys\nimport logging\nimport numpy as np\nfrom code import data_processing\nfrom code import lstm_embedding_model\nfrom keras.models import load_model\nfrom definitions import PATHS, HYPER_PARAM\nfrom gensim.models import FastText\nimport gensim\n\nimport tensorflow as tf\nimport keras.backend as K\n\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nconfig = tf.ConfigProto(allow_soft_placement=True)\n# config.gpu_options.per_process_gpu_memory_fraction = 0.5\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nK.set_session(sess)\n\nlogging.basicConfig(level=logging.INFO)\n#logging.basicConfig(level=logging.DEBUG)\n\n\ndef main(data_path, model_path, temp_model):\n \"\"\"\n Main driver function.\n\n args:\n data_path: str - path to wiki data\n model_path: str - path to save trained model\n \"\"\"\n # Defining hyper-parameters\n epochs = HYPER_PARAM[\"epochs\"] # how many times to go over your dataset\n # (batch_size * steps_per_epoch) = whole dataset for the generator\n batch_size = HYPER_PARAM[\"batch_size\"] # batch size - in generator this is a single step\n sequence_length = HYPER_PARAM[\"sequence_length\"] # length of each text - in this case a sentence\n sentence_embedding_dim = HYPER_PARAM[\"sentence_embedding_dim\"] # size of the latent space\n workers = HYPER_PARAM[\"workers\"] # cores\n use_multiprocessing = HYPER_PARAM[\"use_multiprocessing\"] # multiprocessing\n verbose = HYPER_PARAM[\"verbose\"] # verbose\n training_sentences = 20 # * 10**6 # approximate number of sentences in wiki_small \n steps_per_epoch = HYPER_PARAM[\"steps_per_epoch\"] # number of steps defines your entire dataset in generator \n\n #print(\"Gensim version: \", gensim.__version__)\n # Steps to train an LSTM model\n logging.info(\"Preparing data.\")\n #w2v_model = FastText.load(\"data/word_embeddings/wiki_small_fasttext_128dim.model\") #PATHS[\"fasttext\"])\n \n\n w2v_model = data_processing.get_word_embedding_model(PATHS[\"fasttext\"])\n\n w2v_model.wv['whistlin']\n \n logging.debug(\"Loading a model.\")\n lstm_autoencoder = load_model(temp_model)\n \n\n logging.debug(\"Creating a data generator\")\n # Train using a generator (when data cannot fit into ram)\n\n sent_generator = data_processing.get_preprocessed_data(data_path, w2v_model, sent_length=64)\n data_generator = data_processing.get_batch_preprocessed_data_generator(data_path,\n w2v_model,\n sequence_length=sequence_length,\n batch_size=batch_size)\n\n logging.info(\"Loading test data.\")\n test_data = []\n for i, sent in enumerate(sent_generator):\n test_data.append(sent)\n if i == 100:\n break\n \n test_data = np.asarray(test_data)\n\n \n logging.info(\"Training the model\")\n lstm_autoencoder = lstm_embedding_model.train_model_on_generator(lstm_autoencoder,\n data_generator,\n model_path=model_path,\n # validation_data=(test_data, test_data),\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n 
workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose)\n\n # Splitting autoencoder into encoder and decoder parts\n encoder, decoder = lstm_embedding_model.split_autoencoder(lstm_autoencoder)\n\n # take one held-out sentence from the test batch to sanity-check the embedding round-trip\n sample_sentence = test_data[0]\n\n logging.info(\"Testing embedding.\")\n logging.info(\"Initial sentence: %s\",\n data_processing.vector_sequence_to_words(sample_sentence, w2v_model))\n\n logging.info(\"Original sentence dimensions: %s\", str(sample_sentence.shape))\n encoded_sent = encoder.predict(np.asarray([sample_sentence]))\n logging.info(\"Embedded sentence dimensions: %s\", str(encoded_sent.shape))\n\n decoded_sent = decoder.predict(encoded_sent)\n logging.info(\"Reconstructed sentence: %s\",\n data_processing.vector_sequence_to_words(decoded_sent[0], w2v_model))\n\n logging.info(\"Saving trained model to: %s\", model_path)\n lstm_autoencoder.save(model_path)\n\n logging.info(\"Loading the model to test embeddings.\")\n loaded_lstm_autoencoder_model = load_model(model_path)\n loaded_lstm_autoencoder_model.summary()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 4:\n print(__doc__)\n else:\n main(sys.argv[1], sys.argv[2], sys.argv[3])\n"
]
| [
[
"tensorflow.ConfigProto",
"numpy.asarray",
"tensorflow.Session"
]
]
|
utiasASRL/pysteam | [
"c0c8809ee2a5e1dab5ce7f9e5ff9de91138ce68b"
]
| [
"pysteam/solver/dogleg_gauss_newton_solver.py"
]
| [
"import numpy as np\nimport numpy.linalg as npla\nimport scipy.linalg as spla\n\nfrom . import GaussNewtonSolver\nfrom ..problem import OptimizationProblem\n\n\nclass DoglegGaussNewtonSolver(GaussNewtonSolver):\n\n def __init__(self, problem: OptimizationProblem, **parameters) -> None:\n super().__init__(problem, **parameters)\n # override parameters\n self._parameters.update({\n \"ratio_threshold_shrink\": 0.25,\n \"ratio_threshold_grow\": 0.75,\n \"shrink_coeff\": 0.5,\n \"grow_coeff\": 3.0,\n \"max_shrink_steps\": 50\n })\n self._parameters.update(**parameters)\n\n self._trust_region_size = None\n\n def linearize_solve_and_update(self):\n\n # initialize new cost with old cost in case of failure\n new_cost = self._prev_cost\n\n # build the system\n A, b = self.build_gauss_newton_terms()\n grad_norm = npla.norm(b) # compute gradient norm for termination check\n self._approx_hessian = A # keep a copy of the LHS (i.e., the approximated Hessian)\n\n # get gradient descent step\n grad_descent_step = self.get_cauchy_point(A, b)\n grad_descent_norm = npla.norm(grad_descent_step)\n\n # get Gauss-Newton step\n gauss_newton_step = self.solve_gauss_newton(A, b)\n gauss_newton_norm = npla.norm(gauss_newton_step)\n\n # initialize trust region size (if first time)\n if self._trust_region_size is None:\n self._trust_region_size = gauss_newton_norm\n\n # perform dogleg step\n num_tr_decreases = 0\n num_backtrack = 0\n step_success = False\n while num_backtrack < self._parameters[\"max_shrink_steps\"]:\n if (gauss_newton_norm <= self._trust_region_size):\n perturbation = gauss_newton_step\n dogleg_segment = \"Gauss Newton\"\n elif (grad_descent_norm >= self._trust_region_size):\n perturbation = (self._trust_region_size / grad_descent_norm) * grad_descent_step\n dogleg_segment = \"Grad Descent\"\n else:\n # trust region lies between the GD and GN steps, use interpolation\n assert gauss_newton_step.shape == grad_descent_step.shape\n\n # get interpolation direction\n gd_to_gn_vector = gauss_newton_step - grad_descent_step\n\n # calculate interpolation constant\n gd_dot_prod_gd_to_gn = (grad_descent_step.T @ gd_to_gn_vector)[0, 0]\n gd_to_gn_sqr_norm = npla.norm(gd_to_gn_vector)**2\n interp_const = (\n (-gd_dot_prod_gd_to_gn + np.sqrt(gd_dot_prod_gd_to_gn**2 +\n (self._trust_region_size**2 - grad_descent_norm**2) * gd_to_gn_sqr_norm)) /\n gd_to_gn_sqr_norm)\n perturbation = grad_descent_step + interp_const * gd_to_gn_vector\n dogleg_segment = \"Interp GN&GD\"\n\n proposed_cost = self.propose_update(perturbation)\n actual_reduc = self._prev_cost - proposed_cost\n predicted_reduc = self.predict_reduction(A, b, perturbation)\n actual_to_predicted_ratio = actual_reduc / predicted_reduc\n\n if actual_to_predicted_ratio > self._parameters[\"ratio_threshold_shrink\"]:\n self.accept_proposed_state()\n if actual_to_predicted_ratio > self._parameters[\"ratio_threshold_grow\"]:\n self._trust_region_size = max(self._trust_region_size,\n self._parameters[\"grow_coeff\"] * npla.norm(perturbation))\n new_cost = proposed_cost\n step_success = True\n break\n else:\n self.reject_proposed_state()\n self._trust_region_size *= self._parameters[\"shrink_coeff\"]\n num_tr_decreases += 1\n\n num_backtrack += 1\n\n # print report line if verbose option is enabled\n if (self._parameters[\"verbose\"]):\n print(\n \"Iteration: {0:4} - Cost: {1:10.4f} - TR Shrink: {2:6.3f} - AvP Ratio: {3:6.3f} - Dogleg Segment: {4:15}\"\n .format(self._curr_iteration, new_cost, num_tr_decreases, actual_to_predicted_ratio, dogleg_segment))\n\n return 
step_success, new_cost, grad_norm\n\n def get_cauchy_point(self, A: np.ndarray, b: np.ndarray) -> np.ndarray:\n \"\"\"Compute the Cauchy point, i.e. the minimizer of the quadratic model\n along the steepest-descent direction: x = (b^T*b / (b^T*A*b)) * b\n \"\"\"\n num = npla.norm(b)**2\n den = (b.T @ A @ b)[0, 0]\n return (num / den) * b\n\n def predict_reduction(self, A: np.ndarray, b: np.ndarray, step: np.ndarray) -> float:\n \"\"\"grad^T * step - 0.5 * step^T * Hessian * step\"\"\"\n grad_trans_step = b.T @ step\n step_trans_hessian_step = step.T @ A @ step\n return (grad_trans_step - 0.5 * step_trans_hessian_step)[0, 0]\n"
]
| [
[
"numpy.sqrt",
"numpy.linalg.norm"
]
]
|
zoeparman/benchmark | [
"96331b7fa0db84f5f422b52cae2211b41bbd15ce"
]
| [
"src/attrbench/lib/masking/blurring_masker.py"
]
| [
"from attrbench.lib.masking import ImageMasker\nfrom cv2 import blur\nimport numpy as np\nimport torch\n\n\nclass BlurringMasker(ImageMasker):\n def __init__(self, feature_level, kernel_size):\n super().__init__(feature_level)\n if not 0 < kernel_size < 1.0:\n raise ValueError(\"Kernel size is expressed as a fraction of image height, and must be between 0 and 1.\")\n self.kernel_size = kernel_size\n\n def initialize_baselines(self, samples: torch.Tensor):\n kernel_size = int(self.kernel_size * samples.shape[-1])\n\n baseline = []\n for i in range(samples.shape[0]):\n sample = samples[i, ...].cpu().numpy()\n cv_sample = np.transpose(sample, (1, 2, 0))\n blurred_sample = blur(cv_sample, (kernel_size, kernel_size))\n if len(blurred_sample.shape) == 2:\n blurred_sample = blurred_sample[..., np.newaxis]\n baseline.append(np.transpose(blurred_sample, (2, 0, 1)))\n self.baseline = torch.tensor(np.stack(baseline, axis=0), device=samples.device)\n"
]
| [
[
"numpy.stack",
"numpy.transpose"
]
]
|
dozou/pyio_plugins | [
"bb84a449d454d04de156a277709a169e904a9ca2"
]
| [
"recoder_v1/Window.py"
]
| [
"# -*- coding:utf-8 -*-\n\nimport time\nimport os.path\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import QTimer\nfrom yapsy.IPlugin import IPlugin\nfrom pyio.Main import main\nfrom pyio.Util import System\nfrom pyio.Window.LineEdit import *\nfrom pyio.DataSturucture import *\nfrom pyio.Devices.IODevice import IODevice\nfrom recoder_v1.DataDevice import DataDevice\n\nimport pyperclip as clip\n\n\nclass Viewer(QWidget):\n def __init__(self, parent=None, data_container: DataContainer = None):\n \"\"\"\n Initialization\n \"\"\"\n super().__init__(parent)\n\n self.data = data_container\n\n self.ai_device = []\n self.rec_device = []\n self.wave_data = []\n self.data_cnt = 0\n self.size_top = self.data_cnt\n self.data_path = \"\"\n self.loop_flag = False\n self.t_start = None\n self.t_end = None\n self.timer = QTimer()\n self.timer.timeout.connect(self.update_record)\n\n # lay out the widgets\n self.setWindowTitle('Record Manager')\n\n self.path_line = LabelOnLineEdit(label=\"Path\",\n text='')\n self.path_line_copy_button = QPushButton(\"📋 \")\n self.path_line_copy_button.clicked.connect(self.path_line_copy)\n self.file_name_line = LabelOnLineEdit(label=\"FileName\",\n text='')\n self.timeout_line = LabelOnSpinBox(label=\"RecordTiming\",\n val=50.0,\n maximum=10000.0)\n self.size_checkbox = QCheckBox('記録サイズ指定')\n self.size_checkbox.stateChanged.connect(self.update_checkbox)\n self.size_line = LabelOnSpinBox(label=\"記録サンプル数\",\n val=100,\n maximum=50000)\n self.record_button = QPushButton(\"記録開始\")\n self.record_button.clicked.connect(self.start_record)\n self.clear_button = QPushButton(\"初期化\")\n self.clear_button.clicked.connect(self.clear_data)\n self.write_format_checkbox = QCheckBox(\"Pkl保存\")\n self.write_format_checkbox.toggle()\n self.write_button = QPushButton(\"書き出し\")\n self.write_button.clicked.connect(self.write_files)\n self.data_cnt_label = QLabel(\"\")\n self.error_message_label = QLabel(\"\")\n self.error_message_label.setStyleSheet(\"background:#000000; color:red;\")\n self.write_checkbox = []\n\n def window_create(self):\n \"\"\"\n Called the first time the recorder_v1 button is clicked in pyio_v1.\n \"\"\"\n print(\"--- Connected Device(s) ---\\n\", self.data.device)\n for dev in self.data.device:\n if dev.info['type'] == 'ai':\n self.ai_device.append(dev)\n print(\"--- Found AnalogIn Device(s) ---\\n\", self.ai_device)\n if len(self.ai_device) == 0:\n # console output\n sys.stderr.write(\"!! AnalogIn Device Not Found. !!\\n\")\n sys.stderr.write(\"Recoder_v1 -> Exit\\n\")\n return\n\n for i, dev in enumerate(self.ai_device):\n box_text = \" \" + str(dev.info['name']) + \" \" + str(int(dev.info['ch']) + 1) + \"ch\"\n self.write_checkbox.append(QCheckBox(box_text))\n\n # build the layout after the button press\n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(QLabel(\"<b>記録</b>\"))\n grid.addWidget(self.timeout_line, 1, 0)\n grid.addWidget(self.size_checkbox, 2, 0)\n grid.addWidget(self.size_line, 3, 0)\n grid.addWidget(self.record_button, 4, 0)\n grid.addWidget(self.clear_button, 5, 0)\n grid.addWidget(self.write_button, 6, 0)\n grid.addWidget(self.data_cnt_label, 7, 0)\n grid.addWidget(self.write_format_checkbox, 8, 0)\n grid.addWidget(self.path_line, 9, 0)\n grid.addWidget(self.path_line_copy_button, 9, 1)\n grid.addWidget(self.file_name_line, 10, 0)\n grid.addWidget(self.error_message_label, 11, 0)\n\n grid.addWidget(QLabel(\"--- Record Device Select ---\"), 0, 2)\n for i in range(len(self.ai_device)):\n grid.addWidget(self.write_checkbox[i], (1+i), 2)\n\n self.setLayout(grid)\n\n def window_init(self):\n \"\"\"\n Called when the recoder_v1 button is clicked in pyio_v1.\n Calls the setup functions to initialize.\n \"\"\"\n self.update_checkbox()\n self.update_data_cnt_label()\n self.path_setting()\n self.path_line.set_value(self.data_path)\n\n def update_checkbox(self):\n \"\"\"\n Called when self.size_checkbox is clicked.\n \"\"\"\n if self.size_checkbox.isChecked():\n self.size_line.setEnabled(True)\n else:\n self.size_line.setEnabled(False)\n\n def start_record(self):\n \"\"\"\n Called when self.record_button is clicked.\n Starts recording.\n \"\"\"\n self.error_message_label.setText(\"\")\n self.size_top = self.size_line.get_value()\n\n # build the list of devices to record (self.rec_device)\n self.rec_device = []\n for i, dev in enumerate(self.ai_device):\n self.write_checkbox[i].setEnabled(False)\n if self.write_checkbox[i].isChecked():\n self.rec_device.append(dev)\n\n\n # make sure self.rec_device is not empty\n if len(self.rec_device) == 0:\n # console output\n sys.stderr.write(\"!! No Record Device. !!\\n\")\n self.error_message_label.setText(\"!! No Record Device. !!\")\n return\n\n # add one array per device in self.rec_device to self.wave_data\n for i, dev in enumerate(self.rec_device):\n if self.size_checkbox.isChecked():\n self.wave_data.append(np.empty((self.size_top, len(dev.get_1d_array())), dtype=float))\n\n else:\n self.wave_data.append([])\n\n print(\"self.wave_data[\"+str(i)+\"].shape :\", np.array(self.wave_data[i]).shape)\n\n self.record_button.setText(\"記録停止\")\n self.re_connect(self.record_button.clicked, self.stop_record)\n # self.record_button.clicked.connect(self.stop_record)\n self.clear_button.setEnabled(False)\n self.size_checkbox.setEnabled(False)\n\n self.timer.setInterval(self.timeout_line.get_value())\n self.loop_flag = True\n if self.data_cnt == 0:\n self.t_start = time.time()\n self.timer.start()\n\n def update_record(self):\n \"\"\"\n Called on every data-acquisition tick.\n \"\"\"\n if self.size_checkbox.isChecked():\n for i, dev in enumerate(self.rec_device):\n self.wave_data[i][self.data_cnt] = dev.get_1d_array()\n\n else:\n for i, dev in enumerate(self.rec_device):\n self.wave_data[i].append(dev.get_1d_array())\n\n self.data_cnt += 1\n self.update_data_cnt_label()\n if self.data_cnt == self.size_top:\n self.stop_record()\n\n def stop_record(self):\n \"\"\"\n Called when self.record_button is clicked.\n Stops recording.\n \"\"\"\n self.loop_flag = False\n self.timer.stop()\n self.t_end = time.time()\n self.record_button.setText(\"記録開始\")\n self.re_connect(self.record_button.clicked, self.start_record)\n # self.record_button.clicked.connect(self.start_record)\n self.clear_button.setEnabled(True)\n print(\"--- Stop Record ---\")\n print(\"Time :\", (self.t_end - self.t_start), \"[sec]\")\n for i, dev in enumerate(self.rec_device):\n print(\"wave_data \" + str(i) + \" : \" + dev.info['name'] + \"_\" + str(dev.info['id']) + \" Ch:\"\n + str(dev.info['ch']) + str(np.array(self.wave_data[i]).shape))\n # print(np.array(self.wave_data).shape)\n\n def clear_data(self):\n \"\"\"\n Called when self.clear_button is clicked.\n Resets the recording variables.\n \"\"\"\n self.loop_flag = False\n self.wave_data = []\n self.data_cnt = 0\n self.size_top = self.data_cnt\n self.update_data_cnt_label()\n self.size_checkbox.setEnabled(True)\n for i in range(len(self.ai_device)):\n self.write_checkbox[i].setEnabled(True)\n\n def write_files(self):\n \"\"\"\n Writes the recorded data to files.\n Called when self.write_button is clicked.\n \"\"\"\n file_path = self.path_line.get_value()\n file_name = self.file_name_line.get_value()\n print(\"--- Write File ---\")\n if not (file_path[-1:] == \"/\"):\n # console output\n sys.stderr.write(\"!! Path Error(missing \\\"/\\\") : \" + str(file_path) + \"\\n\")\n self.error_message_label.setText(\"!! Path Error(missing \\\"/\\\") !!\")\n return\n else:\n self.error_message_label.setText(\"\")\n print(\"Write file : \" + file_path + file_name + \"*\")\n\n # when the pkl-save checkbox is checked\n if self.write_format_checkbox.isChecked():\n print(\"Format : \" + \".pkl\")\n for i, dev in enumerate(self.rec_device):\n device_text = \"_\" + str(dev.info['name']) + \"_ch\" + str(int(dev.info['ch']) + 1)\n\n write_data = pd.DataFrame(self.wave_data[i])\n write_data.to_pickle(file_path + file_name + device_text + \"_wave.pkl\")\n # when unchecked (csv save)\n else:\n print(\"Format : \" + \".csv\")\n for i, dev in enumerate(self.rec_device):\n device_text = \"_\" + str(dev.info['name']) + \"_ch\" + str(int(dev.info['ch']) + 1)\n\n write_data = pd.DataFrame(self.wave_data[i])\n write_data.to_csv(file_path + file_name + device_text + \"_wave.csv\")\n\n def update_data_cnt_label(self):\n \"\"\"\n Updates the text of self.data_cnt_label.\n \"\"\"\n self.data_cnt_label.setText(\"Samples:\"+str(self.data_cnt))\n\n def path_setting(self):\n \"\"\"\n Sets the output path by assigning it to self.data_path.\n\n Notes\n -----\n Checked against ~/.pyio/param.json, in order of priority:\n 1. the work_folder setting\n 2. the plugin_folder setting\n 3. the home path\n If none of these can be found, print an error message and set\n self.data_path=\"\".\n \"\"\"\n # use System from pyio.Util to read ~/.pyio/param.json\n system = System()\n system.load_param()\n # default 1: work_folder from param.json\n work_dir = system.get_work_dir()\n # print(\"work_dir : \", work_dir, \" | type : \", type(work_dir))\n\n # default 2: plugin_folder from ~/.pyio/param.json (always False when more than one is specified)\n if not (os.path.exists(work_dir)):\n work_dir = str(system.get_pulgin_dir())\n # print(\"work_dir : \", work_dir, \" | type : \", type(work_dir))\n\n # default 3: ~ (home)\n if not (os.path.exists(work_dir)):\n work_dir = str(os.path.expanduser('~'))\n # print(\"work_dir : \", work_dir, \" | type : \", type(work_dir))\n\n # default 4: (empty)\n if not (os.path.exists(work_dir)):\n work_dir = \"\"\n # console output\n sys.stderr.write(\"!Error : All default path not exists. Please type path yourself.\\n\")\n self.error_message_label.setText(\"!! Please Type Path !!\")\n\n self.data_path = work_dir + \"/\"\n print(\"Path : \" + str(self.data_path))\n\n def path_line_copy(self):\n \"\"\"\n Called when self.path_line_copy_button is clicked.\n Copies the text of self.path_line to the clipboard.\n \"\"\"\n copy_path = self.path_line.get_value()\n clip.copy(copy_path)\n\n def re_connect(self, signal, new_handler=None, old_handler=None):\n \"\"\"\n Helper for rewiring signal connections (Button.connect etc.).\n \"\"\"\n while True:\n try:\n if old_handler is not None:\n signal.disconnect(old_handler)\n else:\n signal.disconnect()\n except TypeError:\n break\n if new_handler is not None:\n signal.connect(new_handler)\n\n def get_data(self):\n \"\"\"\n Returns the recorded data.\n Each entry is converted to a DataDevice for easier handling and the set\n is returned bundled in a DeviceManager.\n \"\"\"\n manager = DeviceManager()\n for i, d in enumerate(self.rec_device): # type:(int, IODevice)\n dev = DataDevice(name=d.info['name'],type=\"2d_data\",data=self.wave_data[i])\n manager.append(dev)\n\n return manager\n\n def get_device(self):\n \"\"\"\n Returns the devices used for the recording.\n :return: DeviceManager\n \"\"\"\n manager = DeviceManager()\n for i in self.rec_device:\n manager.append(i)\n return manager\n"
]
| [
[
"numpy.array",
"pandas.DataFrame"
]
]
|
ankahira/chainermnx | [
"ffee217a555a5d59a6ccd5d8b054e071d1d7d09a"
]
| [
"chainermnx/links/filter_parallel_convolution_2d.py"
]
| [
"import chainer\nimport chainermn\nimport numpy as np\nimport chainer.functions as F\n\n\nclass FilterParallelConvolution2D(chainer.links.Convolution2D):\n def __init__(self, comm, in_channels, out_channels, *args, **kwargs):\n self.comm = comm\n self.in_channels = in_channels\n self.filters = out_channels\n indices = np.arange(self.filters)\n indices = indices[indices % self.comm.size == 0] + self.comm.rank\n self.filter_indices = [i for i in indices if i < self.filters]\n self.new_filters = len(self.filter_indices)\n super(FilterParallelConvolution2D, self).__init__(self.in_channels, self.new_filters, *args, **kwargs)\n\n def __call__(self, x):\n y = super(FilterParallelConvolution2D, self).__call__(x)\n ys = chainermn.functions.allgather(self.comm, y)\n # Backward will be invoked as well as the ordinary chainer functions,\n # where gradients are reduced to each process\n return F.concat(ys, axis=1)\n\n"
]
| [
[
"numpy.arange"
]
]
|
ouhenio/stylegan2-ada-pytorch-1 | [
"aadac107a7b63569053a73793ea9580a3fdd671e"
]
| [
"projector_clip.py"
]
| [
"# Modified StyleGAN2 Projector with CLIP, addl. losses, kmeans, etc.\n# by Peter Baylies, 2021 -- @pbaylies on Twitter\n\n# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n\"\"\"Project given image to the latent space of pretrained network pickle.\"\"\"\n\nimport copy\nimport math\nimport os\nfrom time import perf_counter\n\nimport click\nimport imageio\nimport numpy as np\nimport PIL.Image\nimport torch\nimport torch.nn.functional as F\n#import madgrad\nimport SM3\nimport clip\n\nimport dnnlib\nimport legacy\n\nimage_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073]).cuda()\nimage_std = torch.tensor([0.26862954, 0.26130258, 0.27577711]).cuda()\n\ndef score_images(G, model, text, latents, device, label_class = 0, batch_size = 8):\n scores = np.array([])\n all_images = np.array([])\n for i in range(math.ceil(latents.shape[0]/batch_size)):\n images = G.synthesis(torch.tensor(latents[i*batch_size:(i+1)*batch_size,:,:], dtype=torch.float32, device=device), noise_mode='const')\n with torch.no_grad():\n image_input = (torch.clamp(images, -1, 1) + 1) * 0.5\n image_input = F.interpolate(image_input, size=(256, 256), mode='area')\n image_input = image_input[:, :, 16:240, 16:240] # 256 -> 224, center crop\n image_input -= image_mean[None, :, None, None]\n image_input /= image_std[None, :, None, None]\n score = model(image_input, text)[0]\n scores = np.append(scores, score.cpu().numpy())\n all_images = np.append(all_images, images.cpu().numpy())\n\n scores = np.array(scores)\n #scores = np.hstack(scores).flatten()\n #scores = scores.reshape(-1, *scores.shape[2:]).squeeze()\n #scores = scores.reshape(-1).squeeze()\n #print(scores.shape)\n #print(scores)\n scores = 1 - scores / np.linalg.norm(scores)\n all_images = np.array(all_images)\n #all_images = all_images.reshape(-1, *all_images.shape[2:])\n return scores, all_images\n\ndef cluster_latents(samples, num_clusters, device):\n from kmeans_pytorch import kmeans\n # data\n #print(samples.shape)\n data_size = samples.shape[0]\n dims = samples.shape[2]\n x = torch.from_numpy(samples)\n\n # kmeans\n print(f'Performing kmeans clustering using {data_size} latents into {num_clusters} clusters...')\n cluster_ids_x, cluster_centers = kmeans(\n X=x, num_clusters=num_clusters, distance='euclidean', device=device\n )\n return cluster_centers, cluster_ids_x\n\ndef project(\n G,\n target_image: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution\n target_text,\n *,\n num_steps = 300,\n w_avg_samples = 8192,\n initial_learning_rate = 0.02,\n initial_latent = None,\n initial_noise_factor = 0.01,\n lr_rampdown_length = 0.25,\n lr_rampup_length = 0.5,\n noise_ramp_length = 0.5,\n latent_range = 2.0,\n max_noise = 0.5,\n min_threshold = 0.6,\n use_vgg = True,\n use_clip = True,\n use_pixel = True,\n use_penalty = True,\n use_center = True,\n regularize_noise_weight = 1e5,\n kmeans = True,\n kmeans_clusters = 64,\n verbose = False,\n use_w_only = True,\n device: torch.device\n):\n if target_image is not None:\n assert target_image.shape == (G.img_channels, G.img_resolution, G.img_resolution)\n else:\n use_vgg = False\n use_pixel = False\n\n def 
logprint(*args):\n if verbose:\n print(*args)\n\n G = copy.deepcopy(G).eval().requires_grad_(False).to(device) # type: ignore\n\n # Compute w stats.\n logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')\n z_samples = np.random.randn(w_avg_samples, G.z_dim)\n labels = None\n if (G.mapping.c_dim):\n labels = torch.from_numpy(0.5*np.random.randn(w_avg_samples, G.mapping.c_dim)).to(device)\n w_samples = G.mapping(torch.from_numpy(z_samples).to(device), labels) # [N, L, C]\n w_samples = w_samples.cpu().numpy().astype(np.float32) # [N, L, C]\n w_samples_1d = w_samples[:, :1, :].astype(np.float32)\n\n w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, L, C]\n w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5\n std_dev = np.std(w_samples)\n\n kmeans_latents = None\n if initial_latent is not None:\n w_avg = initial_latent\n if w_avg.shape[1] == 1 and not use_w_only:\n w_avg = np.tile(w_avg, (1, G.mapping.num_ws, 1))\n else:\n if kmeans and use_clip and target_text is not None:\n kmeans_latents, cluster_ids_x = cluster_latents(w_samples_1d, kmeans_clusters, device)\n cluster_centers = torch.tensor(kmeans_latents, dtype=torch.float32, device=device, requires_grad=True), cluster_ids_x\n\n # Setup noise inputs.\n noise_bufs = { name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name }\n\n # Load VGG16 feature detector.\n if use_vgg:\n url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'\n with dnnlib.util.open_url(url) as f:\n vgg16 = torch.jit.load(f).eval().to(device)\n\n # Load CLIP\n if use_clip:\n model, transform = clip.load(\"ViT-B/16\", device=device)\n\n # Features for target image.\n if target_image is not None:\n target_images = target_image.unsqueeze(0).to(device).to(torch.float32)\n small_target = F.interpolate(target_images, size=(64, 64), mode='area')\n if use_center:\n center_target = F.interpolate(target_images, size=(448, 448), mode='area')[:, :, 112:336, 112:336]\n target_images = F.interpolate(target_images, size=(256, 256), mode='area')\n target_images = target_images[:, :, 16:240, 16:240] # 256 -> 224, center crop\n\n if use_vgg:\n vgg_target_features = vgg16(target_images, resize_images=False, return_lpips=True)\n if use_center:\n vgg_target_center = vgg16(center_target, resize_images=False, return_lpips=True)\n\n if use_clip:\n if target_image is not None:\n with torch.no_grad():\n clip_target_features = model.encode_image(((target_images / 255.0) - image_mean[None, :, None, None]) / image_std[None, :, None, None]).float()\n if use_center:\n clip_target_center = model.encode_image(((center_target / 255.0) - image_mean[None, :, None, None]) / image_std[None, :, None, None]).float()\n\n if kmeans and kmeans_latents is not None and use_clip and target_text is not None:\n scores, kmeans_images = score_images(G, model, target_text, kmeans_latents.repeat([1, G.mapping.num_ws, 1]), device=device)\n ind = np.argpartition(scores, 2)[:2]\n #w_avg = torch.median(kmeans_latents[ind],dim=0,keepdim=True)[0].repeat([1, G.mapping.num_ws, 1])\n\n filter_clusters = np.in1d(cluster_ids_x.cpu().numpy(), ind)\n filtered_latents = w_samples_1d[filter_clusters]\n kmeans_latents, cluster_ids_x = cluster_latents(filtered_latents, kmeans_clusters // 2, device)\n cluster_centers = torch.tensor(kmeans_latents, dtype=torch.float32, device=device, requires_grad=True), cluster_ids_x\n\n batch_size = 8\n if kmeans_latents.shape[0] < 8:\n batch_size = kmeans_latents.shape[0]\n scores, kmeans_images = 
score_images(G, model, target_text, kmeans_latents.repeat([1, G.mapping.num_ws, 1]), device=device, batch_size=batch_size)\n ind = np.argpartition(scores, 2)[:2]\n #w_avg = torch.median(kmeans_latents[ind],dim=0,keepdim=True)[0].repeat([1, G.mapping.num_ws, 1])\n\n filter_clusters = np.in1d(cluster_ids_x.cpu().numpy(), ind)\n final_latents = filtered_latents[filter_clusters]\n batch_size = 8\n if final_latents.shape[0] < 8:\n batch_size = final_latents.shape[0]\n scores, kmeans_images = score_images(G, model, target_text, np.tile(final_latents, (1, G.mapping.num_ws, 1)), device=device, batch_size=batch_size)\n ind = np.argpartition(scores, 1)[:1]\n final_candidates = torch.tensor(final_latents, dtype=torch.float32, device=device, requires_grad=True)\n w_avg = torch.median(final_candidates[ind],dim=0,keepdim=True)[0]\n if not use_w_only:\n w_avg = w_avg.repeat([1, G.mapping.num_ws, 1])\n else:\n if (use_w_only):\n w_avg = np.mean(w_avg, axis=1, keepdims=True)\n\n w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True) # pylint: disable=not-callable\n w_avg_tensor = w_opt.clone()\n with torch.no_grad():\n latent_range = torch.max(w_avg_tensor) + std_dev\n w_out = torch.zeros([num_steps] + list(w_opt.shape[1:]), dtype=torch.float32, device=device)\n #optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), lr=initial_learning_rate)\n #optimizer = madgrad.MADGRAD([w_opt] + list(noise_bufs.values()), lr=initial_learning_rate)\n optimizer = SM3.SM3([w_opt] + list(noise_bufs.values()), lr=initial_learning_rate)\n\n # Init noise.\n for buf in noise_bufs.values():\n buf[:] = torch.randn_like(buf)\n buf.requires_grad = True\n\n for step in range(num_steps):\n # Learning rate schedule.\n t = step / num_steps\n w_noise_scale = max_noise * w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2\n lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)\n lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)\n lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)\n lr = initial_learning_rate * lr_ramp\n #print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # Synth images from opt_w.\n w_noise = torch.randn_like(w_opt) * w_noise_scale\n ws = w_opt + w_noise\n if use_w_only:\n ws = ws.repeat([1, G.mapping.num_ws, 1])\n synth_images = G.synthesis(torch.clamp(ws,-latent_range,latent_range), noise_mode='const')\n\n # Downsample image to 256x256 if it's larger than that. 
CLIP was built for 224x224 images.\n synth_images = (torch.clamp(synth_images, -1, 1) + 1) * (255/2)\n small_synth = F.interpolate(synth_images, size=(64, 64), mode='area')\n if use_center:\n center_synth = F.interpolate(synth_images, size=(448, 448), mode='area')[:, :, 112:336, 112:336]\n synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')\n\n # Features for synth images.\n synth_images = synth_images[:, :, 16:240, 16:240] # 256 -> 224, center crop\n\n dist = 0\n\n if use_vgg:\n vgg_synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)\n vgg_dist = (vgg_target_features - vgg_synth_features).square().sum()\n if use_center:\n vgg_synth_center = vgg16(center_synth, resize_images=False, return_lpips=True)\n vgg_dist += (vgg_target_center - vgg_synth_center).square().sum()\n vgg_dist *= 6\n dist += F.relu(vgg_dist*vgg_dist - min_threshold)\n\n if use_clip:\n clip_synth_image = ((synth_images / 255.0) - image_mean[None, :, None, None]) / image_std[None, :, None, None]\n clip_synth_features = model.encode_image(clip_synth_image).float()\n adj_center = 2.0\n\n if use_center:\n clip_cynth_center_image = ((center_synth / 255.0) - image_mean[None, :, None, None]) / image_std[None, :, None, None]\n adj_center = 1.0\n clip_synth_center = model.encode_image(clip_cynth_center_image).float()\n\n if target_image is not None:\n clip_dist = (clip_target_features - clip_synth_features).square().sum()\n if use_center:\n clip_dist += (clip_target_center - clip_synth_center).square().sum()\n dist += F.relu(0.5 + adj_center*clip_dist - min_threshold)\n\n if target_text is not None:\n clip_text = 1 - model(clip_synth_image, target_text)[0].sum() / 100\n if use_center:\n clip_text += 1 - model(clip_cynth_center_image, target_text)[0].sum() / 100\n dist += 2*F.relu(adj_center*clip_text*clip_text - min_threshold / adj_center)\n\n if use_pixel:\n pixel_dist = (target_images - synth_images).abs().sum() / 2000000.0\n if use_center:\n pixel_dist += (center_target - center_synth).abs().sum() / 2000000.0\n pixel_dist += (small_target - small_synth).square().sum() / 800000.0\n pixel_dist /= 4\n dist += F.relu(lr_ramp * pixel_dist - min_threshold)\n\n if use_penalty:\n #l1_penalty = (w_opt - w_avg_tensor).abs().sum() / 5000.0\n penalty_range = torch.sqrt(torch.arange(start=1,end=G.mapping.num_ws+1).float()).to(device)\n l1_penalty = ((w_opt - w_avg_tensor)*penalty_range[None, :, None]).abs().sum() / 20000.0\n l2_penalty = ((w_opt - w_avg_tensor)*penalty_range[None, :, None]).square().sum() / 10000.0\n\n dist += F.relu(lr_ramp * l1_penalty - min_threshold)\n dist += F.relu(lr_ramp * l2_penalty - min_threshold)\n\n # Noise regularization.\n reg_loss = 0.0\n for v in noise_bufs.values():\n noise = v[None,None,:,:] # must be [1,1,H,W] for F.avg_pool2d()\n while True:\n reg_loss += (noise*torch.roll(noise, shifts=1, dims=3)).mean()**2\n reg_loss += (noise*torch.roll(noise, shifts=1, dims=2)).mean()**2\n if noise.shape[2] <= 8:\n break\n noise = F.avg_pool2d(noise, kernel_size=2)\n #print(vgg_dist, clip_dist, pixel_dist, l1_penalty, reg_loss * regularize_noise_weight)\n loss = dist + reg_loss * regularize_noise_weight\n\n # Step\n optimizer.zero_grad(set_to_none=True)\n loss.backward()\n optimizer.step()\n logprint(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')\n #print(torch.max(w_opt))\n\n #if (torch.max(w_opt) > latent_range):\n # with torch.no_grad():\n # initial_learning_rate *= 0.9\n # torch.add(w_opt, -w_avg_tensor, out=w_opt)\n # torch.mul(w_opt, 
0.8, out=w_opt)\n # torch.add(w_opt, w_avg_tensor, out=w_opt)\n # print(torch.max(w_opt))\n\n # Save projected W for each optimization step.\n w_out[step] = w_opt.detach()[0]\n # Normalize noise.\n with torch.no_grad():\n for buf in noise_bufs.values():\n buf -= buf.mean()\n buf *= buf.square().mean().rsqrt()\n\n return w_out\n\n#----------------------------------------------------------------------------\n\[email protected]()\[email protected]('--network', 'network_pkl', help='Network pickle filename', required=True)\[email protected]('--target-image', 'target_fname', help='Target image file to project to', required=False, metavar='FILE', default=None)\[email protected]('--target-text', help='Target text to project to', required=False, default=None)\[email protected]('--initial-latent', help='Initial latent', default=None)\[email protected]('--lr', help='Learning rate', type=float, default=0.3, show_default=True)\[email protected]('--num-steps', help='Number of optimization steps', type=int, default=300, show_default=True)\[email protected]('--seed', help='Random seed', type=int, default=303, show_default=True)\[email protected]('--save-video', help='Save an mp4 video of optimization progress', type=bool, default=True, show_default=True)\[email protected]('--outdir', help='Where to save the output images', required=True, metavar='DIR')\[email protected]('--use-vgg', help='Use VGG16 in the loss', type=bool, default=True, show_default=True)\[email protected]('--use-clip', help='Use CLIP in the loss', type=bool, default=True, show_default=True)\[email protected]('--use-pixel', help='Use L1/L2 distance on pixels in the loss', type=bool, default=True, show_default=True)\[email protected]('--use-penalty', help='Use a penalty on latent values distance from the mean in the loss', type=bool, default=True, show_default=True)\[email protected]('--use-center', help='Optimize against an additional center image crop', type=bool, default=True, show_default=True)\[email protected]('--min-threshold', help='Minimum threshold for ReLU cutoff', required=False, default=0.6, show_default=True)\[email protected]('--kmeans', help='Perform kmeans clustering for selecting initial latents', type=bool, default=True, show_default=True)\[email protected]('--use-w-only', help='Project into w space instead of w+ space', type=bool, default=False, show_default=True)\ndef run_projection(\n network_pkl: str,\n target_fname: str,\n target_text: str,\n initial_latent: str,\n outdir: str,\n save_video: bool,\n seed: int,\n lr: float,\n num_steps: int,\n use_vgg: bool,\n use_clip: bool,\n use_pixel: bool,\n use_penalty: bool,\n use_center: bool,\n min_threshold: float,\n kmeans: bool,\n use_w_only: bool,\n):\n \"\"\"Project given image to the latent space of pretrained network pickle.\n\n Examples:\n\n \\b\n python projector.py --outdir=out --target=~/mytargetimg.png \\\\\n --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl\n \"\"\"\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n # Load networks.\n print('Loading networks from \"%s\"...' 
% network_pkl)\n device = torch.device('cuda')\n with dnnlib.util.open_url(network_pkl) as fp:\n G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore\n\n # Load target image.\n target_image = None\n if target_fname:\n target_pil = PIL.Image.open(target_fname).convert('RGB').filter(PIL.ImageFilter.SHARPEN)\n\n w, h = target_pil.size\n s = min(w, h)\n target_pil = target_pil.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))\n target_pil = target_pil.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)\n target_uint8 = np.array(target_pil, dtype=np.uint8)\n target_image = torch.tensor(target_uint8.transpose([2, 0, 1]), device=device)\n\n if target_text:\n target_text = torch.cat([clip.tokenize(target_text)]).to(device)\n\n if initial_latent is not None:\n initial_latent = np.load(initial_latent)\n initial_latent = initial_latent[initial_latent.files[0]]\n\n # Optimize projection.\n start_time = perf_counter()\n projected_w_steps = project(\n G,\n target_image=target_image,\n target_text=target_text,\n initial_latent=initial_latent,\n initial_learning_rate=lr,\n num_steps=num_steps,\n use_vgg=use_vgg,\n use_clip=use_clip,\n use_pixel=use_pixel,\n use_penalty=use_penalty,\n use_center=use_center,\n kmeans=kmeans,\n use_w_only=use_w_only,\n device=device,\n verbose=True\n )\n print (f'Elapsed: {(perf_counter()-start_time):.1f} s')\n\n # Render debug output: optional video and projected image and W vector.\n os.makedirs(outdir, exist_ok=True)\n if save_video:\n video = imageio.get_writer(f'{outdir}/proj.mp4', mode='I', fps=10, codec='libx264', bitrate='16M')\n print (f'Saving optimization progress video \"{outdir}/proj.mp4\"')\n for projected_w in projected_w_steps:\n if use_w_only:\n synth_image = G.synthesis(projected_w.unsqueeze(0).repeat([1, G.mapping.num_ws, 1]), noise_mode='const')\n else:\n synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')\n synth_image = (synth_image + 1) * (255/2)\n synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()\n if target_fname:\n video.append_data(np.concatenate([target_uint8, synth_image], axis=1))\n else:\n video.append_data(synth_image)\n video.close()\n\n # Save final projected frame and W vector.\n if target_fname:\n target_pil.save(f'{outdir}/target.png')\n projected_w = projected_w_steps[-1]\n if use_w_only:\n synth_image = G.synthesis(projected_w.unsqueeze(0).repeat([1, G.mapping.num_ws, 1]), noise_mode='const')\n else:\n synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')\n synth_image = (synth_image + 1) * (255/2)\n synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()\n PIL.Image.fromarray(synth_image, 'RGB').save(f'{outdir}/proj.png')\n np.savez(f'{outdir}/projected_w.npz', w=projected_w.unsqueeze(0).cpu().numpy())\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n run_projection() # pylint: disable=no-value-for-parameter\n\n#----------------------------------------------------------------------------\n"
]
| [
[
"torch.randn_like",
"torch.jit.load",
"torch.max",
"numpy.concatenate",
"numpy.mean",
"numpy.random.randn",
"torch.nn.functional.interpolate",
"torch.no_grad",
"torch.device",
"torch.median",
"torch.from_numpy",
"torch.tensor",
"numpy.std",
"torch.nn.functional.relu",
"numpy.argpartition",
"torch.arange",
"torch.roll",
"numpy.load",
"torch.nn.functional.avg_pool2d",
"numpy.array",
"numpy.sum",
"numpy.random.seed",
"torch.manual_seed",
"numpy.linalg.norm",
"numpy.tile",
"numpy.cos",
"torch.clamp"
]
]
|
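
A minimal sketch of the noise regularization used in the projector row above: after each optimization step, every noise buffer is recentred to zero mean and rescaled to unit root-mean-square so the optimizer cannot hide image content in the noise inputs. The buffer names and shapes below are illustrative stand-ins, not taken from an actual generator (in the projector they come from the synthesis network's noise buffers).

    import torch

    # Stand-in noise buffers; shapes are hypothetical.
    noise_bufs = {
        f'noise{i}': torch.randn(1, 1, 2 ** (i + 2), 2 ** (i + 2))
        for i in range(3)
    }

    with torch.no_grad():
        for buf in noise_bufs.values():
            buf -= buf.mean()                    # zero mean
            buf *= buf.square().mean().rsqrt()   # unit RMS
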
samyam/DeepSpeed | [
"22aca3694ad4f01d13813b9630caac8683bb22e9"
]
| [
"deepspeed/runtime/pipe/module.py"
]
| [
"import os\nimport glob\nimport enum\n\nimport re as regex\n\nfrom collections import defaultdict\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\n\nfrom deepspeed.utils import logger\nfrom .. import utils as ds_utils\nfrom ..activation_checkpointing import checkpointing\nfrom .topology import PipeDataParallelTopology, PipelineParallelGrid\nfrom deepspeed.runtime.state_dict_factory import SDLoaderFactory\n\n\nclass PipelineError(Exception):\n \"\"\"Errors related to the use of deepspeed.PipelineModule \"\"\"\n\n\nclass LayerSpec:\n \"\"\"Building block for specifying pipeline-parallel modules.\n\n LayerSpec stores the type information and parameters for each stage in a\n PipelineModule. For example:\n\n .. code-block:: python\n\n nn.Sequence(\n torch.nn.Linear(self.in_dim, self.hidden_dim, bias=False),\n torch.nn.Linear(self.hidden_hidden, self.out_dim)\n )\n\n becomes\n\n .. code-block:: python\n\n layer_specs = [\n LayerSpec(torch.nn.Linear, self.in_dim, self.hidden_dim, bias=False),\n LayerSpec(torch.nn.Linear, self.hidden_hidden, self.out_dim)]\n ]\n \"\"\"\n def __init__(self, typename, *module_args, **module_kwargs):\n self.typename = typename\n self.module_args = module_args\n self.module_kwargs = module_kwargs\n\n if not issubclass(typename, nn.Module):\n raise RuntimeError('LayerSpec only supports torch.nn.Module types.')\n\n if dist.is_initialized():\n self.global_rank = dist.get_rank()\n else:\n self.global_rank = -1\n\n def __repr__(self):\n return ds_utils.call_to_str(self.typename.__name__,\n self.module_args,\n self.module_kwargs)\n\n def build(self, log=False):\n \"\"\"Build the stored specification.\"\"\"\n if log:\n logger.info(f'RANK={self.global_rank} building {repr(self)}')\n\n return self.typename(*self.module_args, **self.module_kwargs)\n\n\nclass TiedLayerSpec(LayerSpec):\n def __init__(self,\n key,\n typename,\n *module_args,\n forward_fn=None,\n tied_weight_attr='weight',\n **module_kwargs):\n super().__init__(typename, *module_args, **module_kwargs)\n self.key = key\n self.forward_fn = forward_fn\n self.tied_weight_attr = tied_weight_attr\n\n\nclass PipelineModule(nn.Module):\n def __init__(self,\n layers,\n num_stages=None,\n topology=None,\n loss_fn=None,\n seed_layers=False,\n seed_fn=None,\n base_seed=1234,\n partition_method='parameters',\n activation_checkpoint_interval=0,\n activation_checkpoint_func=checkpointing.checkpoint):\n \"\"\"Modules to be parallelized with pipeline parallelism.\n\n The key constraint that enables pipeline parallelism is the\n representation of the forward pass as a sequence of layers\n and the enforcement of a simple interface between them. The\n forward pass is implicitly defined by the module ``layers``. The key\n assumption is that the output of each layer can be directly fed as\n input to the next, like a ``torch.nn.Sequence``. The forward pass is\n implicitly:\n\n .. code-block:: python\n\n def forward(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x)\n return x\n\n .. note::\n Pipeline parallelism is not compatible with ZeRO-2 and ZeRO-3.\n\n Args:\n layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module.\n num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided.\n topology (``deepseed.pipe.ProcessTopology``, optional): Defines the axes of parallelism axes for training. 
Must be provided if ``num_stages`` is ``None``.\n loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)``\n base_seed (int, optional): [description]. Defaults to 1234.\n partition_method (str, optional): [description]. Defaults to 'parameters'.\n activation_checkpoint_interval (int, optional): The granularity activation checkpointing in terms of number of layers. 0 disables activation checkpointing.\n activation_checkpoint_func (callable, optional): The function to use for activation checkpointing. Defaults to ``deepspeed.checkpointing.checkpoint``.\n \"\"\"\n\n super().__init__()\n\n if num_stages is None and topology is None:\n raise RuntimeError('must provide num_stages or topology')\n\n self.micro_offset = 0\n\n self.loss_fn = loss_fn\n\n self.seed_layers = seed_layers\n self.seed_fn = seed_fn\n self.base_seed = base_seed\n if dist.get_rank() == 0:\n try:\n seed_str = self.seed_fn.__name__\n except AttributeError:\n seed_str = None\n print(\n f'SEED_LAYERS={self.seed_layers} BASE_SEED={self.base_seed} SEED_FN={seed_str}'\n )\n\n # Setup world info\n self.world_group = dist.new_group(ranks=range(dist.get_world_size()))\n self.global_rank = dist.get_rank(group=self.world_group)\n self.world_size = dist.get_world_size(group=self.world_group)\n self.local_rank = int(os.environ.get(\"LOCAL_RANK\", None))\n assert self.local_rank != None\n\n if topology:\n self._topo = topology\n self.num_stages = self._topo.get_dim('pipe')\n else:\n self.num_stages = num_stages\n if topology is None:\n if self.world_size % self.num_stages != 0:\n raise RuntimeError(\n f'num_stages ({self.num_stages}) must divide distributed world size ({self.world_size})'\n )\n dp = self.world_size // num_stages\n topology = PipeDataParallelTopology(num_pp=num_stages, num_dp=dp)\n self._topo = topology\n\n # Contruct communicators for pipeline topology\n self._grid = PipelineParallelGrid(process_group=self.world_group,\n topology=self._topo)\n\n self.stage_id = self._topo.get_coord(self.global_rank).pipe\n\n # Initialize partition information\n self._layer_specs = list(layers)\n self._num_layers = len(self._layer_specs)\n self._local_start = 0\n self._local_stop = None\n self._partition_layers(method=partition_method)\n\n self.forward_funcs = []\n self.tied_modules = nn.ModuleDict()\n self.tied_weight_attrs = {}\n\n # Offset the random seed by the stage ID.\n #newseed = torch.cuda.initial_seed() + self._grid.get_stage_id()\n #ds_utils.set_random_seed(newseed)\n\n #with torch.random.fork_rng(devices=[torch.cuda.current_device()]):\n self._build()\n self.to(f'cuda:{self.local_rank}')\n\n self.tied_comms = self._index_tied_modules()\n self._synchronize_tied_weights()\n\n self.activation_checkpoint_interval = activation_checkpoint_interval\n self.activation_checkpoint_func = activation_checkpoint_func\n\n def _build(self):\n specs = self._layer_specs\n\n for local_idx, layer in enumerate(specs[self._local_start:self._local_stop]):\n layer_idx = local_idx + self._local_start\n if self.seed_layers:\n if self.seed_fn:\n self.seed_fn(self.base_seed + layer_idx)\n else:\n ds_utils.set_random_seed(self.base_seed + layer_idx)\n\n # Recursively build PipelineModule objects\n if isinstance(layer, PipelineModule):\n raise NotImplementedError('RECURSIVE BUILD NOT YET IMPLEMENTED')\n\n # LayerSpec objects contain an nn.Module that should be allocated now.\n elif isinstance(layer, nn.Module):\n name = str(layer_idx)\n self.forward_funcs.append(layer)\n self.add_module(name, layer)\n\n # TiedLayerSpec objects 
contain an nn.Module that should be allocated now.\n elif isinstance(layer, TiedLayerSpec):\n # Build and register the module if we haven't seen it before.\n if layer.key not in self.tied_modules:\n self.tied_modules[layer.key] = layer.build()\n self.tied_weight_attrs[layer.key] = layer.tied_weight_attr\n\n if layer.forward_fn is None:\n # Just use forward()\n self.forward_funcs.append(self.tied_modules[layer.key])\n else:\n # User specified fn with args (module, input)\n self.forward_funcs.append(\n partial(layer.forward_fn,\n self.tied_modules[layer.key]))\n\n # LayerSpec objects contain an nn.Module that should be allocated now.\n elif isinstance(layer, LayerSpec):\n module = layer.build()\n name = str(layer_idx)\n self.forward_funcs.append(module)\n self.add_module(name, module)\n\n # Last option: layer may be a functional (e.g., lambda). We do nothing in\n # that case and just use it in forward()\n else:\n self.forward_funcs.append(layer)\n\n # All pipeline parameters should be considered as model parallel in the context\n # of our FP16 optimizer\n for p in self.parameters():\n p.model_parallel = True\n\n def _count_layer_params(self):\n \"\"\"Count the trainable parameters in individual layers.\n\n This routine will only build one layer at a time.\n\n Returns:\n A list of the number of parameters in each layer.\n \"\"\"\n param_counts = [0] * len(self._layer_specs)\n for idx, layer in enumerate(self._layer_specs):\n if isinstance(layer, LayerSpec):\n l = layer.build()\n params = filter(lambda p: p.requires_grad, l.parameters())\n param_counts[idx] = sum(p.numel() for p in params)\n elif isinstance(layer, nn.Module):\n params = filter(lambda p: p.requires_grad, layer.parameters())\n param_counts[idx] = sum(p.numel() for p in params)\n return param_counts\n\n def _find_layer_type(self, layername):\n idxs = []\n typeregex = regex.compile(layername, regex.IGNORECASE)\n for idx, layer in enumerate(self._layer_specs):\n name = None\n if isinstance(layer, LayerSpec):\n name = layer.typename.__name__\n elif isinstance(layer, nn.Module):\n name = layer.__class__.__name__\n else:\n try:\n name = layer.__name__\n except AttributeError:\n continue\n if typeregex.search(name):\n idxs.append(idx)\n\n if len(idxs) == 0:\n raise RuntimeError(\n f\"Partitioning '{layername}' found no valid layers to partition.\")\n return idxs\n\n def forward(self, forward_input):\n # We need to offset the seed by the microbatch ID. Save it in a local var to\n # ensure it is preserved in the closure. 
Otherwise checkpointed forward funcs\n # will see a different offset.\n self.micro_offset += 1\n\n def exec_range_func(start, end):\n ''' Helper function to be used with checkpoint()\n Adapted from torch.utils.checkpoint:checkpoint_sequential()\n '''\n local_micro_offset = self.micro_offset + 1\n\n def exec_func(*inputs):\n # Single tensor inputs need to be unwrapped\n if len(inputs) == 1:\n inputs = inputs[0]\n for idx, layer in enumerate(self.forward_funcs[start:end]):\n self.curr_layer = idx + self._local_start\n if self.seed_layers:\n new_seed = (self.base_seed *\n local_micro_offset) + self.curr_layer\n if self.seed_fn:\n self.seed_fn(new_seed)\n else:\n ds_utils.set_random_seed(new_seed)\n\n inputs = layer(inputs)\n return inputs\n\n return exec_func\n\n if self.activation_checkpoint_interval == 0:\n func = exec_range_func(0, len(self.forward_funcs))\n x = func(forward_input)\n else:\n num_layers = len(self.forward_funcs)\n x = forward_input\n for start_idx in range(0, num_layers, self.activation_checkpoint_interval):\n end_idx = min(start_idx + self.activation_checkpoint_interval,\n num_layers)\n\n funcs = self.forward_funcs[start_idx:end_idx]\n # Since we either pass tensors or tuples of tensors without unpacking, we\n # need to be careful not to double-wrap tensors with tuple.\n if not isinstance(x, tuple):\n x = (x, )\n\n if self._is_checkpointable(funcs):\n x = self.activation_checkpoint_func(\n exec_range_func(start_idx,\n end_idx),\n *x)\n else:\n x = exec_range_func(start_idx, end_idx)(*x)\n return x\n\n def _partition_layers(self, method='uniform'):\n num_stages = self._topo.get_dim('pipe')\n stage_id = self._topo.get_coord(self.global_rank).pipe\n\n if self.global_rank == 0:\n logger.info(f'Partitioning pipeline stages with method {method}')\n\n method = method.lower()\n\n # Each stage gets a simple uniform number of layers.\n if method == 'uniform':\n num_layers = len(self._layer_specs)\n self.parts = ds_utils.partition_uniform(num_items=num_layers,\n num_parts=num_stages)\n elif method == 'parameters':\n param_counts = self._count_layer_params()\n self.parts = ds_utils.partition_balanced(weights=param_counts,\n num_parts=num_stages)\n elif method.startswith('type:'):\n layertype = method.split(':')[1]\n binary_weights = [0] * len(self._layer_specs)\n for idx in self._find_layer_type(layertype):\n binary_weights[idx] = 1\n else:\n self.parts = ds_utils.partition_balanced(weights=binary_weights,\n num_parts=num_stages)\n elif method == 'profile':\n raise NotImplementedError(f'Partitioning method {method} not implemented.')\n else:\n raise NotImplementedError(f'Partitioning method {method} not implemented.')\n\n # Print some information on the partitioning.\n if self.global_rank == 0:\n for stage in range(num_stages):\n start = self.parts[stage]\n stop = self.parts[stage + 1]\n print(f'stage={stage} layers={stop - start}')\n for idx, layer in enumerate(self._layer_specs[start:stop]):\n name = str(layer)\n if isinstance(layer, LayerSpec):\n name = layer.typename.__name__\n if isinstance(layer, nn.Module):\n name = layer.__class__.__name__\n else:\n try:\n name = layer.__name__\n except AttributeError:\n pass\n print(f' {idx+start:2d}: {name}')\n if self.loss_fn:\n try:\n print(f' loss: {self.loss_fn.__name__}')\n except AttributeError:\n print(f' loss: {self.loss_fn.__class__.__name__}')\n\n self._set_bounds(start=self.parts[stage_id], stop=self.parts[stage_id + 1])\n\n def allreduce_tied_weight_gradients(self):\n '''All reduce the gradients of the tied weights between tied 
stages'''\n for key, comm in self.tied_comms.items():\n weight = getattr(self.tied_modules[key], comm['weight_attr'])\n dist.all_reduce(weight.grad, group=comm['group'])\n\n def _synchronize_tied_weights(self):\n for key, comm in self.tied_comms.items():\n dist.broadcast(\n getattr(comm['module'],\n comm['weight_attr']),\n src=min(comm['ranks']),\n group=comm['group'],\n )\n\n def _index_tied_modules(self):\n ''' Build communication structures for tied modules. '''\n tied_comms = {}\n if self._topo.get_dim('pipe') == 1:\n return tied_comms\n\n specs = self._layer_specs\n tie_keys = set(s.key for s in specs if isinstance(s, TiedLayerSpec))\n for key in tie_keys:\n # Find the layers that the tied module appears in\n tied_layers = []\n for idx, layer in enumerate(specs):\n if isinstance(layer, TiedLayerSpec) and layer.key == key:\n tied_layers.append(idx)\n # Find all stages with this tied module\n # TODO: Would be nice to remove the nested data/model parallelism loops and\n # TODO: instead generalize in some way, since we really just care about the\n # TODO: stage that owns the tied layer. Then loop over each (dp, mp, ...)\n # TODO: fiber to generate process groups.\n tied_stages = set(self.stage_owner(idx) for idx in tied_layers)\n for dp in range(self._grid.data_parallel_size):\n for mp in range(self._grid.get_slice_parallel_world_size()):\n tied_ranks = []\n for s in sorted(tied_stages):\n if self._grid.get_slice_parallel_world_size() > 1:\n tied_ranks.append(\n self._grid.stage_to_global(stage_id=s,\n data=dp,\n model=mp))\n else:\n tied_ranks.append(\n self._grid.stage_to_global(stage_id=s,\n data=dp))\n group = dist.new_group(ranks=tied_ranks)\n\n # Record this tied module if we own a local copy of it.\n if self.global_rank in tied_ranks:\n assert key in self.tied_modules\n if key in self.tied_modules:\n tied_comms[key] = {\n 'ranks': tied_ranks,\n 'group': group,\n 'weight_attr': self.tied_weight_attrs[key],\n 'module': self.tied_modules[key],\n }\n # Only count the tied module once in the eyes of the FP16 optimizer\n if self.global_rank != tied_ranks[0]:\n for p in self.tied_modules[key].parameters():\n p.model_parallel = False\n '''\n if len(tied_comms) > 0:\n print(f'RANK={self.global_rank} tied_comms={tied_comms}')\n '''\n\n return tied_comms\n\n def partitions(self):\n return self.parts\n\n def stage_owner(self, layer_idx):\n assert 0 <= layer_idx < self._num_layers\n for stage in range(self._topo.get_dim('pipe')):\n if self.parts[stage] <= layer_idx < self.parts[stage + 1]:\n return stage\n raise RuntimeError(f'Layer {layer_idx} not owned? parts={self.parts}')\n\n def _set_bounds(self, start=None, stop=None):\n \"\"\"Manually define the range of layers that will be built on this process.\n\n These boundaries are treated as list slices and so start is inclusive and stop is\n exclusive. The default of None for both results in all layers being built\n locally.\n \"\"\"\n self._local_start = start\n self._local_stop = stop\n\n def set_checkpoint_interval(self, interval):\n assert interval >= 0\n self.checkpoint_interval = interval\n\n def topology(self):\n \"\"\" ProcessTopology object to query process mappings. \"\"\"\n return self._topo\n\n def mpu(self):\n return self._grid\n\n def num_pipeline_stages(self):\n return self._topo.get_dim('pipe')\n\n def ckpt_prefix(self, checkpoints_path, tag):\n \"\"\"Build a prefix for all checkpoint files written by this module. 
\"\"\"\n # All checkpoint files start with this\n rank_name = 'module'\n\n # Data parallelism is omitted from the naming convention because we are agnostic\n # to this in the checkpoint.\n omit_dims = frozenset(['data'])\n axes = [a for a in self._grid._topo.get_axis_names() if a not in omit_dims]\n for dim in axes:\n rank = getattr(self._grid._topo.get_coord(rank=self.global_rank), dim)\n rank_name += f'-{dim}_{rank:02d}'\n\n ckpt_name = os.path.join(checkpoints_path, str(tag), rank_name)\n return ckpt_name\n\n def ckpt_layer_path(self, ckpt_dir, local_layer_idx):\n \"\"\"Customize a prefix for a specific pipeline module layer. \"\"\"\n idx = local_layer_idx + self._local_start\n layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}')\n rank_repr = self._grid._topo.get_rank_repr(rank=self.global_rank)\n if rank_repr is not '':\n layer_ckpt_path += f'-{rank_repr}'\n layer_ckpt_path += '-model_states.pt'\n return layer_ckpt_path\n\n def ckpt_layer_path_list(self, ckpt_dir, local_layer_idx):\n \"\"\"Get all ckpt file list for a specific pipeline module layer. \"\"\"\n idx = local_layer_idx + self._local_start\n layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}')\n layer_ckpt_path += \"*model_states.pt\"\n ckpt_files = glob.glob(layer_ckpt_path)\n ckpt_files.sort()\n return ckpt_files\n\n def save_state_dict(self, save_dir):\n if self._grid.data_parallel_id != 0:\n return\n\n os.makedirs(save_dir, exist_ok=True)\n layer_offset = self._local_start\n for idx, layer in enumerate(self.forward_funcs):\n model_ckpt_path = self.ckpt_layer_path(save_dir, idx)\n if not hasattr(layer, 'state_dict'):\n continue\n torch.save(layer.state_dict(), model_ckpt_path)\n\n def load_state_dir(self, load_dir, strict=True):\n for idx, layer in enumerate(self.forward_funcs):\n # Functions, etc. will not have state_dicts\n if not hasattr(layer, 'load_state_dict'):\n continue\n\n # get all checkpoint files for the layer.\n model_ckpt_list = self.ckpt_layer_path_list(load_dir, idx)\n mp_rank = self._grid.get_slice_parallel_rank()\n mp_world_size = self._grid.get_slice_parallel_world_size()\n\n sd_loader = SDLoaderFactory.get_sd_loader(model_ckpt_list, version=2.0)\n load_path, checkpoint, _ = sd_loader.load(mp_world_size, mp_rank, module_key=None, is_pipe_parallel=True)\n\n layer.load_state_dict(checkpoint)\n\n if self._grid.data_parallel_id == 0:\n logger.info(\n f'RANK={self.global_rank} Loaded layer={idx+self._local_start} file={load_path}'\n )\n\n self._synchronize_tied_weights()\n\n def _is_checkpointable(self, funcs):\n if self.__class__.__name__ == 'GPT2ModelPipe':\n return all('ParallelTransformerLayerPipe' in f.__class__.__name__\n for f in funcs)\n\n params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)]\n return any(len(list(p)) > 0 for p in params)\n"
]
| [
[
"torch.nn.ModuleDict",
"torch.distributed.is_initialized",
"torch.distributed.new_group",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.distributed.all_reduce"
]
]
|
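
A hedged sketch of how the LayerSpec/PipelineModule API in the row above is meant to be used: LayerSpec stores a module's type and constructor arguments so each pipeline stage can build only the layers it owns. The layer sizes and stage count here are invented for illustration, and an actual run needs a distributed launch (e.g. the deepspeed launcher) so that torch.distributed is initialized before PipelineModule is constructed.

    import torch.nn as nn
    from deepspeed.pipe import LayerSpec, PipelineModule

    hidden = 128
    layer_specs = [
        LayerSpec(nn.Linear, hidden, hidden),  # built lazily on its owning stage
        LayerSpec(nn.ReLU),
        LayerSpec(nn.Linear, hidden, 10),
    ]

    model = PipelineModule(
        layers=layer_specs,
        num_stages=2,                   # pipeline-parallel degree
        partition_method='parameters',  # balance stages by parameter count
    )
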
jonathantelliott/mobile-telecommunications | [
"32df68d05bf12088e6a1e635820452f6870f90db"
]
| [
"code/counterfactuals/infrastructureequilibrium.py"
]
| [
"# %%\nimport copy\n\nimport numpy as np\n\nfrom scipy.optimize import fsolve\n\nimport counterfactuals.infrastructurefunctions as infr\nimport counterfactuals.transmissionequilibrium as transeq\nimport counterfactuals.priceequilibrium as pe\nimport counterfactuals.welfare as welfare\n\nimport demand.blpextension as blp\nimport demand.dataexpressions as de\n\n# %%\ndef pi_deriv_R(R, bw, gamma, ds, xis, theta, pop, market_size, c_u, symmetric=False, impute_MVNO={'impute': False}, q_0=None, eps=0.01):\n \"\"\"\n Return the derivative of the operating income function with respect to cell radius, based on two-sided numerical derivative\n \n Parameters\n ----------\n R : ndarray\n (M,F) array of radii at which taking derivative\n bw : ndarray\n (M,F) array of bandwidth in MHz\n gamma : ndarray\n (M,) array of spectral efficiencies\n ds : DemandSystem\n contains all the data about our markets\n xis : ndarray \n (M,J) matrix of vertical demand components\n theta : ndarray\n (K,) array of demand parameters\n pop : ndarray\n (M,) array of market populations\n market_size : ndarray\n (M,) array of geographic size of markets in km^2\n c_u : ndarray\n (J,) array of per-user costs\n symmetric : bool\n specifies whether the equilibrium solving for is symmetric (quicker to compute)\n impute_MVNO : dict\n dict with\n 'impute' : bool (whether to impute the Qs for MVNO)\n 'firms_share' (optional) : ndarray ((F-1,) array of whether firms share qualities with MVNOs)\n 'include' (optional) : bool (whether to include MVNO Q in returned Q)\n q_0 : ndarray\n (M,F) array of initial guess of q\n eps : float\n size of perturbation to measure derivative\n\n Returns\n -------\n R_deriv : ndarray\n (M,F) array of firms' infrastructure FOCs for operating income\n \"\"\"\n\n # Create high and low radius arrays\n R_high = R + eps\n R_low = R - eps\n\n # Calculate channel capacities at R (this will speed up later calculations)\n cc_R = np.zeros(R.shape)\n cc_high = np.zeros(R.shape)\n cc_low = np.zeros(R.shape)\n for m in range(R.shape[0]):\n for f in range(R.shape[1]):\n cc_R[m,f] = infr.rho_C_hex(bw[m,f], R[m,f], gamma[m])\n cc_high[m,f] = infr.rho_C_hex(bw[m,f], R_high[m,f], gamma[m])\n cc_low[m,f] = infr.rho_C_hex(bw[m,f], R_low[m,f], gamma[m])\n\n # Calculate number of stations with given radius\n num_stations_R = infr.num_stations(R, market_size[:,np.newaxis])\n num_stations_high = infr.num_stations(R_high, market_size[:,np.newaxis])\n num_stations_low = infr.num_stations(R_low, market_size[:,np.newaxis])\n\n # Create information about firms and markets\n firms = np.unique(ds.firms)\n M = R.shape[0]\n F = firms.shape[0]\n if impute_MVNO['impute']: # if we impute MVNO quality (o/w there are no MVNOs)\n firms = firms[:-1] # don't care about the MVNO firm in ds.firms\n F -= 1\n if impute_MVNO['include']: # if MVNO is needed for calculating shares\n F += 1\n \n # Expand variables if symmetric\n if symmetric:\n num_firms = firms.shape[0]\n cc_R = np.tile(cc_R, (1,num_firms))\n cc_high = np.tile(cc_high, (1,num_firms))\n cc_low = np.tile(cc_low, (1,num_firms))\n \n num_stations_R = np.tile(num_stations_R, (1,num_firms))\n num_stations_high = np.tile(num_stations_high, (1,num_firms))\n num_stations_low = np.tile(num_stations_low, (1,num_firms))\n \n xis = np.tile(xis, (1,num_firms))\n c_u = np.tile(c_u, (num_firms,))\n\n # Derivative for each firm\n R_deriv = np.zeros(R.shape)\n select_firms = np.ones(firms.shape[0], dtype=bool)\n if symmetric:\n select_firms[1:] = False\n for f, firm in enumerate(firms[select_firms]):\n # 
Create arrays for channel capacities with high and low R\n cc_high_f = np.copy(cc_R)\n cc_high_f[:,f] = cc_high[:,f]\n cc_low_f = np.copy(cc_R)\n cc_low_f[:,f] = cc_low[:,f]\n\n # Create arrays for number of stations\n stations_high = np.copy(num_stations_R)\n stations_high[:,f] = num_stations_high[:,f]\n stations_low = np.copy(num_stations_R)\n stations_low[:,f] = num_stations_low[:,f]\n\n # Calculate download speeds\n q_high = np.zeros((M,F))\n q_low = np.zeros((M,F))\n ds_temp = copy.deepcopy(ds)\n for m in range(M):\n select_m = np.arange(M) == m\n ds_temp.data = ds.data[select_m,:,:]\n q_high[m,:] = transeq.q(cc_high_f[select_m,:], ds_temp, xis[select_m,:], theta, stations_high[select_m,:], pop[select_m], impute_MVNO=impute_MVNO, q_0=q_0)[0,:] # 0 b/c we're doing this market-by-market\n q_low[m,:] = transeq.q(cc_low_f[select_m,:], ds_temp, xis[select_m,:], theta, stations_low[select_m,:], pop[select_m], impute_MVNO=impute_MVNO, q_0=q_0)[0,:] # 0 b/c we're doing this market-by-market\n\n # Update download speeds in characteristics\n ds_high = copy.deepcopy(ds)\n ds_low = copy.deepcopy(ds)\n qidx = ds.chars.index(ds.qname)\n firm_counts = np.unique(ds.firms, return_counts=True)[1]\n ds_high.data[:,:,qidx] = np.repeat(q_high, firm_counts, axis=1) # only works b/c products in order\n ds_low.data[:,:,qidx] = np.repeat(q_low, firm_counts, axis=1) # only works b/c products in order\n\n # Calculate demand for each product\n s_high = blp.s_mj(ds_high, theta, ds_high.data, xis) * pop[:,np.newaxis]\n s_low = blp.s_mj(ds_low, theta, ds_low.data, xis) * pop[:,np.newaxis]\n\n # Calculate profits\n pidx = ds.chars.index(ds.pname)\n pi_high = s_high * (ds.data[:,:,pidx] - c_u[np.newaxis,:])\n pi_low = s_low * (ds.data[:,:,pidx] - c_u[np.newaxis,:])\n\n # Sum up profits to firm level\n pi_high = np.sum(pi_high[:,ds.firms == firm], axis=1)\n pi_low = np.sum(pi_low[:,ds.firms == firm], axis=1)\n\n # Calculate derivative for fth radius\n R_deriv[:,f] = (pi_high - pi_low) / (2. 
* eps)\n\n # Return derivative\n return R_deriv\n\ndef R_foc(R, bw, gamma, ds, xis, theta, pop, market_size, c_u, c_R, symmetric=False, impute_MVNO={'impute': False}, q_0=None, eps=0.01):\n \"\"\"\n Return the derivative of the overall profit function with respect to cell radius, based on two-sided numerical derivative\n \n Parameters\n ----------\n R : ndarray\n (M,F) array of radii at which taking derivative\n bw : ndarray\n (M,F) array of bandwidth in MHz\n gamma : ndarray\n (M,) array of spectral efficiencies\n ds : DemandSystem\n contains all the data about our markets\n xis : ndarray \n (M,J) matrix of vertical demand components\n theta : ndarray\n (K,) array of demand parameters\n pop : ndarray\n (M,) array of market populations\n market_size : ndarray\n (M,) array of geographic size of markets in km^2\n c_u : ndarray\n (J,) array of per-user costs\n c_R : ndarray\n (M,F) array of base station fixed costs\n symmetric : bool\n specifies whether the equilibrium solving for is symmetric (quicker to compute)\n impute_MVNO : dict\n dict with\n 'impute' : bool (whether to impute the Qs for MVNO)\n 'firms_share' (optional) : ndarray ((F-1,) array of whether firms share qualities with MVNOs)\n 'include' (optional) : bool (whether to include MVNO Q in returned Q)\n q_0 : ndarray\n (M,F) array of initial guess of q\n eps : float\n size of perturbation to measure derivative\n\n Returns\n -------\n foc : ndarray\n (M,F) array of firm-market infrastructure FOCs\n \"\"\"\n\n # Solve for derivatives\n MR = pi_deriv_R(R, bw, gamma, ds, xis, theta, pop, market_size, c_u, symmetric=symmetric, impute_MVNO=impute_MVNO, q_0=q_0, eps=eps)\n stations_deriv = infr.num_stations_deriv(R, market_size[:,np.newaxis])\n\n # Solve for FOCs\n foc = MR - stations_deriv * c_R\n\n return foc\n\ndef combine_focs(R, p, bw, gamma, ds, xis, theta, pop, market_size, c_u, c_R, symmetric=False, print_msg=False, impute_MVNO={'impute': False}, q_0=None, eps_R=0.01, eps_p=0.01):\n \"\"\"\n Return a combined array of FOCs that characterize an equilibrium, based on two-sided numerical derivative\n \n Parameters\n ----------\n R : ndarray\n (M,F) array of firm-market radii\n p : ndarray\n (J,) array of prices\n bw : ndarray\n (M,F) array of bandwidth in MHz\n gamma : ndarray\n (M,) array of spectral efficiencies\n ds : DemandSystem\n contains all the data about our markets\n xis : ndarray \n (M,J) matrix of vertical demand components\n theta : ndarray\n (K,) array of demand parameters\n pop : ndarray\n (M,) array of market populations\n market_size : ndarray\n (M,) array of geographic size of markets in km^2\n c_u : ndarray\n (J,) array of per-user costs\n c_R : ndarray\n (M,F) array of per-tower costs\n symmetric : bool\n specifies whether the equilibrium solving for is symmetric (quicker to compute)\n print_msg : bool\n determines whether or not to print inputs and output\n impute_MVNO : dict\n dict with\n 'impute' : bool (whether to impute the Qs for MVNO)\n 'firms_share' (optional) : ndarray ((F-1,) array of whether firms share qualities with MVNOs)\n 'include' (optional) : bool (whether to include MVNO Q in returned Q)\n q_0 : ndarray\n (M,F) array of initial guess of q\n eps_R : float\n size of perturbation to measure radius derivative\n eps_p : float\n size of perturbation to measure price derivative\n\n Returns\n -------\n foc : ndarray\n (M*F + J,) flattened array of FOCs (infrastructure then price)\n \"\"\"\n\n if print_msg:\n print(f\"R: {R}\")\n print(f\"p: {p}\")\n \n F = np.unique(ds.firms).shape[0]\n \n # Update 
price\n pidx = ds.chars.index(ds.pname)\n ds.data[:,:,pidx] = np.tile(p[np.newaxis,:], (1,F if symmetric else 1))\n\n # Solve for the infrastructure FOCs\n infr_FOCs = R_foc(R, bw, gamma, ds, xis, theta, pop, market_size, c_u, c_R, symmetric=symmetric, impute_MVNO=impute_MVNO, q_0=q_0, eps=eps_R)\n\n # Solve for the channel capacity implied by radius R - NOTE: parallelize this for large number of markets\n cc = np.zeros(R.shape)\n for m in range(R.shape[0]):\n for f in range(R.shape[1]):\n cc[m,f] = infr.rho_C_hex(bw[m,f], R[m,f], gamma[m])\n\n # Solve for the number of stations implied by radius R\n stations = infr.num_stations(R, market_size)\n\n # Solve for the pricing FOCs\n price_FOCs = pe.p_foc(p, c_u, cc, ds, xis, theta, stations, pop, symmetric=symmetric, impute_MVNO=impute_MVNO, q_0=q_0, eps=eps_p)\n\n # Combine FOCs into flattened array\n foc = np.concatenate((np.reshape(infr_FOCs, (-1,)), price_FOCs))\n\n if print_msg:\n #qs = transeq.q(cc, ds, xis, theta, stations, pop, impute_MVNO=impute_MVNO, q_0=q_0)\n# print(f\"Ex: {de.E_x(ds, theta, ds.data, np.tile(qs, (R.shape[1])), ds.data[:,:,ds.chars.index(ds.dlimname)], blp.ycX(ds, theta, ds.data))[0,:,:]}\")\n print(f\"s_j: {np.mean(blp.s_mj(ds, theta, ds.data, np.tile(xis, (1,F)) if symmetric else xis), axis=0)}\")\n #print(f\"q: {np.mean(qs, axis=0)}\")\n #print(f\"E[x*]: {np.mean(de.E_x(ds, theta, ds.data, np.tile(qs, (R.shape[1])), ds.data[:,:,ds.chars.index(ds.dlimname)], blp.ycX(ds, theta, ds.data)), axis=0)}\")\n #print(f\"E[u(x*)]: {np.mean(de.E_u(ds, theta, ds.data, np.tile(qs, (R.shape[1])), ds.data[:,:,ds.chars.index(ds.dlimname)], blp.ycX(ds, theta, ds.data)), axis=0)}\")\n print(f\"foc: {foc}\")\n\n return foc\n\ndef reshape_inputs(foc_shape, R_shape, p_shape, symmetric=False):\n \"\"\"\n Return reshaped array of FOCs\n \n Parameters\n ----------\n foc_shape : ndarray\n (M*F + J,) flattened array of FOCs (infrastructure then price)\n R_shape : tuple\n size of infrastructure array\n p_shape : tuple\n size of price array\n symmetric : bool\n specifies whether the equilibrium solving for is symmetric (quicker to compute)\n\n Returns\n -------\n R : ndarray\n (M,F) array of infrastructure\n p : ndarray\n (J,) array of prices\n \"\"\"\n \n if symmetric:\n R = np.reshape(foc_shape[:R_shape[0]], (R_shape[0],1))\n p = foc_shape[R_shape[0]:]\n else:\n R = np.reshape(foc_shape[:np.prod(R_shape)], R_shape)\n p = foc_shape[np.prod(R_shape):]\n\n return R, p\n\ndef infrastructure_eqm(bw, gamma, ds, xis, theta, pop, market_size, c_u, c_R, R_0, p_0, symmetric=False, print_msg=False, impute_MVNO={'impute': False}, q_0=None, eps_R=0.01, eps_p=0.01, factor=100.):\n \"\"\"\n Return the derivative of the profit function with respect to cell radius, based on two-sided numerical derivative\n \n Parameters\n ----------\n bw : ndarray\n (M,F) or (M,) array of bandwidth in MHz\n gamma : ndarray\n (M,) array of spectral efficiencies\n ds : DemandSystem\n contains all the data about our markets\n xis : ndarray \n (M,J*F) or (M,J) matrix of vertical demand components\n theta : ndarray\n (K,) array of demand parameters\n pop : ndarray\n (M,) array of market populations\n market_size : ndarray\n (M,) array of geographic size of markets in km^2\n c_u : ndarray\n (J*F,) or (J,) array of per-user costs\n c_R : ndarray\n (M,F) or (M,) array of per-tower costs\n R_0 : ndarray\n (M,F) or (M,1) array of initial guess of firm-market radii\n p_0 : ndarray\n (J*F,) or (J,) array of initial guess of prices\n symmetric : bool\n specifies whether the equilibrium 
solving for is symmetric (quicker to compute)\n print_msg : bool\n determines whether or not to print inputs and output of root solver\n impute_MVNO : dict\n dict with\n 'impute' : bool (whether to impute the Qs for MVNO)\n 'firms_share' (optional) : ndarray ((F-1,) array of whether firms share qualities with MVNOs)\n 'include' (optional) : bool (whether to include MVNO Q in returned Q)\n q_0 : ndarray\n (M,F) array of initial guess of q\n eps_R : float\n size of perturbation to measure radius derivative\n eps_p : float\n size of perturbation to measure price derivative\n factor : float\n size of the factor for fsolve, must be in interval [0.1, 100]\n\n Returns\n -------\n R_star : ndarray\n (M,F) array of firms' optimal infrastrucuture choice\n p_star : ndarray\n (J,) array of firms' optimal prices\n q_star : ndarray\n (M,F) array of qualities that result from prices and infrastructure\n \"\"\"\n\n # Determine sizes of infrastructure and price arrays\n R_shape = (ds.data.shape[0],np.unique(ds.firms).shape[0])\n p_shape = (ds.data.shape[1],)\n\n # Define FOC \n eqm_foc = lambda x: combine_focs(reshape_inputs(x, R_shape, p_shape, symmetric=symmetric)[0], reshape_inputs(x, R_shape, p_shape, symmetric=symmetric)[1], bw, gamma, ds, xis, theta, pop, market_size, c_u, c_R, symmetric, print_msg=print_msg, impute_MVNO=impute_MVNO, q_0=q_0, eps_p=eps_p, eps_R=eps_R)\n\n # Solve for the equilibrium\n Rp_star, infodict, ier, msg = fsolve(eqm_foc, np.concatenate((np.reshape(R_0, (-1,)), p_0)), full_output=True, factor=factor)\n R_star, p_star = reshape_inputs(Rp_star, R_shape, p_shape, symmetric=symmetric)\n if symmetric:\n R_star = np.tile(R_star, (1,R_shape[1]))\n p_star = np.tile(p_star, (R_shape[1],))\n bw = np.tile(bw, (1,R_shape[1]))\n xis = np.tile(xis, (1,R_shape[1]))\n \n # Print error message if failed to converge\n if ier != 1:\n print(f\"Equilibrium computation failed for following reason: {msg}. 
Additional information: {infodict}\")\n\n # Calculate implied channel capacities\n cc = np.zeros(R_shape)\n for m in range(R_shape[0]):\n for f in range(R_shape[1]):\n cc[m,f] = infr.rho_C_hex(bw[m,f], R_star[m,f], gamma[m])\n\n # Calculate implied stations\n stations = infr.num_stations(R_star, market_size)\n\n # Calculate implied download speeds\n q_star = np.zeros(R_shape)\n M = R_shape[0]\n ds_temp = copy.deepcopy(ds)\n pidx = pidx = ds.chars.index(ds.pname)\n ds_temp.data[:,:,pidx] = p_star\n for m in range(M):\n select_m = np.arange(M) == m\n ds_temp.data = ds.data[select_m,:,:]\n q_star[m,:] = transeq.q(cc[select_m,:], ds_temp, xis[select_m,:], theta, stations[select_m,:], pop[select_m], impute_MVNO=impute_MVNO, q_0=q_0)[0,:] # 0 b/c we're doing this market-by-market\n \n # Add MVNOs if imputing MVNO\n if impute_MVNO['impute']:\n if impute_MVNO['include']:\n q_star = np.concatenate((q_star, transeq.q_MVNO(q_star, impute_MVNO['firms_share'])[:,np.newaxis]), axis=1)\n\n return R_star, p_star, q_star\n\ndef bw_foc(bw, gamma, ds, xis, theta, pop, market_size, c_u, c_R, R_0, p_0, symmetric=False, print_msg=False, impute_MVNO={'impute': False}, q_0=None, eps_R=0.01, eps_p=0.01, eps_bw=0.01, factor=100., include_logit_shock=True):\n \"\"\"\n Return the derivative of the profit function with respect to cell radius, based on two-sided numerical derivative\n \n Parameters\n ----------\n bw : ndarray\n (M,F) or (M,) array of bandwidth in MHz\n gamma : ndarray\n (M,) array of spectral efficiencies\n ds : DemandSystem\n contains all the data about our markets\n xis : ndarray \n (M,J*F) or (M,J) matrix of vertical demand components\n theta : ndarray\n (K,) array of demand parameters\n pop : ndarray\n (M,) array of market populations\n market_size : ndarray\n (M,) array of geographic size of markets in km^2\n c_u : ndarray\n (J*F,) or (J,) array of per-user costs\n c_R : ndarray\n (M,F) or (M,) array of per-tower costs\n R_0 : ndarray\n (M,F) or (M,1) array of initial guess for radii\n p_0 : ndarray\n (J*F,) or (J,) array of initial guess for prices\n symmetric : bool\n specifies whether the equilibrium solving for is symmetric (quicker to compute)\n print_msg : bool\n determines whether or not to print inputs and output of root solver\n impute_MVNO : dict\n dict with\n 'impute' : bool (whether to impute the Qs for MVNO)\n 'firms_share' (optional) : ndarray ((F-1,) array of whether firms share qualities with MVNOs)\n 'include' (optional) : bool (whether to include MVNO Q in returned Q)\n q_0 : ndarray\n (M,F) array of initial guess of q\n eps_R : float\n size of perturbation to measure radius derivative\n eps_p : float\n size of perturbation to measure price derivative\n eps_bw : float\n size of perturbation to measure bandwidth derivative\n factor : float\n size of the factor for fsolve, must be in interval [0.1, 100]\n include_logit_shock : bool\n determine whether or not to include logit shocks in the consumer surplus calculation\n\n Returns\n -------\n partial_Pif_partial_bf : ndarray\n (M,F) array of derivative \n partial_Pif_partial_b : ndarray\n (M,F) array of firms' optimal prices\n partial_CS_partial_b : float\n qualities that result from prices and infrastructure\n \"\"\"\n \n # Add indices\n pidx = ds.chars.index(ds.pname)\n qidx = ds.chars.index(ds.qname)\n \n # Create high and low bandwidth arrays\n bw_high = bw + eps_bw\n bw_low = bw - eps_bw\n \n # Determine derivative of increasing a firm's bandwidth on its profits\n partial_Pif_partial_bf = np.zeros(R_0.shape)\n firms, firm_counts 
= np.unique(ds.firms, return_counts=True)\n expand_firms = lambda x: np.tile(x, (1,firms.shape[0] if symmetric else 1))\n expand_firms_1d = lambda x: np.tile(x, (firms.shape[0] if symmetric else 1,))\n for f in range(partial_Pif_partial_bf.shape[1]):\n # Create bandwidth arrays\n bw_high_f = expand_firms(bw)\n bw_high_f[:,f] = bw_high[:,f]\n bw_low_f = expand_firms(bw)\n bw_low_f[:,f] = bw_low[:,f]\n \n # Determine equilibrium for high and low bandwidths\n R_stars_high, p_stars_high, q_stars_high = infrastructure_eqm(bw_high_f, gamma, ds, expand_firms(xis), theta, pop, market_size, expand_firms_1d(c_u), expand_firms(c_R), expand_firms(R_0), expand_firms_1d(p_0), symmetric=False, print_msg=print_msg, impute_MVNO=impute_MVNO, q_0=q_0, eps_R=eps_R, eps_p=eps_p, factor=factor)\n R_stars_low, p_stars_low, q_stars_low = infrastructure_eqm(bw_low_f, gamma, ds, expand_firms(xis), theta, pop, market_size, expand_firms_1d(c_u), expand_firms(c_R), expand_firms(R_0), expand_firms_1d(p_0), symmetric=False, print_msg=print_msg, impute_MVNO=impute_MVNO, q_0=q_0, eps_R=eps_R, eps_p=eps_p, factor=factor)\n \n # Add equilibrium results to DemandSystem\n ds_high = copy.deepcopy(ds)\n ds_high.data[:,:,pidx] = p_stars_high[np.newaxis,:]\n ds_high.data[:,:,qidx] = np.repeat(q_stars_high, firm_counts, axis=1) # only works b/c products in order\n ds_low = copy.deepcopy(ds)\n ds_low.data[:,:,pidx] = p_stars_low[np.newaxis,:]\n ds_low.data[:,:,qidx] = np.repeat(q_stars_low, firm_counts, axis=1) # only works b/c products in order\n \n # Determine impact on per-user profit\n shares_high = blp.s_mj(ds_high, theta, ds_high.data, expand_firms(xis)) * pop[:,np.newaxis]\n profits_high = np.sum((shares_high * (p_stars_high - expand_firms_1d(c_u))[np.newaxis,:])[:,ds.firms == firms[f]], axis=1)\n stations_cost_high = (infr.num_stations(R_stars_high, market_size[:,np.newaxis]) * expand_firms(c_R))[:,f]\n Pif_high = (profits_high - stations_cost_high) / pop\n shares_low = blp.s_mj(ds_low, theta, ds_low.data, expand_firms(xis)) * pop[:,np.newaxis]\n profits_low = np.sum((shares_low * (p_stars_low - expand_firms_1d(c_u))[np.newaxis,:])[:,ds.firms == firms[f]], axis=1)\n stations_cost_low = (infr.num_stations(R_stars_low, market_size[:,np.newaxis]) * expand_firms(c_R))[:,f]\n Pif_low = (profits_low - stations_cost_low) / pop\n \n # Determine partial derivative\n partial_Pif_partial_bf[:,f] = (Pif_high - Pif_low) / (2. 
* eps_bw)\n \n # Determine derivative of increasing all firms bandwidth on an individual firm's profits\n partial_Pif_partial_b = np.zeros(R_0.shape)\n # Determine equilibrium for high and low bandwidths\n R_stars_high, p_stars_high, q_stars_high = infrastructure_eqm(bw_high, gamma, ds, xis, theta, pop, market_size, c_u, c_R, R_0, p_0, symmetric=symmetric, print_msg=print_msg, impute_MVNO=impute_MVNO, q_0=q_0, eps_R=eps_R, eps_p=eps_p, factor=factor)\n R_stars_low, p_stars_low, q_stars_low = infrastructure_eqm(bw_low, gamma, ds, xis, theta, pop, market_size, c_u, c_R, R_0, p_0, symmetric=symmetric, print_msg=print_msg, impute_MVNO=impute_MVNO, q_0=q_0, eps_R=eps_R, eps_p=eps_p, factor=factor)\n\n # Add equilibrium results to DemandSystem\n ds_high = copy.deepcopy(ds)\n ds_high.data[:,:,pidx] = p_stars_high[np.newaxis,:]\n ds_high.data[:,:,qidx] = np.repeat(q_stars_high, firm_counts, axis=1) # only works b/c products in order\n ds_low = copy.deepcopy(ds)\n ds_low.data[:,:,pidx] = p_stars_low[np.newaxis,:]\n ds_low.data[:,:,qidx] = np.repeat(q_stars_low, firm_counts, axis=1) # only works b/c products in order\n\n # Determine impact on per-user profit\n for f in range(partial_Pif_partial_b.shape[1]):\n shares_high = blp.s_mj(ds_high, theta, ds_high.data, expand_firms(xis)) * pop[:,np.newaxis]\n profits_high = np.sum((shares_high * (p_stars_high - expand_firms_1d(c_u))[np.newaxis,:])[:,ds.firms == firms[f]], axis=1)\n stations_cost_high = (infr.num_stations(R_stars_high, market_size[:,np.newaxis]) * expand_firms(c_R))[:,f]\n Pif_high = (profits_high - stations_cost_high) / pop\n shares_low = blp.s_mj(ds_low, theta, ds_low.data, expand_firms(xis)) * pop[:,np.newaxis]\n profits_low = np.sum((shares_low * (p_stars_low - expand_firms_1d(c_u))[np.newaxis,:])[:,ds.firms == firms[f]], axis=1)\n stations_cost_low = (infr.num_stations(R_stars_low, market_size[:,np.newaxis]) * expand_firms(c_R))[:,f]\n Pif_low = (profits_low - stations_cost_low) / pop\n \n # Determine partial derivative\n partial_Pif_partial_b[:,f] = (Pif_high - Pif_low) / (2. * eps_bw)\n\n # Determine impact on consumer surplus\n CS_high = np.mean(welfare.consumer_surplus(ds_high, expand_firms(xis), theta, include_logit_shock=include_logit_shock), axis=1)\n CS_low = np.mean(welfare.consumer_surplus(ds_low, expand_firms(xis), theta, include_logit_shock=include_logit_shock), axis=1)\n\n # Determine partial derivative\n partial_CS_partial_b = (CS_high - CS_low) / (2. * eps_bw)\n \n return partial_Pif_partial_bf, partial_Pif_partial_b, partial_CS_partial_b\n"
]
| [
[
"numpy.unique",
"numpy.reshape",
"numpy.arange",
"numpy.tile",
"numpy.ones",
"numpy.copy",
"numpy.prod",
"numpy.repeat",
"numpy.zeros",
"numpy.sum"
]
]
|
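
The file in the row above computes every first-order condition with the same two-sided (central) difference pattern: perturb one argument by +/- eps and difference the resulting profits. A self-contained sketch of that pattern, with a toy concave profit function standing in for the firm's operating income:

    import numpy as np

    def two_sided_deriv(f, x, eps=0.01):
        """Central difference: O(eps**2) error, one f-call on each side."""
        return (f(x + eps) - f(x - eps)) / (2. * eps)

    toy_profit = lambda R: np.log(R) - 0.1 * R   # hypothetical profit in radius R
    print(two_sided_deriv(toy_profit, 2.))       # ~ 1/2 - 0.1 = 0.4
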
hiropppe/ncm | [
"8861da3a3575c891fc5c429ae52176a3a64d4478"
]
| [
"my_seq2seq.py"
]
| [
"\n\n\"\"\"Library for creating sequence-to-sequence models in TensorFlow.\nSequence-to-sequence recurrent neural networks can learn complex functions\nthat map input sequences to output sequences. These models yield very good\nresults on a number of tasks, such as speech recognition, parsing, machine\ntranslation, or even constructing automated replies to emails.\n* Full sequence-to-sequence models.\n - embedding_rnn_seq2seq: The basic model with input embedding.\n - embedding_attention_seq2seq: Advanced model with input embedding and\n the neural attention mechanism; recommended for complex tasks.\n* Decoders\n - rnn_decoder: The basic decoder based on a pure RNN.\n - attention_decoder: A decoder that uses the attention mechanism.\n* Losses.\n - sequence_loss: Loss for a sequence model returning average log-perplexity.\n - sequence_loss_by_example: As above, but not averaging over all examples.\n* model_with_buckets: A convenience function to create models with bucketing\n (see the tutorial above for an explanation of why and how to use it).\n\"\"\"\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nimport tensorflow as tf\n\ntry:\n linear = tf.nn.rnn_cell.linear\nexcept:\n from tensorflow.python.ops.rnn_cell_impl import _linear as linear\n\n\n\n \ndef _extract_argmax_and_embed(embedding, output_projection=None,\n update_embedding=True):\n \"\"\"Get a loop_function that extracts the previous symbol and embeds it.\n Args:\n embedding: embedding tensor for symbols.\n output_projection: None or a pair (W, B). If provided, each fed previous\n output will first be multiplied by W and added B.\n update_embedding: Boolean; if False, the gradients will not propagate\n through the embeddings.\n Returns:\n A loop function.\n \"\"\"\n def loop_function(prev, _):\n if output_projection is not None:\n prev = tf.nn.xw_plus_b(\n prev, output_projection[0], output_projection[1])\n prev_symbol = tf.argmax(prev, 1)\n # Note that gradients will not propagate through the second parameter of\n # embedding_lookup.\n emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)\n if not update_embedding:\n emb_prev = tf.stop_gradient(emb_prev)\n return emb_prev\n return loop_function\n\ndef _extract_beam_search(embedding, beam_size, num_symbols, embedding_size, output_projection=None,\n update_embedding=True):\n \"\"\"Get a loop_function that extracts the previous symbol and embeds it.\n Args:\n embedding: embedding tensor for symbols.\n output_projection: None or a pair (W, B). 
If provided, each fed previous\n output will first be multiplied by W and added B.\n update_embedding: Boolean; if False, the gradients will not propagate\n through the embeddings.\n Returns:\n A loop function.\n \"\"\"\n def loop_function(prev, i, log_beam_probs, beam_path, beam_symbols):\n if output_projection is not None:\n prev = tf.nn.xw_plus_b(\n prev, output_projection[0], output_projection[1])\n # prev= prev.get_shape().with_rank(2)[1]\n\n probs = tf.log(tf.nn.softmax(prev))\n\n if i > 1:\n\n probs = tf.reshape(probs + log_beam_probs[-1],\n [-1, beam_size * num_symbols])\n\n best_probs, indices = tf.nn.top_k(probs, beam_size)\n indices = tf.stop_gradient(tf.squeeze(tf.reshape(indices, [-1, 1])))\n best_probs = tf.stop_gradient(tf.reshape(best_probs, [-1, 1]))\n\n symbols = indices % num_symbols # Which word in vocabulary.\n beam_parent = indices // num_symbols # Which hypothesis it came from.\n\n\n beam_symbols.append(symbols)\n beam_path.append(beam_parent)\n log_beam_probs.append(best_probs)\n\n # Note that gradients will not propagate through the second parameter of\n # embedding_lookup.\n\n emb_prev = tf.nn.embedding_lookup(embedding, symbols)\n emb_prev = tf.reshape(emb_prev,[beam_size,embedding_size])\n if not update_embedding:\n emb_prev = tf.stop_gradient(emb_prev)\n return emb_prev\n return loop_function\n\n\ndef rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,\n scope=None):\n \"\"\"RNN decoder for the sequence-to-sequence model.\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor with shape [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to the i-th output\n in order to generate the i+1-st input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http://arxiv.org/abs/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n scope: VariableScope for the created subgraph; defaults to \"rnn_decoder\".\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing generated outputs.\n state: The state of each cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n (Note that in some cases, like basic RNN cell or GRU cell, outputs and\n states can be the same. 
They are different for LSTM cells though.)\n \"\"\"\n with tf.variable_scope(scope or \"rnn_decoder\"):\n state = initial_state\n outputs = []\n prev = None\n for i, inp in enumerate(decoder_inputs):\n if loop_function is not None and prev is not None:\n with tf.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i)\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n output, state = cell(inp, state)\n\n outputs.append(output)\n if loop_function is not None:\n prev = output\n return outputs, state\n\ndef beam_rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,\n scope=None,output_projection=None, beam_size=10):\n \"\"\"RNN decoder for the sequence-to-sequence model.\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor with shape [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to the i-th output\n in order to generate the i+1-st input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http://arxiv.org/abs/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n scope: VariableScope for the created subgraph; defaults to \"rnn_decoder\".\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing generated outputs.\n state: The state of each cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n (Note that in some cases, like basic RNN cell or GRU cell, outputs and\n states can be the same. 
They are different for LSTM cells though.)\n \"\"\"\n with tf.variable_scope(scope or \"rnn_decoder\"):\n state = initial_state\n outputs = []\n prev = None\n log_beam_probs, beam_path, beam_symbols = [],[],[]\n state_size = int(initial_state.get_shape().with_rank(2)[1])\n\n for i, inp in enumerate(decoder_inputs):\n if loop_function is not None and prev is not None:\n with tf.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i,log_beam_probs, beam_path, beam_symbols)\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n\n input_size = inp.get_shape().with_rank(2)[1]\n print(input_size)\n x = inp\n output, state = cell(x, state)\n\n if loop_function is not None:\n prev = output\n if i ==0:\n states =[]\n for kk in range(beam_size):\n states.append(state)\n state = tf.reshape(tf.concat(axis=0, values=states), [-1, state_size])\n\n outputs.append(tf.argmax(tf.nn.xw_plus_b(\n output, output_projection[0], output_projection[1]), axis=1))\n return outputs, state, tf.reshape(tf.concat(axis=0, values=beam_path),[-1,beam_size]), tf.reshape(tf.concat(axis=0, values=beam_symbols),[-1,beam_size])\n\n\ndef embedding_rnn_decoder(decoder_inputs, initial_state, cell, num_symbols,\n embedding_size, output_projection=None,\n feed_previous=False,\n update_embedding_for_previous=True, scope=None, beam_search=True, beam_size=10 ):\n \"\"\"RNN decoder with embedding and a pure-decoding option.\n Args:\n decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).\n initial_state: 2D Tensor [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function.\n num_symbols: Integer, how many symbols come into the embedding.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_symbols] and B has\n shape [num_symbols]; if provided and feed_previous=True, each fed\n previous output will first be multiplied by W and added B.\n feed_previous: Boolean; if True, only the first of decoder_inputs will be\n used (the \"GO\" symbol), and all other decoder inputs will be generated by:\n next = embedding_lookup(embedding, argmax(previous_output)),\n In effect, this implements a greedy decoder. It can also be used\n during training to emulate http://arxiv.org/abs/1506.03099.\n If False, decoder_inputs are used as given (the standard decoder case).\n update_embedding_for_previous: Boolean; if False and feed_previous=True,\n only the embedding for the first symbol of decoder_inputs (the \"GO\"\n symbol) will be updated by back propagation. Embeddings for the symbols\n generated from the decoder itself remain unchanged. This parameter has\n no effect if feed_previous=False.\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_rnn_decoder\".\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing the generated outputs.\n state: The state of each decoder cell in each time-step. 
This is a list\n with length len(decoder_inputs) -- one item for each time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n Raises:\n ValueError: When output_projection has the wrong shape.\n \"\"\"\n if output_projection is not None:\n proj_weights = tf.convert_to_tensor(output_projection[0],\n dtype=tf.float32)\n proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])\n proj_biases = tf.convert_to_tensor(\n output_projection[1], dtype=tf.float32)\n proj_biases.get_shape().assert_is_compatible_with([num_symbols])\n\n with tf.variable_scope(scope or \"embedding_rnn_decoder\"):\n with tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\"embedding\",\n [num_symbols, embedding_size])\n\n if beam_search:\n loop_function = _extract_beam_search(\n embedding, beam_size,num_symbols,embedding_size, output_projection,\n update_embedding_for_previous)\n else:\n loop_function = _extract_argmax_and_embed(\n embedding, output_projection,\n update_embedding_for_previous) if feed_previous else None\n\n emb_inp = [\n tf.nn.embedding_lookup(embedding, i) for i in decoder_inputs]\n\n\n if beam_search:\n return beam_rnn_decoder(emb_inp, initial_state, cell,\n loop_function=loop_function,output_projection=output_projection, beam_size=beam_size)\n\n else:\n return rnn_decoder(emb_inp, initial_state, cell,\n loop_function=loop_function)\n\n\n\ndef embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,\n num_encoder_symbols, num_decoder_symbols,\n embedding_size, output_projection=None,\n feed_previous=False, dtype=tf.float32,\n scope=None, beam_search=True, beam_size=10):\n \"\"\"Embedding RNN sequence-to-sequence model.\n This model first embeds encoder_inputs by a newly created embedding (of shape\n [num_encoder_symbols x input_size]). Then it runs an RNN to encode\n embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs\n by another newly created embedding (of shape [num_decoder_symbols x\n input_size]). Then it runs RNN decoder, initialized with the last\n encoder state, on embedded decoder_inputs.\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n num_encoder_symbols: Integer; number of symbols on the encoder side.\n num_decoder_symbols: Integer; number of symbols on the decoder side.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_decoder_symbols] and B has\n shape [num_decoder_symbols]; if provided and feed_previous=True, each\n fed previous output will first be multiplied by W and added B.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first\n of decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype of the initial state for both the encoder and encoder\n rnn cells (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_rnn_seq2seq\"\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x num_decoder_symbols] containing the generated\n outputs.\n state: The state of each decoder cell in each time-step. 
This is a list\n with length len(decoder_inputs) -- one item for each time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n \"\"\"\n with tf.variable_scope(scope or \"embedding_rnn_seq2seq\"):\n # Encoder.\n encoder_cell = tf.contrib.rnn.EmbeddingWrapper(\n cell, embedding_classes=num_encoder_symbols,\n embedding_size=embedding_size)\n _, encoder_state = tf.contrib.rnn.static_rnn(encoder_cell, encoder_inputs, dtype=dtype)\n\n # Decoder.\n if output_projection is None:\n cell = tf.contrib.rnn.OutputProjectionWrapper(cell, num_decoder_symbols)\n\n\n return embedding_rnn_decoder(\n decoder_inputs, encoder_state, cell, num_decoder_symbols,\n embedding_size, output_projection=output_projection,\n feed_previous=feed_previous, beam_search=beam_search, beam_size=beam_size)\n\n\n\n\n\ndef attention_decoder(decoder_inputs, initial_state, attention_states, cell,\n output_size=None, num_heads=1, loop_function=None,\n dtype=tf.float32, scope=None,\n initial_state_attention=False):\n \"\"\"RNN decoder with attention for the sequence-to-sequence model.\n In this context \"attention\" means that, during decoding, the RNN can look up\n information in the additional tensor attention_states, and it does this by\n focusing on a few entries from the tensor. This model has proven to yield\n especially good results in a number of sequence-to-sequence tasks. This\n implementation is based on http://arxiv.org/abs/1412.7449 (see below for\n details). It is recommended for complex sequence-to-sequence tasks.\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor [batch_size x cell.state_size].\n attention_states: 3D Tensor [batch_size x attn_length x attn_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n output_size: Size of the output vectors; if None, we use cell.output_size.\n num_heads: Number of attention heads that read from attention_states.\n loop_function: If not None, this function will be applied to i-th output\n in order to generate i+1-th input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http://arxiv.org/abs/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n dtype: The dtype to use for the RNN initial state (default: tf.float32).\n scope: VariableScope for the created subgraph; default: \"attention_decoder\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states -- useful when we wish to resume decoding from a previously\n stored decoder state and attention states.\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors of\n shape [batch_size x output_size]. 
These represent the generated outputs.\n Output i is computed from input i (which is either the i-th element\n of decoder_inputs or loop_function(output {i-1}, i)) as follows.\n First, we run the cell on a combination of the input and previous\n attention masks:\n cell_output, new_state = cell(linear(input, prev_attn), prev_state).\n Then, we calculate new attention masks:\n new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))\n and then we calculate the output:\n output = linear(cell_output, new_attn).\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n Raises:\n ValueError: when num_heads is not positive, there are no inputs, shapes\n of attention_states are not set, or input size cannot be inferred\n from the input.\n \"\"\"\n if not decoder_inputs:\n raise ValueError(\"Must provide at least 1 input to attention decoder.\")\n if num_heads < 1:\n raise ValueError(\"With fewer than 1 head, use a non-attention decoder.\")\n if not attention_states.get_shape()[1:2].is_fully_defined():\n raise ValueError(\"Shape[1] and [2] of attention_states must be known: %s\"\n % attention_states.get_shape())\n if output_size is None:\n output_size = cell.output_size\n\n with tf.variable_scope(scope or \"attention_decoder\"):\n batch_size = tf.shape(decoder_inputs[0])[0] # Needed for reshaping.\n attn_length = attention_states.get_shape()[1].value\n attn_size = attention_states.get_shape()[2].value\n\n # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.\n hidden = tf.reshape(\n attention_states, [-1, attn_length, 1, attn_size])\n hidden_features = []\n v = []\n attention_vec_size = attn_size # Size of query vectors for attention.\n for a in xrange(num_heads):\n k = tf.get_variable(\"AttnW_%d\" % a,\n [1, 1, attn_size, attention_vec_size])\n hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], \"SAME\"))\n v.append(tf.get_variable(\"AttnV_%d\" % a,\n [attention_vec_size]))\n\n state = initial_state\n def attention(query):\n \"\"\"Put attention masks on hidden using hidden_features and query.\"\"\"\n ds = [] # Results of attention reads will be stored here.\n for a in xrange(num_heads):\n with tf.variable_scope(\"Attention_%d\" % a):\n y = linear(query, attention_vec_size, True)\n y = tf.reshape(y, [-1, 1, 1, attention_vec_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = tf.reduce_sum(\n v[a] * tf.nn.tanh(hidden_features[a] + y), [2, 3])\n a = tf.nn.softmax(s)\n # Now calculate the attention-weighted vector d.\n d = tf.reduce_sum(\n tf.reshape(a, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n ds.append(tf.reshape(d, [-1, attn_size]))\n return ds\n\n outputs = []\n prev = None\n batch_attn_size = tf.stack([batch_size, attn_size])\n attns = [tf.zeros(batch_attn_size, dtype=dtype)\n for _ in xrange(num_heads)]\n for a in attns: # Ensure the second shape of attention vectors is set.\n a.set_shape([None, attn_size])\n if initial_state_attention:\n attns = attention(initial_state)\n\n for i, inp in enumerate(decoder_inputs):\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n # If loop_function is set, we use it instead of decoder_inputs.\n if loop_function is not None:\n with tf.variable_scope(\"loop_function\", reuse=True):\n if prev is not None:\n inp = loop_function(prev, i)\n\n input_size = inp.get_shape().with_rank(2)[1]\n\n x = linear([inp] + attns, input_size, True)\n # Run the RNN.\n cell_output, state = cell(x, state)\n # Run the attention mechanism.\n if i 
== 0 and initial_state_attention:\n with tf.variable_scope(tf.get_variable_scope(),\n reuse=True):\n attns = attention(state)\n else:\n attns = attention(state)\n\n with tf.variable_scope(\"AttnOutputProjection\"):\n output = linear([cell_output] + attns, output_size, True)\n if loop_function is not None:\n prev = output\n outputs.append(output)\n\n return outputs, state\n\n\ndef beam_attention_decoder(decoder_inputs, initial_state, attention_states, cell,\n output_size=None, num_heads=1, loop_function=None,\n dtype=tf.float32, scope=None,\n initial_state_attention=False, output_projection=None, beam_size=10):\n \"\"\"RNN decoder with attention for the sequence-to-sequence model.\n In this context \"attention\" means that, during decoding, the RNN can look up\n information in the additional tensor attention_states, and it does this by\n focusing on a few entries from the tensor. This model has proven to yield\n especially good results in a number of sequence-to-sequence tasks. This\n implementation is based on http://arxiv.org/abs/1412.7449 (see below for\n details). It is recommended for complex sequence-to-sequence tasks.\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor [batch_size x cell.state_size].\n attention_states: 3D Tensor [batch_size x attn_length x attn_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n output_size: Size of the output vectors; if None, we use cell.output_size.\n num_heads: Number of attention heads that read from attention_states.\n loop_function: If not None, this function will be applied to i-th output\n in order to generate i+1-th input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http://arxiv.org/abs/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n dtype: The dtype to use for the RNN initial state (default: tf.float32).\n scope: VariableScope for the created subgraph; default: \"attention_decoder\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states -- useful when we wish to resume decoding from a previously\n stored decoder state and attention states.\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors of\n shape [batch_size x output_size]. 
These represent the generated outputs.\n Output i is computed from input i (which is either the i-th element\n of decoder_inputs or loop_function(output {i-1}, i)) as follows.\n First, we run the cell on a combination of the input and previous\n attention masks:\n cell_output, new_state = cell(linear(input, prev_attn), prev_state).\n Then, we calculate new attention masks:\n new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))\n and then we calculate the output:\n output = linear(cell_output, new_attn).\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n Raises:\n ValueError: when num_heads is not positive, there are no inputs, shapes\n of attention_states are not set, or input size cannot be inferred\n from the input.\n \"\"\"\n if not decoder_inputs:\n raise ValueError(\"Must provide at least 1 input to attention decoder.\")\n if num_heads < 1:\n raise ValueError(\"With fewer than 1 head, use a non-attention decoder.\")\n if not attention_states.get_shape()[1:2].is_fully_defined():\n raise ValueError(\"Shape[1] and [2] of attention_states must be known: %s\"\n % attention_states.get_shape())\n if output_size is None:\n output_size = cell.output_size\n\n with tf.variable_scope(scope or \"attention_decoder\"):\n batch_size = tf.shape(decoder_inputs[0])[0] # Needed for reshaping.\n attn_length = attention_states.get_shape()[1].value\n attn_size = attention_states.get_shape()[2].value\n\n # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.\n hidden = tf.reshape(\n attention_states, [-1, attn_length, 1, attn_size])\n hidden_features = []\n v = []\n attention_vec_size = attn_size # Size of query vectors for attention.\n for a in xrange(num_heads):\n k = tf.get_variable(\"AttnW_%d\" % a,\n [1, 1, attn_size, attention_vec_size])\n hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], \"SAME\"))\n v.append(tf.get_variable(\"AttnV_%d\" % a,\n [attention_vec_size]))\n\n state_size = int(initial_state.get_shape().with_rank(2)[1])\n states = []\n for kk in range(1):\n states.append(initial_state)\n state = tf.reshape(tf.concat(axis=0, values=states), [-1, state_size])\n def attention(query):\n \"\"\"Put attention masks on hidden using hidden_features and query.\"\"\"\n ds = [] # Results of attention reads will be stored here.\n for a in xrange(num_heads):\n with tf.variable_scope(\"Attention_%d\" % a):\n y = linear(query, attention_vec_size, True)\n y = tf.reshape(y, [-1, 1, 1, attention_vec_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = tf.reduce_sum(\n v[a] * tf.nn.tanh(hidden_features[a] + y), [2, 3])\n a = tf.nn.softmax(s)\n # Now calculate the attention-weighted vector d. 
\n d = tf.reduce_sum(tf.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])\n ds.append(tf.reshape(d, [-1, attn_size]))\n return ds\n\n outputs = []\n prev = None\n batch_attn_size = tf.stack([batch_size, attn_size])\n attns = [tf.zeros(batch_attn_size, dtype=dtype)\n for _ in xrange(num_heads)]\n for a in attns: # Ensure the second shape of attention vectors is set.\n a.set_shape([None, attn_size])\n\n if initial_state_attention:\n attns = []\n attns.append(attention(initial_state))\n tmp = tf.reshape(tf.concat(axis=0, values=attns), [-1, attn_size])\n attns = []\n attns.append(tmp)\n\n log_beam_probs, beam_path, beam_symbols = [], [], []\n for i, inp in enumerate(decoder_inputs):\n\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n # If loop_function is set, we use it instead of decoder_inputs.\n if loop_function is not None:\n with tf.variable_scope(\"loop_function\", reuse=True):\n if prev is not None:\n inp = loop_function(prev, i, log_beam_probs, beam_path, beam_symbols)\n\n input_size = inp.get_shape().with_rank(2)[1]\n x = linear([inp] + attns, input_size, True)\n cell_output, state = cell(x, state)\n\n # Run the attention mechanism.\n if i == 0 and initial_state_attention:\n with tf.variable_scope(tf.get_variable_scope(),\n reuse=True):\n attns = attention(state)\n else:\n attns = attention(state)\n\n with tf.variable_scope(\"AttnOutputProjection\"):\n output = linear([cell_output] + attns, output_size, True)\n if loop_function is not None:\n prev = output\n if i == 0:\n states = []\n for kk in range(beam_size):\n states.append(state)\n state = tf.reshape(tf.concat(axis=0, values=states), [-1, state_size])\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n attns = attention(state)\n\n outputs.append(tf.argmax(tf.nn.xw_plus_b(\n output, output_projection[0], output_projection[1]), axis=1))\n\n return outputs, state, tf.reshape(tf.concat(axis=0, values=beam_path), [-1, beam_size]), tf.reshape(tf.concat(axis=0, values=beam_symbols), [-1, beam_size])\n\ndef embedding_attention_decoder(decoder_inputs, initial_state, attention_states,\n cell, num_symbols, embedding_size, num_heads=1,\n output_size=None, output_projection=None,\n feed_previous=False,\n update_embedding_for_previous=True,\n dtype=tf.float32, scope=None,\n initial_state_attention=False, beam_search=True, beam_size=10):\n \"\"\"RNN decoder with embedding and attention and a pure-decoding option.\n Args:\n decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).\n initial_state: 2D Tensor [batch_size x cell.state_size].\n attention_states: 3D Tensor [batch_size x attn_length x attn_size].\n cell: rnn_cell.RNNCell defining the cell function.\n num_symbols: Integer, how many symbols come into the embedding.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n num_heads: Number of attention heads that read from attention_states.\n output_size: Size of the output vectors; if None, we use cell.output_size.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_symbols] and B has shape\n [num_symbols]; if provided and feed_previous=True, each fed previous\n output will first be multiplied by W and added B.\n feed_previous: Boolean; if True, only the first of decoder_inputs will be\n used (the \"GO\" symbol), and all other decoder inputs will be generated by:\n next = embedding_lookup(embedding, argmax(previous_output)),\n In effect, this implements a greedy decoder. 
It can also be used\n during training to emulate http://arxiv.org/abs/1506.03099.\n If False, decoder_inputs are used as given (the standard decoder case).\n update_embedding_for_previous: Boolean; if False and feed_previous=True,\n only the embedding for the first symbol of decoder_inputs (the \"GO\"\n symbol) will be updated by back propagation. Embeddings for the symbols\n generated from the decoder itself remain unchanged. This parameter has\n no effect if feed_previous=False.\n dtype: The dtype to use for the RNN initial states (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_attention_decoder\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states -- useful when we wish to resume decoding from a previously\n stored decoder state and attention states.\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing the generated outputs.\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n Raises:\n ValueError: When output_projection has the wrong shape.\n \"\"\"\n if output_size is None:\n output_size = cell.output_size\n if output_projection is not None:\n proj_biases = tf.convert_to_tensor(output_projection[1], dtype=dtype)\n proj_biases.get_shape().assert_is_compatible_with([num_symbols])\n\n with tf.variable_scope(scope or \"embedding_attention_decoder\"):\n with tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\"embedding\",\n [num_symbols, embedding_size])\n if beam_search:\n loop_function = _extract_beam_search(\n embedding, beam_size, num_symbols, embedding_size, output_projection,\n update_embedding_for_previous)\n else:\n loop_function = _extract_argmax_and_embed(\n embedding, output_projection,\n update_embedding_for_previous) if feed_previous else None\n emb_inp = [\n tf.nn.embedding_lookup(embedding, i) for i in decoder_inputs]\n if beam_search:\n return beam_attention_decoder(\n emb_inp, initial_state, attention_states, cell, output_size=output_size,\n num_heads=num_heads, loop_function=loop_function,\n initial_state_attention=initial_state_attention, output_projection=output_projection, beam_size=beam_size)\n else:\n return attention_decoder(\n emb_inp, initial_state, attention_states, cell, output_size=output_size,\n num_heads=num_heads, loop_function=loop_function,\n initial_state_attention=initial_state_attention)\n\n\ndef embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell_1, cell_2,\n num_encoder_symbols, num_decoder_symbols,\n embedding_size,\n num_heads=1, output_projection=None,\n feed_previous=False, dtype=tf.float32,\n scope=None, initial_state_attention=False, beam_search=True, beam_size=10):\n \"\"\"Embedding sequence-to-sequence model with attention.\n This model first embeds encoder_inputs by a newly created embedding (of shape\n [num_encoder_symbols x input_size]). Then it runs an RNN to encode\n embedded encoder_inputs into a state vector. It keeps the outputs of this\n RNN at every step to use for attention later. Next, it embeds decoder_inputs\n by another newly created embedding (of shape [num_decoder_symbols x\n input_size]). 
Then it runs attention decoder, initialized with the last\n encoder state, on embedded decoder_inputs and attending to encoder outputs.\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n cell_1: rnn_cell.RNNCell defining the encoder cell function and size.\n cell_2: rnn_cell.RNNCell defining the decoder cell function and size.\n num_encoder_symbols: Integer; number of symbols on the encoder side.\n num_decoder_symbols: Integer; number of symbols on the decoder side.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n num_heads: Number of attention heads that read from attention_states.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_decoder_symbols] and B has\n shape [num_decoder_symbols]; if provided and feed_previous=True, each\n fed previous output will first be multiplied by W and added B.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first\n of decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype of the initial RNN state (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_attention_seq2seq\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states.\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x num_decoder_symbols] containing the generated\n outputs.\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n \"\"\"\n with tf.variable_scope(scope or \"embedding_attention_seq2seq\"):\n # Encoder.\n encoder_cell = tf.contrib.rnn.EmbeddingWrapper(\n cell_1, embedding_classes=num_encoder_symbols,\n embedding_size=embedding_size)\n encoder_outputs, encoder_state = tf.contrib.rnn.static_rnn(\n encoder_cell, encoder_inputs,\n dtype=dtype)\n # First calculate a concatenation of encoder outputs to put attention on.\n top_states = [tf.reshape(e, [-1, 1, cell_1.output_size])\n for e in encoder_outputs]\n attention_states = tf.concat(axis=1, values=top_states)\n\n # Decoder.\n output_size = None\n if output_projection is None:\n cell_2 = tf.contrib.rnn.OutputProjectionWrapper(cell_2, num_decoder_symbols)\n output_size = num_decoder_symbols\n return embedding_attention_decoder(\n decoder_inputs, encoder_state, attention_states, cell_2,\n num_decoder_symbols, embedding_size, num_heads=num_heads,\n output_size=output_size, output_projection=output_projection,\n feed_previous=feed_previous,\n initial_state_attention=initial_state_attention, beam_search=beam_search, beam_size=beam_size)\n\n\n\n\ndef sequence_loss_by_example(logits, targets, weights,\n average_across_timesteps=True,\n softmax_loss_function=None, name=None):\n \"\"\"Weighted cross-entropy loss for a sequence of logits (per example).\n Args:\n logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].\n targets: List of 
1D batch-sized int32 Tensors of the same length as logits.\n weights: List of 1D batch-sized float-Tensors of the same length as logits.\n average_across_timesteps: If set, divide the returned cost by the total\n label weight.\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n name: Optional name for this operation, default: \"sequence_loss_by_example\".\n Returns:\n 1D batch-sized float Tensor: The log-perplexity for each sequence.\n Raises:\n ValueError: If len(logits) is different from len(targets) or len(weights).\n \"\"\"\n if len(targets) != len(logits) or len(weights) != len(logits):\n raise ValueError(\"Lengths of logits, weights, and targets must be the same \"\n \"%d, %d, %d.\" % (len(logits), len(weights), len(targets)))\n with tf.name_scope(name, \"sequence_loss_by_example\", logits + targets + weights):\n log_perp_list = []\n for logit, target, weight in zip(logits, targets, weights):\n if softmax_loss_function is None:\n target = tf.reshape(target, [-1])\n crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target, logits=logit)\n else:\n crossent = softmax_loss_function(logit, target)\n log_perp_list.append(crossent * weight)\n log_perps = tf.add_n(log_perp_list)\n if average_across_timesteps:\n total_size = tf.add_n(weights)\n total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.\n log_perps /= total_size\n return log_perps\n\n\ndef sequence_loss(logits, targets, weights,\n average_across_timesteps=True, average_across_batch=True,\n softmax_loss_function=None, name=None):\n \"\"\"Weighted cross-entropy loss for a sequence of logits, batch-collapsed.\n Args:\n logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].\n targets: List of 1D batch-sized int32 Tensors of the same length as logits.\n weights: List of 1D batch-sized float-Tensors of the same length as logits.\n average_across_timesteps: If set, divide the returned cost by the total\n label weight.\n average_across_batch: If set, divide the returned cost by the batch size.\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n name: Optional name for this operation, defaults to \"sequence_loss\".\n Returns:\n A scalar float Tensor: The average log-perplexity per symbol (weighted).\n Raises:\n ValueError: If len(logits) is different from len(targets) or len(weights).\n \"\"\"\n with tf.name_scope(name, \"sequence_loss\", logits + targets + weights):\n cost = tf.reduce_sum(sequence_loss_by_example(\n logits, targets, weights,\n average_across_timesteps=average_across_timesteps,\n softmax_loss_function=softmax_loss_function))\n if average_across_batch:\n batch_size = tf.shape(targets[0])[0]\n return cost / tf.cast(batch_size, tf.float32)\n else:\n return cost\n\n\ndef model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,\n buckets, seq2seq, softmax_loss_function=None,\n per_example_loss=False, name=None):\n \"\"\"Create a sequence-to-sequence model with support for bucketing.\n The seq2seq argument is a function that defines a sequence-to-sequence model,\n e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))\n Args:\n encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.\n decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.\n targets: A list of 1D batch-sized int32 Tensors (desired output sequence).\n 
weights: List of 1D batch-sized float-Tensors to weight the targets.\n buckets: A list of pairs of (input size, output size) for each bucket.\n seq2seq: A sequence-to-sequence model function; it takes 2 inputs that\n agree with encoder_inputs and decoder_inputs, and returns a pair\n consisting of outputs and states (as, e.g., basic_rnn_seq2seq).\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n per_example_loss: Boolean. If set, the returned loss will be a batch-sized\n tensor of losses for each sequence in the batch. If unset, it will be\n a scalar with the averaged loss from all examples.\n name: Optional name for this operation, defaults to \"model_with_buckets\".\n Returns:\n A tuple of the form (outputs, losses), where:\n outputs: The outputs for each bucket. Its j'th element consists of a list\n of 2D Tensors of shape [batch_size x num_decoder_symbols] (jth outputs).\n losses: List of scalar Tensors, representing losses for each bucket, or,\n if per_example_loss is set, a list of 1D batch-sized float Tensors.\n Raises:\n ValueError: If length of encoder_inputs, targets, or weights is smaller\n than the largest (last) bucket.\n \"\"\"\n if len(encoder_inputs) < buckets[-1][0]:\n raise ValueError(\"Length of encoder_inputs (%d) must be at least that of la\"\n \"st bucket (%d).\" % (len(encoder_inputs), buckets[-1][0]))\n if len(targets) < buckets[-1][1]:\n raise ValueError(\"Length of targets (%d) must be at least that of the last \"\n \"bucket (%d).\" % (len(targets), buckets[-1][1]))\n if len(weights) < buckets[-1][1]:\n raise ValueError(\"Length of weights (%d) must be at least that of the last \"\n \"bucket (%d).\" % (len(weights), buckets[-1][1]))\n\n all_inputs = encoder_inputs + decoder_inputs + targets + weights\n losses = []\n outputs = []\n with tf.name_scope(name, \"model_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets):\n with tf.variable_scope(tf.get_variable_scope(),\n reuse=True if j > 0 else None):\n\n bucket_outputs, _ = seq2seq(encoder_inputs[:bucket[0]],\n decoder_inputs[:bucket[1]])\n\n outputs.append(bucket_outputs)\n if per_example_loss:\n losses.append(sequence_loss_by_example(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n else:\n losses.append(sequence_loss(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n\n return outputs, losses\n\ndef decode_model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,\n buckets, seq2seq, softmax_loss_function=None,\n per_example_loss=False, name=None):\n \"\"\"Create a sequence-to-sequence model with support for bucketing.\n The seq2seq argument is a function that defines a sequence-to-sequence model,\n e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))\n Args:\n encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.\n decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.\n targets: A list of 1D batch-sized int32 Tensors (desired output sequence).\n weights: List of 1D batch-sized float-Tensors to weight the targets.\n buckets: A list of pairs of (input size, output size) for each bucket.\n seq2seq: A sequence-to-sequence model function; it takes 2 inputs that\n agree with encoder_inputs and decoder_inputs, and returns a pair\n consisting of outputs and states (as, e.g., basic_rnn_seq2seq).\n softmax_loss_function: Function 
(inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n per_example_loss: Boolean. If set, the returned loss will be a batch-sized\n tensor of losses for each sequence in the batch. If unset, it will be\n a scalar with the averaged loss from all examples.\n name: Optional name for this operation, defaults to \"model_with_buckets\".\n Returns:\n A tuple of the form (outputs, losses), where:\n outputs: The outputs for each bucket. Its j'th element consists of a list\n of 2D Tensors of shape [batch_size x num_decoder_symbols] (jth outputs).\n losses: List of scalar Tensors, representing losses for each bucket, or,\n if per_example_loss is set, a list of 1D batch-sized float Tensors.\n Raises:\n ValueError: If length of encoder_inputs, targets, or weights is smaller\n than the largest (last) bucket.\n \"\"\"\n if len(encoder_inputs) < buckets[-1][0]:\n raise ValueError(\"Length of encoder_inputs (%d) must be at least that of la\"\n \"st bucket (%d).\" % (len(encoder_inputs), buckets[-1][0]))\n if len(targets) < buckets[-1][1]:\n raise ValueError(\"Length of targets (%d) must be at least that of the last \"\n \"bucket (%d).\" % (len(targets), buckets[-1][1]))\n if len(weights) < buckets[-1][1]:\n raise ValueError(\"Length of weights (%d) must be at least that of the last \"\n \"bucket (%d).\" % (len(weights), buckets[-1][1]))\n\n all_inputs = encoder_inputs + decoder_inputs + targets + weights\n losses = []\n outputs = []\n beam_paths = []\n beam_symbols = []\n with tf.name_scope(name, \"model_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets):\n with tf.variable_scope(tf.get_variable_scope(),\n reuse=True if j > 0 else None):\n bucket_outputs, _, beam_path, beam_symbol = seq2seq(encoder_inputs[:bucket[0]],\n decoder_inputs[:bucket[1]])\n outputs.append(bucket_outputs)\n beam_paths.append(beam_path)\n beam_symbols.append(beam_symbol)\n\n return outputs, beam_paths, beam_symbols\n"
]
| [
[
"tensorflow.convert_to_tensor",
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.sparse_softmax_cross_entropy_with_logits",
"tensorflow.cast",
"tensorflow.contrib.rnn.EmbeddingWrapper",
"tensorflow.add_n",
"tensorflow.nn.conv2d",
"tensorflow.contrib.rnn.OutputProjectionWrapper",
"tensorflow.stop_gradient",
"tensorflow.nn.top_k",
"tensorflow.name_scope",
"tensorflow.argmax",
"tensorflow.nn.xw_plus_b",
"tensorflow.python.ops.rnn_cell_impl._linear",
"tensorflow.shape",
"tensorflow.nn.tanh",
"tensorflow.contrib.rnn.static_rnn",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
]
]
|
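The seq2seq decoders in the row above thread three beam-search artifacts through the graph: the `loop_function` built by `_extract_beam_search` (defined earlier in that file, outside this excerpt) and the flattened `beam_path`/`beam_symbols` tensors returned by `beam_attention_decoder` and `decode_model_with_buckets`. As a rough, framework-free sketch of the per-step bookkeeping such a loop function is assumed to perform -- expand every live beam over the vocabulary, keep the `beam_size` best cumulative log-probabilities, and record each survivor's parent beam and chosen symbol (names here are illustrative, not taken from the repo):

```python
import numpy as np

def beam_step(prev_log_probs, step_log_probs, beam_size):
    """One beam-search bookkeeping step.

    prev_log_probs: [beam_size] cumulative scores of the live beams.
    step_log_probs: [beam_size, vocab] log-softmax over this step's logits.
    Returns (new cumulative scores, parent beam ids, chosen symbol ids).
    """
    vocab = step_log_probs.shape[1]
    # Score of every (beam, symbol) continuation, flattened to one axis.
    total = (prev_log_probs[:, None] + step_log_probs).reshape(-1)
    best = np.argsort(total)[::-1][:beam_size]  # top-k continuations
    return total[best], best // vocab, best % vocab

# Toy usage: 2 live beams over a 5-symbol vocabulary.
rng = np.random.default_rng(0)
logits = rng.normal(size=(2, 5))
log_probs = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
scores, parents, symbols = beam_step(np.zeros(2), log_probs, beam_size=2)
print(scores, parents, symbols)
```

Collected over all decoding steps, `parents` and `symbols` are the kind of record the flattened `beam_path` and `beam_symbols` tensors above encode; backtracking through the parent ids from the best final beam recovers the full hypothesis.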
ljansen2140/4L13NHUN73RS | [
"b1ece351d0f7a0282e8e0e18ef6d124cca211e00"
]
| [
"vae_code.py"
]
| [
"# Full example for my blog post at:\n# https://danijar.com/building-variational-auto-encoders-in-tensorflow/\n\n############################ Yvannia ################################\n\n# Importing the package --> \"numpy\" so that all the objects defined in the module can be used. Imported as \"np\" for easy referral (renaming)\nimport numpy as np\n# Importing the package --> \"matplotlib.pyplot\" so that all the objects defined in the module can be used. Imported as \"plt\" for easy referral (renaming)\nimport matplotlib.pyplot as plt\n# Importing the package --> \"tensorflow\" so that all the objects defined in the module can be used. Imported as \"tf\" for easy referral (renaming)\nimport tensorflow as tf\n# Importing a specific object named \"input_data\" to have shorter calls later on in the code \nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# contrib makes it easier to configure, train, and evaluate a variety of machine learning models\n# distributions is a class that is used for constructing and organizing properties (mean, variance, standard deviation, etc.) of variables \n# Here it is shortened to \"tfd\" to avoid long code \ntfd = tf.contrib.distributions\n\n\n# Encoder function --------------------------------------------------------------------------------------------------------------\n# Function definition of the encoder that takes in two parameters, data and code_size \ndef make_encoder(data, code_size):\n\n# Layers is an object that involves computation --> it is an important method used to create a neural network \n# Flatten is used to compress the input, which in this case is the parameter \"data\"\n# Flatten is applied to the layer having a shape such as (size, 2,2), and then outputs the shape as (size, 4). i.e. collapses spatial dimensions \n x = tf.layers.flatten(data)\n\n# There are two identical calls because the first is a hidden layer (where every node is connected to every other node in the next layer) \n# and the second is an output layer (the last layer of the node connections). \n# Dense is a network layer. It feeds all outputs from the previos layer, to all its neurons. \n# x is the previous layers output defined above and 200 is the number of neurons. \n# nn stands for neural network --> provides support for many basic neural network operations \n# relu stands for Rectified Linear Unit which is a function --> it is computationally faster and allows for fewer vanishing gradients. \n x = tf.layers.dense(x, 200, tf.nn.relu)\n x = tf.layers.dense(x, 200, tf.nn.relu)\n \n# Same as above, but instead of taking the parameter 200 it takes in the input \"code_size\"\n# loc is the mean \n loc = tf.layers.dense(x, code_size)\n \n# Same as above but now using \"softplus\" --> provides more stabilization and performance to deep neural network (than ReLU function).\n# scale is the standard deviation. \n scale = tf.layers.dense(x, code_size, tf.nn.softplus)\n\n# Implement multivariate normal distributions with a diagonal covariance structure.\n# Takes in parameter loc = mean and scale = standard deviation. \n# Returns a graph of the distribution that will be compared against what it should look like later. \n return tfd.MultivariateNormalDiag(loc, scale)\n\n\n# Make_Prior function --------------------------------------------------------------------------------------------------------------\n# Function definition of make_prior that takes in one parameters, code_size. 
\ndef make_prior(code_size):\n# tf.zeros creates a tensor (a multi-dimensional array with a uniform type) with all elements of the input \"code_size\" set to zero. \n loc = tf.zeros(code_size)\n# tf.ones creates a tensor with all elements of the input \"code_size\" set to one. \n scale = tf.ones(code_size)\n# Same explanation as above\n# Returns a graph of what the distribution should look like that is compared against the returned graph above. \n return tfd.MultivariateNormalDiag(loc, scale)\n\n######################################################################\n\n\n\n############################ Botoul ##################################\n\n#Declaring a function called decoder which takes two parameters: code and data_shape\ndef make_decoder(code, data_shape):\n # initializing variable x which stores code.\n x = code\n #An activation function: tf.nn.relu\n # It is applied to the output of a neural network layer,\n # #which is then passed as the input to the next layer.\n # Activation functions are an essential part of neural networks\n # as they provide non-linearity, without which the neural network\n # reduces to a mere logistic regression model\n # applying `tf.nn.relu` function and add a Dense layer as the first layer.\n x = tf.layers.dense(x, 200, tf.nn.relu)\n x = tf.layers.dense(x, 200, tf.nn.relu)\n #Logits are by definition unnormalized log probabilities\n logit = tf.layers.dense(x, np.prod(data_shape))\n #-1 Place holder for the dimension that will be calculated automatically.\n # this way, we can calculate length accurately\n logit = tf.reshape(logit, [-1] + data_shape)\n #we are returning a tfp independent Bernoulli distribution\n #width and height in our case, belong to the same data point (2)\n # even though they have independent parameters\n return tfd.Independent(tfd.Bernoulli(logit), 2)\n\n#######################################################################\n\ndef plot_codes(ax, codes, labels):\n ax.scatter(codes[:, 0], codes[:, 1], s=2, c=labels, alpha=0.1)\n ax.set_aspect('equal')\n ax.set_xlim(codes.min() - .1, codes.max() + .1)\n ax.set_ylim(codes.min() - .1, codes.max() + .1)\n ax.tick_params(\n axis='both', which='both', left='off', bottom='off',\n labelleft='off', labelbottom='off')\n\n\ndef plot_samples(ax, samples):\n for index, sample in enumerate(samples):\n ax[index].imshow(sample, cmap='gray')\n ax[index].axis('off')\n\n\n############################ Logan ##################################\n\n#Create placeholder data in dimension ?x28x28, has no represented data values\n#'None' represents an unknown dimension\ndata = tf.placeholder(tf.float32, [None, 28, 28])\n\n#Create function templates, this ensures that function specific variables are initialized first and consistent between all calls of this function\nmake_encoder = tf.make_template('encoder', make_encoder)\nmake_decoder = tf.make_template('decoder', make_decoder)\n\n\n# Define the model------------------------------------------------------\n\n#Returns a Multivariate Normal Diag Distribution with basic parameters set [0,0] and [1,1], prior is fixed with no trainable parameters so it does not need a template.\n#See code explanation for 'make_prior'\n#Prior is p(z)\nprior = make_prior(code_size=2)\n#Create a Multivariate Normal Diag Distribution based on our desired encoder\n#See code explanation for 'make_encoder'\n#Posterior is p(z|x)\nposterior = make_encoder(data, code_size=2)\n#Grab a sample of the data from our encoder that will be passed back through our decoder, this is 'z'\ncode = 
posterior.sample()\n\n\n# Define the loss-------------------------------------------------------\n#We need to compute the negative log-likelihood, so we use our decoder to find log(p(x|z))\n#Data is used as a template\nlikelihood = make_decoder(code, [28, 28]).log_prob(data)\n#Find the KL divergence of the posterior and prior KL[p(z|x)||p(z)]\ndivergence = tfd.kl_divergence(posterior, prior)\n#elbo is our loss function since it should be [-log(p(x|z)) + KL(p(z|x)||p(z))]\n#Here we find the value but negative\nelbo = tf.reduce_mean(likelihood - divergence)\n#Setup the tensorflow optimizer that will minimize loss, we must do it according to our created loss function\n#We must make elbo negative in order to correct signs since our output above is negative\n#Note, learning_rate is default '0.001' so this input is pointless\noptimize = tf.train.AdamOptimizer(0.001).minimize(-elbo)\n#Use the same decoder as before (Since we're using templates) to grab samples of the data. This is purely used for visual output in the code below.\nsamples = make_decoder(prior.sample(10), [28, 28]).mean()\n\n######################################################################\n\n\n############################ Sean ####################################\n#Simplifies the input data into mnist\nmnist = input_data.read_data_sets('MNIST_data/')\n#-------------------------Creating Tensorflow Session--------------------------\n#Create an array with single a-axis\nfig, ax = plt.subplots(nrows=20, ncols=11, figsize=(10, 20))\n#Set Tensorflow monitored session\nwith tf.train.MonitoredSession() as sess:\n#------------------------------Running Session---------------------------------\n #Set max num of epoch\n for epoch in range(20):\n #Reshaping images to parameters (**NumberOfImages, ImageWidth, ImageHeight**, ColorDimension)\n feed = {data: mnist.test.images.reshape([-1, 28, 28])}\n #Runs with Error Cost, Code, and Images\n test_elbo, test_codes, test_samples = sess.run([elbo, code, samples], feed)\n #Prints out epochs and error cost\n print('Epoch', epoch, 'elbo', test_elbo)\n #Plots epoch as y-axis\n ax[epoch, 0].set_ylabel('Epoch {}'.format(epoch))\n #Plots code on current epoch\n plot_codes(ax[epoch, 0], test_codes, mnist.test.labels)\n #Plots Images on current epoch\n plot_samples(ax[epoch, 1:], test_samples)\n#---------------------------------Optimizer--------------------------------------\n for _ in range(600):\n feed = {data: mnist.train.next_batch(100)[0].reshape([-1, 28, 28])}\n #Optimize based on Error Cost\n sess.run(optimize, feed)\n#----------------------------------Output-----------------------------------------\n#Saves images to output file\nplt.savefig('vae-mnist.png', dpi=300, transparent=True, bbox_inches='tight')\n\n#######################################################################\n"
]
| [
[
"tensorflow.layers.flatten",
"tensorflow.reduce_mean",
"tensorflow.zeros",
"tensorflow.train.MonitoredSession",
"tensorflow.reshape",
"matplotlib.pyplot.subplots",
"tensorflow.placeholder",
"matplotlib.pyplot.savefig",
"tensorflow.layers.dense",
"tensorflow.ones",
"tensorflow.make_template",
"numpy.prod",
"tensorflow.train.AdamOptimizer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets"
]
]
|
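The loss in the VAE row above is `elbo = tf.reduce_mean(likelihood - divergence)`, with `divergence = tfd.kl_divergence(posterior, prior)`. For the diagonal-Gaussian posterior and standard-normal prior used there, that KL term has a closed form, KL(N(mu, diag(sigma^2)) || N(0, I)) = 0.5 * sum(sigma^2 + mu^2 - 1 - log sigma^2). A small numpy check with made-up values (illustrative, not taken from the code):

```python
import numpy as np

def kl_diag_gaussian_vs_standard_normal(mu, sigma):
    # Closed-form KL between N(mu, diag(sigma^2)) and N(0, I), per example.
    # Note log(sigma^2) = 2 * log(sigma).
    return 0.5 * np.sum(sigma ** 2 + mu ** 2 - 1.0 - 2.0 * np.log(sigma), axis=-1)

mu = np.array([[0.5, -1.0]])    # one 2-d code, matching code_size=2 above
sigma = np.array([[1.2, 0.8]])
print(kl_diag_gaussian_vs_standard_normal(mu, sigma))  # per-example KL in nats
```

Because this term is differentiable in mu and sigma, the optimizer can pull the posterior toward the prior while the reconstruction likelihood pulls the other way, which is exactly the trade-off the ELBO expresses.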
ZhenchaoTang/VectorCapsule | [
"3a5a3373cea11d7aa2186cf0c08d5b8239ce9eab"
]
| [
"DGLRoutingLayer.py"
]
| [
"import torch.nn as nn\r\nimport torch as th\r\nimport torch.nn.functional as F\r\nimport dgl\r\n\r\n\r\nclass DGLRoutingLayer(nn.Module):\r\n def __init__(self, in_nodes, out_nodes, f_size, batch_size=0, device='cpu'):\r\n super(DGLRoutingLayer, self).__init__()\r\n self.batch_size = batch_size\r\n self.g = init_graph(in_nodes, out_nodes, f_size, device=device)\r\n self.in_nodes = in_nodes\r\n self.out_nodes = out_nodes\r\n self.in_indx = list(range(in_nodes))\r\n self.out_indx = list(range(in_nodes, in_nodes + out_nodes))\r\n self.device = device\r\n\r\n def forward(self, u_hat, routing_num=1):\r\n self.g.edata['u_hat'] = u_hat\r\n batch_size = self.batch_size\r\n\r\n # step 2 (line 5)\r\n def cap_message(edges):\r\n if batch_size:\r\n return {'m': edges.data['c'].unsqueeze(1) * edges.data['u_hat']}\r\n else:\r\n return {'m': edges.data['c'] * edges.data['u_hat']}\r\n\r\n def cap_reduce(nodes):\r\n return {'s': th.sum(nodes.mailbox['m'], dim=1)}\r\n\r\n for r in range(routing_num):\r\n # step 1 (line 4): normalize over out edges\r\n edges_b = self.g.edata['b'].view(self.in_nodes, self.out_nodes)\r\n self.g.edata['c'] = F.softmax(edges_b, dim=1).view(-1, 1)\r\n\r\n # Execute step 1 & 2\r\n self.g.update_all(message_func=cap_message, reduce_func=cap_reduce)\r\n\r\n # step 3 (line 6)\r\n if self.batch_size:\r\n self.g.nodes[self.out_indx].data['v'] = squash(self.g.nodes[self.out_indx].data['s'], dim=2)\r\n else:\r\n self.g.nodes[self.out_indx].data['v'] = squash(self.g.nodes[self.out_indx].data['s'], dim=1)\r\n\r\n # step 4 (line 7)\r\n v = th.cat([self.g.nodes[self.out_indx].data['v']] * self.in_nodes, dim=0)\r\n if self.batch_size:\r\n self.g.edata['b'] = self.g.edata['b'] + (self.g.edata['u_hat'] * v).mean(dim=1).sum(dim=1, keepdim=True)\r\n else:\r\n self.g.edata['b'] = self.g.edata['b'] + (self.g.edata['u_hat'] * v).sum(dim=1, keepdim=True)\r\n\r\n\r\ndef squash(s, dim=1):\r\n sq = th.sum(s ** 2, dim=dim, keepdim=True)\r\n s_norm = th.sqrt(sq)\r\n s = (sq / (1.0 + sq)) * (s / s_norm)\r\n return s\r\n\r\n\r\ndef init_graph(in_nodes, out_nodes, f_size, device='cpu'):\r\n g = dgl.DGLGraph()\r\n g.set_n_initializer(dgl.frame.zero_initializer)\r\n all_nodes = in_nodes + out_nodes\r\n g.add_nodes(all_nodes)\r\n in_indx = list(range(in_nodes))\r\n out_indx = list(range(in_nodes, in_nodes + out_nodes))\r\n # add edges use edge broadcasting\r\n for u in in_indx:\r\n g.add_edges(u, out_indx)\r\n\r\n g = g.to(device)\r\n g.edata['b'] = th.zeros(in_nodes * out_nodes, 1).to(device)\r\n return g\r\n\r\n\r\nif __name__==\"__main__\":\r\n # ToDo: monitoring the entropy of coupling coefficients\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n\r\n in_nodes = 20\r\n out_nodes = 10\r\n f_size = 4\r\n u_hat = th.randn(in_nodes * out_nodes, f_size)\r\n routing = DGLRoutingLayer(in_nodes, out_nodes, f_size)\r\n\r\n entropy_list = []\r\n dist_list = []\r\n\r\n for i in range(10):\r\n routing(u_hat)\r\n dist_matrix = routing.g.edata['c'].view(in_nodes, out_nodes)\r\n entropy = (-dist_matrix * th.log(dist_matrix)).sum(dim=1)\r\n entropy_list.append(entropy.data.numpy())\r\n dist_list.append(dist_matrix.data.numpy())\r\n\r\n stds = np.std(entropy_list, axis=1)\r\n means = np.mean(entropy_list, axis=1)\r\n plt.figure()\r\n plt.errorbar(np.arange(len(entropy_list)), means, stds, marker='o')\r\n plt.ylabel(\"Entropy of Weight Distribution\")\r\n plt.xlabel(\"Number of Routing\")\r\n plt.xticks(np.arange(len(entropy_list)))\r\n plt.show()\r\n\r\n # ToDo: watching the evolution of histograms\r\n import 
seaborn as sns\r\n import matplotlib.animation as animation\r\n\r\n fig,ax = plt.subplots()\r\n def dist_animate(i):\r\n ax.cla()\r\n sns.distplot(dist_list[i].reshape(-1), kde=False, ax=ax)\r\n ax.set_xlabel(\"Weight Distribution Histogram\")\r\n ax.set_title(\"Routing: %d\" % (i))\r\n ani = animation.FuncAnimation(fig, dist_animate, frames=len(entropy_list), interval=500)\r\n plt.show()\r\n\r\n # ToDo: monitoring the how lower-level Capsules gradually attach to one of the higher level ones\r\n import networkx as nx\r\n from networkx.algorithms import bipartite\r\n\r\n g = routing.g.to_networkx()\r\n X, Y = bipartite.sets(g)\r\n height_in = 10\r\n height_out = height_in * 0.8\r\n height_in_y = np.linspace(0, height_in, in_nodes)\r\n height_out_y = np.linspace((height_in - height_out) / 2, height_out, out_nodes)\r\n pos = dict()\r\n\r\n fig2,ax2 = plt.subplots()\r\n pos.update((n, (i, 1)) for i, n in zip(height_in_y, X)) # put nodes from X at x=1\r\n pos.update((n, (i, 2)) for i, n in zip(height_out_y, Y)) # put nodes from Y at x=2\r\n\r\n def weight_animate(i):\r\n ax2.cla()\r\n ax2.axis('off')\r\n ax2.set_title(\"Routing: %d \" % i)\r\n dm = dist_list[i]\r\n nx.draw_networkx_nodes(g, pos, nodelist=range(in_nodes), node_color='r', node_size=100, ax=ax2)\r\n nx.draw_networkx_nodes(g, pos, nodelist=range(in_nodes, in_nodes + out_nodes), node_color='b', node_size=100,\r\n ax=ax2)\r\n for edge in g.edges():\r\n nx.draw_networkx_edges(g, pos, edgelist=[edge], width=dm[edge[0], edge[1] - in_nodes] * 1.5, ax=ax2)\r\n\r\n ani2 = animation.FuncAnimation(fig2, weight_animate, frames=len(dist_list), interval=500)\r\n plt.show()"
]
| [
[
"torch.nn.functional.softmax",
"numpy.linspace",
"torch.cat",
"torch.sqrt",
"torch.randn",
"torch.zeros",
"torch.sum",
"matplotlib.pyplot.subplots",
"numpy.std",
"matplotlib.pyplot.ylabel",
"numpy.mean",
"torch.log",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
]
|
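The DGL layer above expresses one dynamic-routing iteration as message passing over a bipartite capsule graph. The same four steps can be restated in plain PyTorch on dense tensors, which is handy for sanity-checking the graph version; the shapes and the squash formula follow the non-batched branch of the row's code, the rest is a sketch:

```python
import torch
import torch.nn.functional as F

def squash(s, dim=-1):
    # s -> (|s|^2 / (1 + |s|^2)) * s / |s|, as in the row's squash().
    sq = (s ** 2).sum(dim=dim, keepdim=True)
    return (sq / (1.0 + sq)) * s / torch.sqrt(sq)

def routing_iteration(b, u_hat):
    """b: [in_nodes, out_nodes] routing logits; u_hat: [in_nodes, out_nodes, f]."""
    c = F.softmax(b, dim=1)                        # step 1: normalize over out edges
    s = (c.unsqueeze(-1) * u_hat).sum(dim=0)       # step 2: weighted message sum
    v = squash(s, dim=-1)                          # step 3: squash per out-capsule
    b = b + (u_hat * v.unsqueeze(0)).sum(dim=-1)   # step 4: agreement update
    return b, v

b = torch.zeros(20, 10)         # in_nodes=20, out_nodes=10 as in the __main__ block
u_hat = torch.randn(20, 10, 4)  # f_size=4
for _ in range(3):
    b, v = routing_iteration(b, u_hat)
print(v.shape)  # torch.Size([10, 4])
```

Step 4 raises `b` wherever a prediction `u_hat` agrees with the consensus output `v`, so the softmax couplings sharpen across iterations -- the falling-entropy behavior the row's `__main__` block plots.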
dwillmer/pyquil | [
"f9a8504d20729b79f07ec4730c93f4b84d6439eb"
]
| [
"pyquil/api/_base_connection.py"
]
| [
"##############################################################################\n# Copyright 2016-2018 Rigetti Computing\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\nimport re\nimport time\nimport warnings\nfrom json.decoder import JSONDecodeError\nfrom typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, cast\n\nimport numpy as np\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom urllib3 import Retry\n\nfrom pyquil.api._config import PyquilConfig\nfrom pyquil.api._error_reporting import _record_call\nfrom pyquil.api._errors import (\n error_mapping,\n ApiError,\n UserMessageError,\n UnknownApiError,\n TooManyQubitsError,\n)\nfrom pyquil.api._logger import logger\nfrom pyquil.quil import Program\nfrom pyquil.version import __version__\nfrom pyquil.wavefunction import Wavefunction\n\nTYPE_EXPECTATION = \"expectation\"\nTYPE_MULTISHOT = \"multishot\"\nTYPE_MULTISHOT_MEASURE = \"multishot-measure\"\nTYPE_WAVEFUNCTION = \"wavefunction\"\n\n\ndef get_json(session: requests.Session, url: str, params: Optional[Dict[Any, Any]] = None) -> Any:\n \"\"\"\n Get JSON from a Forest endpoint.\n \"\"\"\n logger.debug(\"Sending GET request to %s. Params: %s\", url, params)\n res = session.get(url, params=params)\n if res.status_code >= 400:\n raise parse_error(res)\n return res.json()\n\n\ndef post_json(session: requests.Session, url: str, json: Any) -> requests.models.Response:\n \"\"\"\n Post JSON to the Forest endpoint.\n \"\"\"\n logger.debug(\"Sending POST request to %s. 
Body: %s\", url, json)\n res = session.post(url, json=json)\n if res.status_code >= 400:\n raise parse_error(res)\n return res\n\n\ndef parse_error(res: requests.Response) -> ApiError:\n \"\"\"\n Every server error should contain a \"status\" field with a human readable explanation of\n what went wrong as well as a \"error_type\" field indicating the kind of error that can be mapped\n to a Python type.\n\n There's a fallback error UnknownError for other types of exceptions (network issues, api\n gateway problems, etc.)\n \"\"\"\n try:\n body = res.json()\n except JSONDecodeError:\n raise UnknownApiError(res.text)\n\n if \"error_type\" not in body:\n raise UnknownApiError(str(body))\n\n error_type = body[\"error_type\"]\n status = body[\"status\"]\n\n if re.search(r\"[0-9]+ qubits were requested, but the QVM is limited to [0-9]+ qubits.\", status):\n return TooManyQubitsError(status)\n\n error_cls = error_mapping.get(error_type, UnknownApiError)\n return error_cls(status)\n\n\ndef get_session(*args: Any, **kwargs: Any) -> \"ForestSession\":\n \"\"\"\n Create a requests session to access the REST API\n\n :return: requests session\n :rtype: Session\n \"\"\"\n session = ForestSession(*args, **kwargs)\n retry_adapter = HTTPAdapter(\n max_retries=Retry(\n total=3,\n method_whitelist=[\"POST\"],\n status_forcelist=[502, 503, 504, 521, 523],\n backoff_factor=0.2,\n raise_on_status=False,\n )\n )\n\n session.mount(\"http://\", retry_adapter)\n session.mount(\"https://\", retry_adapter)\n\n # We need this to get binary payload for the wavefunction call.\n session.headers.update({\"Accept\": \"application/octet-stream\"})\n\n session.headers.update({\"Content-Type\": \"application/json; charset=utf-8\"})\n\n return session\n\n\ndef validate_noise_probabilities(noise_parameter: Optional[List[float]]) -> None:\n \"\"\"\n Is noise_parameter a valid specification of noise probabilities for depolarizing noise?\n\n :param list noise_parameter: List of noise parameter values to be validated.\n \"\"\"\n if not noise_parameter:\n return\n if not isinstance(noise_parameter, list):\n raise TypeError(\"noise_parameter must be a list\")\n if any([not isinstance(value, float) for value in noise_parameter]):\n raise TypeError(\"noise_parameter values should all be floats\")\n if len(noise_parameter) != 3:\n raise ValueError(\"noise_parameter lists must be of length 3\")\n if sum(noise_parameter) > 1 or sum(noise_parameter) < 0:\n raise ValueError(\"sum of entries in noise_parameter must be between 0 and 1 (inclusive)\")\n if any([value < 0 for value in noise_parameter]):\n raise ValueError(\"noise_parameter values should all be non-negative\")\n\n\ndef validate_qubit_list(qubit_list: Sequence[int]) -> Sequence[int]:\n \"\"\"\n Check the validity of qubits for the payload.\n\n :param qubit_list: List of qubits to be validated.\n \"\"\"\n if not isinstance(qubit_list, Sequence):\n raise TypeError(\"'qubit_list' must be of type 'Sequence'\")\n if any(not isinstance(i, int) or i < 0 for i in qubit_list):\n raise TypeError(\"'qubit_list' must contain positive integer values\")\n return qubit_list\n\n\ndef prepare_register_list(\n register_dict: Dict[str, Union[bool, Sequence[int]]]\n) -> Dict[str, Union[bool, Sequence[int]]]:\n \"\"\"\n Canonicalize classical addresses for the payload and ready MemoryReference instances\n for serialization.\n\n This function will cast keys that are iterables of int-likes to a list of Python\n ints. This is to support specifying the register offsets as ``range()`` or numpy\n arrays. 
This mutates ``register_dict``.\n\n :param register_dict: The classical memory to retrieve. Specified as a dictionary:\n the keys are the names of memory regions, and the values are either (1) a list of\n integers for reading out specific entries in that memory region, or (2) True, for\n reading out the entire memory region.\n \"\"\"\n if not isinstance(register_dict, dict):\n raise TypeError(\"register_dict must be a dict but got \" + repr(register_dict))\n\n for k, v in register_dict.items():\n if isinstance(v, bool):\n assert v # If boolean v must be True\n continue\n\n indices = [int(x) for x in v] # support ranges, numpy, ...\n\n if not all(x >= 0 for x in indices):\n raise TypeError(\"Negative indices into classical arrays are not allowed.\")\n register_dict[k] = indices\n\n return register_dict\n\n\ndef run_and_measure_payload(\n quil_program: Program, qubits: Sequence[int], trials: int, random_seed: int\n) -> Dict[str, object]:\n \"\"\"REST payload for :py:func:`ForestConnection._run_and_measure`\"\"\"\n if not quil_program:\n raise ValueError(\n \"You have attempted to run an empty program.\"\n \" Please provide gates or measure instructions to your program.\"\n )\n\n if not isinstance(quil_program, Program):\n raise TypeError(\"quil_program must be a Quil program object\")\n qubits = validate_qubit_list(qubits)\n if not isinstance(trials, int):\n raise TypeError(\"trials must be an integer\")\n\n payload = {\n \"type\": TYPE_MULTISHOT_MEASURE,\n \"qubits\": list(qubits),\n \"trials\": trials,\n \"compiled-quil\": quil_program.out(calibrations=False),\n }\n\n if random_seed is not None:\n payload[\"rng-seed\"] = random_seed\n\n return payload\n\n\ndef wavefunction_payload(quil_program: Program, random_seed: int) -> Dict[str, object]:\n \"\"\"REST payload for :py:func:`ForestConnection._wavefunction`\"\"\"\n if not isinstance(quil_program, Program):\n raise TypeError(\"quil_program must be a Quil program object\")\n\n payload: Dict[str, object] = {\n \"type\": TYPE_WAVEFUNCTION,\n \"compiled-quil\": quil_program.out(calibrations=False),\n }\n\n if random_seed is not None:\n payload[\"rng-seed\"] = random_seed\n\n return payload\n\n\ndef expectation_payload(\n prep_prog: Program, operator_programs: Optional[Iterable[Program]], random_seed: int\n) -> Dict[str, object]:\n \"\"\"REST payload for :py:func:`ForestConnection._expectation`\"\"\"\n if operator_programs is None:\n operator_programs = [Program()]\n\n if not isinstance(prep_prog, Program):\n raise TypeError(\"prep_prog variable must be a Quil program object\")\n\n payload: Dict[str, object] = {\n \"type\": TYPE_EXPECTATION,\n \"state-preparation\": prep_prog.out(calibrations=False),\n \"operators\": [x.out(calibrations=False) for x in operator_programs],\n }\n\n if random_seed is not None:\n payload[\"rng-seed\"] = random_seed\n\n return payload\n\n\ndef qvm_run_payload(\n quil_program: Program,\n classical_addresses: Dict[str, Union[bool, Sequence[int]]],\n trials: int,\n measurement_noise: Optional[Tuple[float, float, float]],\n gate_noise: Optional[Tuple[float, float, float]],\n random_seed: Optional[int],\n) -> Dict[str, object]:\n \"\"\"REST payload for :py:func:`ForestConnection._qvm_run`\"\"\"\n if not quil_program:\n raise ValueError(\n \"You have attempted to run an empty program.\"\n \" Please provide gates or measure instructions to your program.\"\n )\n if not isinstance(quil_program, Program):\n raise TypeError(\"quil_program must be a Quil program object\")\n classical_addresses = 
prepare_register_list(classical_addresses)\n if not isinstance(trials, int):\n raise TypeError(\"trials must be an integer\")\n\n payload = {\n \"type\": TYPE_MULTISHOT,\n \"addresses\": classical_addresses,\n \"trials\": trials,\n \"compiled-quil\": quil_program.out(calibrations=False),\n }\n\n if measurement_noise is not None:\n payload[\"measurement-noise\"] = measurement_noise\n if gate_noise is not None:\n payload[\"gate-noise\"] = gate_noise\n if random_seed is not None:\n payload[\"rng-seed\"] = random_seed\n\n return payload\n\n\nclass ForestSession(requests.Session):\n \"\"\"\n ForestSession inherits from requests.Session. It is responsible for adding\n authentication headers to Forest server requests. Upon receiving a 401 or 403\n response, it will attempt to refresh the auth credential and update the\n PyquilConfig, which in turn writes the refreshed auth credential to file.\n\n Encapsulates the operations required for authorization & encryption\n with the QPU.\n\n Two operations are involved in authorization:\n\n * Requesting & storing a user authentication token, used to authenticate calls\n to Forest, Dispatch, and other Rigetti services\n * Requesting a Curve ZeroMQ keypair for connection to the QPU. The response to\n this request also comes with service endpoints: compiler server and QPU\n\n The authentication tokens are of the standard JWT format and are issued by Forest Server.\n\n The refresh token is only used to renew the access token, which is used for all transactions\n and is valid for a short period of time.\n\n In wrapping the PyQuilConfig object, it provides that object with a callback to\n retrieve a valid engagement when needed, because the engagement is maintained here\n but is used by the config to provide service endpoints.\n \"\"\"\n\n def __init__(self, *, config: PyquilConfig, lattice_name: Optional[str] = None):\n super().__init__()\n self.config = config\n self.config.get_engagement = self.get_engagement\n self._engagement: Optional[\"Engagement\"] = None\n self.headers.update(self.config.qcs_auth_headers)\n self.headers[\"User-Agent\"] = f\"PyQuil/{__version__}\"\n self.lattice_name = lattice_name\n\n def _engage(self) -> Optional[\"Engagement\"]:\n \"\"\"\n The heart of the QPU authorization process, ``engage`` makes a request to\n the dispatch server for the information needed to communicate with the QPU.\n\n This is a standard GraphQL request, authenticated using the access token\n retrieved from Forest Server.\n\n The response includes the endpoints to the QPU and QPU Compiler Server,\n along with the set of keys necessary to connect to the QPU and the time at\n which that key set expires.\n \"\"\"\n query = \"\"\"\n mutation Engage($name: String!) 
{\n engage(input: { lattice: { name: $name }}) {\n success\n message\n engagement {\n type\n qpu {\n endpoint\n credentials {\n clientPublic\n clientSecret\n serverPublic\n }\n }\n compiler {\n endpoint\n }\n expiresAt\n }\n }\n }\n \"\"\"\n if not self.lattice_name:\n logger.debug(\"ForestSession requires lattice_name in order to engage\")\n return None\n\n logger.debug(\"Requesting engagement from %s\", self.config.dispatch_url)\n variables = dict(name=self.lattice_name)\n query_response = self._request_graphql_retry(\n self.config.dispatch_url, query=query, variables=variables\n )\n\n if query_response.get(\"errors\"):\n errors = query_response.get(\"errors\", [])\n error_messages = map(lambda error: error[\"message\"], errors) # type: ignore\n raise UserMessageError(f\"Failed to engage: {','.join(error_messages)}\")\n\n engagement_response = query_response.get(\"data\", {}).get(\"engage\", None)\n if engagement_response and engagement_response.get(\"success\") is True:\n logger.debug(\"Engagement successful\")\n engagement_data = engagement_response.get(\"engagement\", {})\n return Engagement(\n client_secret_key=engagement_data.get(\"qpu\", {})\n .get(\"credentials\", {})\n .get(\"clientSecret\", \"\")\n .encode(\"utf-8\"),\n client_public_key=engagement_data.get(\"qpu\", {})\n .get(\"credentials\", {})\n .get(\"clientPublic\", \"\")\n .encode(\"utf-8\"),\n server_public_key=engagement_data.get(\"qpu\", {})\n .get(\"credentials\", {})\n .get(\"serverPublic\", \"\")\n .encode(\"utf-8\"),\n expires_at=engagement_data.get(\"expiresAt\", {}),\n qpu_endpoint=engagement_data.get(\"qpu\", {}).get(\"endpoint\"),\n qpu_compiler_endpoint=engagement_data.get(\"compiler\", {}).get(\"endpoint\"),\n )\n else:\n raise UserMessageError(\n f\"Unable to engage {self.lattice_name}: \"\n f\"{engagement_response.get('message', 'No message')}\"\n )\n\n def get_engagement(self) -> Optional[\"Engagement\"]:\n \"\"\"\n Returns memoized engagement information, if still valid - or requests a new engagement\n and then stores and returns that.\n \"\"\"\n if not (self._engagement and self._engagement.is_valid()):\n self._engagement = self._engage()\n return self._engagement\n\n def _refresh_auth_token(self) -> bool:\n self.config.assert_valid_auth_credential()\n if self.config.user_auth_token is not None:\n return self._refresh_user_auth_token()\n elif self.config.qmi_auth_token is not None:\n return self._refresh_qmi_auth_token()\n return False\n\n def _refresh_user_auth_token(self) -> bool:\n url = f\"{self.config.forest_url}/auth/idp/oauth2/v1/token\"\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Cache-Control\": \"no-cache\",\n \"Accept\": \"application/json\",\n }\n assert self.config.user_auth_token is not None\n data = {\n \"grant_type\": \"refresh_token\",\n \"scope\": self.config.user_auth_token[\"scope\"],\n \"refresh_token\": self.config.user_auth_token[\"refresh_token\"],\n }\n response = super().request(\"POST\", url, data=data, headers=headers)\n if response.status_code == 200:\n self.config.update_user_auth_token(response.json())\n self.headers.update(self.config.qcs_auth_headers)\n return True\n\n logger.warning(\n f\"Failed to refresh your user auth token at {self.config.user_auth_token_path}. 
\"\n f\"Server response: {response.text}\"\n )\n return False\n\n def _refresh_qmi_auth_token(self) -> bool:\n url = f\"{self.config.forest_url}/auth/qmi/refresh\"\n headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\n data = self.config.qmi_auth_token\n response = super().request(\"POST\", url, json=data, headers=headers)\n if response.status_code == 200:\n self.config.update_qmi_auth_token(response.json())\n self.headers.update(self.config.qcs_auth_headers)\n return True\n\n logger.warning(\n f\"Failed to refresh your QMI auth token at {self.config.qmi_auth_token_path}. \"\n f\"Server response: {response.text}\"\n )\n return False\n\n def request(self, *args: Any, **kwargs: Any) -> requests.models.Response:\n \"\"\"\n request is a wrapper around requests.Session#request that checks for\n 401 and 403 response statuses and refreshes the auth credential\n accordingly.\n \"\"\"\n response = super().request(*args, **kwargs)\n if response.status_code in {401, 403}:\n if self._refresh_auth_token():\n response = super().request(*args, **kwargs)\n return response\n\n def _request_graphql(self, url: str, query: str, variables: Dict[Any, Any]) -> Dict[Any, Any]:\n \"\"\"\n Makes a single graphql request using the session credentials, throwing an error\n if the response is not valid JSON.\n\n Returns the JSON parsed from the response.\n \"\"\"\n response = super().post(url, json=dict(query=query, variables=variables))\n try:\n return cast(Dict[Any, Any], response.json())\n except JSONDecodeError as e:\n logger.exception(f\"Unable to parse json response from endpoint {url}:\", response.text)\n raise e\n\n def _request_graphql_retry(self, *args: Any, **kwargs: Any) -> Dict[Any, Any]:\n \"\"\"\n Makes a GraphQL request using session credentials, refreshing them once if the server\n identifies them as expired.\n\n Determining whether a call has failed to a GraphQL endpoint is less axiomatic than for a\n REST interface, and so here we follow the pattern set by Rigetti services, which return an\n HTTP 200 response with an array of errors. If any of those errors cite an expired\n authentication token, we refresh the token to clear that error. 
Note that other error\n messages will not trigger a retry.\n \"\"\"\n result = self._request_graphql(*args, **kwargs)\n errors = result.get(\"errors\", [])\n token_is_expired = any(\n error.get(\"extensions\", {}).get(\"code\") == \"AUTH_TOKEN_EXPIRED\" for error in errors\n )\n if token_is_expired:\n if self._refresh_auth_token():\n result = self._request_graphql(*args, **kwargs)\n return result\n\n\nclass ForestConnection:\n @_record_call\n def __init__(\n self,\n sync_endpoint: Optional[str] = None,\n compiler_endpoint: Optional[str] = None,\n forest_cloud_endpoint: Optional[str] = None,\n ):\n \"\"\"\n Represents a connection to Forest containing methods to wrap all possible API endpoints.\n\n Users should not use methods from this class directly.\n\n :param sync_endpoint: The endpoint of the server for running QVM jobs\n :param compiler_endpoint: The endpoint of the server for running quilc compiler jobs\n :param forest_cloud_endpoint: The endpoint of the forest cloud server\n \"\"\"\n pyquil_config = PyquilConfig()\n if sync_endpoint is None:\n sync_endpoint = pyquil_config.qvm_url\n if compiler_endpoint is None:\n compiler_endpoint = pyquil_config.quilc_url\n if forest_cloud_endpoint is None:\n forest_cloud_endpoint = pyquil_config.forest_url\n\n assert sync_endpoint is not None\n self.sync_endpoint = sync_endpoint\n self.compiler_endpoint = compiler_endpoint\n self.forest_cloud_endpoint = forest_cloud_endpoint\n self.session = get_session(config=pyquil_config)\n\n @_record_call\n def _run_and_measure(\n self, quil_program: Program, qubits: Sequence[int], trials: int, random_seed: int\n ) -> np.ndarray:\n \"\"\"\n Run a Forest ``run_and_measure`` job.\n\n Users should use :py:func:`WavefunctionSimulator.run_and_measure` instead of calling\n this directly.\n \"\"\"\n payload = run_and_measure_payload(quil_program, qubits, trials, random_seed)\n response = post_json(self.session, self.sync_endpoint + \"/qvm\", payload)\n return np.asarray(response.json())\n\n @_record_call\n def _wavefunction(self, quil_program: Program, random_seed: int) -> Wavefunction:\n \"\"\"\n Run a Forest ``wavefunction`` job.\n\n Users should use :py:func:`WavefunctionSimulator.wavefunction` instead of calling\n this directly.\n \"\"\"\n\n payload = wavefunction_payload(quil_program, random_seed)\n response = post_json(self.session, self.sync_endpoint + \"/qvm\", payload)\n return Wavefunction.from_bit_packed_string(response.content)\n\n @_record_call\n def _expectation(\n self, prep_prog: Program, operator_programs: Iterable[Program], random_seed: int\n ) -> np.ndarray:\n \"\"\"\n Run a Forest ``expectation`` job.\n\n Users should use :py:func:`WavefunctionSimulator.expectation` instead of calling\n this directly.\n \"\"\"\n if isinstance(operator_programs, Program):\n warnings.warn(\n \"You have provided a Program rather than a list of Programs. 
The results \"\n \"from expectation will be line-wise expectation values of the \"\n \"operator_programs.\",\n SyntaxWarning,\n )\n\n payload = expectation_payload(prep_prog, operator_programs, random_seed)\n response = post_json(self.session, self.sync_endpoint + \"/qvm\", payload)\n return np.asarray(response.json())\n\n @_record_call\n def _qvm_run(\n self,\n quil_program: Program,\n classical_addresses: Dict[str, Union[bool, Sequence[int]]],\n trials: int,\n measurement_noise: Optional[Tuple[float, float, float]],\n gate_noise: Optional[Tuple[float, float, float]],\n random_seed: Optional[int],\n ) -> Dict[str, np.ndarray]:\n \"\"\"\n Run a Forest ``run`` job on a QVM.\n\n Users should use :py:func:`QVM.run` instead of calling this directly.\n \"\"\"\n payload = qvm_run_payload(\n quil_program, classical_addresses, trials, measurement_noise, gate_noise, random_seed\n )\n response = post_json(self.session, self.sync_endpoint + \"/qvm\", payload)\n\n ram: Dict[str, np.ndarray] = {key: np.array(val) for key, val in response.json().items()}\n\n for k in ram.keys():\n ram[k] = np.array(ram[k])\n\n return ram\n\n @_record_call\n def _qvm_get_version_info(self) -> str:\n \"\"\"\n Return version information for the QVM.\n\n :return: String of QVM version\n \"\"\"\n response = post_json(self.session, self.sync_endpoint, {\"type\": \"version\"})\n split_version_string = response.text.split()\n try:\n qvm_version = split_version_string[0]\n except ValueError:\n raise TypeError(f\"Malformed version string returned by the QVM: {response.text}\")\n return qvm_version\n\n\nclass Engagement:\n \"\"\"\n An Engagement stores all the information retrieved via an engagement request sent to\n the dispatch server.\n \"\"\"\n\n def __init__(\n self,\n client_public_key: bytes,\n client_secret_key: bytes,\n server_public_key: bytes,\n expires_at: Union[int, float, str],\n qpu_endpoint: str,\n qpu_compiler_endpoint: str,\n ):\n self.client_public_key = client_public_key\n self.client_secret_key = client_secret_key\n self.server_public_key = server_public_key\n self.expires_at = float(expires_at) if expires_at else None\n self.qpu_endpoint = qpu_endpoint\n self.qpu_compiler_endpoint = qpu_compiler_endpoint\n logger.debug(\"New engagement created: \\n%s\", self)\n\n def is_valid(self) -> bool:\n \"\"\"\n Return true if an engagement is valid for use, false if it is missing required\n fields\n\n An 'invalid' engagement is one which will not grant access to the QPU.\n \"\"\"\n return all(\n [\n self.client_public_key is not None,\n self.client_secret_key is not None,\n self.server_public_key is not None,\n (self.expires_at is None or self.expires_at > time.time()),\n self.qpu_endpoint is not None,\n ]\n )\n\n def __str__(self) -> str:\n return f\"\"\"Client public key: {self.client_public_key}\nClient secret key: masked ({len(self.client_secret_key)} B)\nServer public key: {self.server_public_key}\nExpiration time: {self.expires_at}\nQPU Endpoint: {self.qpu_endpoint}\nQPU Compiler Endpoint: {self.qpu_compiler_endpoint}\"\"\" # type: ignore\n"
]
| [
[
"numpy.array"
]
]
|
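The `get_session` helper in the row above shows a reusable pattern: mounting an `HTTPAdapter` carrying a `urllib3` `Retry` policy so that transient gateway failures are retried with exponential backoff. A minimal standalone sketch of that pattern (note the snippet's `method_whitelist` argument was renamed `allowed_methods` in urllib3 1.26; this sketch assumes a recent urllib3):

```python
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_retrying_session() -> requests.Session:
    session = requests.Session()
    retry = Retry(
        total=3,                         # at most three retries per request
        allowed_methods=["POST"],        # POST is not retried unless listed
        status_forcelist=[502, 503, 504, 521, 523],
        backoff_factor=0.2,              # exponential backoff between attempts
        raise_on_status=False,           # hand back the final response, don't raise
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    return session
```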
KIM-HC/ai-imu-dr | [
"023a71734016e39d52f79fe8080eaf553c06991a"
]
| [
"src/utils_torch_filter.py"
]
| [
"import torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport time\nfrom termcolor import cprint\nfrom utils_numpy_filter import NUMPYIEKF\nfrom utils import prepare_data\n\nclass InitProcessCovNet(torch.nn.Module):\n\n def __init__(self):\n super(InitProcessCovNet, self).__init__()\n\n self.beta_process = 3*torch.ones(2).double()\n self.beta_initialization = 3*torch.ones(2).double()\n\n self.factor_initial_covariance = torch.nn.Linear(1, 6, bias=False).double()\n \"\"\"parameters for initializing covariance\"\"\"\n self.factor_initial_covariance.weight.data[:] /= 10\n\n self.factor_process_covariance = torch.nn.Linear(1, 6, bias=False).double()\n \"\"\"parameters for process noise covariance\"\"\"\n self.factor_process_covariance.weight.data[:] /= 10\n self.tanh = torch.nn.Tanh()\n\n def forward(self, iekf):\n return\n\n def init_cov(self, iekf):\n alpha = self.factor_initial_covariance(torch.ones(1).double()).squeeze()\n beta = 10**(self.tanh(alpha))\n return beta\n\n def init_processcov(self, iekf):\n alpha = self.factor_process_covariance(torch.ones(1).double())\n beta = 10**(self.tanh(alpha))\n return beta\n\n\n### network for measurement noise covariance\nclass MesNet(torch.nn.Module):\n def __init__(self):\n super(MesNet, self).__init__()\n self.beta_measurement = 3*torch.ones(2).double()\n self.tanh = torch.nn.Tanh()\n\n self.cov_net = torch.nn.Sequential(torch.nn.Conv1d(6, 32, 5),\n torch.nn.ReplicationPad1d(4),\n torch.nn.ReLU(),\n torch.nn.Dropout(p=0.5),\n torch.nn.Conv1d(32, 32, 5, dilation=3),\n torch.nn.ReplicationPad1d(4),\n torch.nn.ReLU(),\n torch.nn.Dropout(p=0.5),\n ).double()\n \"CNN for measurement covariance\"\n self.cov_lin = torch.nn.Sequential(torch.nn.Linear(32, 2),\n torch.nn.Tanh(),\n ).double()\n self.cov_lin[0].bias.data[:] /= 100\n self.cov_lin[0].weight.data[:] /= 100\n\n def forward(self, u, iekf):\n y_cov = self.cov_net(u).transpose(0, 2).squeeze()\n z_cov = self.cov_lin(y_cov)\n z_cov_net = self.beta_measurement.unsqueeze(0)*z_cov\n measurements_covs = (iekf.cov0_measurement.unsqueeze(0) * (10**z_cov_net))\n return measurements_covs\n\n\nclass TORCHIEKF(torch.nn.Module, NUMPYIEKF):\n Id1 = torch.eye(1).double()\n Id2 = torch.eye(2).double()\n Id3 = torch.eye(3).double()\n Id6 = torch.eye(6).double()\n IdP = torch.eye(21).double()\n\n def __init__(self, parameter_class=None):\n torch.nn.Module.__init__(self)\n NUMPYIEKF.__init__(self, parameter_class=None)\n\n # mean and standard deviation of parameters for normalizing inputs\n self.u_loc = None\n self.u_std = None\n self.initprocesscov_net = InitProcessCovNet()\n self.mes_net = MesNet()\n self.cov0_measurement = None\n\n # modified parameters\n self.IdP = torch.eye(self.P_dim).double()\n\n if parameter_class is not None:\n self.filter_parameters = parameter_class()\n self.set_param_attr()\n\n ### get parameter from self.filter_parameters = KITTIParameters\n def set_param_attr(self):\n # get a list of attribute only\n attr_list = [a for a in dir(self.filter_parameters) if not a.startswith('__')\n and not callable(getattr(self.filter_parameters, a))]\n for attr in attr_list:\n setattr(self, attr, getattr(self.filter_parameters, attr))\n\n self.Q = torch.diag(torch.Tensor([self.cov_omega, self.cov_omega, self. 
cov_omega,\n self.cov_acc, self.cov_acc, self.cov_acc,\n self.cov_b_omega, self.cov_b_omega, self.cov_b_omega,\n self.cov_b_acc, self.cov_b_acc, self.cov_b_acc,\n self.cov_Rot_c_i, self.cov_Rot_c_i, self.cov_Rot_c_i,\n self.cov_t_c_i, self.cov_t_c_i, self.cov_t_c_i])\n ).double()\n self.cov0_measurement = torch.Tensor([self.cov_lat, self.cov_up]).double()\n\n ### move, get measurements_covs(which we get values from learning model)\n def run(self, t, u, measurements_covs, v_mes, p_mes, N, ang0):\n\n dt = t[1:] - t[:-1] # (s)\n Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P = self.init_run(dt, u, p_mes, v_mes,\n N, ang0)\n\n for i in range(1, N):\n Rot_i, v_i, p_i, b_omega_i, b_acc_i, Rot_c_i_i, t_c_i_i, P_i = \\\n self.propagate(Rot[i-1], v[i-1], p[i-1], b_omega[i-1], b_acc[i-1], Rot_c_i[i-1],\n t_c_i[i-1], P, u[i], dt[i-1])\n\n Rot[i], v[i], p[i], b_omega[i], b_acc[i], Rot_c_i[i], t_c_i[i], P = \\\n self.update(Rot_i, v_i, p_i, b_omega_i, b_acc_i, Rot_c_i_i, t_c_i_i, P_i,\n u[i], i, measurements_covs[i])\n return Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i\n\n ### init\n def init_run(self, dt, u, p_mes, v_mes, N, ang0):\n Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i = \\\n self.init_saved_state(dt, N, ang0)\n Rot[0] = self.from_rpy(ang0[0], ang0[1], ang0[2])\n v[0] = v_mes[0]\n P = self.init_covariance()\n return Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P\n\n ### init KF covariance P with learning model init_cov\n def init_covariance(self):\n beta = self.initprocesscov_net.init_cov(self)\n P = torch.zeros(self.P_dim, self.P_dim).double()\n P[:2, :2] = self.cov_Rot0*beta[0]*self.Id2 # no yaw error\n P[3:5, 3:5] = self.cov_v0*beta[1]*self.Id2\n P[9:12, 9:12] = self.cov_b_omega0*beta[2]*self.Id3\n P[12:15, 12:15] = self.cov_b_acc0*beta[3]*self.Id3\n P[15:18, 15:18] = self.cov_Rot_c_i0*beta[4]*self.Id3\n P[18:21, 18:21] = self.cov_t_c_i0*beta[5]*self.Id3\n return P\n\n\n ### makes saved state to zero ...?\n def init_saved_state(self, dt, N, ang0):\n Rot = dt.new_zeros(N, 3, 3)\n v = dt.new_zeros(N, 3)\n p = dt.new_zeros(N, 3)\n b_omega = dt.new_zeros(N, 3)\n b_acc = dt.new_zeros(N, 3)\n Rot_c_i = dt.new_zeros(N, 3, 3)\n t_c_i = dt.new_zeros(N, 3)\n Rot_c_i[0] = torch.eye(3).double()\n return Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i\n\n ### same calculations, differences came from using torch framework instead of numpy framework\n def propagate(self, Rot_prev, v_prev, p_prev, b_omega_prev, b_acc_prev, Rot_c_i_prev, t_c_i_prev,\n P_prev, u, dt):\n Rot_prev = Rot_prev.clone()\n acc_b = u[3:6] - b_acc_prev\n acc = Rot_prev.mv(acc_b) + self.g\n v = v_prev + acc * dt\n p = p_prev + v_prev.clone() * dt + 1/2 * acc * dt**2\n\n omega = (u[:3] - b_omega_prev)*dt\n Rot = Rot_prev.mm(self.so3exp(omega))\n\n b_omega = b_omega_prev\n b_acc = b_acc_prev\n Rot_c_i = Rot_c_i_prev.clone()\n t_c_i = t_c_i_prev\n\n P = self.propagate_cov(P_prev, Rot_prev, v_prev, p_prev, b_omega_prev, b_acc_prev,\n u, dt)\n return Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P\n\n ### same calculations, differences came from using torch framework instead of numpy framework\n def propagate_cov(self, P, Rot_prev, v_prev, p_prev, b_omega_prev, b_acc_prev, u,\n dt):\n\n F = P.new_zeros(self.P_dim, self.P_dim)\n G = P.new_zeros(self.P_dim, self.Q.shape[0])\n Q = self.Q.clone()\n F[3:6, :3] = self.skew(self.g)\n F[6:9, 3:6] = self.Id3\n G[3:6, 3:6] = Rot_prev\n F[3:6, 12:15] = -Rot_prev\n v_skew_rot = self.skew(v_prev).mm(Rot_prev)\n p_skew_rot = self.skew(p_prev).mm(Rot_prev)\n G[:3, :3] = Rot_prev\n G[3:6, :3] = v_skew_rot\n G[6:9, :3] = 
p_skew_rot\n F[:3, 9:12] = -Rot_prev\n F[3:6, 9:12] = -v_skew_rot\n F[6:9, 9:12] = -p_skew_rot\n G[9:12, 6:9] = self.Id3\n G[12:15, 9:12] = self.Id3\n G[15:18, 12:15] = self.Id3\n G[18:21, 15:18] = self.Id3\n\n F = F * dt\n G = G * dt\n F_square = F.mm(F)\n F_cube = F_square.mm(F)\n Phi = self.IdP + F + 1/2*F_square + 1/6*F_cube\n P_new = Phi.mm(P + G.mm(Q).mm(G.t())).mm(Phi.t())\n return P_new\n\n def update(self, Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P, u, i, measurement_cov):\n # orientation of body frame\n Rot_body = Rot.mm(Rot_c_i)\n # velocity in imu frame\n v_imu = Rot.t().mv(v)\n omega = u[:3] - b_omega\n # velocity in body frame\n v_body = Rot_c_i.t().mv(v_imu) + self.skew(t_c_i).mv(omega)\n Omega = self.skew(omega)\n # Jacobian in car frame\n H_v_imu = Rot_c_i.t().mm(self.skew(v_imu))\n H_t_c_i = self.skew(t_c_i)\n\n H = P.new_zeros(2, self.P_dim)\n H[:, 3:6] = Rot_body.t()[1:]\n H[:, 15:18] = H_v_imu[1:]\n H[:, 9:12] = H_t_c_i[1:]\n H[:, 18:21] = -Omega[1:]\n r = - v_body[1:]\n R = torch.diag(measurement_cov)\n\n Rot_up, v_up, p_up, b_omega_up, b_acc_up, Rot_c_i_up, t_c_i_up, P_up = \\\n self.state_and_cov_update(Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P, H, r, R)\n return Rot_up, v_up, p_up, b_omega_up, b_acc_up, Rot_c_i_up, t_c_i_up, P_up\n\n\n @staticmethod\n def state_and_cov_update(Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P, H, r, R):\n S = H.mm(P).mm(H.t()) + R\n Kt, _ = torch.solve(P.mm(H.t()).t(), S)\n K = Kt.t()\n dx = K.mv(r.view(-1))\n\n dR, dxi = TORCHIEKF.sen3exp(dx[:9])\n dv = dxi[:, 0]\n dp = dxi[:, 1]\n Rot_up = dR.mm(Rot)\n v_up = dR.mv(v) + dv\n p_up = dR.mv(p) + dp\n\n b_omega_up = b_omega + dx[9:12]\n b_acc_up = b_acc + dx[12:15]\n\n dR = TORCHIEKF.so3exp(dx[15:18])\n Rot_c_i_up = dR.mm(Rot_c_i)\n t_c_i_up = t_c_i + dx[18:21]\n\n I_KH = TORCHIEKF.IdP - K.mm(H)\n P_upprev = I_KH.mm(P).mm(I_KH.t()) + K.mm(R).mm(K.t())\n P_up = (P_upprev + P_upprev.t())/2\n return Rot_up, v_up, p_up, b_omega_up, b_acc_up, Rot_c_i_up, t_c_i_up, P_up\n\n @staticmethod\n def skew(x):\n X = torch.Tensor([[0, -x[2], x[1]],\n [x[2], 0, -x[0]],\n [-x[1], x[0], 0]]).double()\n return X\n\n @staticmethod\n def rot_from_2_vectors(v1, v2):\n \"\"\" Returns a Rotation matrix between vectors 'v1' and 'v2' \"\"\"\n v1 = v1/torch.norm(v1)\n v2 = v2/torch.norm(v2)\n v = torch.cross(v1, v2)\n cosang = v1.matmul(v2)\n sinang = torch.norm(v)\n Rot = TORCHIEKF.Id3 + TORCHIEKF.skew(v) + \\\n TORCHIEKF.skew(v).mm(TORCHIEKF.skew(v))*(1-cosang)/(sinang**2)\n return Rot\n\n @staticmethod\n def sen3exp(xi):\n phi = xi[:3]\n angle = torch.norm(phi)\n\n # Near |phi|==0, use first order Taylor expansion\n if isclose(angle, 0.):\n skew_phi = torch.Tensor([[0, -phi[2], phi[1]],\n [phi[2], 0, -phi[0]],\n [-phi[1], phi[0], 0]]).double()\n J = TORCHIEKF.Id3 + 0.5 * skew_phi\n Rot = TORCHIEKF.Id3 + skew_phi\n else:\n axis = phi / angle\n skew_axis = torch.Tensor([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]]).double()\n s = torch.sin(angle)\n c = torch.cos(angle)\n\n J = (s / angle) * TORCHIEKF.Id3 + (1 - s / angle) * TORCHIEKF.outer(axis, axis)\\\n + ((1 - c) / angle) * skew_axis\n Rot = c * TORCHIEKF.Id3 + (1 - c) * TORCHIEKF.outer(axis, axis) \\\n + s * skew_axis\n\n x = J.mm(xi[3:].view(-1, 3).t())\n return Rot, x\n\n @staticmethod\n def so3exp(phi):\n angle = phi.norm()\n\n # Near phi==0, use first order Taylor expansion\n if isclose(angle, 0.):\n skew_phi = torch.Tensor([[0, -phi[2], phi[1]],\n [phi[2], 0, -phi[0]],\n [-phi[1], phi[0], 0]]).double()\n Xi = 
TORCHIEKF.Id3 + skew_phi\n return Xi\n axis = phi / angle\n skew_axis = torch.Tensor([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]]).double()\n c = angle.cos()\n s = angle.sin()\n Xi = c * TORCHIEKF.Id3 + (1 - c) * TORCHIEKF.outer(axis, axis) \\\n + s * skew_axis\n return Xi\n\n @staticmethod\n def outer(a, b):\n ab = a.view(-1, 1)*b.view(1, -1)\n return ab\n\n @staticmethod\n def so3left_jacobian(phi):\n angle = torch.norm(phi)\n\n # Near |phi|==0, use first order Taylor expansion\n if isclose(angle, 0.):\n skew_phi = torch.Tensor([[0, -phi[2], phi[1]],\n [phi[2], 0, -phi[0]],\n [-phi[1], phi[0], 0]]).double()\n return TORCHIEKF.Id3 + 0.5 * skew_phi\n\n axis = phi / angle\n skew_axis = torch.Tensor([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]]).double()\n s = torch.sin(angle)\n c = torch.cos(angle)\n\n return (s / angle) * TORCHIEKF.Id3 + (1 - s / angle) * TORCHIEKF.outer(axis, axis)\\\n + ((1 - c) / angle) * skew_axis\n\n @staticmethod\n def to_rpy(Rot):\n \"\"\"Convert a rotation matrix to RPY Euler angles.\"\"\"\n\n pitch = torch.atan2(-Rot[2, 0], torch.sqrt(Rot[0, 0]**2 + Rot[1, 0]**2))\n\n if isclose(pitch, np.pi / 2.):\n yaw = pitch.new_zeros(1)\n roll = torch.atan2(Rot[0, 1], Rot[1, 1])\n elif isclose(pitch, -np.pi / 2.):\n yaw = pitch.new_zeros(1)\n roll = -torch.atan2(Rot[0, 1], Rot[1, 1])\n else:\n sec_pitch = 1. / pitch.cos()\n yaw = torch.atan2(Rot[1, 0] * sec_pitch, Rot[0, 0] * sec_pitch)\n roll = torch.atan2(Rot[2, 1] * sec_pitch, Rot[2, 2] * sec_pitch)\n return roll, pitch, yaw\n\n @staticmethod\n def from_rpy(roll, pitch, yaw):\n \"\"\"Form a rotation matrix from RPY Euler angles.\"\"\"\n\n return TORCHIEKF.rotz(yaw).mm(TORCHIEKF.roty(pitch).mm(TORCHIEKF.rotx(roll)))\n\n @staticmethod\n def rotx(t):\n \"\"\"Rotation about the x-axis.\"\"\"\n\n c = torch.cos(t)\n s = torch.sin(t)\n return t.new([[1, 0, 0],\n [0, c, -s],\n [0, s, c]])\n\n @staticmethod\n def roty(t):\n \"\"\"Rotation about the y-axis.\"\"\"\n\n c = torch.cos(t)\n s = torch.sin(t)\n return t.new([[c, 0, s],\n [0, 1, 0],\n [-s, 0, c]])\n\n @staticmethod\n def rotz(t):\n \"\"\"Rotation about the z-axis.\"\"\"\n\n c = torch.cos(t)\n s = torch.sin(t)\n return t.new([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])\n\n @staticmethod\n def normalize_rot(rot):\n # U, S, V = torch.svd(A) returns the singular value\n # decomposition of a real matrix A of size (n x m) such that A=USV′.\n # Irrespective of the original strides, the returned matrix U will\n # be transposed, i.e. with strides (1, n) instead of (n, 1).\n\n # pytorch SVD seems to be inaccurate, so just move to numpy immediately\n U, _, V = torch.svd(rot)\n S = torch.eye(3).double()\n S[2, 2] = torch.det(U) * torch.det(V)\n return U.mm(S).mm(V.t())\n\n ### feedforward and get measurements_covs\n def forward_nets(self, u):\n ### t(): transpose\n ### unsqueeze(dim=0): extends dimension by (dim+1)\n u_n = self.normalize_u(u).t().unsqueeze(0)\n ### get imu from 0 to 5???\n ### why 6?? 
-> angular velocity and acceleration!!!\n u_n = u_n[:, :6]\n print(f\"testing u_n.size(): {u_n.size()}\")\n measurements_covs = self.mes_net(u_n, self)\n return measurements_covs\n\n def normalize_u(self, u):\n return (u-self.u_loc)/self.u_std\n\n def get_normalize_u(self, dataset):\n self.u_loc = dataset.normalize_factors['u_loc'].double()\n self.u_std = dataset.normalize_factors['u_std'].double()\n\n def set_Q(self):\n \"\"\"\n Update the process noise covariance\n :return:\n \"\"\"\n ### guessing this Q is the Q in equation(19) in the paper.\n self.Q = torch.diag(torch.Tensor([self.cov_omega, self.cov_omega, self. cov_omega,\n self.cov_acc, self.cov_acc, self.cov_acc,\n self.cov_b_omega, self.cov_b_omega, self.cov_b_omega,\n self.cov_b_acc, self.cov_b_acc, self.cov_b_acc,\n self.cov_Rot_c_i, self.cov_Rot_c_i, self.cov_Rot_c_i,\n self.cov_t_c_i, self.cov_t_c_i, self.cov_t_c_i])\n ).double()\n\n ### But, this paper said it learns Q, which is below part.\n ### Upper part is not used.\n beta = self.initprocesscov_net.init_processcov(self)\n self.Q = torch.zeros(self.Q.shape[0], self.Q.shape[0]).double()\n self.Q[:3, :3] = self.cov_omega*beta[0]*self.Id3\n self.Q[3:6, 3:6] = self.cov_acc*beta[1]*self.Id3\n self.Q[6:9, 6:9] = self.cov_b_omega*beta[2]*self.Id3\n self.Q[9:12, 9:12] = self.cov_b_acc*beta[3]*self.Id3\n self.Q[12:15, 12:15] = self.cov_Rot_c_i*beta[4]*self.Id3\n self.Q[15:18, 15:18] = self.cov_t_c_i*beta[5]*self.Id3\n\n def load(self, args, dataset):\n path_iekf = os.path.join(args.path_temp, \"iekfnets.p\")\n if os.path.isfile(path_iekf):\n mondict = torch.load(path_iekf)\n self.load_state_dict(mondict)\n cprint(\"IEKF nets loaded\", 'green')\n else:\n cprint(\"IEKF nets NOT loaded\", 'yellow')\n self.get_normalize_u(dataset)\n\n\ndef isclose(mat1, mat2, tol=1e-10):\n return (mat1 - mat2).abs().lt(tol)\n\n\ndef prepare_filter(args, dataset):\n torch_iekf = TORCHIEKF()\n torch_iekf.load(args, dataset)\n torch_iekf = TORCHIEKF()\n\n # set dataset parameter\n torch_iekf.filter_parameters = args.parameter_class()\n torch_iekf.set_param_attr()\n if type(torch_iekf.g).__module__ == np.__name__:\n torch_iekf.g = torch.from_numpy(torch_iekf.g).double()\n\n # load model\n torch_iekf.load(args, dataset)\n torch_iekf.get_normalize_u(dataset)\n\n iekf = NUMPYIEKF(args.parameter_class)\n iekf.set_learned_covariance(torch_iekf)\n return iekf, torch_iekf\n"
]
| [
[
"torch.svd",
"torch.sin",
"torch.load",
"torch.zeros",
"torch.nn.Dropout",
"torch.norm",
"torch.ones",
"torch.sqrt",
"torch.eye",
"torch.from_numpy",
"torch.cos",
"torch.nn.ReplicationPad1d",
"torch.nn.Linear",
"torch.nn.Conv1d",
"torch.diag",
"torch.atan2",
"torch.nn.Module.__init__",
"torch.Tensor",
"torch.det",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.cross"
]
]
|
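`TORCHIEKF.so3exp` in the row above is the closed-form SO(3) exponential map (Rodrigues' formula), with a first-order Taylor fallback near zero rotation. The same computation in plain NumPy, as a point of comparison (a sketch, not the repository's code):

```python
import numpy as np

def skew(v: np.ndarray) -> np.ndarray:
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])

def so3_exp(phi: np.ndarray, eps: float = 1e-10) -> np.ndarray:
    angle = np.linalg.norm(phi)
    if angle < eps:                       # first-order Taylor expansion near 0
        return np.eye(3) + skew(phi)
    axis = phi / angle
    c, s = np.cos(angle), np.sin(angle)
    return c * np.eye(3) + (1.0 - c) * np.outer(axis, axis) + s * skew(axis)

# A rotation of pi/2 about z maps x onto y:
R = so3_exp(np.array([0.0, 0.0, np.pi / 2]))
assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
```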
leopold-franz/muMatch | [
"5e6a2e9a300e2aa2d2164fdd6f9346fc31055115"
]
| [
"micro_match/correspondence/functional_maps/spectoral_filtering.py"
]
| [
"import jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom scipy.optimize import minimize\n\n\ndef l12_functors(X, Y):\n def l21_norm(X):\n return jnp.sum(jnp.linalg.norm(X, axis=0))\n\n def functor(C):\n return l21_norm(C @ X - Y)\n\n f = jax.jit(functor)\n df = jax.jit(jax.grad(functor, argnums=0))\n return f, df\n\n\ndef l12_solver(X, Y):\n shape = (Y.shape[0], X.shape[0])\n f, df = l12_functors(X, Y)\n fun = lambda c: np.asarray(f(c.reshape(shape)), dtype=np.double)\n jac = lambda c: np.asarray(df(c.reshape(shape)), dtype=np.double).flatten()\n x0 = np.linalg.lstsq(X.T, Y.T)[0].T.flatten()\n res = minimize(fun=fun, jac=jac, x0=x0, method=\"L-BFGS-B\")\n return res.x.reshape(shape)\n\n\ndef l12_filtered_correspondence(src, dst, i, j):\n x, y = src.dirac_deltas(i), dst.dirac_deltas(j)\n return l12_solver(x, y)\n"
]
| [
[
"numpy.linalg.lstsq",
"scipy.optimize.minimize"
]
]
|
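The muMatch snippet above minimizes the l2,1 norm of the residual C X − Y (the sum of the Euclidean norms of its columns), differentiating through it with `jax.grad` and handing the result to SciPy's L-BFGS-B from a least-squares warm start. For reference, the objective and the warm start in plain NumPy (a sketch; the shapes are illustrative):

```python
import numpy as np

def l21_norm(M: np.ndarray) -> float:
    # Sum of the Euclidean norms of the columns, as in the JAX functor above.
    return float(np.sum(np.linalg.norm(M, axis=0)))

rng = np.random.default_rng(0)
X = rng.standard_normal((20, 50))   # source spectral coefficients (illustrative)
Y = rng.standard_normal((20, 50))   # target spectral coefficients
C0 = np.linalg.lstsq(X.T, Y.T, rcond=None)[0].T   # least-squares warm start
print(l21_norm(C0 @ X - Y))         # the value L-BFGS-B would start from
```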
lilao/serving | [
"79968817470bf5700eddbd3ad6dc7cdd4c759909"
]
| [
"tensorflow_serving/example/inception_export.py"
]
| [
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n#!/usr/bin/env python2.7\n\"\"\"Export inception model given existing training checkpoints.\n\nThe model is exported with proper signatures that can be loaded by standard\ntensorflow_model_server.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os.path\n\n# This is a placeholder for a Google-internal import.\n\nimport tensorflow as tf\n\nfrom tensorflow.contrib.session_bundle import exporter\nfrom inception import inception_model\n\n\ntf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/inception_train',\n \"\"\"Directory where to read training checkpoints.\"\"\")\ntf.app.flags.DEFINE_string('export_dir', '/tmp/inception_export',\n \"\"\"Directory where to export inference model.\"\"\")\ntf.app.flags.DEFINE_integer('image_size', 299,\n \"\"\"Needs to provide same value as in training.\"\"\")\nFLAGS = tf.app.flags.FLAGS\n\n\nNUM_CLASSES = 1000\nNUM_TOP_CLASSES = 5\n\nWORKING_DIR = os.path.dirname(os.path.realpath(__file__))\nSYNSET_FILE = os.path.join(WORKING_DIR, 'imagenet_lsvrc_2015_synsets.txt')\nMETADATA_FILE = os.path.join(WORKING_DIR, 'imagenet_metadata.txt')\n\n\ndef export():\n # Create index->synset mapping\n synsets = []\n with open(SYNSET_FILE) as f:\n synsets = f.read().splitlines()\n # Create synset->metadata mapping\n texts = {}\n with open(METADATA_FILE) as f:\n for line in f.read().splitlines():\n parts = line.split('\\t')\n assert len(parts) == 2\n texts[parts[0]] = parts[1]\n\n with tf.Graph().as_default():\n # Build inference model.\n # Please refer to Tensorflow inception model for details.\n\n # Input transformation.\n serialized_tf_example = tf.placeholder(tf.string, name='tf_example')\n feature_configs = {\n 'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),\n }\n tf_example = tf.parse_example(serialized_tf_example, feature_configs)\n jpegs = tf_example['image/encoded']\n images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)\n\n # Run inference.\n logits, _ = inception_model.inference(images, NUM_CLASSES + 1)\n\n # Transform output to topK result.\n values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)\n\n # Create a constant string Tensor where the i'th element is\n # the human readable class description for the i'th index.\n # Note that the 0th index is an unused background class\n # (see inception model definition code).\n class_descriptions = ['unused background']\n for s in synsets:\n class_descriptions.append(texts[s])\n class_tensor = tf.constant(class_descriptions)\n\n classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),\n mapping=class_tensor)\n\n # Restore variables from training checkpoint.\n variable_averages = tf.train.ExponentialMovingAverage(\n inception_model.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n with tf.Session() as sess:\n # 
Restore variables from training checkpoints.\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n # Assuming model_checkpoint_path looks something like:\n # /my-favorite-path/imagenet_train/model.ckpt-0,\n # extract global_step from it.\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n print('Successfully loaded model from %s at step=%s.' %\n (ckpt.model_checkpoint_path, global_step))\n else:\n print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)\n return\n\n # Export inference model.\n init_op = tf.group(tf.initialize_all_tables(), name='init_op')\n classification_signature = exporter.classification_signature(\n input_tensor=serialized_tf_example,\n classes_tensor=classes,\n scores_tensor=values)\n named_graph_signature = {\n 'inputs': exporter.generic_signature({'images': jpegs}),\n 'outputs': exporter.generic_signature({\n 'classes': classes,\n 'scores': values\n })}\n model_exporter = exporter.Exporter(saver)\n model_exporter.init(\n init_op=init_op,\n default_graph_signature=classification_signature,\n named_graph_signatures=named_graph_signature)\n model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)\n print('Successfully exported model to %s' % FLAGS.export_dir)\n\n\ndef preprocess_image(image_buffer):\n \"\"\"Preprocess JPEG encoded bytes to 3D float Tensor.\"\"\"\n\n # Decode the string as an RGB JPEG.\n # Note that the resulting image contains an unknown height and width\n # that is set dynamically by decode_jpeg. In other words, the height\n # and width of image is unknown at compile-time.\n image = tf.image.decode_jpeg(image_buffer, channels=3)\n # After this point, all image pixels reside in [0,1)\n # until the very end, when they're rescaled to (-1, 1). The various\n # adjust_* ops all require this range for dtype float.\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n # Crop the central region of the image with an area containing 87.5% of\n # the original image.\n image = tf.image.central_crop(image, central_fraction=0.875)\n # Resize the image to the original height and width.\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image,\n [FLAGS.image_size, FLAGS.image_size],\n align_corners=False)\n image = tf.squeeze(image, [0])\n # Finally, rescale to [-1,1] instead of [0, 1)\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n return image\n\n\ndef main(unused_argv=None):\n export()\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
]
| [
[
"tensorflow.FixedLenFeature",
"tensorflow.initialize_all_tables",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.map_fn",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.image.central_crop",
"tensorflow.to_int64",
"tensorflow.Graph",
"tensorflow.contrib.session_bundle.exporter.generic_signature",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.squeeze",
"tensorflow.subtract",
"tensorflow.nn.top_k",
"tensorflow.Session",
"tensorflow.contrib.session_bundle.exporter.Exporter",
"tensorflow.train.Saver",
"tensorflow.app.run",
"tensorflow.image.decode_jpeg",
"tensorflow.image.resize_bilinear",
"tensorflow.parse_example",
"tensorflow.placeholder",
"tensorflow.contrib.session_bundle.exporter.classification_signature",
"tensorflow.train.get_checkpoint_state",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.expand_dims",
"tensorflow.image.convert_image_dtype"
]
]
|
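One detail worth calling out in `preprocess_image` above: `tf.image.convert_image_dtype` normalizes pixels into [0, 1), and the final `subtract`/`multiply` pair maps that range onto [-1, 1), the input range Inception was trained with. The arithmetic, checked in NumPy:

```python
import numpy as np

def rescale_to_pm1(pixels01: np.ndarray) -> np.ndarray:
    # [0, 1) -> [-1, 1), matching tf.multiply(tf.subtract(image, 0.5), 2.0)
    return (pixels01 - 0.5) * 2.0

assert rescale_to_pm1(np.array([0.0, 0.25, 0.5, 0.75])).tolist() == [-1.0, -0.5, 0.0, 0.5]
```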
SravanChittupalli/Lane-following-bot-in-Gazebo | [
"3f8a775cd9216f1471e1e729898d1a9de59a6a2d"
]
| [
"ros_ws/lane_follow/scripts/utils.py"
]
| [
"\nimport cv2\nimport numpy as np\nimport math\n\nkernel = np.ones((5 , 5) , np.float32)/25\nlower_red = np.array([160,100,100])\nupper_red = np.array([179,255,255])\n\n\ndef show_steering_corection(image , error):\n correction_image = np.zeros_like(image)\n error_radian = (error*math.pi)/180\n diff_in_x = 300*math.tan(error_radian)\n correct_x_coord = int(600-diff_in_x)\n cv2.line(correction_image , (600 , correction_image.shape[0]) , (correct_x_coord , correction_image.shape[0]-300) , (0 , 0 , 255) , 5)\n return correction_image\n\n\n\ndef compute_steering_angles(image , lane_lines):\n if len(lane_lines)==0:\n print(\"No lane lines detected\")\n return -90\n \n height , width , _ = image.shape\n #print(lane_lines)\n if len(lane_lines) == 1:\n x1 , y1 , x2 , y2 = lane_lines[0].reshape(4)\n xoffset = x2 - x1\n else:\n lx1 , ly1 , lx2 , ly2 = lane_lines[0].reshape(4)\n rx1 , ry1 , rx2 , ry2 = lane_lines[1].reshape(4)\n mid = int(width/2)\n xoffset = int(((lx2 + rx2)/2) - mid)\n\n yoffset = int(height/2)\n if (xoffset) == 0:\n print(0)\n return 0\n angle_to_mid_radian = math.atan(yoffset/xoffset)\n angle_to_mid_degree = int(angle_to_mid_radian*180/math.pi)\n if(angle_to_mid_degree > 0 ):\n steering_angle = angle_to_mid_degree - 90\n else:\n steering_angle = angle_to_mid_degree + 90\n print(steering_angle)\n\n return steering_angle\n\ndef make_coordinates(image , line_parameteres):\n slope , intercept = line_parameteres\n width = image.shape[1]\n y1 = image.shape[0]\n y2 = int(y1*1/2)\n x1 = max(-width, min(2 * width, int((y1 - intercept) / slope)))\n x2 = max(-width, min(2 * width, int((y2 - intercept) / slope)))\n #x1 = int((y1-intercept)/slope)\n #x2 = int((y2-intercept)/slope)\n return np.array([x1 , y1 , x2 , y2])\n\n\n\ndef apply_canny(cv_image):\n global kernal\n #ret,cv_image = cv2.threshold(cv_image,230,255,cv2.THRESH_BINARY)\n #cv2.imshow(\"img\" , cv_image)\n #Here applying a 5X5 filter of ones is better than gaussian blur\n blured = cv2.filter2D(cv_image , -1 , kernel)\n canny = cv2.Canny(blured, 150, 190)\n return canny\n\ndef detect_red(cv_image):\n global lower_red , upper_red\n hsv_img = cv2.cvtColor(cv_image , cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv_img , lower_red , upper_red)\n red_edge = apply_canny(mask)\n return red_edge\n\ndef get_reqd_region(image):\n height = image.shape[0]\n polygon = np.array([[(0 , height) , \n (1200 , height), (1190 , 180 ),\n (0 , 180)]])\n mask = np.zeros_like(image)\n cv2.fillPoly(mask , polygon , 255)\n cropped_img = cv2.bitwise_and(image , mask)\n return cropped_img\n\n\ndef average_slope_intercept(image , lines):\n left_fit = []\n right_fit = []\n result = []\n\n boundary = 1/3\n left_region_boundary = image.shape[1] * (1 - boundary)\n right_region_boundary = image.shape[1] * (boundary)\n\n if lines is not None:\n for line in lines:\n x1 , y1 , x2 , y2 = line.reshape(4)\n if x1 == x2:\n print('Avoiding straight line')\n continue\n parameteres = np.polyfit((x1 , x2) , (y1 , y2) , 1) \n slope = parameteres[0]\n intercept = parameteres[1]\n if slope < 0:\n if x1 < left_region_boundary and x2 < left_region_boundary:\n left_fit.append((slope , intercept))\n else:\n if x1 > right_region_boundary and x2 > right_region_boundary:\n right_fit.append((slope , intercept))\n if len(left_fit) != 0:\n left_line_params_avg = np.average(left_fit , axis=0)\n left_line = make_coordinates(image , left_line_params_avg )\n result.append(left_line)\n if len(right_fit) != 0:\n right_line_params_avg = np.average(right_fit , axis=0)\n right_line = 
make_coordinates(image , right_line_params_avg )\n result.append(right_line)\n return np.asarray(result)\n\n\ndef display_lines(image , lines):\n line_image = np.zeros_like(image)\n if lines is not None:\n for line in lines:\n x1 , y1 , x2 , y2 = line.reshape(4)\n cv2.line(line_image , (x1 , y1) , (x2 , y2) , (255 , 0 , 0) , 10)\n return line_image\n"
]
| [
[
"numpy.polyfit",
"numpy.asarray",
"numpy.ones",
"numpy.zeros_like",
"numpy.average",
"numpy.array"
]
]
|
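The branching in `compute_steering_angles` above (atan of y/x followed by a ±90° correction) folds into a single `atan2` call that measures the heading error from the image's vertical directly. The sketch below reproduces the original's sign convention for nonzero offsets (a check, not the repository's code):

```python
import math

def steering_angle_deg(x_offset: float, y_offset: float) -> float:
    # Angle between the vertical centerline and the line from the
    # bottom-center of the frame to the lane midpoint (x_offset, y_offset).
    return -math.degrees(math.atan2(x_offset, y_offset))

assert steering_angle_deg(0.0, 300.0) == 0.0                    # lane dead ahead
assert math.isclose(steering_angle_deg(300.0, 300.0), -45.0)    # midpoint to the right
assert math.isclose(steering_angle_deg(-300.0, 300.0), 45.0)    # midpoint to the left
```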
jcartus/Lanczos | [
"f0660676d6bd28ee101c9fd7299c4d5987514c24"
]
| [
"tests.py"
]
| [
"\"\"\"\nDescription:\n This module provides unit tests for the functionality provided in qm.py\n\nAuthor:\n Johannes Cartus, TU Graz\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom utilities import InfoStream\nfrom qm import BasisState, Sector, simulate_heisenberg_model\n\nclass TestBasisStates(unittest.TestCase):\n \"\"\"This will test the basis state's functionality (Magnetisation, etc.).\"\"\"\n\n def setUp(self):\n\n self._test_state = BasisState(state=np.array([1, 0, 0, 1, 1]))\n\n def test_bin_to_dec(self):\n #carefull lowest bit at index 0, i.e. at the left ;)\n bin = [\n np.array([0, 1, 0, 0, 1]),\n np.array([1, 0, 1]),\n np.array([1, 0, 0, 1, 1, 0])\n ]\n\n expected = [18, 5, 25]\n\n # init with some random value\n state = BasisState(2,4)\n \n for (b,e) in zip(bin, expected):\n self.assertEqual(e, state._binary_to_decimal(b))\n\n def test_dec_to_bin(self):\n #decimals and msb\n decimal = [(18, 4), (18, 5), (5, 2), (25, 5)]\n expected = [\n np.array([0.0, 1.0, 0.0, 0.0, 1.0]),\n np.array([0.0, 1.0, 0.0, 0.0, 1.0, 0.0]),\n np.array([1.0, 0.0, 1.0]),\n np.array([1.0, 0.0, 0.0, 1.0, 1.0, 0.0])\n ]\n\n # init with some random value\n state = BasisState(2,4)\n \n for (d,e) in zip(decimal, expected):\n self.assertListEqual(list(e), list(state._decimal_to_binary(*d)))\n\n # check if out of range assersts\n with self.assertRaises(ValueError):\n state._decimal_to_binary(22, 3)\n\n def test_magnetisation(self):\n self.assertEqual(\n (0.5 - (-0.5) + (-0.5) - (+0.5) + 0.5) / 5,\n self._test_state.magnetisation()\n )\n\n def test_magnetisation_two_spins(self):\n state_all_up = BasisState(np.array([1, 1]))\n self.assertEqual(0, state_all_up.magnetisation())\n\n state_neel_1 = BasisState(np.array([0, 1]))\n self.assertEqual(-0.5, state_neel_1.magnetisation())\n\n state_neel_2 = BasisState(np.array([1, 0]))\n self.assertEqual(0.5, state_neel_2.magnetisation())\n\n def test_energy(self):\n\n self.assertEqual(\n 0,\n self._test_state.energy(jz=0)\n )\n\n self.assertEqual(\n - 0.25 + 0.25 - 0.25 + 0.25 + 0.25,\n self._test_state.energy(jz=1)\n )\n\nclass TestBasisGeneration(unittest.TestCase):\n \"\"\"This class tests the generation of possible basis states for given \n lattice size and number of spin-ups\"\"\"\n\n def test_basis_generation(self):\n sector = Sector(\n number_of_sites=4, \n number_spinups=2, \n jz=1\n )\n\n sector.setup_basis()\n\n expected = [3, 5, 6, 9, 10, 12]\n actual = [x.decimal for x in sector.basis]\n\n self.assertListEqual(expected, actual)\n\n\n def test_hilbertspace_sizes(self):\n \"\"\"Setup basis for given setup and compare to given results (see \n instructions)\"\"\"\n\n # N = 8, nUp = 4 (Sz=0), Jz = 0\n expected = 70\n basis = Sector(8, 4, 0).setup_basis()\n self.assertEqual(expected, len(basis)) \n\n # N = 14, nUp = 7 (Sz=0), Jz = 0\n expected = 3432\n basis = Sector(14, 7, 0).setup_basis()\n self.assertEqual(expected, len(basis)) \n\n # N = 20, nUp = 10 (Sz=0), Jz = 0\n expected = 184756\n basis = Sector(20, 10, 0).setup_basis()\n self.assertEqual(expected, len(basis))\n\nclass TestHamiltonianGeneration(unittest.TestCase):\n \"\"\"Test the setup of the hamiltonian matrix for given N, nUp. \n \n Note:\n Implicitly in all tests the basis is generated first. 
Thus, these tests \n may fail if there is an error there...\n \"\"\"\n\n def test_generate_2SpinSystem_Sz0(self):\n \"\"\"Setup H for 2 spin-system and compare results to what is given\n in the script for Sz_tot = 0 (center of the matrix)\"\"\"\n # Sz = 0\n for Jz in [0, 1, 2]: \n #expected = np.array([[-Jz/4, 1/2], [1/2, -Jz/4]]) # laut skript\n expected = np.array([[-Jz/2, 1], [1, -Jz/2]]) # was ich mir so denk\n H = Sector(2, 1, Jz).setup_hamiltonian().toarray()\n np.testing.assert_array_almost_equal(expected, H)\n\n\n def test_H_generation_N4_nUp2(self):\n \n for J in [0, 1, 2]:\n sector = Sector(\n number_of_sites=4, \n number_spinups=2, \n jz=J\n )\n\n sector.setup_hamiltonian() \n\n expected = np.array(\n [\n [0, 0.5, 0, 0, 0.5, 0 ], \n [0.5, -J, 0.5, 0.5, 0, 0.5 ],\n [0, 0.5, 0, 0, 0.5, 0 ],\n [0, 0.5, 0, 0, 0.5, 0 ],\n [0.5, 0, 0.5, 0.5, -J, 0.5 ],\n [0, 0.5, 0, 0, 0.5, 0 ]\n ]\n )\n\n np.testing.assert_array_equal(expected, sector.H.toarray())\nclass TestDiagonalisation(unittest.TestCase):\n \"\"\"Test the lanczos algorithm\"\"\"\n def test_lanczos_small(self):\n\n A = np.array([[2, 1], [1, 2]])\n a = 1\n v = np.array([1,-1])\n\n a_act, v_act, _ = Sector.lanczos_diagonalisation(A)\n self._assert_eig_result(a, v, a_act, v_act, 1E-4)\n\n def test_lanzos_middle(self):\n\n A = np.array(\n [\n [ 3, 2, 4, 0, -2 ], \n [ 2, -2, 6, -2, 1 ], \n [ 4, 6, 2, 4, 4 ], \n [ 0, -2, 4, 7, 6 ], \n [ -2, 1, 4, 6, -9 ]\n ]\n )\n a = -12.0509\n v = np.array([0.204647, -0.04609, -0.246984, -0.267927, 1])\n\n a_act, v_act, _ = Sector.lanczos_diagonalisation(A)\n self._assert_eig_result(a, v, a_act, v_act, 1E-4)\n \n\n def test_lanczos_random_10x10(self):\n\n N = 10\n \n # create a hermitian matrix\n A = np.random.rand(N, N)\n A = A + A.T\n\n # expected:\n energies, vectors = np.linalg.eigh(A)\n E_expected = energies[0]\n v_expected = vectors[:, 0]\n\n # actual:\n E_actual, v_actual, _ = Sector.lanczos_diagonalisation(A)\n\n self._assert_eig_result(E_expected, v_expected, E_actual, v_actual)\n \n def _assert_eig_result(self, a, v, a_act, v_act, delta=1E-7):\n \n self.assertAlmostEqual(a, a_act, delta=delta)\n\n self._assert_vector_match(v, v_act, delta=delta)\n\n\n def _assert_vector_match(self, a, b, delta=1E-7):\n\n self.assertEqual(len(a), len(b))\n\n # vectors can only differ by a constant factor in all elements\n self.assertAlmostEqual(np.var(a / b), 0, delta=delta)\n\n def test_highlevel_N2(self):\n\n # todo\n\n sector = Sector(\n number_of_sites=2,\n number_spinups=1,\n jz=1\n )\n\n E, ground_state = sector.calculate_ground_state()\n\n self.assertAlmostEqual(0, ground_state.magnetisation(), delta=1E-7)\n self.assertAlmostEqual(0.25, ground_state.magnetisation_squared(), delta=1E-7)\n \n\n\n def test_highlevel_N6_Jz0_Sz0(self):\n \"\"\"\n N=6,\n Sz=0 => Nup=3,\n Jz=0\n\n Lt. 
Markus Aichhorn diagonal in 6 Schritten\n \"\"\"\n\n sector = Sector(\n number_of_sites=6,\n number_spinups=3,\n jz=0\n )\n\n steps_expected = 6\n E_expected = -2.000\n m_expected = 0.0\n m2_expected = 0.08333333\n correlation_expected = [\n 0.25, -0.1111111, -0.0, -0.0277777, 0.0, -0.1111111\n ]\n\n E, ground_state, steps = \\\n sector.calculate_ground_state(give_iterations=True)\n\n # check iterations\n self.assertTrue(steps <= steps_expected)\n\n # check energy\n self.assertAlmostEqual(E_expected, E, delta=1E-4)\n\n # check magnetisations\n self.assertAlmostEqual(m_expected, ground_state.magnetisation(), delta=1E-4)\n self.assertAlmostEqual(m2_expected, ground_state.magnetisation_squared(), delta=1E-2)\n\n for exp, act in zip(correlation_expected, ground_state.correlation()):\n self.assertAlmostEqual(exp, act, delta=1E-2)\n\n def test_highlevel_N10_J2_Sz0(self):\n \"\"\"\n N=10,\n Sz=0 => Nup=5,\n Jz=2\n\n Lt. Markus Aichhorn diagonal in 21 Schritten\n \"\"\"\n\n sector = Sector(\n number_of_sites=10,\n number_spinups=5,\n jz=2\n )\n\n steps_expected = 21\n E_expected = -6.24458366\n m_expected = 0.0\n m2_expected = 0.15776331\n correlation_expected = [\n 0.25, \n -0.19414343, \n 0.14145689, \n -0.13587744, \n 0.12795200,\n -0.12877517,\n 0.12795171,\n -0.13587788,\n 0.14145712,\n -0.19414381\n ]\n\n E, ground_state, steps = \\\n sector.calculate_ground_state(give_iterations=True)\n\n # check iterations\n self.assertTrue(steps <= steps_expected)\n\n # check energy\n self.assertAlmostEqual(E_expected, E, delta=1E-4)\n\n # check magnetisations\n self.assertAlmostEqual(m_expected, ground_state.magnetisation(), delta=1E-4)\n self.assertAlmostEqual(m2_expected, ground_state.magnetisation_squared(), delta=1E-2)\n\n for exp, act in zip(correlation_expected, ground_state.correlation()):\n self.assertAlmostEqual(exp, act, delta=1E-2)\n\n\n\n\n\nif __name__ == '__main__':\n InfoStream.suppress_level=1\n unittest.main()\n "
]
| [
[
"numpy.linalg.eigh",
"numpy.random.rand",
"numpy.var",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
]
]
|
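The unit tests above exercise `Sector.lanczos_diagonalisation`, whose implementation lives elsewhere in the repository. For readers unfamiliar with the algorithm, here is a minimal NumPy version of the plain Lanczos recurrence (no reorthogonalization, so it is a didactic sketch rather than a drop-in replacement): it builds a small tridiagonal matrix T whose lowest eigenpair approximates the ground state of a Hermitian A.

```python
import numpy as np

def lanczos_ground_state(A: np.ndarray, m: int = 30, seed: int = 0):
    """Approximate the lowest eigenpair of a Hermitian matrix A."""
    rng = np.random.default_rng(seed)
    n = A.shape[0]
    m = min(m, n)
    V = np.zeros((n, m))                 # Krylov basis vectors
    alphas, betas = [], []               # tridiagonal entries of T
    v = rng.standard_normal(n)
    v /= np.linalg.norm(v)
    v_prev, beta = np.zeros(n), 0.0
    for j in range(m):
        V[:, j] = v
        w = A @ v - beta * v_prev        # three-term recurrence
        alpha = v @ w
        w -= alpha * v
        alphas.append(alpha)
        beta = np.linalg.norm(w)
        if beta < 1e-12:                 # Krylov space is exhausted
            break
        v_prev, v = v, w / beta
        if j < m - 1:
            betas.append(beta)
    k = len(alphas)
    T = np.diag(alphas) + np.diag(betas[: k - 1], 1) + np.diag(betas[: k - 1], -1)
    theta, S = np.linalg.eigh(T)         # cheap: T is only k x k
    return theta[0], V[:, :k] @ S[:, 0]

# Compare against dense diagonalisation, as test_lanczos_random_10x10 does:
A = np.random.default_rng(1).standard_normal((10, 10))
A = A + A.T
E, psi = lanczos_ground_state(A)
assert np.isclose(E, np.linalg.eigh(A)[0][0], atol=1e-6)
```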
lucidrains/compare_gan | [
"2a685ab94129c398620da67d999487fa63b7f741"
]
| [
"compare_gan/gans/clgan.py"
]
| [
"# coding=utf-8\n# Copyright 2018 Google LLC & Hwalsuk Lee.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implementation of Self-Supervised GAN with contrastive loss.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import flags\nfrom absl import logging\nfrom compare_gan.architectures.arch_ops import linear\nfrom compare_gan.gans import loss_lib\nfrom compare_gan.gans import modular_gan\nfrom compare_gan.gans import penalty_lib\nfrom compare_gan.gans import utils\n\nimport gin\nimport numpy as np\nimport random\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\n# augmentation functions\n\n# augment\n\ndef random_crop_and_resize(images, ratio=0.8):\n b, h, w, c = images.get_shape().as_list()\n ch, cw = map(lambda x: int(x * ratio), (h, w))\n crop = tf.random_crop(images, size=[b, ch, cw, 3])\n crop = tf.image.resize(crop, [h, w])\n return crop\n\ndef random_apply(fn, image, prob=1.):\n b, *_ = image.get_shape().as_list()\n chance = tf.less(tf.random_uniform([b], 0, 1.0), prob)\n return tf.where(chance, fn(image), tf.identity(image))\n\ndef color_distortion(image, s=1.0):\n lower, upper, x = (1 - 0.8 * s), (1 + 0.8 * s), image\n x = tf.image.random_brightness(x, max_delta=0.8*s)\n x = tf.image.random_contrast(x, lower=lower, upper=upper)\n x = tf.image.random_saturation(x, lower=lower, upper=upper)\n x = tf.image.random_hue(x, max_delta=0.2*s)\n x = tf.clip_by_value(x, 0, 1)\n return x\n\ndef color_drop(image):\n image = tf.image.rgb_to_grayscale(image)\n image = tf.tile(image, [1, 1, 1, 3])\n return image\n\n# pylint: disable=not-callable\[email protected](blacklist=[\"kwargs\"])\nclass CLGAN(modular_gan.ModularGAN):\n \"\"\"Self-Supervised GAN with Contrastive Loss\"\"\"\n\n def __init__(self,\n aug_color_jitter_prob=0.8,\n aug_color_drop_prob=0.0,\n weight_contrastive_loss_d=2.0,\n **kwargs):\n \"\"\"Creates a new Self-Supervised GAN using Contrastive Loss.\n\n Args:\n self_supervised_batch_size: The total number images per batch for the self supervised loss.\n weight_contrastive_loss_d: Weight for the contrastive loss for the self supervised learning on real images\n **kwargs: Additional arguments passed to `ModularGAN` constructor.\n \"\"\"\n super(CLGAN, self).__init__(**kwargs)\n\n self._weight_contrastive_loss_d = weight_contrastive_loss_d\n self._aug_color_jitter_prob = aug_color_jitter_prob\n self._aug_color_drop_prob = aug_color_drop_prob\n\n # To safe memory ModularGAN supports feeding real and fake samples\n # separately through the discriminator. 
CLGAN does not support this to\n # avoid additional additional complexity in create_loss().\n assert not self._deprecated_split_disc_calls, \\\n \"Splitting discriminator calls is not supported in CLGAN.\"\n\n def _latent_projections(self, latents):\n bs, dim = latents.get_shape().as_list()\n\n with tf.variable_scope(\"discriminator_z_projection\", reuse=tf.AUTO_REUSE) as scope:\n k1 = tf.get_variable(\"kernel1\", [dim, dim * 4])\n k2 = tf.get_variable(\"kernel2\", [dim * 4, dim])\n z_proj = tf.matmul(tf.nn.leaky_relu(tf.matmul(latents, k1), name=scope.name), k2)\n z_proj = z_proj / tf.reshape(tf.norm(z_proj, ord=2, axis=-1), [bs, 1])\n return z_proj\n\n def create_loss(self, features, labels, params, is_training=True):\n \"\"\"Build the loss tensors for discriminator and generator.\n\n This method will set self.d_loss and self.g_loss.\n\n Args:\n features: Optional dictionary with inputs to the model (\"images\" should\n contain the real images and \"z\" the noise for the generator).\n labels: Tensor will labels. These are class indices. Use\n self._get_one_hot_labels(labels) to get a one hot encoded tensor.\n params: Dictionary with hyperparameters passed to TPUEstimator.\n Additional TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,\n `tpu_context`. `batch_size` is the batch size for this core.\n is_training: If True build the model in training mode. If False build the\n model for inference mode (e.g. use trained averages for batch norm).\n\n Raises:\n ValueError: If set of meta/hyper parameters is not supported.\n \"\"\"\n images = features[\"images\"] # Input images.\n generated = features[\"generated\"] # Fake images.\n if self.conditional:\n y = self._get_one_hot_labels(labels)\n sampled_y = self._get_one_hot_labels(features[\"sampled_labels\"])\n else:\n y = None\n sampled_y = None\n all_y = None\n\n # Batch size per core.\n bs = images.shape[0].value\n\n def augment(imgs):\n imgs = random_crop_and_resize(imgs)\n imgs = random_apply(color_distortion, imgs, self._aug_color_jitter_prob)\n imgs = random_apply(color_drop, imgs, self._aug_color_drop_prob)\n return tf.stop_gradient(imgs)\n\n aug_images, aug_generated = augment(images), augment(generated)\n\n # concat all images\n all_images = tf.concat([images, generated, aug_images, aug_generated], 0)\n\n if self.conditional:\n all_y = tf.concat([y, sampled_y, y, sampled_y], axis=0)\n\n # Compute discriminator output for real and fake images in one batch.\n\n d_all, d_all_logits, d_latents = self.discriminator(\n x=all_images, y=all_y, is_training=is_training)\n\n z_projs = self._latent_projections(d_latents)\n\n d_real, d_fake, _, _ = tf.split(d_all, 4)\n d_real_logits, d_fake_logits, _, _ = tf.split(d_all_logits, 4)\n z_projs_real, z_projs_fake, z_aug_projs_real, z_aug_projs_fake = tf.split(z_projs, 4)\n\n self.d_loss, _, _, self.g_loss = loss_lib.get_losses(\n d_real=d_real, d_fake=d_fake, d_real_logits=d_real_logits,\n d_fake_logits=d_fake_logits)\n\n penalty_loss = penalty_lib.get_penalty_loss(\n x=images, x_fake=generated, y=y, is_training=is_training,\n discriminator=self.discriminator, architecture=self._architecture)\n self.d_loss += self._lambda * penalty_loss\n\n z_projs = tf.concat([z_projs_real, z_projs_fake], 0)\n z_aug_projs = tf.concat([z_aug_projs_real, z_aug_projs_fake], 0)\n\n sims_logits = tf.matmul(z_projs, z_aug_projs, transpose_b=True)\n logits_max = tf.reduce_max(sims_logits,1)\n sims_logits = sims_logits - tf.reshape(logits_max, [-1, 1])\n sims_probs = tf.nn.softmax(sims_logits)\n\n sim_labels = 
tf.constant(np.arange(bs * 2, dtype=np.int32))\n sims_onehot = tf.one_hot(sim_labels, bs * 2)\n\n c_real_loss = - tf.reduce_mean(\n tf.reduce_sum(sims_onehot * tf.log(sims_probs + 1e-10), 1))\n\n self.d_loss += c_real_loss * self._weight_contrastive_loss_d\n\n self._tpu_summary.scalar(\"loss/c_real_loss\", c_real_loss)\n self._tpu_summary.scalar(\"loss/penalty\", penalty_loss)\n\n"
]
| [
[
"tensorflow.get_variable",
"tensorflow.image.random_contrast",
"tensorflow.concat",
"tensorflow.image.random_saturation",
"tensorflow.image.random_hue",
"numpy.arange",
"tensorflow.stop_gradient",
"tensorflow.image.rgb_to_grayscale",
"tensorflow.tile",
"tensorflow.matmul",
"tensorflow.image.random_brightness",
"tensorflow.norm",
"tensorflow.identity",
"tensorflow.one_hot",
"tensorflow.split",
"tensorflow.clip_by_value",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.random_crop",
"tensorflow.image.resize",
"tensorflow.log",
"tensorflow.variable_scope",
"tensorflow.random_uniform"
]
]
|
MalihehIzadi/hybrid-linker | [
"5e9e134634e71c9c44d7fb74997d809355cabfd1"
]
| [
"baselines/FRLink/FrLink.py"
]
| [
"import logging\nimport json\nimport os\nfrom gensim.models import TfidfModel\nfrom gensim.corpora import Dictionary\nfrom gensim.matutils import cossim\nimport pandas as pd\n\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nTRAIN_ITERS = 300\n\ndef getCorpus(df, data_type):\n corpus_list = []\n df['summary_processed'] = df['summary_processed'].astype(str)\n df['description_processed'] = df['description_processed'].astype(str)\n df['message_processed'] = df['message_processed'].astype(str)\n df['processDiffCode'] = df['processDiffCode'].astype(str)\n\n if data_type == 'text':\n for index, row in df.iterrows():\n sentence = row['summary_processed'] + row['description_processed'] + row['message_processed']\n words = sentence.split(\" \")\n sentence_segment = []\n for word in words:\n if word.strip() != '':\n sentence_segment.append(word.strip())\n corpus_list.append(sentence_segment)\n else:\n for index, row in df.iterrows():\n sentence = row['processDiffCode']\n words = sentence.split(\" \")\n sentence_segment = []\n for word in words:\n if word.strip() != '':\n sentence_segment.append(word.strip())\n corpus_list.append(sentence_segment)\n \n return corpus_list\n\n# create data frame of all project's data\ndf = pd.DataFrame()\nfiles = os.listdir('data/')\nfor file in files:\n tmp_df = pd.read_parquet('data/'+file)\n textual_data = tmp_df.loc[tmp_df['train_flag'] == 1]\n df = df.append(textual_data, ignore_index=True)\n \n# creating data set for code and text\ntext_dataset = getCorpus(df, data_type='text')\ncode_dataset = getCorpus(df, data_type='code')\ncode_dct = Dictionary(code_dataset)\ntext_dct = Dictionary(text_dataset)\ncode_corpus = [code_dct.doc2bow(line) for line in code_dataset] # convert corpus to BoW format\ntext_corpus = [text_dct.doc2bow(line) for line in text_dataset] # convert corpus to BoW format\n# create Code mode and Text model\ncode_model = TfidfModel(code_corpus)\ncode_model.save(\"tfidf/models/code_tfidf.model\")\ntext_model = TfidfModel(text_corpus)\ntext_model.save(\"tfidf/models/text_tfidf.model\")\n\n\ndef read_data(path):\n res = []\n filepath = path\n logging.info(\"Loaded the file:\"+filepath)\n if os.path.isfile(filepath):\n file = open(filepath, 'rb')\n testlist = json.loads(file.read())\n res.extend(testlist)\n file.close()\n return res\n\n\ndef getSim(vec1, vec2):\n return cossim(vec1, vec2)\n\n\ndef getTextSim(commitText, issueText):\n res = 0\n for cText in commitText:\n cVec = text_model[text_dct.doc2bow([cText])]\n for iText in issueText:\n iVec = text_model[text_dct.doc2bow([iText])]\n res = max(res, getSim(cVec, iVec))\n return res\n\n\ndef getCodeSim(commitCode, issueCode):\n cVec = code_model[code_dct.doc2bow(commitCode)]\n iVec = code_model[code_dct.doc2bow(issueCode)]\n return getSim(cVec, iVec)\n\n\ndef learn(list, ITR):\n ThresVal = 0.0\n Step = 0.01\n LThres = 0.0\n F = 0.0\n RMax = ITR\n while ThresVal <= 1:\n TP = 0\n FP = 0\n FN = 0\n for link in list:\n if link['val'] >= ThresVal:\n if link['type'] == 1:\n TP += 1\n else:\n FP += 1\n else:\n if link['type'] == 1:\n FN += 1\n precision = TP/(TP+FP+1e-8)\n recall = TP/(TP+FN+1e-8)\n f_measure = (2*precision*recall)/(precision + recall+1e-8)\n if recall >= ITR:\n if (f_measure > F) or (f_measure == F and recall > RMax):\n LThres = ThresVal\n RMax = recall\n F = f_measure\n ThresVal = ThresVal + Step\n return LThres\n\n\ndef getRes(test_set, t):\n size = len(test_set)\n right = 0.0\n for link in test_set:\n if link['type'] == 1 and link['val'] >= 
t:\n right += 1\n elif link['type'] == 0 and link['val'] < t:\n right += 1\n return right/size\n\n\ndef evaluation(test_set, t):\n TP = 0\n FP = 0\n FN = 0\n for link in test_set:\n if link['val'] >= t:\n if link['type'] == 1:\n TP += 1\n else:\n FP += 1\n else:\n if link['type'] == 1:\n FN += 1\n precision = float(TP) / (TP + FP+1e-8)\n recall = float(TP) / (TP + FN+1e-8)\n f_measure = (2 * precision * recall) / (precision + recall+1e-8)\n logging.info(\"precision:%f recall:%f f_measure:%f\" % (precision, recall, f_measure))\n\n \ndef build():\n filelist = os.listdir('data/')\n for i in range(0, len(filelist)):\n filepath = os.path.join('data/', filelist[i])\n logging.info(\"Loaded the file:\" + filepath)\n if os.path.isfile(filepath):\n df1 = pd.read_parquet(filepath)\n df1 = df1.loc[df1['train_flag'] == 1]\n\n df1['summary_processed'] = df1['summary_processed'].astype(str)\n df1['description_processed'] = df1['description_processed'].astype(str)\n df1['message_processed'] = df1['message_processed'].astype(str)\n df1['processDiffCode'] = df1['processDiffCode'].astype(str)\n \n link_list = []\n for index, row in df1.iterrows():\n type = row['label'] \n val = max(getTextSim((row['summary_processed']+row['description_processed']).split(\" \"), row['message_processed'].split(\" \")),\n getCodeSim(row['description_processed'].split(\" \"), row['processDiffCode'].split(\" \"))) \n link_list.append({'type': type, 'val': val})\n res = json.dumps(link_list, indent=4)\n trainSet = open('train/'+filelist[i].split('.')[0]+'.dat', \"w\")\n trainSet.write(res)\n trainSet.close()\n \ndef build_test():\n filelist = os.listdir('data/')\n for i in range(0, len(filelist)):\n filepath = os.path.join('data/', filelist[i])\n logging.info(\"Loaded the file:\" + filepath)\n if os.path.isfile(filepath):\n df1 = pd.read_parquet(filepath)\n df1 = df1.loc[df1['train_flag'] == 0]\n\n df1['summary_processed'] = df1['summary_processed'].astype(str)\n df1['description_processed'] = df1['description_processed'].astype(str)\n df1['message_processed'] = df1['message_processed'].astype(str)\n df1['processDiffCode'] = df1['processDiffCode'].astype(str)\n \n link_list = []\n for index, row in df1.iterrows():\n type = row['label'] \n val = max(getTextSim((row['summary_processed']+row['description_processed']).split(\" \"), row['message_processed'].split(\" \")),\n getCodeSim(row['description_processed'].split(\" \"), row['processDiffCode'].split(\" \"))) \n link_list.append({'type': type, 'val': val})\n res = json.dumps(link_list, indent=4)\n trainSet = open('test/'+filelist[i].split('.')[0]+'.dat', \"w\")\n trainSet.write(res)\n trainSet.close()\n\n \ndef main():\n build()\n build_test()\n files = os.listdir('train/')\n for file in files:\n trainset = read_data(path='train/'+file)\n testset = read_data(path='test/'+file)\n t = learn(trainset, 0.88)\n res = getRes(testset, t)\n logging.info(t)\n logging.info(res)\n evaluation(testset, t)\n logging.info(\"Finished!\")\n\nif __name__ == \"__main__\":\n main()"
]
| [
[
"pandas.read_parquet",
"pandas.DataFrame"
]
]
|
kazum/nnvm | [
"6eb04ae85b7ce22f33563030da36586ee535ac03"
]
| [
"python/nnvm/frontend/darknet.py"
]
| [
"\"\"\"\nDarkNet symbol frontend.\n\"\"\"\n\nfrom __future__ import absolute_import as _abs\nfrom enum import IntEnum\nimport numpy as np\nimport tvm\nfrom .. import symbol as _sym\n\nclass LAYERTYPE(IntEnum):\n \"\"\"Darknet LAYERTYPE Class constant.\"\"\"\n CONVOLUTIONAL = 0\n DECONVOLUTIONAL = 1\n CONNECTED = 2\n MAXPOOL = 3\n SOFTMAX = 4\n DETECTION = 5\n DROPOUT = 6\n CROP = 7\n ROUTE = 8\n COST = 9\n NORMALIZATION = 10\n AVGPOOL = 11\n LOCAL = 12\n SHORTCUT = 13\n ACTIVE = 14\n RNN = 15\n GRU = 16\n LSTM = 17\n CRNN = 18\n BATCHNORM = 19\n NETWORK = 20\n XNOR = 21\n REGION = 22\n REORG = 23\n BLANK = 24\n\nclass ACTIVATION(IntEnum):\n \"\"\"Darknet ACTIVATION Class constant.\"\"\"\n LOGISTIC = 0\n RELU = 1\n RELIE = 2\n LINEAR = 3\n RAMP = 4\n TANH = 5\n PLSE = 6\n LEAKY = 7\n ELU = 8\n LOGGY = 9\n STAIR = 10\n HARDTAN = 11\n LHTAN = 12\n\n__all__ = ['from_darknet']\n\ndef _darknet_get_nnvm_op(op_name):\n \"\"\"Get the nnvm operation from opname, raise error if not supported.\"\"\"\n op = getattr(_sym, op_name)\n if not op:\n raise RuntimeError(\"Not to map op_name {} to nnvm.sym\".format(op_name))\n return op\n\ndef _darknet_required_attr(attr, key):\n \"\"\"Check the attribute exists and return if exists, if not return error.\"\"\"\n assert isinstance(attr, dict)\n if key not in attr:\n raise AttributeError(\"Required attribute {} not found.\".format(key))\n return attr[key]\n\ndef _darknet_raise_not_supported(attr, op='nnvm'):\n \"\"\"Raise error if any operation is not supported.\"\"\"\n err = \"{} is not supported in {}.\".format(attr, op)\n raise NotImplementedError(err)\n\ndef _darknet_warn_not_used(attr, op='nnvm'):\n \"\"\"Raise warning if any operation not supported.\"\"\"\n import warnings\n err = \"{} is ignored in {}.\".format(attr, op)\n warnings.warn(err)\n\ndef _darknet_parse_tshape(tshape):\n \"\"\"Parse tshape in string.\"\"\"\n return [int(x.strip()) for x in tshape.strip('()').split(',')]\n\ndef _darknet_parse_bool_str(attr, key, default='False'):\n \"\"\"Parse bool string to boolean.\"\"\"\n return attr.get(key, default).strip().lower() in \\\n ['true', '1', 't', 'y', 'yes']\n\ndef _darknet_maxpooling(inputs, attrs):\n \"\"\"Process the max pool 2d operation.\"\"\"\n kernel = _darknet_parse_tshape(_darknet_required_attr(attrs, 'kernel'))\n if len(kernel) != 1:\n _darknet_raise_not_supported('non-2d kernel', 'pool_2d')\n\n op_name, new_attrs = 'max_pool2d', {}\n strides = int(attrs.get('stride', (1, 1)))\n pads = int(attrs.get('pad', (0, 0)))\n new_attrs['pool_size'] = [kernel[0], kernel[0]]\n new_attrs['strides'] = str((strides, strides))\n new_attrs['padding'] = str((pads, pads))\n extra_pad_size = attrs.get('extra_pad_size', 0)\n if extra_pad_size:\n pad_width = ((0, 0), (0, 0), (0, extra_pad_size), (0, extra_pad_size))\n inputs = _sym.pad(*inputs, pad_width=pad_width, pad_value=np.finfo(np.float32).min)\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef _darknet_avgpooling(inputs, attrs):\n \"\"\"Process the average pool 2d operation.\"\"\"\n kernel = _darknet_parse_tshape(_darknet_required_attr(attrs, 'kernel'))\n if len(kernel) != 1:\n _darknet_raise_not_supported('non-2d kernel', 'pool_2d')\n\n op_name, new_attrs = 'avg_pool2d', {}\n strides = int(attrs.get('stride', (1, 1)))\n pads = int(attrs.get('pad', (0, 0)))\n new_attrs['pool_size'] = [kernel[0], kernel[0]]\n new_attrs['strides'] = str((strides, strides))\n new_attrs['padding'] = str((pads, pads))\n\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef 
_darknet_batch_norm(inputs, attrs):\n \"\"\"Process the batchnormalization operation.\"\"\"\n op_name, new_attrs = 'darknet_batch_norm', {}\n new_attrs['axis'] = attrs.get('axis', 1)\n new_attrs['epsilon'] = attrs.get('eps', 0.000001)\n new_attrs['center'] = True\n new_attrs['scale'] = True\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef _darknet_conv2d(inputs, attrs):\n \"\"\"Process the convolution 2d operation.\"\"\"\n kernel = _darknet_parse_tshape(_darknet_required_attr(attrs, 'kernel'))\n if len(kernel) != 1:\n _darknet_raise_not_supported('non 2d kernel', 'conv2d')\n layout = attrs.get('layout', 'NCHW')\n if layout not in ['NCHW', 'NHWC']:\n _darknet_raise_not_supported('layout: ' + layout, 'conv2d')\n strides = int(attrs.get('stride', (1, 1)))\n pads = int(attrs.get('pad', (0, 0)))\n\n op_name, new_attrs = 'conv2d', {}\n new_attrs['channels'] = _darknet_required_attr(attrs, 'num_filter')\n new_attrs['kernel_size'] = [kernel[0], kernel[0]]\n new_attrs['strides'] = (strides, strides)\n new_attrs['padding'] = (pads, pads)\n new_attrs['dilation'] = attrs.get('dilate', (1, 1))\n new_attrs['groups'] = attrs.get('num_group', 1)\n new_attrs['layout'] = layout\n if attrs.get('use_batchNorm', False) is True:\n new_attrs['use_bias'] = False\n else:\n new_attrs['use_bias'] = True\n out_name = {}\n sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)\n out_name[0] = sym.list_output_names()[0].replace('_output', '')\n\n if attrs.get('use_batchNorm', False) is True:\n op_name, new_attrs = 'batch_norm', {}\n new_attrs['epsilon'] = 0.000001\n sym = _darknet_get_nnvm_op(op_name)(*sym, **new_attrs)\n out_name[1] = sym.list_output_names()[0].replace('_output', '')\n if 'activation' in attrs:\n new_attrs = {}\n new_attrs['activation'] = attrs['activation']\n new_attrs['slope'] = 0.1\n sym, _ = _darknet_activations(sym, new_attrs)\n return sym, out_name\n\n\ndef _darknet_conv2d_transpose(inputs, attrs):\n \"\"\"Process the convolution 2d transpose operation.\"\"\"\n if 'target_shape' in attrs:\n _darknet_raise_not_supported('target_shape', 'conv2d_transpose')\n kernel = _darknet_parse_tshape(_darknet_required_attr(attrs, 'kernel'))\n if len(kernel) != 2:\n _darknet_raise_not_supported('non-2d kernel', 'conv2d_transpose')\n layout = attrs.get('layout', 'NCHW')\n if layout not in ['NCHW', 'NHWC']:\n _darknet_raise_not_supported('layout: ' + layout, 'conv2d_transpose')\n op_name, new_attrs = 'conv2d_transpose', {}\n new_attrs['channels'] = _darknet_required_attr(attrs, 'num_filter')\n new_attrs['kernel_size'] = kernel\n new_attrs['strides'] = attrs.get('stride', (1, 1))\n new_attrs['output_padding'] = attrs.get('adj', (0, 0))\n new_attrs['padding'] = attrs.get('pad', (0, 0))\n new_attrs['dilation'] = attrs.get('dilate', (1, 1))\n new_attrs['groups'] = attrs.get('num_group', 1)\n new_attrs['layout'] = layout\n new_attrs['use_bias'] = not _darknet_parse_bool_str(attrs, 'no_bias')\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef _darknet_shortcut(inputs, attrs):\n \"\"\"Process the shortcut operation.\"\"\"\n op_name, new_attrs = 'elemwise_add', {}\n input_0 = inputs[0]\n input_1 = inputs[1]\n input_0_channel = int(attrs['out_channel'])\n input_1_channel = int(attrs['add_out_channel'])\n input_0_size = int(attrs['out_size'])\n input_1_size = int(attrs['add_out_size'])\n\n if input_0_size > input_1_size:\n scale = int(input_0_size/input_1_size)\n input_1 = _sym.upsampling(input_1, scale=scale, name=\"_upsampling\")\n elif input_0_size < input_1_size:\n 
stride = int(input_1_size/input_0_size)\n input_1 = _sym.avg_pool2d(input_1, pool_size=(1, 1),\n strides=(stride, stride), padding=(0, 0), name=\"_downsampling\")\n\n if input_0_channel != input_1_channel:\n pad_channel = input_0_channel - input_1_channel\n input_1 = _sym.pad(input_1, pad_width=((0, 0), (0, pad_channel), (0, 0), (0, 0)),\n pad_value=0.)\n\n new_inputs = _as_list([input_0, input_1])\n sym = _darknet_get_nnvm_op(op_name)(*new_inputs, **new_attrs)\n out_name = sym.list_output_names()[0].replace('_output', '')\n if 'activation' in attrs:\n new_attrs['activation'] = attrs['activation']\n sym, _ = _darknet_activations(sym, new_attrs)\n return sym, out_name\n\ndef _darknet_dense(inputs, attrs):\n \"\"\"Process the dense operation.\"\"\"\n op_name, new_attrs = 'dense', {}\n new_attrs['units'] = _darknet_required_attr(attrs, 'num_hidden')\n\n if attrs.get('use_bias', False) is True:\n new_attrs['use_bias'] = True\n if attrs.get('use_flatten', False) is True:\n inputs[0] = _sym.flatten(inputs[0])\n sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)\n out_name = sym.list_output_names()[0].replace('_output', '')\n if 'activation' in attrs:\n new_attrs = {}\n new_attrs['activation'] = attrs['activation']\n sym, _ = _darknet_activations(sym, new_attrs)\n return sym, out_name\n\ndef _darknet_dropout(inputs, attrs):\n \"\"\"Process the dropout operation, its a blank operation.\"\"\"\n op_name, new_attrs = 'dropout', {}\n new_attrs['rate'] = attrs.get('p', 0.5)\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef _darknet_reshape(inputs, attrs):\n \"\"\"Process the reshape operation.\"\"\"\n if _darknet_parse_bool_str(attrs, 'reverse'):\n _darknet_raise_not_supported('reverse', 'reshape')\n op_name, new_attrs = 'reshape', {}\n new_attrs['shape'] = _darknet_required_attr(attrs, 'shape')\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef _darknet_softmax_output(inputs, attrs):\n \"\"\"Process the softmax operation.\"\"\"\n op_name, new_attrs = 'softmax', {}\n if _darknet_parse_bool_str(attrs, 'multi_output'):\n new_attrs['axis'] = 1\n\n if attrs.get('use_flatten', False) is True:\n inputs[0] = _sym.flatten(inputs[0])\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef _darknet_route(inputs, attrs):\n \"\"\"Process the route operation, which is equivalent to concat.\"\"\"\n op_name = 'concatenate'\n new_attrs = {'axis': attrs.get('dim', 1)}\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef _darknet_reorg(inputs, attrs):\n \"\"\"Process the reorg operation.\"\"\"\n op_name, new_attrs = 'yolo2_reorg', {}\n if 'stride' in attrs:\n new_attrs = {'stride': attrs.get('stride', 1)}\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef _darknet_region(inputs, attrs):\n \"\"\"Process the region operation.\"\"\"\n op_name, new_attrs = 'yolo2_region', {}\n if 'n' in attrs:\n new_attrs['n'] = attrs.get('n', 1)\n if 'classes' in attrs:\n new_attrs['classes'] = attrs.get('classes', 1)\n if 'coords' in attrs:\n new_attrs['coords'] = attrs.get('coords', 0)\n if 'background' in attrs:\n new_attrs['background'] = attrs.get('background', 0)\n if 'softmax' in attrs:\n new_attrs['softmax'] = attrs.get('softmax', 0)\n return _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs), None\n\ndef _darknet_activations(inputs, attrs):\n \"\"\"Process the activation function.\"\"\"\n act = _darknet_required_attr(attrs, 'activation')\n if ACTIVATION.RELU == act:\n act_type = 'relu'\n elif ACTIVATION.TANH == 
act:\n act_type = 'tanh'\n elif ACTIVATION.LINEAR == act:\n return inputs, None\n elif ACTIVATION.LEAKY == act:\n act_type = 'leaky_relu'\n else:\n _darknet_raise_not_supported('act: ' + act)\n\n if act_type in ['relu', 'tanh']:\n op_name, new_attrs = act_type, {}\n sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)\n elif act_type in ['leaky_relu']:\n op_name, new_attrs = act_type, {}\n new_attrs['alpha'] = attrs.get('slope', 0.1)\n sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)\n else:\n _darknet_raise_not_supported('act_type: ' + act_type)\n return sym, None\n\ndef _darknet_op_not_support(inputs, attrs):\n \"\"\"Raise exception if the operation is not supported.\"\"\"\n err = \"{} is not supported in {}.\".format(attrs, inputs)\n raise NotImplementedError(err)\n\n_DARKNET_CONVERT_MAP = {\n 'CONVOLUTIONAL' : _darknet_conv2d,\n 'DECONVOLUTIONAL' : _darknet_conv2d_transpose,\n 'CONNECTED' : _darknet_dense,\n 'MAXPOOL' : _darknet_maxpooling,\n 'SOFTMAX' : _darknet_softmax_output,\n 'DROPOUT' : _darknet_dropout,\n 'AVGPOOL' : _darknet_avgpooling,\n 'BATCHNORM' : _darknet_batch_norm,\n 'RESHAPE' : _darknet_reshape,\n 'ROUTE' : _darknet_route,\n 'REORG' : _darknet_reorg,\n 'REGION' : _darknet_region,\n 'ACTIVATION' : _darknet_activations,\n 'SHORTCUT' : _darknet_shortcut,\n 'DETECTION' : _darknet_op_not_support,\n 'CROP' : _darknet_op_not_support,\n 'COST' : _darknet_op_not_support,\n 'NORMALIZATION' : _darknet_op_not_support,\n 'LOCAL' : _darknet_op_not_support,\n 'ACTIVE' : _darknet_op_not_support,\n 'RNN' : _darknet_op_not_support,\n 'GRU' : _darknet_op_not_support,\n 'LSTM' : _darknet_op_not_support,\n 'CRNN' : _darknet_op_not_support,\n 'NETWORK' : _darknet_op_not_support,\n 'XNOR' : _darknet_op_not_support,\n 'BLANK' : _darknet_op_not_support,\n}\n\ndef _darknet_convert_symbol(op_name, inputs, attrs):\n \"\"\"Convert from darknet op to nnvm op.\n The converter must specify some conversions explicitly to\n support gluon format ops such as conv2d...\n\n Parameters\n ----------\n op_name : str\n Operator name, such as Convolution, Connected, etc\n inputs : list of nnvm.Symbol\n List of input symbols.\n attrs : dict\n Dict of operator attributes\n\n Returns\n -------\n out_name : converted out name of operation\n sym : nnvm.Symbol\n Converted nnvm Symbol\n \"\"\"\n\n if op_name in _DARKNET_CONVERT_MAP:\n sym, out_name = _DARKNET_CONVERT_MAP[op_name](inputs, attrs)\n else:\n _darknet_raise_not_supported('Operator: ' + op_name)\n if out_name is None:\n out_name = sym.list_output_names()[0].replace('_output', '')\n return out_name, sym\n\n\ndef _as_list(arr):\n \"\"\"Force being a list, ignore if already is.\"\"\"\n if isinstance(arr, list):\n return arr\n return [arr]\n\ndef _read_memory_buffer(shape, data, dtype):\n length = 1\n for x in shape:\n length *= x\n data_np = np.zeros(length, dtype=dtype)\n for i in range(length):\n data_np[i] = data[i]\n return data_np.reshape(shape)\n\ndef _get_darknet_layername(layer_type):\n \"\"\"Get the layer name from the darknet enums.\"\"\"\n return str((LAYERTYPE(layer_type))).replace('LAYERTYPE.', '')\n\ndef _get_convolution_weights(layer, opname, params, dtype):\n \"\"\"Get the convolution layer weights and biases.\"\"\"\n if layer.nweights == 0:\n return\n\n if (layer.n * layer.c * layer.size * layer.size) != layer.nweights:\n raise RuntimeError(\"layer weights size not matching with n c h w\")\n\n weights = _read_memory_buffer((layer.n, layer.c, layer.size, layer.size), layer.weights, dtype)\n\n biases = _read_memory_buffer((layer.n, 
), layer.biases, dtype)\n\n k = _get_tvm_params_name(opname[0], 'weight')\n params[k] = tvm.nd.array(weights)\n\n if layer.batch_normalize == 1 and layer.dontloadscales != 1:\n _get_batchnorm_weights(layer, opname[1], params, layer.n, dtype)\n k = _get_tvm_params_name(opname[1], 'beta')\n params[k] = tvm.nd.array(biases)\n else:\n k = _get_tvm_params_name(opname[0], 'bias')\n params[k] = tvm.nd.array(biases)\n\ndef _get_connected_weights(layer, opname, params, dtype):\n \"\"\"Parse the weights and biases for fully connected or dense layer.\"\"\"\n size = layer.outputs * layer.inputs\n if size == 0:\n return\n\n weights = _read_memory_buffer((layer.outputs, layer.inputs), layer.weights, dtype)\n biases = _read_memory_buffer((layer.outputs, ), layer.biases, dtype)\n\n k = _get_tvm_params_name(opname, 'weight')\n params[k] = tvm.nd.array(weights)\n k = _get_tvm_params_name(opname, 'bias')\n params[k] = tvm.nd.array(biases)\n\n if layer.batch_normalize == 1 and layer.dontloadscales != 1:\n _get_batchnorm_weights(layer, opname, params, layer.outputs, dtype)\n\ndef _get_batchnorm_weights(layer, opname, params, size, dtype):\n \"\"\"Parse the weights for batchnorm, which includes, scales, moving mean\n and moving variances.\"\"\"\n scales = _read_memory_buffer((size, ), layer.scales, dtype)\n rolling_mean = _read_memory_buffer((size, ), layer.rolling_mean, dtype)\n rolling_variance = _read_memory_buffer((size, ), layer.rolling_variance, dtype)\n\n k = _get_tvm_params_name(opname, 'moving_mean')\n params[k] = tvm.nd.array(rolling_mean)\n k = _get_tvm_params_name(opname, 'moving_var')\n params[k] = tvm.nd.array(rolling_variance)\n k = _get_tvm_params_name(opname, 'gamma')\n params[k] = tvm.nd.array(scales)\n\ndef _get_darknet_attrs(net, layer_num):\n \"\"\"Parse attributes of each layer and return.\"\"\"\n attr = {}\n use_flatten = True\n layer = net.layers[layer_num]\n op_name = _get_darknet_layername(layer.type)\n\n if LAYERTYPE.CONVOLUTIONAL == layer.type:\n attr.update({'layout' : 'NCHW'})\n attr.update({'pad' : str(layer.pad)})\n attr.update({'num_group' : str(layer.groups)})\n attr.update({'num_filter' : str(layer.n)})\n attr.update({'stride' : str(layer.stride)})\n attr.update({'kernel' : str(layer.size)})\n attr.update({'activation' : (layer.activation)})\n\n if layer.nbiases == 0:\n attr.update({'use_bias' : False})\n else:\n attr.update({'use_bias' : True})\n\n if layer.batch_normalize == 1 and layer.dontloadscales != 1:\n attr.update({'use_batchNorm' : True})\n attr.update({'use_scales' : True})\n\n #elif LAYERTYPE.BATCHNORM == layer.type:\n # attr.update({'flatten' : str('True')})\n\n elif LAYERTYPE.CONNECTED == layer.type:\n attr.update({'num_hidden' : str(layer.outputs)})\n attr.update({'activation' : (layer.activation)})\n if layer_num != 0:\n layer_prev = net.layers[layer_num - 1]\n if (layer_prev.out_h == layer.h and\n layer_prev.out_w == layer.w and\n layer_prev.out_c == layer.c):\n use_flatten = False\n attr.update({'use_flatten' : use_flatten})\n if layer.nbiases == 0:\n attr.update({'use_bias' : False})\n else:\n attr.update({'use_bias' : True})\n if layer.batch_normalize == 1 and layer.dontloadscales != 1:\n attr.update({'use_batchNorm' : True})\n attr.update({'use_scales' : True})\n\n elif LAYERTYPE.MAXPOOL == layer.type:\n attr.update({'pad' : str(layer.pad)})\n attr.update({'stride' : str(layer.stride)})\n attr.update({'kernel' : str(layer.size)})\n max_output = (layer.w - layer.size + 2 * layer.pad)/float(layer.stride) + 1\n if max_output < layer.out_w:\n extra_pad = 
(layer.out_w - max_output)*layer.stride\n attr.update({'extra_pad_size' : int(extra_pad)})\n elif LAYERTYPE.AVGPOOL == layer.type:\n attr.update({'pad' : str(layer.pad)})\n if layer.stride == 0:\n attr.update({'stride' : str(1)})\n else:\n attr.update({'stride' : str(layer.stride)})\n if layer.size == 0 and layer.h == layer.w:\n attr.update({'kernel' : str(layer.h)})\n else:\n attr.update({'kernel' : str(layer.size)})\n\n elif LAYERTYPE.DROPOUT == layer.type:\n attr.update({'p' : str(layer.probability)})\n\n elif LAYERTYPE.SOFTMAX == layer.type:\n attr.update({'axis' : 1})\n attr.update({'use_flatten' : True})\n\n elif LAYERTYPE.SHORTCUT == layer.type:\n add_layer = net.layers[layer.index]\n attr.update({'activation' : (layer.activation)})\n attr.update({'out_channel' : (layer.out_c)})\n attr.update({'out_size' : (layer.out_h)})\n attr.update({'add_out_channel' : (add_layer.out_c)})\n attr.update({'add_out_size' : (add_layer.out_h)})\n\n elif LAYERTYPE.ROUTE == layer.type:\n pass\n\n elif LAYERTYPE.COST == layer.type:\n pass\n\n elif LAYERTYPE.REORG == layer.type:\n attr.update({'stride' : layer.stride})\n\n elif LAYERTYPE.REGION == layer.type:\n attr.update({'n' : layer.n})\n attr.update({'classes' : layer.classes})\n attr.update({'coords' : layer.coords})\n attr.update({'background' : layer.background})\n attr.update({'softmax' : layer.softmax})\n else:\n err = \"Darknet layer {} is not supported in nnvm.\".format(op_name)\n raise NotImplementedError(err)\n\n return op_name, attr\n\ndef _get_tvm_params_name(opname, arg_name):\n \"\"\"Makes the params name for the k,v pair.\"\"\"\n return opname + '_'+ arg_name\n\ndef _get_darknet_params(layer, opname, tvmparams, dtype='float32'):\n \"\"\"To parse and get the darknet params.\"\"\"\n if LAYERTYPE.CONVOLUTIONAL == layer.type:\n _get_convolution_weights(layer, opname, tvmparams, dtype)\n\n #elif LAYERTYPE.BATCHNORM == layer.type:\n # size = layer.outputs\n # _get_batchnorm_weights(layer, opname, tvmparams, size, dtype)\n\n elif LAYERTYPE.CONNECTED == layer.type:\n _get_connected_weights(layer, opname, tvmparams, dtype)\n\ndef _preproc_layer(net, i, sym_array):\n \"\"\"To preprocess each darknet layer, some layer doesnt need processing.\"\"\"\n layer = net.layers[i]\n if i == 0:\n name = 'data'\n attribute = {}\n sym = [_sym.Variable(name, **attribute)]\n else:\n sym = sym_array[i - 1]\n skip_layer = False\n\n if LAYERTYPE.ROUTE == layer.type:\n sym = []\n for j in range(layer.n):\n sym.append(sym_array[layer.input_layers[j]])\n if layer.n == 1:\n skip_layer = True\n\n elif LAYERTYPE.COST == layer.type:\n skip_layer = True\n\n elif LAYERTYPE.SHORTCUT == layer.type:\n sym = [sym, sym_array[layer.index]]\n\n elif LAYERTYPE.BLANK == layer.type:\n skip_layer = True\n\n if skip_layer is True:\n sym_array[i] = sym\n\n return skip_layer, sym\n\ndef _from_darknet(net, dtype='float32'):\n \"\"\"To convert the darknet symbol to nnvm symbols.\"\"\"\n sym_array = {}\n tvmparams = {}\n for i in range(net.n):\n need_skip, sym = _preproc_layer(net, i, sym_array)\n if need_skip is True:\n continue\n op_name, attr = _get_darknet_attrs(net, i)\n layer_name, sym = _darknet_convert_symbol(op_name, _as_list(sym), attr)\n _get_darknet_params(net.layers[i], layer_name, tvmparams, dtype)\n sym_array[i] = sym\n\n return sym, tvmparams\n\ndef from_darknet(net, dtype='float32'):\n \"\"\"Convert from darknet's model into compatible NNVM format.\n Reconstruct a nnvm symbol by traversing the darknet input.\n\n Parameters\n ----------\n net : ctype Pointer to network\n 
Darknet parsed symbols\n\n dtype : str\n Datatype of the input net structure, default is float32\n\n Returns\n -------\n sym : nnvm.Symbol\n Compatible nnvm symbol\n\n params : dict of str to tvm.NDArray\n The parameter dict to be used by nnvm\n \"\"\"\n\n return _from_darknet(net, dtype)\n"
]
| [
[
"numpy.zeros",
"numpy.finfo"
]
]
|
rkalahasty/nipy | [
"d16d268938dcd5c15748ca051532c21f57cf8a22"
]
| [
"nipy/labs/bindings/tests/test_linalg.py"
]
| [
"from __future__ import absolute_import\n\n#\n# Test fff linear algebra routines \n#\n\nfrom numpy.testing import assert_equal\nimport numpy as np\nfrom .. import vector_get, vector_set \n\nn = 15\n\ndef test_vector_get():\n x = np.random.rand(n)\n i = np.random.randint(n)\n xi = vector_get(x, i)\n assert_equal(xi, x[i])\n\ndef test_vector_get_int32():\n x = (100*np.random.rand(n)).astype('int32')\n i = np.random.randint(n)\n xi = vector_get(x, i)\n assert_equal(xi, x[i])\n\ndef test_vector_set():\n x = np.random.rand(n)\n i = np.random.randint(n)\n y = vector_set(x, i, 3)\n assert_equal(3, y[i])\n \ndef test_vector_set_int32():\n x = (100*np.random.rand(n)).astype('int32')\n i = np.random.randint(n)\n y = vector_set(x, i, 3)\n assert_equal(3, y[i])\n\n\n\nif __name__ == \"__main__\":\n import nose\n nose.run(argv=['', __file__])\n\n"
]
| [
[
"numpy.testing.assert_equal",
"numpy.random.rand",
"numpy.random.randint"
]
]
|
ZeeD26/pysimplevtk | [
"188ef6e776850402e11f59eaa6bd6e511a3259e0"
]
| [
"pysimplevtk/utilities/global_point_list.py"
]
| [
"# -*- coding: utf-8 -*-\nfrom numpy import all, asanyarray, empty, isclose, vstack, where\n\n\n__all__ = ['GlobalPointList']\n\n\nclass GlobalPointList(object):\n\n def __init__(self, rtol=1e-05, atol=1e-08):\n self.points = empty([0, 3])\n self.rtol = rtol\n self.atol = atol\n\n def _add_point(self, point):\n self.points = vstack((self.points, asanyarray(point)))\n\n def get_point_id(self, point):\n potential_ids = where(all(\n isclose(point, self.points, rtol=self.rtol, atol=self.atol),\n axis=1))[0]\n if len(potential_ids) == 0:\n id_ = len(self.points)\n self._add_point(point=point)\n elif len(potential_ids) == 1:\n id_ = potential_ids[0]\n else:\n raise ValueError((\n 'Several points close to {} have been found. This may be due '\n 'to changing the tolerances inbetween or manually adding a '\n 'point.').format(point))\n return id_\n\n"
]
| [
[
"numpy.asanyarray",
"numpy.empty",
"numpy.isclose"
]
]
|
scrambleegg7/Mask_RCNN | [
"f3dd0563278160b1068f0215b5eba81eb3ce7cae"
]
| [
"myTrain/dataImage.py"
]
| [
"\nimport sys\nimport math\nimport cv2\n\nROOT_DIR = \"/Users/donchan/Documents/Miyuki/Mask_RCNN\"\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\n\nfrom mrcnn import utils\nimport numpy as np \nimport pandas as pd \n\nimport random\nimport matplotlib.pyplot as plt\n\nclass ShapesDataset(utils.Dataset):\n \"\"\"Generates the shapes synthetic dataset. The dataset consists of simple\n shapes (triangles, squares, circles) placed randomly on a blank surface.\n The images are generated on the fly. No file access required.\n \"\"\"\n\n def load_shapes(self, count, height, width):\n \"\"\"Generate the requested number of synthetic images.\n count: number of images to generate.\n height, width: the size of the generated images.\n \"\"\"\n # Add classes\n self.add_class(\"shapes\", 1, \"square\")\n self.add_class(\"shapes\", 2, \"circle\")\n self.add_class(\"shapes\", 3, \"triangle\")\n\n # Add images\n # Generate random specifications of images (i.e. color and\n # list of shapes sizes and locations). This is more compact than\n # actual images. Images are generated on the fly in load_image().\n for i in range(count):\n bg_color, shapes = self.random_image(height, width)\n self.add_image(\"shapes\", image_id=i, path=None,\n width=width, height=height,\n bg_color=bg_color, shapes=shapes)\n\n def load_image(self, image_id):\n \"\"\"Generate an image from the specs of the given image ID.\n Typically this function loads the image from a file, but\n in this case it generates the image on the fly from the\n specs in image_info.\n \"\"\"\n info = self.image_info[image_id]\n bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n image = image * bg_color.astype(np.uint8)\n for shape, color, dims in info['shapes']:\n image = self.draw_shape(image, shape, dims, color)\n return image\n\n def image_reference(self, image_id):\n \"\"\"Return the shapes data of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"shapes\":\n return info[\"shapes\"]\n else:\n super(self.__class__).image_reference(self, image_id)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for shapes of the given image ID.\n \"\"\"\n info = self.image_info[image_id]\n shapes = info['shapes']\n count = len(shapes)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, (shape, _, dims) in enumerate(info['shapes']):\n mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),\n shape, dims, 1)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s[0]) for s in shapes])\n return mask.astype(np.bool), class_ids.astype(np.int32)\n\n def draw_shape(self, image, shape, dims, color):\n \"\"\"Draws a shape from the given specs.\"\"\"\n # Get the center x, y and the size s\n x, y, s = dims\n if shape == 'square':\n cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)\n elif shape == \"circle\":\n cv2.circle(image, (x, y), s, color, -1)\n elif shape == \"triangle\":\n points = np.array([[(x, y-s),\n (x-s/math.sin(math.radians(60)), y+s),\n (x+s/math.sin(math.radians(60)), y+s),\n ]], dtype=np.int32)\n cv2.fillPoly(image, points, color)\n return image\n\n def random_shape(self, height, width):\n \"\"\"Generates 
specifications of a random shape that lies within\n the given height and width boundaries.\n Returns a tuple of three values:\n * The shape name (square, circle, ...)\n * Shape color: a tuple of 3 values, RGB.\n * Shape dimensions: A tuple of values that define the shape size\n and location. Differs per shape type.\n \"\"\"\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width - buffer - 1)\n # Size\n s = random.randint(buffer, height//4)\n return shape, color, (x, y, s)\n\n def random_image(self, height, width):\n \"\"\"Creates random specifications of an image with multiple shapes.\n Returns the background color of the image and a list of shape\n specifications that can be used to draw the image.\n \"\"\"\n # Pick random background color\n bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n # Generate a few random shapes and record their\n # bounding boxes\n shapes = []\n boxes = []\n N = random.randint(1, 4)\n for _ in range(N):\n shape, color, dims = self.random_shape(height, width)\n shapes.append((shape, color, dims))\n x, y, s = dims\n boxes.append([y-s, x-s, y+s, x+s])\n # Apply non-max suppression with 0.3 threshold to avoid\n # shapes covering each other\n keep_ixs = utils.non_max_suppression(np.array(boxes), np.arange(N), 0.3)\n shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]\n return bg_color, shapes\n\n\ndef main():\n dataSet = ShapesDataset()\n dataSet.load_shapes(10, 128, 128)\n dataSet.prepare()\n\n image = dataSet.load_image(1)\n print(image.shape)\n\n mask, class_ids = dataSet.load_mask(1)\n bbox = utils.extract_bboxes(mask)\n print(bbox.shape)\n\n plt.imshow(bbox)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()"
]
| [
[
"numpy.logical_not",
"matplotlib.pyplot.imshow",
"numpy.arange",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
]
|
kwasniakk/CS-GO-Grenade-Classification | [
"7d00f93a9df96f8e892d92aad92b50f060e243ff"
]
| [
"project/utils/data_utils.py"
]
| [
"import pandas as pd\n\nCOLUMNS_TO_DROP = [\"demo_id\", \"demo_round_id\", \"weapon_fire_id\", \"round_start_tick\"]\nDUMMY_COLS = [\"LABEL\", \"team\", \"TYPE\", \"map_name\"]\n\n\ndef preprocess(df):\n data_dropped = df.drop(columns = COLUMNS_TO_DROP)\n data_cleaned = pd.get_dummies(data_dropped, columns = DUMMY_COLS, drop_first = True)\n X = data_cleaned.drop(columns = [\"LABEL_True\"])\n y = data_cleaned[\"LABEL_True\"]\n return X, y\n"
]
| [
[
"pandas.get_dummies"
]
]
|
Top34051/stargan-zsvc | [
"ac9f91ec6ea6fa8c1950d28cca1a23931388d496"
]
| [
"solver.py"
]
| [
"import torch\nfrom torch import nn\nfrom tqdm import tqdm\n\nfrom model.discriminator import Discriminator\nfrom model.generator import Generator\n\n\nclass Solver():\n\n def __init__(self, train_loader, test_loader, config):\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n self.train_loader = train_loader\n self.test_loader = test_loader\n\n # epoch\n self.epoch = 1\n\n # networks\n self.gen = Generator(embed_dim=256).to(self.device)\n self.dis = Discriminator(embed_dim=256).to(self.device)\n\n # gen frequency\n self.gen_freq = config['gen_freq']\n\n # train optimizers\n self.gen_lr = config['optimizers']['gen_lr']\n self.dis_lr = config['optimizers']['dis_lr']\n self.beta1 = config['optimizers']['beta1']\n self.beta2 = config['optimizers']['beta2']\n self.gen_opt = torch.optim.Adam(self.gen.parameters(), self.gen_lr, [self.beta1, self.beta2])\n self.dis_opt = torch.optim.Adam(self.dis.parameters(), self.dis_lr, [self.beta1, self.beta2])\n\n # hyperparams\n self.hparam = config['hparam']\n\n # epoch save\n self.epoch_save = config['epoch_save']\n\n # load checkpoint\n if config['resume'] != '':\n checkpoint = torch.load(config['resume'])\n self.epoch = checkpoint['epoch'] + 1\n self.gen.load_state_dict(checkpoint['gen'])\n self.dis.load_state_dict(checkpoint['dis'])\n \n # losses\n self.l1_loss = nn.L1Loss()\n self.l2_loss = nn.MSELoss()\n\n def reset_grad(self):\n self.dis_opt.zero_grad()\n self.gen_opt.zero_grad()\n\n def train_step(self, idx, x_src, src, trg):\n\n x_src = x_src.to(self.device)\n src = src.unsqueeze(0).to(self.device)\n trg = trg.unsqueeze(0).to(self.device)\n\n # inference\n x_src_src = self.gen(x_src, src, src)\n x_src_trg = self.gen(x_src, src, trg)\n x_src_trg_src = self.gen(x_src_trg, trg, src)\n d_src = self.dis(x_src, src, trg)\n d_src_trg = self.dis(x_src_trg, trg, src)\n\n # Train discriminator\n dis_loss = torch.mean((d_src_trg - self.hparam['b']) ** 2 + (d_src - self.hparam['a']) ** 2)\n\n self.reset_grad()\n dis_loss.backward(retain_graph=True)\n self.dis_opt.step()\n\n # Train generator\n if idx % self.gen_freq == 0:\n \n id_loss = self.l2_loss(x_src, x_src_src)\n cyc_loss = self.l1_loss(x_src, x_src_trg_src)\n\n d_src_trg_2 = self.dis(x_src_trg, trg, src)\n adv_loss = torch.mean((d_src_trg_2 - self.hparam['a']) ** 2)\n\n gen_loss = self.hparam['lambda_id'] * id_loss + self.hparam['lambda_cyc'] * cyc_loss + adv_loss\n\n self.reset_grad()\n gen_loss.backward(retain_graph=True)\n self.gen_opt.step()\n\n return dis_loss.item(), gen_loss.item(), adv_loss.item()\n \n return dis_loss.item(), None, None\n\n def train(self, num_epoch=3000):\n\n # loop epoch\n while self.epoch <= num_epoch:\n\n print('Epoch {}'.format(self.epoch))\n\n gen_losses = []\n dis_losses = []\n adv_losses = []\n\n # loop batch\n for idx, (mel, src, trg) in tqdm(enumerate(self.train_loader), total=len(self.train_loader)):\n dis_loss, gen_loss, adv_loss = self.train_step(idx+1, mel.squeeze(0), src.squeeze(0), trg.squeeze(0))\n \n dis_losses.append(dis_loss)\n if gen_loss is not None:\n gen_losses.append(gen_loss)\n adv_losses.append(adv_loss)\n\n print(' dis loss: {}'.format(sum(dis_losses) / len(dis_losses)))\n print(' gen loss: {}'.format(sum(gen_losses) / len(gen_losses)))\n print(' adv loss: {}'.format(sum(adv_losses) / len(adv_losses)))\n \n # save checkpoint\n if self.epoch % self.epoch_save == 0:\n torch.save({\n 'epoch': self.epoch,\n 'gen': self.gen.state_dict(),\n 'dis': self.dis.state_dict()\n }, f'./checkpoints/checkpoint_{self.epoch}.pt')\n\n 
self.epoch += 1\n"
]
| [
[
"torch.mean",
"torch.load",
"torch.nn.L1Loss",
"torch.cuda.is_available",
"torch.nn.MSELoss"
]
]
|
fenning-research-group/Instruments | [
"c4e5f854fed1cce20f25076a38842bfbb5396917"
]
| [
"FRG Hardware/frghardware/wardmapper/frghardware/wardmapper/processing.py"
]
| [
"import matplotlib.pyplot as plt\nimport h5py\nimport os\nimport numpy as np\nfrom matplotlib_scalebar.scalebar import ScaleBar\nfrom frgtools.ward import *\nimport cmocean\nimport imreg_dft as ird\nfrom skimage.filters import gaussian\nfrom scipy import ndimage as nd\nfrom matplotlib.widgets import Button\nfrom tqdm import tqdm\ndef fitThreePointWaRD(file, celltype, plot = False):\n\tif str.lower(celltype) in ['albsf', 'al-bsf']:\n\t\twl_eva = 1730\n\t\twl_h2o = 1902\n\t\twl_ref = 1942\n\n\t\tp1 = 34.53\n\t\tp2 = 1.545\n\t\tcelltype = 'albsf'\n\telif str.lower(celltype) in ['perc']:\n\t\twl_eva = 1730\n\t\twl_h2o = 1902\n\t\twl_ref = 1942\n\n\t\tp1 = 29.75\n\t\tp2 = 1.367\n\t\tcelltype = 'perc'\n\telif str.lower(celltype) in ['albsf2', 'al-bsf2']:\n\t\twl_eva = 1730\n\t\twl_h2o = 1902\n\t\twl_ref = 1872\n\n\t\tp1 = 24.75\n\t\tp2 = 0.3461\n\t\tcelltype = 'albsf2'\n\telse:\n\t\tprint('Celltype Error: valid types are \"albsf\", \"perc\", of \"albsf2\" - user provided {0}'.format(celltype))\n\t\treturn\n\n\twith h5py.File(file, 'r') as d:\n\t name = d['info']['name'][()]\n\t x = d['data']['relx'][()]\n\t y = d['data']['rely'][()]\n\t realx = d['data']['x'][()]\n\t realy = d['data']['y'][()]\n\t wl = d['data']['wavelengths'][()]\n\t ref = d['data']['reflectance'][()]\n\t time = d['data']['delay'][()]\n\t\n\tab = -np.log(ref)\t#convert reflectance values to absorbance\n\n\tallWavelengthsPresent = True\n\tmissingWavelength = None\n\tfor each in [wl_eva, wl_h2o, wl_ref]:\n\t\tif each not in wl:\n\t\t\tallWavelengthsPresent = False\n\t\t\tmissingWavelength = each\n\t\t\tbreak\n\n\tif not allWavelengthsPresent:\n\t\tprint('Wavelength Error: Necessary wavelength {0} missing from dataset - cannot fit.'.format(missingWavelength))\n\t\treturn\n\n\tevaIdx = np.where(wl == wl_eva)[0]\n\th2oIdx = np.where(wl == wl_h2o)[0]\n\trefIdx = np.where(wl == wl_ref)[0]\n\t\n\tratio = np.divide(ab[:,:,h2oIdx]-ab[:,:,refIdx], ab[:,:,evaIdx]-ab[:,:,refIdx])[:,:,0]\n\th2o = ratio*p1 + p2\n\t# h2o[h2o < 0] = 0\t\n\n\n\t## Avg Reflectance Fitting\n\tavgRef = np.mean(ref, axis = 2)\n\n\t# h2o_reg_imputed = RegisterToDummy(\n\t# \t\tImputeWater(h2o, avgRef > avgRef.mean()*1.2),\n\t# \t\tavgRef\n\t# \t)\n\n\th2o_reg_imputed = RegisterToDummy(\n\t\t\th2o,\n\t\t\tavgRef\n\t\t)\n\n\t## write fits to h5 file\n\twith h5py.File(file, 'a') as d:\n\t\tif 'fits' in d.keys():\n\t\t\tfits = d['fits']\n\t\telse:\n\t\t\tfits = d.create_group('/fits')\n\n\t\t_fillDataset(d['fits'], 'water', h2o, 'Water content (mg/cm^3) measured by WaRD.')\n\t\t_fillDataset(d['fits'], 'celltype', celltype.encode('utf-8'), 'Cell architecture assumed during fitting.')\n\t\t_fillDataset(d['fits'], 'wl_eva', wl_eva, 'Wavelength used as EVA absorbance point.')\n\t\t_fillDataset(d['fits'], 'wl_h2o', wl_h2o, 'Wavelength used as water absorbance point.')\n\t\t_fillDataset(d['fits'], 'wl_ref', wl_ref, 'Wavelength used as reference absorbance point.')\n\t\t_fillDataset(d['fits'], 'poly', [p1, p2], 'Polynomial fit coefficients used to convert absorbances to water content. 
From highest to lowest order.')\n\t\t_fillDataset(d['fits'], 'avgref', avgRef, 'Average reflectance at each point')\n\t\t_fillDataset(d['fits'], 'water_reg', h2o_reg_imputed, 'Water map after registration to dummy mask + imputing to replace finger regions')\n\t## Plotting\n\tif plot:\n\t\tfig, ax = plt.subplots(1,2, figsize = (10, 8))\n\n\t\tim1 = ax[0].imshow(\n\t\t h2o,\n\t\t extent = [0, x.max(), 0, y.max()],\n\t\t origin = 'lower',\n\t\t vmin = 0,\n\t\t vmax = 2\n\t\t)\n\t\tcb = fig.colorbar(im1, ax = ax[0],\n\t\t orientation=\"horizontal\",fraction=0.068,anchor=(1.0,0.0), pad = 0.01)\n\t\tcb.set_label('$[H_{2}O]$ $(mg/cm^3)$')\n\t\tax[0].set_title('Water Map')\n\t\tax[0].axis('off')\n\n\n\t\tim2 = ax[1].imshow(\n\t\t avgRef,\n\t\t extent = [0, x.max(), 0, y.max()],\n\t\t origin = 'lower',\n\t\t vmin = 0\n\t\t)\n\t\tcb = plt.colorbar(im2, ax = ax[1],\n\t\t orientation=\"horizontal\",fraction=0.068,anchor=(1.0,0.0), pad = 0.01)\n\t\tcb.set_label('Reflectance')\n\t\tax[1].set_title('Avg Reflectance')\n\t\tplt.axis('off')\n\t\tscalebar = ScaleBar(\n\t\t dx = 1e-3,\n\t\t location = 'lower right',\n\t\t color = [1, 1, 1],\n\t\t box_alpha = 0\n\t\t)\n\t\tax[1].add_artist(scalebar)\n\t\tplt.tight_layout()\n\t\tplt.show()\n\ndef fitFullWaRD(file, celltype = 'Not Inputted', plot = False):\n\t# if str.lower(celltype) in ['albsf', 'al-bsf']:\n\t# \twl_eva = 1730\n\t# \twl_h2o = 1902\n\t# \twl_ref = 1942\n\n\t# \tp1 = 34.53\n\t# \tp2 = 1.545\n\t# \tcelltype = 'albsf'\n\t# elif str.lower(celltype) in ['perc']:\n\t# \twl_eva = 1730\n\t# \twl_h2o = 1902\n\t# \twl_ref = 1942\n\n\t# \tp1 = 29.75\n\t# \tp2 = 1.367\n\t# \tcelltype = 'perc'\n\t# elif str.lower(celltype) in ['albsf2', 'al-bsf2']:\n\t# \twl_eva = 1730\n\t# \twl_h2o = 1902\n\t# \twl_ref = 1872\n\n\t# \tp1 = 24.75\n\t# \tp2 = 0.3461\n\t# \tcelltype = 'albsf2'\n\t# else:\n\t# \tprint('Celltype Error: valid types are \"albsf\", \"perc\", of \"albsf2\" - user provided {0}'.format(celltype))\n\t# \treturn\n\n\twith h5py.File(file, 'r') as d:\n\t name = d['info']['name'][()]\n\t x = d['data']['relx'][()]\n\t y = d['data']['rely'][()]\n\t realx = d['data']['x_full'][()]\n\t realy = d['data']['y_full'][()]\n\t wl = d['data']['wavelengths_full'][()]\n\t ref = d['data']['reflectance_full'][()]\n\t time = d['data']['delay_full'][()]\n\t\n\tab = -np.log(ref)\t#convert reflectance values to absorbance\n\n\tallWavelengthsPresent = True\n\tmissingWavelength = None\n\tfor each in np.linspace(1700, 2000, 151):\n\t\tif each not in wl:\n\t\t\tallWavelengthsPresent = False\n\t\t\tmissingWavelength = each\n\t\t\tbreak\n\n\tif not allWavelengthsPresent:\n\t\tprint('Wavelength Error: Necessary wavelength {0} missing from dataset - cannot fit.'.format(missingWavelength))\n\t\treturn\n\n\th2o = np.zeros(time.shape)\n\tfor m in np.ndindex(h2o.shape):\n\t\th2o[m] = fit_fullspectrum(wl, ref[m], plot = False)\n\t# h2o[h2o < 0] = 0\t\n\n\n\t## Avg Reflectance Fitting\n\tavgRef = np.mean(ref, axis = 1)\n\n\t# h2o_reg_imputed = RegisterToDummy(\n\t# \t\tImputeWater(h2o, avgRef > avgRef.mean()*1.2),\n\t# \t\tavgRef\n\t# \t)\n\n\t# h2o_reg_imputed = RegisterToDummy(\n\t# \t\th2o,\n\t# \t\tavgRef\n\t# \t)\n\n\t## write fits to h5 file\n\twith h5py.File(file, 'a') as d:\n\t\tif 'fits' in d.keys():\n\t\t\tfits = d['fits']\n\t\telse:\n\t\t\tfits = d.create_group('/fits')\n\n\t\t_fillDataset(d['fits'], 'water_full', h2o, 'Water content (mg/cm^3) measured by WaRD.')\n\t\t_fillDataset(d['fits'], 'celltype_full', celltype.encode('utf-8'), 'Cell architecture assumed during 
fitting.')\n\t\t_fillDataset(d['fits'], 'avgref_full', avgRef, 'Average reflectance at each point')\n\t## Plotting\n\tif plot:\n\t\tfig, ax = plt.subplots(1,2, figsize = (10, 8))\n\n\t\tim1 = ax[0].imshow(\n\t\t h2o,\n\t\t extent = [0, x.max(), 0, y.max()],\n\t\t origin = 'lower',\n\t\t vmin = 0,\n\t\t vmax = 2\n\t\t)\n\t\tcb = fig.colorbar(im1, ax = ax[0],\n\t\t orientation=\"horizontal\",fraction=0.068,anchor=(1.0,0.0), pad = 0.01)\n\t\tcb.set_label('$[H_{2}O]$ $(mg/cm^3)$')\n\t\tax[0].set_title('Water Map')\n\t\tax[0].axis('off')\n\n\n\t\tim2 = ax[1].imshow(\n\t\t avgRef,\n\t\t extent = [0, x.max(), 0, y.max()],\n\t\t origin = 'lower',\n\t\t vmin = 0\n\t\t)\n\t\tcb = plt.colorbar(im2, ax = ax[1],\n\t\t orientation=\"horizontal\",fraction=0.068,anchor=(1.0,0.0), pad = 0.01)\n\t\tcb.set_label('Reflectance')\n\t\tax[1].set_title('Avg Reflectance')\n\t\tplt.axis('off')\n\t\tscalebar = ScaleBar(\n\t\t dx = 1e-3,\n\t\t location = 'lower right',\n\t\t color = [1, 1, 1],\n\t\t box_alpha = 0\n\t\t)\n\t\tax[1].add_artist(scalebar)\n\t\tplt.tight_layout()\n\t\tplt.show()\n\ndef FullSpectrumFit(wavelengths, reflectance, plot = False):\n\teva_peak = 1730\n\teva_tolerance = 5\n\th2o_peak = 1902\n\th20_tolerance = 5\n\n\tif np.mean(reflectance) > 1:\n\t\treflectance = reflectance / 100\n\n\tabsSpectrum = -np.log(reflectance)\n\tabsPeaks, absBaseline = _RemoveBaseline(absSpectrum)\n\n\teva_idx = np.argmin(np.abs(wavelengths - eva_peak))\n\teva_abs = np.max(absPeaks[eva_idx-5 : eva_idx+5])\n\teva_idx_used = np.where(absPeaks == eva_abs)[0][0]\n\n\th2o_idx = np.argmin(np.abs(wavelengths - h2o_peak))\n\th2o_abs = np.max(absPeaks[h2o_idx-5 : h2o_idx+5])\n\th2o_idx_used = np.where(absPeaks == h2o_abs)[0][0]\n\n\th2o_ratio = h2o_abs/eva_abs\n\th2o_meas = (h2o_ratio - 0.002153)/.03491 #from mini module calibration curve 2019-04-09, no data with condensation risk\n\n\tif plot:\n\t\tfig, ax = plt.subplots(1,2, figsize = (8,3))\n\n\t\tax[0].plot(wavelengths, absSpectrum, label = 'Raw')\n\t\tax[0].plot(wavelengths, absBaseline, label = 'Baseline')\n\t\tax[0].legend()\n\t\tax[0].set_xlabel('Wavelengths (nm)')\n\t\tax[0].set_ylabel('Absorbance (AU)')\n\n\t\tax[1].plot(wavelengths, absPeaks, label = 'Corrected')\n\t\tax[1].plot(np.ones((2,)) * wavelengths[eva_idx_used], [0, eva_abs], label = 'EVA Peak', linestyle = '--')\n\t\tax[1].plot(np.ones((2,)) * wavelengths[h2o_idx_used], [0, h2o_abs], label = 'Water Peak', linestyle = '--')\n\t\tax[1].legend()\n\t\tax[1].set_xlabel('Wavelengths (nm)')\n\t\tax[1].set_ylabel('Baseline-Removed Absorbance (AU)')\n\n\t\tplt.tight_layout()\n\t\tplt.show()\n\n\treturn h2o_meas\n\ndef ImputeWater(data, invalid = None):\n\t\"\"\"\n\tReplace the value of invalid 'data' cells (indicated by 'invalid') \n\tby the value of the nearest valid data cell\n\n\tInput:\n\t\tdata: numpy array of any dimension\n\t\tinvalid: a binary array of same shape as 'data'. True cells set where data\n\t\t\t\t value should be replaced.\n\t\t\t\t If None (default), use: invalid = np.isnan(data)\n\n\tOutput: \n\t\tReturn a filled array. 
\n\t\"\"\"\n\t#import numpy as np\n\t#import scipy.ndimage as nd\n\n\tif invalid is None: invalid = np.isnan(data)\n\n\tind = nd.distance_transform_edt(invalid, return_distances=False, return_indices=True)\n\treturn data[tuple(ind)]\t\n\ndef RegisterToDummy(start, start_ref = None):\n\t#scale dummy mask with busbar oriented upper horizontal.\n\n\tdummy = np.zeros(start.shape)\n\tborder = [int(np.round(x*2/53)) for x in start.shape]\n\tbusbar = int(np.round(start.shape[0]*23/53))\n\tdummy[border[0]:-border[0], border[1]:-border[1]] = 0.05\n\tdummy[busbar:busbar+border[0],:] = 1\n\n\tif start_ref is None:\n\t\tstart_ref = start\n\t\t\n\tstart_gauss = gaussian(start_ref, sigma = 0.5)\n\tresult = ird.similarity(\n\t\tdummy,\n\t\tstart_gauss,\n\t\tnumiter = 30,\n\t\tconstraints = {\n\t\t\t'angle': [0, 360],\n\t\t\t'tx': [0, 2],\n\t\t\t'ty': [0, 2],\n\t\t\t'scale': [1, 0.02]\n\t\t}\n\t)\n\t\n\tstart_reg = ird.transform_img(\n\t\tstart,\n\t\ttvec = result['tvec'].round(4),\n\t\tangle = result['angle'],\n\t\tscale = result['scale']\n\t)\n\t\n\treturn start_reg\n\ndef ManualRegistrationSelection(file, **kwargs):\n\twith h5py.File(file, 'a') as d:\n\t\t# use average reflectance as reference to pick corners \n\t\tavgRef = d['data']['reflectance'][()].mean(axis = 2) #average reflectance across all wavelengths per point\n\t\tp = ImagePointPicker(avgRef, pts = 4, **kwargs)\n\n\t\t# add points to fits group.\n\t\tif 'fits' in d.keys():\n\t\t\tfits = d['fits']\n\t\telse:\n\t\t\tfits = d.create_group('/fits')\n\n\t\t_fillDataset(d['fits'], 'registrationpoints', p, 'Four corners of cell, used for registration. Points are ordered top right, top left, bottom left, bottom right, assuming that the cell is oriented with the busbar horizontal and closer to the top edge of the cell')\n\ndef BatchManualRegistrationSelection(directory, **kwargs):\n\tdef traverse_files(f, files = []):\n\t\tfor f_ in os.listdir(f):\n\t\t\tf__ = os.path.join(f, f_)\n\t\t\tif os.path.isdir(f__):\n\t\t\t\tfiles = traverse_files(f__, files)\n\t\t\telse:\n\t\t\t\tif f__[-3:] == '.h5':\n\t\t\t\t\ttry:\n\t\t\t\t\t\twith h5py.File(f__, 'r') as d:\n\t\t\t\t\t\t\tif 'fits/registrationpoints' not in d:\n\t\t\t\t\t\t\t\tfiles.append(f__)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\treturn files\n\t\n\tfor f in tqdm(traverse_files(directory)):\n\t\ttry:\n\t\t\tManualRegistrationSelection(f, **kwargs)\n\t\texcept:\n\t\t\tprint('Error fitting {0}'.format(f))\n\n## write fits to h5 file\ndef _fillDataset(d, name, data, description):\n\tif name in d.keys():\n\t\tdel d[name]\n\ttemp = d.create_dataset(name, data = data)\n\ttemp.attrs['description'] = description\n\n## Image registration point picking. 
Taken from frgtools.imageprocessing\nclass __ImgPicker():\n\tdef __init__(self, img, pts, markersize = 0.3, **kwargs):\n\t\tself.numPoints = pts\n\t\tself.currentPoint = 0\n\t\tself.finished = False\n\t\tself.markersize = markersize\n\n\t\tself.fig, self.ax = plt.subplots()\n\t\tself.ax.imshow(img, picker = True, **kwargs)\n\t\tself.fig.canvas.mpl_connect('pick_event', self.onpick)\n\n\t\tself.buttonAx = plt.axes([0.4, 0, 0.1, 0.075])\n\t\tself.stopButton = Button(self.buttonAx, 'Done')\n\t\tself.stopButton.on_clicked(self.setFinished)\n\n\t\tself.pickedPoints = [None for x in range(self.numPoints)]\n\t\tself.pointArtists = [None for x in range(self.numPoints)]\n\t\tself.pointText = [None for x in range(self.numPoints)]\n\n\t\tplt.show(block = True) \n\t\n\tdef setFinished(self, event):\n\t\tself.finished = True\n\t\tplt.close(self.fig)\n\t\n\tdef onpick(self, event):\n\t\tif not self.finished:\n\t\t\tmevt = event.mouseevent\n\t\t\tidx = self.currentPoint % self.numPoints\n\t\t\tself.currentPoint += 1\n\n\t\t\tx = mevt.xdata\n\t\t\ty = mevt.ydata\n\t\t\tself.pickedPoints[idx] = [x,y]\n\n\t\t\tif self.pointArtists[idx] is not None:\n\t\t\t\tself.pointArtists[idx].remove()\n\t\t\tself.pointArtists[idx] = plt.Circle((x,y), self.markersize, color = [1,1,1])\n\t\t\tself.ax.add_patch(self.pointArtists[idx])\n\n\t\t\tif self.pointText[idx] is not None:\n\t\t\t\tself.pointText[idx].set_position((x,y))\n\t\t\telse:\n\t\t\t\tself.pointText[idx] = self.ax.text(x,y, '{0}'.format(idx), color = [0,0,0], ha = 'center', va = 'center')\n\t\t\t\tself.ax.add_artist(self.pointText[idx])\n\n\t\t\tself.fig.canvas.draw()\n\t\t\tself.fig.canvas.flush_events()\n\ndef ImagePointPicker(img, pts = 4, **kwargs):\n\t\"\"\"\n\tGiven an image and a number of points, allows the user to interactively select points on the image.\n\tThese points are returned when the \"Done\" button is pressed. Useful to generate inputs for AffineCalculate.\n\t\"\"\"\n\timgpicker = __ImgPicker(img, pts, **kwargs)\n\treturn imgpicker.pickedPoints"
]
| [
[
"numpy.linspace",
"numpy.round",
"numpy.max",
"matplotlib.pyplot.axes",
"numpy.mean",
"numpy.where",
"numpy.divide",
"matplotlib.pyplot.tight_layout",
"scipy.ndimage.distance_transform_edt",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.zeros",
"numpy.log",
"numpy.isnan",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.widgets.Button",
"matplotlib.pyplot.subplots",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"numpy.ndindex"
]
]
|
lawwu/nni | [
"b869dd48dfe36392e7b78c70ea35eb6d4b4779dc"
]
| [
"src/sdk/pynni/tests/test_compressor.py"
]
| [
"from unittest import TestCase, main\nimport nni.compression.tensorflow as tf_compressor\nimport nni.compression.torch as torch_compressor\nimport torch\nimport torch.nn.functional as F\nimport tensorflow as tf \n\ndef weight_variable(shape):\n return tf.Variable(tf.truncated_normal(shape, stddev = 0.1))\n\ndef bias_variable(shape):\n return tf.Variable(tf.constant(0.1, shape = shape))\n\ndef conv2d(x_input, w_matrix):\n return tf.nn.conv2d(x_input, w_matrix, strides = [ 1, 1, 1, 1 ], padding = 'SAME')\n\ndef max_pool(x_input, pool_size):\n size = [ 1, pool_size, pool_size, 1 ]\n return tf.nn.max_pool(x_input, ksize = size, strides = size, padding = 'SAME')\n\n\nclass TfMnist:\n def __init__(self):\n images = tf.placeholder(tf.float32, [ None, 784 ], name = 'input_x')\n labels = tf.placeholder(tf.float32, [ None, 10 ], name = 'input_y')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n\n self.images = images\n self.labels = labels\n self.keep_prob = keep_prob\n\n self.train_step = None\n self.accuracy = None\n\n self.w1 = None\n self.b1 = None\n self.fcw1 = None\n self.cross = None\n with tf.name_scope('reshape'):\n x_image = tf.reshape(images, [ -1, 28, 28, 1 ])\n with tf.name_scope('conv1'):\n w_conv1 = weight_variable([ 5, 5, 1, 32 ])\n self.w1 = w_conv1\n b_conv1 = bias_variable([ 32 ])\n self.b1 = b_conv1\n h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)\n with tf.name_scope('pool1'):\n h_pool1 = max_pool(h_conv1, 2)\n with tf.name_scope('conv2'):\n w_conv2 = weight_variable([ 5, 5, 32, 64 ])\n b_conv2 = bias_variable([ 64 ])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)\n with tf.name_scope('pool2'):\n h_pool2 = max_pool(h_conv2, 2)\n with tf.name_scope('fc1'):\n w_fc1 = weight_variable([ 7 * 7 * 64, 1024 ])\n self.fcw1 = w_fc1\n b_fc1 = bias_variable([ 1024 ])\n h_pool2_flat = tf.reshape(h_pool2, [ -1, 7 * 7 * 64 ])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)\n with tf.name_scope('dropout'):\n h_fc1_drop = tf.nn.dropout(h_fc1, 0.5)\n with tf.name_scope('fc2'):\n w_fc2 = weight_variable([ 1024, 10 ])\n b_fc2 = bias_variable([ 10 ])\n y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2\n with tf.name_scope('loss'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = labels, logits = y_conv))\n self.cross = cross_entropy\n with tf.name_scope('adam_optimizer'):\n self.train_step = tf.train.AdamOptimizer(0.0001).minimize(cross_entropy)\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(labels, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nclass TorchMnist(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = torch.nn.Conv2d(1, 20, 5, 1)\n self.conv2 = torch.nn.Conv2d(20, 50, 5, 1)\n self.fc1 = torch.nn.Linear(4 * 4 * 50, 500)\n self.fc2 = torch.nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4 * 4 * 50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim = 1)\n\nclass CompressorTestCase(TestCase):\n def test_tf_pruner(self):\n model = TfMnist()\n configure_list = [{'sparsity':0.8, 'op_types':'default'}]\n tf_compressor.LevelPruner(configure_list).compress_default_graph()\n\n\n def test_tf_quantizer(self):\n model = TfMnist()\n tf_compressor.NaiveQuantizer([{'op_types': 'default'}]).compress_default_graph()\n \n def test_torch_pruner(self):\n model = TorchMnist()\n 
configure_list = [{'sparsity':0.8, 'op_types':'default'}]\n torch_compressor.LevelPruner(configure_list).compress(model)\n \n def test_torch_quantizer(self):\n model = TorchMnist()\n torch_compressor.NaiveQuantizer([{'op_types': 'default'}]).compress(model)\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"tensorflow.matmul",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.constant",
"tensorflow.truncated_normal",
"torch.nn.functional.log_softmax",
"tensorflow.nn.max_pool",
"tensorflow.reshape",
"torch.nn.Conv2d",
"tensorflow.placeholder",
"tensorflow.cast",
"torch.nn.Linear",
"tensorflow.name_scope",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.dropout",
"tensorflow.argmax",
"torch.nn.functional.max_pool2d",
"tensorflow.nn.conv2d"
]
]
|
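The tests in the record above exercise NNI's LevelPruner, which masks the smallest-magnitude weights until a target sparsity is reached. A minimal sketch of that masking computation in plain PyTorch (illustrative only, not NNI's implementation; the 0.8 sparsity mirrors the config in the tests):

import torch

def level_prune_mask(weight, sparsity):
    # Keep only the (1 - sparsity) fraction of weights with the largest magnitude.
    k = int(sparsity * weight.numel())
    if k == 0:
        return torch.ones_like(weight)
    threshold = weight.abs().flatten().kthvalue(k).values
    return (weight.abs() > threshold).float()

w = torch.randn(20, 50)
mask = level_prune_mask(w, sparsity=0.8)
print(1.0 - mask.mean().item())  # ~0.8: fraction of weights zeroed out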
gwaygenomics/pooled-cell-painting-profiling-recipe | [
"886a601761ab62f9ee07b4996c6add9689b41077"
]
| [
"0.preprocess-sites/scripts/spot_utils.py"
]
| [
"import pathlib\nimport pandas as pd\nimport plotnine as gg\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef spot_counts_per_cell_histogram(df, col, file, bins=50):\n plt.figure(num=None, figsize=(4, 3), dpi=300, facecolor=\"w\", edgecolor=\"k\")\n df.loc[:, col].squeeze().value_counts().hist(bins=bins)\n plt.xlabel(\"Number of Barcodes\")\n plt.ylabel(\"Number of Cells\")\n plt.tight_layout()\n plt.savefig(file)\n plt.close()\n\n\ndef spot_score_histogram(df, col, file, bins=50):\n plt.figure(num=None, figsize=(4, 3), dpi=300, facecolor=\"w\", edgecolor=\"k\")\n df.loc[:, col].squeeze().hist(bins=bins)\n plt.xlabel(\"Spot Scores (Alignment)\")\n plt.ylabel(\"Number of Spots\")\n plt.tight_layout()\n plt.savefig(file)\n plt.close()\n\n\ndef spot_count_score_jointplot(df, parent_col, score_col, file):\n avg_df = (\n pd.DataFrame(df.groupby(parent_col)[score_col].mean())\n .reset_index()\n .merge(\n (\n pd.DataFrame(df.loc[:, parent_col].squeeze().value_counts())\n .reset_index()\n .rename(\n {parent_col: \"Barcode_Count\", \"index\": parent_col}, axis=\"columns\"\n )\n ),\n on=parent_col,\n )\n )\n\n sns.jointplot(\n x=score_col,\n y=\"Barcode_Count\",\n kind=\"reg\",\n data=avg_df,\n scatter_kws={\"s\": 0.25},\n height=3,\n )\n plt.xlabel(\"Mean Spot Score per Cell (Alignment)\")\n plt.ylabel(\"Number of Barcodes per Cell\")\n plt.savefig(file, dpi=300)\n plt.close()\n\n\ndef category_counts(df, gene_cols, barcode_cols, score_cols, parent_cols, guide=False):\n ids = parent_cols + gene_cols\n if guide:\n ids += barcode_cols\n\n barcode_group = df.groupby(ids)[score_cols]\n\n count_df = pd.merge(\n barcode_group.mean().reset_index(),\n barcode_group.count().reset_index(),\n on=ids,\n suffixes=[\"_mean\", \"_count\"],\n )\n\n return count_df\n"
]
| [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
]
|
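category_counts in the record above merges per-group means with per-group counts into one frame. A toy reproduction of that aggregation (column names are hypothetical, chosen for illustration):

import pandas as pd

df = pd.DataFrame({
    "Parent_Cells": [1, 1, 2, 2, 2],        # hypothetical parent cell ids
    "Score": [0.9, 0.7, 0.8, 0.6, 0.4],     # hypothetical alignment scores
})
group = df.groupby(["Parent_Cells"])[["Score"]]
count_df = pd.merge(
    group.mean().reset_index(),
    group.count().reset_index(),
    on=["Parent_Cells"],
    suffixes=["_mean", "_count"],
)
print(count_df)  # one row per cell, with Score_mean and Score_count columns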
zsamadi/pml_tools | [
"0de0c8a6d5bc7ab2c20aab0649e407799b387bdb"
]
| [
"software/algorithms/linear_regression_cls.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jan 17 12:07:50 2021\r\n\r\n@author: zayn\r\n\"\"\"\r\n\r\n\"\"\"\r\nLinear regression implementation.\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n\r\nclass LReg:\r\n \"\"\" implementation of multi variable Linear Regression, \r\n with results derived using steepest descent and \r\n theoretical normal equations.\r\n \"\"\"\r\n\r\n def __init__(self, training_data=[], use_norm=True):\r\n \"\"\"Create a linear regression classifier.\r\n :param training_data: training feature data.\r\n :param use_norm: Whether to use normalizing when calculating linear regression.\r\n \"\"\"\r\n if use_norm: \r\n self.normed_training_data, self.mean, self.std = self.featureNormalize(training_data)\r\n else:\r\n self.normed_training_data= training_data\r\n self.mean=[]\r\n self.std=[]\r\n \r\n self.use_norm=use_norm\r\n\r\n def computeCost(self, X, y, theta):\r\n m=len(y)\r\n preiction=X@theta\r\n err_abs=(preiction-y).T@(preiction-y)\r\n J=err_abs/(2*m)\r\n return J\r\n def gradientDescent(self, X, y, theta, alpha, num_iters):\r\n \r\n # Initialize some useful values\r\n m=len(y)\r\n J_history=np.zeros([num_iters, 1])\r\n for iter in range(num_iters):\r\n J_history[iter]=self.computeCost(X, y, theta) #Save the cost J in every iteration \r\n preiction=X@theta\r\n err=(preiction-y)\r\n update=X.T@err\r\n theta=theta-alpha/m*update\r\n \r\n return theta, J_history\r\n def featureNormalize(self, X):\r\n X=np.asarray(X)\r\n XT=X.T;\r\n XnT=np.zeros(XT.shape)\r\n xmeanv=np.zeros(XT.shape[0])\r\n xstdv=np.zeros(XT.shape[0])\r\n for ii in range(XT.shape[0]):\r\n xmean=np.mean(XT[ii,:]);\r\n xstd=np.std(XT[ii,:])\r\n Xub=XT[ii,:]-xmean;\r\n XnT[ii,:]=Xub/xstd\r\n xmeanv[ii]=xmean\r\n xstdv[ii]=xstd\r\n Xn=XnT.T\r\n \r\n return Xn, xmeanv, xstdv\r\n\r\n def normalEqn(self, X, y):\r\n XT=X.T\r\n XTX=XT@X\r\n pinv=np.linalg.pinv(XTX)\r\n XTy=XT@y\r\n theta=pinv@XTy\r\n return theta\r\n"
]
| [
[
"numpy.asarray",
"numpy.linalg.pinv",
"numpy.std",
"numpy.mean",
"numpy.zeros"
]
]
|
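A quick consistency check for the LReg class above (synthetic data; assumes the class is in scope): after feature normalization, and with the bias column added by the caller, gradient descent should converge to the normal-equation solution.

import numpy as np

rng = np.random.default_rng(0)
X_raw = rng.normal(size=(200, 2))
y = 3.0 + 2.0 * X_raw[:, [0]] - 1.5 * X_raw[:, [1]] + 0.1 * rng.normal(size=(200, 1))

reg = LReg(training_data=X_raw, use_norm=True)
X = np.hstack([np.ones((200, 1)), reg.normed_training_data])  # bias column after normalizing

theta_gd, J_history = reg.gradientDescent(X, y, np.zeros((3, 1)), alpha=0.1, num_iters=500)
theta_ne = reg.normalEqn(X, y)
print(np.allclose(theta_gd, theta_ne, atol=1e-3))  # True: both solvers agree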
espoirMur/lm-prior-for-nmt | [
"e3d736f9da8a20fa5f4983e6ddae86325ebd44ed",
"e3d736f9da8a20fa5f4983e6ddae86325ebd44ed"
]
| [
"modules/data/samplers.py",
"libs/joeynmt/search.py"
]
| [
"import math\n\nimport numpy\nimport torch\nfrom torch.utils.data import Sampler\n\n\nclass BPTTSampler(Sampler):\n \"\"\"\n Samples elements per chunk. Suitable for Language Models.\n Arguments:\n data_source (Dataset): dataset to sample from\n \"\"\"\n\n def __init__(self, size, batch):\n \"\"\"\n Define how to construct batches\n\n Given a list of sequences, organize the sequences in each batch\n in such a way, so that each RNN gets the proper (next) sequence.\n\n For example, given the following sequence and with batch=2:\n ┌ a b c d e ┐\n │ f g h i j │\n │ k l m n o │\n │ p q r s t │\n │ u v w x y │\n └ z - - - - ┘\n\n the batches will be:\n ┌ a b c d e ┐ ┌ f g h i j ┐ ┌ k l m n o ┐\n └ p q r s t ┘ └ u v w x y ┘ └ z - - - - ┘\n\n Args:\n size (int): number of sequences\n batch (int): batch size\n \"\"\"\n self.size = size\n self.batch = batch\n\n # split the corpus in chunks of size `corpus_seqs / batch_size`\n self.chunks = numpy.array_split(numpy.arange(self.size), batch)\n\n def get_batch(self, index):\n \"\"\"\n Fill each batch with the i-th sequence from each chunk.\n If the batch size does not evenly divides the chunks,\n then some chunks will have one less sequence, so the last batch\n will have fewer samples.\n Args:\n index (int):\n\n Returns:\n\n \"\"\"\n batch = []\n for chunk in self.chunks:\n if index < chunk.size:\n batch.append(chunk[index])\n return batch\n\n def batches(self):\n for i in range(self.chunks[0].size):\n yield self.get_batch(i)\n\n def __iter__(self):\n return iter(self.batches())\n\n def __len__(self):\n return self.size\n\n\ndef divide_chunks(l, n):\n # looping till length l\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\nclass SortedSampler(Sampler):\n \"\"\"\n Defines a strategy for drawing samples from the dataset,\n in ascending or descending order, based in the sample lengths.\n \"\"\"\n\n def __init__(self, lengths, descending=False):\n self.lengths = lengths\n self.desc = descending\n\n def __iter__(self):\n\n if self.desc:\n return iter(numpy.flip(numpy.array(self.lengths).argsort(), 0))\n else:\n return iter(numpy.array(self.lengths).argsort())\n\n def __len__(self):\n return len(self.lengths)\n\n\nclass TokenBatchSampler(Sampler):\n \"\"\"\n Defines a strategy for drawing batches of samples from the dataset,\n\n Important: Since we are not shuffling the data and it's inevitable\n that the sentences in the batch will be uneven in terms of their length,\n instead of summing the lengths of the sentences in each batch to compute\n the threshold for creating a new batch, we use as the maximum length\n in the batch times the sentences in the batch\n\n Example:\n [\n [x, x, x, x, x, x, x, x, x, x, x, x, x, x ]\n [x, x, x, x, x, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]\n ....\n [x, x, x, x, x, x, x, 0, 0, 0, 0, 0, 0, 0 ]\n ]\n \"\"\"\n\n def __init__(self, lengths, batch_tokens):\n self.batches = []\n\n accumulator = 0\n\n batch = []\n for index, length in enumerate(lengths):\n\n if len(batch) == 0:\n accumulator = length\n else:\n accumulator = max(accumulator, length)\n\n if accumulator * len(batch) < batch_tokens:\n batch.append(index)\n else:\n # insert new batch\n self.batches.append(batch)\n\n # create new batches\n batch = [index]\n accumulator = length\n\n if len(batch) > 0:\n self.batches.append(batch)\n\n assert not any([sum(lengths[y] for y in x) > batch_tokens\n for x in self.batches])\n\n def __iter__(self):\n return iter(self.batches)\n\n def __len__(self):\n return len(self.batches)\n\n\nclass BucketBatchSampler(Sampler):\n \"\"\"\n Defines 
a strategy for drawing batches of samples from the dataset,\n in ascending or descending order, based on the sample lengths.\n \"\"\"\n\n def __init__(self, lengths, batch_size,\n shuffle=False, even=False, drop_last=False, reverse=False):\n sorted_indices = numpy.array(lengths).argsort()\n num_sections = math.ceil(len(lengths) / batch_size)\n if even:\n self.batches = list(divide_chunks(sorted_indices, batch_size))\n else:\n self.batches = numpy.array_split(sorted_indices, num_sections)\n\n if reverse:\n self.batches = list(reversed(self.batches))\n\n if drop_last:\n del self.batches[-1]\n\n self.shuffle = shuffle\n\n def __iter__(self):\n if self.shuffle:\n return iter(self.batches[i]\n for i in torch.randperm(len(self.batches)))\n else:\n return iter(self.batches)\n\n def __len__(self):\n return len(self.batches)\n\n\nclass BucketTokensSampler(Sampler):\n \"\"\"\n Defines a strategy for drawing batches of samples from the dataset,\n in ascending or descending order, based on the sample lengths.\n The batches will be constructed based on the total number of tokens.\n \"\"\"\n\n def __init__(self, lengths, batch_tokens,\n shuffle=False, drop_last=False):\n\n self.shuffle = shuffle\n self.lengths = lengths\n self.batch_tokens = batch_tokens\n self.drop_last = drop_last\n\n self.batches, self.reverse_ids = self.get_batches()\n\n def get_batches(self):\n\n if self.shuffle:\n # this ensures shuffling inside batches\n s = numpy.random.randint(-1, 1, len(self.lengths))\n sorted_indices = numpy.array(self.lengths + s).argsort()\n else:\n sorted_indices = numpy.array(self.lengths).argsort()\n\n # self.reverse_ids = sorted_indices[::-1]\n reverse_ids = numpy.array(sorted_indices).argsort()\n batches = []\n batch = []\n accumulator = 0\n\n for index in sorted_indices:\n accumulator += self.lengths[index]\n\n if accumulator < self.batch_tokens:\n batch.append(index)\n else:\n # insert new batch\n batches.append(batch)\n\n # create new batches\n batch = [index]\n accumulator = self.lengths[index]\n\n if self.drop_last:\n del batches[-1]\n elif len(batch) > 0:\n batches.append(batch)\n\n assert not any([sum(self.lengths[y] for y in x) > self.batch_tokens\n for x in batches])\n\n return batches, reverse_ids\n\n def __iter__(self):\n if self.shuffle:\n # get fresh order of batches\n self.batches, self.reverse_ids = self.get_batches()\n\n return iter(self.batches[i]\n for i in torch.randperm(len(self.batches)))\n else:\n return iter(self.batches)\n\n def __len__(self):\n return len(self.batches)\n",
"# coding: utf-8\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\nimport numpy as np\n\nfrom libs.joeynmt import Decoder, TransformerDecoder\nfrom libs.joeynmt.embeddings import Embeddings\nfrom libs.joeynmt.helpers import tile\n\n\n__all__ = [\"greedy\", \"transformer_greedy\", \"beam_search\"]\n\n\ndef greedy(src_mask: Tensor, embed: Embeddings, bos_index: int,\n max_output_length: int, decoder: Decoder,\n encoder_output: Tensor, encoder_hidden: Tensor)\\\n -> (np.array, np.array):\n \"\"\"\n Greedy decoding. Select the token word highest probability at each time\n step. This function is a wrapper that calls recurrent_greedy for\n recurrent decoders and transformer_greedy for transformer decoders.\n\n :param src_mask: mask for source inputs, 0 for positions after </s>\n :param embed: target embedding\n :param bos_index: index of <s> in the vocabulary\n :param max_output_length: maximum length for the hypotheses\n :param decoder: decoder to use for greedy decoding\n :param encoder_output: encoder hidden states for attention\n :param encoder_hidden: encoder last state for decoder initialization\n :return:\n \"\"\"\n\n if isinstance(decoder, TransformerDecoder):\n # Transformer greedy decoding\n greedy_fun = transformer_greedy\n else:\n # Recurrent greedy decoding\n greedy_fun = recurrent_greedy\n\n return greedy_fun(\n src_mask, embed, bos_index, max_output_length,\n decoder, encoder_output, encoder_hidden)\n\n\ndef recurrent_greedy(\n src_mask: Tensor, embed: Embeddings, bos_index: int,\n max_output_length: int, decoder: Decoder,\n encoder_output: Tensor, encoder_hidden: Tensor) -> (np.array, np.array):\n \"\"\"\n Greedy decoding: in each step, choose the word that gets highest score.\n Version for recurrent decoder.\n\n :param src_mask: mask for source inputs, 0 for positions after </s>\n :param embed: target embedding\n :param bos_index: index of <s> in the vocabulary\n :param max_output_length: maximum length for the hypotheses\n :param decoder: decoder to use for greedy decoding\n :param encoder_output: encoder hidden states for attention\n :param encoder_hidden: encoder last state for decoder initialization\n :return:\n - stacked_output: output hypotheses (2d array of indices),\n - stacked_attention_scores: attention scores (3d array)\n \"\"\"\n batch_size = src_mask.size(0)\n prev_y = src_mask.new_full(size=[batch_size, 1], fill_value=bos_index,\n dtype=torch.long)\n output = []\n attention_scores = []\n hidden = None\n prev_att_vector = None\n\n # pylint: disable=unused-variable\n for t in range(max_output_length):\n # decode one single step\n logits, hidden, att_probs, prev_att_vector = decoder(\n encoder_output=encoder_output,\n encoder_hidden=encoder_hidden,\n src_mask=src_mask,\n trg_embed=embed(prev_y),\n hidden=hidden,\n prev_att_vector=prev_att_vector,\n unroll_steps=1)\n # logits: batch x time=1 x vocab (logits)\n\n # greedy decoding: choose arg max over vocabulary in each step\n next_word = torch.argmax(logits, dim=-1) # batch x time=1\n output.append(next_word.squeeze(1).detach().cpu().numpy())\n prev_y = next_word\n attention_scores.append(att_probs.squeeze(1).detach().cpu().numpy())\n # batch, max_src_lengths\n stacked_output = np.stack(output, axis=1) # batch, time\n stacked_attention_scores = np.stack(attention_scores, axis=1)\n return stacked_output, stacked_attention_scores\n\n\n# pylint: disable=unused-argument\ndef transformer_greedy(\n src_mask: Tensor, embed: Embeddings,\n bos_index: int, max_output_length: int, decoder: Decoder,\n 
encoder_output: Tensor, encoder_hidden: Tensor) -> (np.array, None):\n \"\"\"\n Special greedy function for transformer, since it works differently.\n The transformer remembers all previous states and attends to them.\n\n :param src_mask: mask for source inputs, 0 for positions after </s>\n :param embed: target embedding layer\n :param bos_index: index of <s> in the vocabulary\n :param max_output_length: maximum length for the hypotheses\n :param decoder: decoder to use for greedy decoding\n :param encoder_output: encoder hidden states for attention\n :param encoder_hidden: encoder final state (unused in Transformer)\n :return:\n - stacked_output: output hypotheses (2d array of indices),\n - stacked_attention_scores: attention scores (3d array)\n \"\"\"\n\n batch_size = src_mask.size(0)\n\n # start with BOS-symbol for each sentence in the batch\n ys = encoder_output.new_full([batch_size, 1], bos_index, dtype=torch.long)\n\n # a subsequent mask is intersected with this in decoder forward pass\n trg_mask = src_mask.new_ones([1, 1, 1])\n\n for _ in range(max_output_length):\n\n trg_embed = embed(ys) # embed the previous tokens\n\n # pylint: disable=unused-variable\n with torch.no_grad():\n logits, out, _, _ = decoder(\n trg_embed=trg_embed,\n encoder_output=encoder_output,\n encoder_hidden=None,\n src_mask=src_mask,\n unroll_steps=None,\n hidden=None,\n trg_mask=trg_mask\n )\n\n logits = logits[:, -1]\n _, next_word = torch.max(logits, dim=1)\n next_word = next_word.data\n ys = torch.cat([ys, next_word.unsqueeze(-1)], dim=1)\n\n ys = ys[:, 1:] # remove BOS-symbol\n return ys.detach().cpu().numpy(), None\n\n\n# pylint: disable=too-many-statements,too-many-branches\ndef beam_search(\n decoder: Decoder,\n size: int,\n bos_index: int, eos_index: int, pad_index: int,\n encoder_output: Tensor, encoder_hidden: Tensor,\n src_mask: Tensor, max_output_length: int, alpha: float,\n embed: Embeddings, n_best: int = 1) -> (np.array, np.array):\n \"\"\"\n Beam search with size k.\n Inspired by OpenNMT-py, adapted for Transformer.\n\n In each decoding step, find the k most likely partial hypotheses.\n\n :param decoder:\n :param size: size of the beam\n :param bos_index:\n :param eos_index:\n :param pad_index:\n :param encoder_output:\n :param encoder_hidden:\n :param src_mask:\n :param max_output_length:\n :param alpha: `alpha` factor for length penalty\n :param embed:\n :param n_best: return this many hypotheses, <= beam (currently only 1)\n :return:\n - stacked_output: output hypotheses (2d array of indices),\n - stacked_attention_scores: attention scores (3d array)\n \"\"\"\n assert size > 0, 'Beam size must be >0.'\n assert n_best <= size, 'Can only return {} best hypotheses.'.format(size)\n\n # init\n transformer = isinstance(decoder, TransformerDecoder)\n batch_size = src_mask.size(0)\n att_vectors = None # not used for Transformer\n\n # Recurrent models only: initialize RNN hidden state\n # pylint: disable=protected-access\n if not transformer:\n hidden = decoder._init_hidden(encoder_hidden)\n else:\n hidden = None\n\n # tile encoder states and decoder initial states beam_size times\n if hidden is not None:\n hidden = tile(hidden, size, dim=1) # layers x batch*k x dec_hidden_size\n\n encoder_output = tile(encoder_output.contiguous(), size,\n dim=0) # batch*k x src_len x enc_hidden_size\n src_mask = tile(src_mask, size, dim=0) # batch*k x 1 x src_len\n\n # Transformer only: create target mask\n if transformer:\n trg_mask = src_mask.new_ones([1, 1, 1]) # transformer only\n else:\n trg_mask = None\n\n # 
numbering elements in the batch\n batch_offset = torch.arange(\n batch_size, dtype=torch.long, device=encoder_output.device)\n\n # numbering elements in the extended batch, i.e. beam size copies of each\n # batch element\n beam_offset = torch.arange(\n 0,\n batch_size * size,\n step=size,\n dtype=torch.long,\n device=encoder_output.device)\n\n # keeps track of the top beam size hypotheses to expand for each element\n # in the batch to be further decoded (that are still \"alive\")\n alive_seq = torch.full(\n [batch_size * size, 1],\n bos_index,\n dtype=torch.long,\n device=encoder_output.device)\n\n # Give full probability to the first beam on the first step.\n topk_log_probs = torch.zeros(batch_size, size, device=encoder_output.device)\n topk_log_probs[:, 1:] = float(\"-inf\")\n\n # Structure that holds finished hypotheses.\n hypotheses = [[] for _ in range(batch_size)]\n\n results = {\n \"predictions\": [[] for _ in range(batch_size)],\n \"scores\": [[] for _ in range(batch_size)],\n \"gold_score\": [0] * batch_size,\n }\n\n for step in range(max_output_length):\n\n # This decides which part of the predicted sentence we feed to the\n # decoder to make the next prediction.\n # For Transformer, we feed the complete predicted sentence so far.\n # For Recurrent models, only feed the previous target word prediction\n if transformer: # Transformer\n decoder_input = alive_seq # complete prediction so far\n else: # Recurrent\n decoder_input = alive_seq[:, -1].view(-1, 1) # only the last word\n\n # expand current hypotheses\n # decode one single step\n # logits: logits for final softmax\n # pylint: disable=unused-variable\n trg_embed = embed(decoder_input)\n logits, hidden, att_scores, att_vectors = decoder(\n encoder_output=encoder_output,\n encoder_hidden=encoder_hidden,\n src_mask=src_mask,\n trg_embed=trg_embed,\n hidden=hidden,\n prev_att_vector=att_vectors,\n unroll_steps=1,\n trg_mask=trg_mask # subsequent mask for Transformer only\n )\n\n # For the Transformer we made predictions for all time steps up to\n # this point, so we only want to know about the last time step.\n if transformer:\n logits = logits[:, -1] # keep only the last time step\n hidden = None # we don't need to keep it for transformer\n\n # batch*k x trg_vocab\n log_probs = F.log_softmax(logits, dim=-1).squeeze(1)\n\n # multiply probs by the beam probability (=add logprobs)\n log_probs += topk_log_probs.view(-1).unsqueeze(1)\n curr_scores = log_probs.clone()\n\n # compute length penalty\n if alpha > -1:\n length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha\n curr_scores /= length_penalty\n\n # flatten log_probs into a list of possibilities\n curr_scores = curr_scores.reshape(-1, size * decoder.output_size)\n\n # pick currently best top k hypotheses (flattened order)\n topk_scores, topk_ids = curr_scores.topk(size, dim=-1)\n\n if alpha > -1:\n # recover original log probs\n topk_log_probs = topk_scores * length_penalty\n else:\n topk_log_probs = topk_scores.clone()\n\n # reconstruct beam origin and true word ids from flattened order\n topk_beam_index = topk_ids.div(decoder.output_size)\n topk_ids = topk_ids.fmod(decoder.output_size)\n\n # map beam_index to batch_index in the flat representation\n batch_index = (\n topk_beam_index\n + beam_offset[:topk_beam_index.size(0)].unsqueeze(1))\n select_indices = batch_index.view(-1)\n\n # append latest prediction\n alive_seq = torch.cat(\n [alive_seq.index_select(0, select_indices),\n topk_ids.view(-1, 1)], -1) # batch_size*k x hyp_len\n\n is_finished = topk_ids.eq(eos_index)\n if 
step + 1 == max_output_length:\n is_finished.fill_(True)\n # end condition is whether the top beam is finished\n end_condition = is_finished[:, 0].eq(True)\n\n # save finished hypotheses\n if is_finished.any():\n predictions = alive_seq.view(-1, size, alive_seq.size(-1))\n for i in range(is_finished.size(0)):\n b = batch_offset[i]\n if end_condition[i]:\n is_finished[i].fill_(1)\n finished_hyp = is_finished[i].nonzero().view(-1)\n # store finished hypotheses for this batch\n for j in finished_hyp:\n # Check if the prediction has more than one EOS.\n # If it has more than one EOS, it means that the\n # prediction should have already been added to\n # the hypotheses, so you don't have to add them again.\n if (predictions[i, j, 1:] == eos_index).nonzero().numel() \\\n < 2:\n # ignore start_token\n hypotheses[b].append(\n (topk_scores[i, j], predictions[i, j, 1:])\n )\n # if the batch reached the end, save the n_best hypotheses\n if end_condition[i]:\n best_hyp = sorted(\n hypotheses[b], key=lambda x: x[0], reverse=True)\n for n, (score, pred) in enumerate(best_hyp):\n if n >= n_best:\n break\n results[\"scores\"][b].append(score)\n results[\"predictions\"][b].append(pred)\n non_finished = end_condition.eq(False).nonzero().view(-1)\n # if all sentences are translated, no need to go further\n # pylint: disable=len-as-condition\n if len(non_finished) == 0:\n break\n # remove finished batches for the next step\n topk_log_probs = topk_log_probs.index_select(0, non_finished)\n batch_index = batch_index.index_select(0, non_finished)\n batch_offset = batch_offset.index_select(0, non_finished)\n alive_seq = predictions.index_select(0, non_finished) \\\n .view(-1, alive_seq.size(-1))\n\n # reorder indices, outputs and masks\n select_indices = batch_index.view(-1)\n encoder_output = encoder_output.index_select(0, select_indices)\n src_mask = src_mask.index_select(0, select_indices)\n\n if hidden is not None and not transformer:\n if isinstance(hidden, tuple):\n # for LSTMs, states are tuples of tensors\n h, c = hidden\n h = h.index_select(1, select_indices)\n c = c.index_select(1, select_indices)\n hidden = (h, c)\n else:\n # for GRUs, states are single tensors\n hidden = hidden.index_select(1, select_indices)\n\n if att_vectors is not None:\n att_vectors = att_vectors.index_select(0, select_indices)\n\n def pad_and_stack_hyps(hyps, pad_value):\n filled = np.ones((len(hyps), max([h.shape[0] for h in hyps])),\n dtype=int) * pad_value\n for j, h in enumerate(hyps):\n for k, i in enumerate(h):\n filled[j, k] = i\n return filled\n\n # from results to stacked outputs\n assert n_best == 1\n # only works for n_best=1 for now\n final_outputs = pad_and_stack_hyps([r[0].cpu().numpy() for r in\n results[\"predictions\"]],\n pad_value=pad_index)\n\n return final_outputs, None\n"
]
| [
[
"numpy.arange",
"numpy.array",
"numpy.array_split"
],
[
"torch.max",
"torch.full",
"torch.zeros",
"torch.nn.functional.log_softmax",
"numpy.stack",
"torch.no_grad",
"torch.arange",
"torch.argmax"
]
]
|
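The a–z diagram in BPTTSampler's docstring can be checked directly: with six sequences and batch=2, numpy.array_split yields chunks [0, 1, 2] and [3, 4, 5], and each batch takes the i-th element of every chunk.

import numpy

chunks = numpy.array_split(numpy.arange(6), 2)   # [0 1 2] and [3 4 5]
batches = [[int(chunk[i]) for chunk in chunks if i < chunk.size]
           for i in range(chunks[0].size)]
print(batches)  # [[0, 3], [1, 4], [2, 5]] -> rows (a..e, p..t), (f..j, u..y), (k..o, z----)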
danielsnider/ecosystem-project-website-template | [
"355666d910d80dc51f84192122ed26d9ca06f044"
]
| [
"cli/skyline/tracking/memory/activations.py"
]
| [
"import collections\nimport gc\n\nimport torch\n\nfrom skyline.tracking.backward_interceptor import BackwardInterceptor\nfrom skyline.tracking.base import TrackerBase\nfrom skyline.tracking.call_stack import CallStack\nfrom skyline.tracking.callable_tracker import CallableTracker\nfrom skyline.tracking.utils import remove_dunder\nfrom skyline.user_code_utils import user_code_environment\n\nOperationContext = collections.namedtuple(\n 'OperationContext',\n ['operation_name', 'stack'],\n)\n\nActivationEntry = collections.namedtuple(\n 'ActivationEntry',\n ['operation_name', 'stack', 'size_bytes'],\n)\n\n\nclass ActivationsTracker:\n def __init__(self, project_root):\n self._activations = []\n self._project_root = project_root\n\n def track_memory_usage(self, iteration, input_provider, user_code_path):\n # 1. Run the forward pass of the model with the given inputs. We keep\n # track of all the operations that contribute to the autograd graph.\n model_output, grad_function_contexts = \\\n self._get_grad_function_contexts(\n iteration, input_provider, user_code_path)\n\n # 2. Traverse the autograd graph and get a topological ordering. Filter\n # the function contexts by the gradient functions in our topological\n # ordering.\n gradient_functions_topo_order, grad_function_contexts = \\\n self._extract_relevant_gradient_functions(\n model_output, grad_function_contexts)\n\n # 3. Associate activation sizes with each gradient function by\n # effectively \"freeing\" them one after the other and tracking the\n # change in overall memory allocations.\n\n # NOTE: We reverse the list here to be able to pop from it in\n # topological order.\n gradient_functions_topo_order.reverse()\n del model_output\n gc.collect()\n\n while len(gradient_functions_topo_order) > 0:\n grad_fn = gradient_functions_topo_order.pop()\n context = grad_function_contexts[grad_fn]\n del grad_function_contexts[grad_fn]\n\n mem_before = torch.cuda.memory_allocated()\n del grad_fn\n gc.collect()\n mem_after = torch.cuda.memory_allocated()\n delta = mem_after - mem_before\n self._activations.append(ActivationEntry(\n *context,\n size_bytes=-delta,\n ))\n\n def populate_report(self, builder):\n for entry in self._activations:\n builder.add_activation_entry(\n operation_name=remove_dunder(entry.operation_name),\n size_bytes=entry.size_bytes,\n stack_context=entry.stack,\n )\n\n def populate_breakdown(self, builder):\n # The HierarchicalReportBuilder uses the same activation entry API as\n # the MemoryReportBuilder\n self.populate_report(builder)\n\n def _get_grad_function_contexts(\n self, iteration, input_provider, user_code_path):\n grad_function_tracker = GradFunctionTracker(self._project_root)\n backward_interceptor = BackwardInterceptor()\n with grad_function_tracker.track(), \\\n backward_interceptor.intercept(), \\\n user_code_environment(user_code_path, self._project_root):\n iteration(*input_provider())\n return (\n backward_interceptor.backward_root,\n grad_function_tracker.grad_function_contexts,\n )\n\n def _extract_relevant_gradient_functions(\n self, model_output, grad_function_contexts):\n # 1. Get the gradient functions associated with the model output in\n # topological order\n gradient_functions = \\\n _extract_gradient_functions_in_topological_order(model_output)\n\n # 2. 
Filter the gradient functions: we only want to keep the ones we\n # know about\n relevant_grad_fns = []\n relevant_contexts = {}\n for grad_fn in gradient_functions:\n if grad_fn not in grad_function_contexts:\n continue\n relevant_grad_fns.append(grad_fn)\n relevant_contexts[grad_fn] = grad_function_contexts[grad_fn]\n\n return relevant_grad_fns, relevant_contexts\n\n\nclass GradFunctionTracker(TrackerBase):\n def __init__(self, project_root):\n super().__init__()\n self._callable_tracker = CallableTracker(self._callable_hook_creator)\n self._project_root = project_root\n self.grad_function_contexts = {}\n self._processing_hook = False\n\n def start_tracking(self):\n super().start_tracking()\n self.grad_function_contexts.clear()\n self._callable_tracker.start_tracking()\n\n def stop_tracking(self):\n super().stop_tracking()\n self._callable_tracker.stop_tracking()\n\n def _callable_hook_creator(self, func):\n def hook(*args, **kwargs):\n # NOTE: We use self._processing_hook to handle cases where we have\n # hooks on nested function calls.\n if self._processing_hook:\n return func(*args, **kwargs)\n\n self._processing_hook = True\n try:\n retval = func(*args, **kwargs)\n finally:\n self._processing_hook = False\n\n # Early return for tensor-producing operations that are not\n # involved in the backward pass\n if (not isinstance(retval, torch.Tensor) and\n not isinstance(retval, tuple) and\n not isinstance(retval, list)):\n return retval\n if (isinstance(retval, torch.Tensor) and\n (not retval.is_cuda or retval.grad_fn is None)):\n return retval\n\n stack = CallStack.from_here(self._project_root, start_from=2)\n if len(stack.frames) == 0:\n return retval\n\n context = OperationContext(\n operation_name=func.__name__,\n stack=stack,\n )\n self._handle_callable_result(retval, context)\n return retval\n\n return hook\n\n def _handle_callable_result(self, retval, context):\n if isinstance(retval, torch.Tensor) and retval.grad_fn is not None:\n self.grad_function_contexts[retval.grad_fn] = context\n\n elif isinstance(retval, tuple) or isinstance(retval, list):\n for inner_value in retval:\n self._handle_callable_result(inner_value, context)\n\n\ndef _extract_gradient_functions_in_topological_order(model_output):\n \"\"\"\n Given a model output (Tensor or nested list/tuple of Tensors), build a\n topological ordering of their gradient functions.\n \"\"\"\n if isinstance(model_output, tuple) or isinstance(model_output, list):\n tensors = _flatten_and_filter_tensors(model_output)\n elif (isinstance(model_output, torch.Tensor) and\n model_output.grad_fn is not None):\n tensors = [model_output]\n else:\n return []\n\n result = []\n visited = {tensor.grad_fn for tensor in tensors}\n stack = [(grad_fn, 0) for grad_fn in visited]\n\n while len(stack) > 0:\n grad_fn, visit_count = stack.pop()\n\n if visit_count != 0:\n result.append(grad_fn)\n continue\n\n stack.append((grad_fn, 1))\n\n for fn, _ in grad_fn.next_functions:\n if fn is None or fn in visited:\n continue\n visited.add(fn)\n stack.append((fn, 0))\n\n result.reverse()\n return result\n\n\ndef _flatten_and_filter_tensors(tensor_iterable):\n flattened = []\n for iterable_element in tensor_iterable:\n if (isinstance(iterable_element, torch.Tensor) and\n iterable_element.grad_fn is not None):\n flattened.append(iterable_element)\n elif (isinstance(iterable_element, tuple) or\n isinstance(iterable_element, list)):\n flattened.extend(_flatten_and_filter_tensors(iterable_element))\n return flattened\n"
]
| [
[
"torch.cuda.memory_allocated"
]
]
|
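The heart of the tracker above is the iterative DFS over grad_fn.next_functions that produces a topological ordering of the autograd graph. A CPU-only sketch of that traversal on a tiny graph (grad_fn class names may differ across PyTorch versions):

import torch

x = torch.randn(3, requires_grad=True)
out = (x * 2 + 1).sum()

result, visited = [], {out.grad_fn}
stack = [(out.grad_fn, 0)]
while stack:
    fn, visit_count = stack.pop()
    if visit_count != 0:            # second visit: children done, emit node
        result.append(fn)
        continue
    stack.append((fn, 1))
    for nxt, _ in fn.next_functions:
        if nxt is not None and nxt not in visited:
            visited.add(nxt)
            stack.append((nxt, 0))
result.reverse()
print([type(fn).__name__ for fn in result])
# e.g. ['SumBackward0', 'AddBackward0', 'MulBackward0', 'AccumulateGrad']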
rumschuettel/qhomer | [
"9526473c1d25e1e280d11583a03682750986bc89"
]
| [
"VectorQraphics.py"
]
| [
"import svgpathtools\nimport math\nfrom numpy import real, imag\nimport json\n\n# input: list of svgpathtools.path objects, each containing a list of segments\n# output: list of lists of points in 2D space\n\n\ndef get_points(file, density):\n #TODO: calculate density automatically for target point #\n paths = svgpathtools.svg2paths2(file)\n res = []\n for i, path in enumerate(paths[0]):\n pathpoints = []\n for seg in path._segments:\n count = math.ceil(seg.length()/density)\n frac = 1.0/count\n for i in range(1, count):\n pathpoints.append(seg.point(frac*i))\n if len(pathpoints) > 0:\n res.append(pathpoints)\n\n def f(x): return x.start\n starts = list(map(lambda i: f(i), paths[0]))\n return res, starts\n\n\ndef flip_x(input):\n \"\"\"flips the real values of all of the points\"\"\"\n # DOES NOT WORK DO NOT TOUCH STAY AWAY FROM THIS ONE CAUSE IT BITES\n res = []\n for line in input:\n max_x = real(max(line, key=(lambda x: (real(x)))))\n res.append(list(map(lambda x: complex(max_x-real(x), imag(x)), line)))\n return res\n\n\ndef flip_y(inp):\n \"\"\"flips the imaginary values of all of the points, do THAT for good results\"\"\"\n res = []\n for line in inp:\n #max_y = imag(max(line , key=(lambda y: (imag(y)))))\n res.append(list(map(lambda y: complex(real(y), -imag(y)), line)))\n return res\n\n\ndef offset(line, dx, dy):\n return list(map(lambda p: complex(real(p)+dx, imag(p)+dy), line))\n\n\ndef jsonify(lines):\n res = []\n for line in lines:\n tmp = []\n for point in line:\n pt = []\n pt.append(str(real(point)))\n pt.append(str(imag(point)))\n tmp.append(pt)\n if len(tmp) > 0:\n res.append(tmp)\n\n return json.dumps(res)\ndef eww(file,target):\n density = 10\n for i in range(density):\n lines, starts = get_points(file,density -i)\n count = 0\n for line in lines:\n count+=len(line)\n if count>target:\n return get_points(file,density-i+1)\n return get_points(file,2)"
]
| [
[
"numpy.real",
"numpy.imag"
]
]
|
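get_points above samples roughly one point per `density` units of arc length from every segment via seg.point(t). A single-segment sketch (assuming svgpathtools' Line(...).length()/.point() API):

import math
import svgpathtools

seg = svgpathtools.Line(0 + 0j, 100 + 0j)    # a horizontal line of length 100
density = 10
count = math.ceil(seg.length() / density)    # 10 sample intervals
frac = 1.0 / count
pathpoints = [seg.point(frac * i) for i in range(1, count)]
print(pathpoints)  # (10+0j), (20+0j), ..., (90+0j): interior points, endpoints excluded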
cmmakerclub/nl-covid | [
"2e12a8a02fdad3fc9424ce9111d49fbc8798ff4e"
]
| [
"raw/polyregression.py"
]
| [
"\"\"\"\nCreated on Mon Mar 30 12:10:59 2020\nauthor: yuen\n\npublisher : NL x KBTG\nedit : katopz\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Thailand COVID 19 Dataset\n\n# Predict data\nX = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,\n 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]\nY = [4, 4, 5, 6, 8, 8, 14, 14, 14, 19, 19, 19,\n 19, 19, 25, 25, 25, 25, 32, 32, 33, 33, 33]\n\n# Nonlinear Regression Analysis (Polynomial)\ndegree = 12\npoly_fit = np.polyid(np.polyfit(X, Y, degree))\n\n# Plot data\nxx = np.linspace(0, 77, 100)\nplt.plot(xx, poly_fit(xx), c='r', linestyle='-')\nplt.title('Thailand Prediction Curve : Non-Linear Polynomial fitting')\nplt.xlabel('days')\nplt.ylabel('Confirm cases')\nplt.axis([0, 100, 0, 3000])\nplt.grid(True)\nplt.scatter(X, Y)\n\nplt.show()\n\n# Predict data\nprint('Tomorrow will be ', poly_fit(77))\n"
]
| [
[
"numpy.polyfit",
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
]
|
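np.poly1d wraps the coefficient vector returned by np.polyfit into a callable polynomial (NumPy has no np.polyid; poly1d appears to be what the script intends). A quick sanity check on an exactly recoverable quadratic — and a caution: extrapolating the degree-12 fit above to day 77, far beyond the last observed day, is numerically fragile.

import numpy as np

x = np.arange(10)
y = 2 * x**2 + 3 * x + 1
p = np.poly1d(np.polyfit(x, y, 2))
print(np.round(p.coefficients, 6))  # [2. 3. 1.]
print(np.round(p(11), 3))           # 276.0 = 2*121 + 3*11 + 1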
rainerkelz/ISMIR19 | [
"6dbbb166e8cea80b7233ac1a6fdb23b43df0aadf"
]
| [
"plot_maps_spec2labels_xyz.py"
]
| [
"import matplotlib.pyplot as plt\nimport torch\nimport argparse\nimport numpy as np\n\nfrom plot_input_output import plot_input_output\nfrom reversible import ReversibleModel\nfrom audio_midi_dataset import get_dataset_individually, Spec2MidiDataset, SqueezingDataset\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.sampler import SequentialSampler\nfrom train_loop import normal_noise_like\nimport os\nimport mpl_rc\nimport utils\nrcParams = mpl_rc.default()\n\n\ndef collect_input_output(device, model, loader, n_samples):\n model.eval()\n samples_x_true = []\n samples_x_invs = []\n samples_x_zepa = []\n samples_x_samp = []\n\n samples_y_true = []\n samples_y_pred = []\n\n samples_z_pred = []\n samples_z_samp = []\n for si in range(n_samples):\n x_true = []\n x_invs = []\n x_samp = []\n x_zepa = []\n\n y_pred = []\n y_true = []\n\n z_pred = []\n z_samp = []\n fixed_synth = torch.zeros(9)\n fixed_synth[0] = 1\n for batch in loader:\n x = batch['x'].to(device)\n y = batch['y'].to(device)\n inst_start = 88 + 88\n\n # what happens if we fix a synth?\n # y[:, inst_start:] = fixed_synth\n\n z_hat, zy_padding, y_hat = model.encode(x)\n # print('zy_padding.mean()', zy_padding.mean().cpu().item())\n # x_inv, _ = model.decode_padding(z_hat, zy_padding, y_hat)\n z = normal_noise_like(z_hat, 1)\n\n # decode full bijectivity (*all* information)\n x_inv, _ = model.decode_padding(z_hat, zy_padding, y_hat)\n\n # decode with z_hat, zeros, y_hat\n x_zep, _ = model.decode(z_hat, y_hat)\n\n # decode with sampled z, zeros, perfect y ('use it like a conditional GAN')\n x_sam, _ = model.decode(z, y)\n\n x_true.append(x.detach().cpu().numpy())\n x_invs.append(x_inv.detach().cpu().numpy())\n x_samp.append(x_sam.detach().cpu().numpy())\n x_zepa.append(x_zep.detach().cpu().numpy())\n\n y_pred.append(y_hat.detach().cpu().numpy())\n y_true.append(y.detach().cpu().numpy())\n\n z_pred.append(z_hat.detach().cpu().numpy())\n z_samp.append(z.detach().cpu().numpy())\n\n x_true = np.vstack(x_true)\n x_invs = np.vstack(x_invs)\n x_zepa = np.vstack(x_zepa)\n x_samp = np.vstack(x_samp)\n\n y_pred = np.vstack(y_pred)\n y_true = np.vstack(y_true)\n\n z_pred = np.vstack(z_pred)\n z_samp = np.vstack(z_samp)\n\n samples_x_true.append(x_true)\n samples_x_invs.append(x_invs)\n samples_x_zepa.append(x_zepa)\n samples_x_samp.append(x_samp)\n\n samples_y_pred.append(y_pred)\n samples_y_true.append(y_true)\n\n samples_z_pred.append(z_pred)\n samples_z_samp.append(z_samp)\n\n samples_x_true = np.stack(samples_x_true)\n samples_x_invs = np.stack(samples_x_invs)\n samples_x_zepa = np.stack(samples_x_zepa)\n samples_x_samp = np.stack(samples_x_samp)\n\n samples_y_pred = np.stack(samples_y_pred)\n samples_y_true = np.stack(samples_y_true)\n\n samples_z_pred = np.stack(samples_z_pred)\n samples_z_samp = np.stack(samples_z_samp)\n\n print('samples_x_true.shape', samples_x_true.shape)\n print('samples_x_invs.shape', samples_x_invs.shape)\n print('samples_x_zepa.shape', samples_x_zepa.shape)\n print('samples_x_samp.shape', samples_x_samp.shape)\n\n print('samples_y_pred.shape', samples_y_pred.shape)\n print('samples_y_true.shape', samples_y_true.shape)\n\n print('samples_z_pred.shape', samples_z_pred.shape)\n print('samples_z_samp.shape', samples_z_samp.shape)\n\n return dict(\n samples_x_true=samples_x_true,\n samples_x_invs=samples_x_invs,\n samples_x_zepa=samples_x_zepa,\n samples_x_samp=samples_x_samp,\n\n samples_y_pred=samples_y_pred,\n samples_y_true=samples_y_true,\n\n samples_z_pred=samples_z_pred,\n 
samples_z_samp=samples_z_samp\n )\n\n\ndef plot_fold(direction,\n base_directory,\n instrument_filename,\n context,\n audio_options,\n batch_size,\n device,\n model,\n fold_file,\n n_samples,\n plot_output_directory):\n\n loaders = get_data_loaders(\n direction=direction,\n base_directory=base_directory,\n fold_file=fold_file,\n instrument_filename=instrument_filename,\n context=context,\n audio_options=audio_options,\n batch_size=batch_size\n )\n\n for fold_file, audiofilename, midifilename, loader in loaders:\n print('fold_file', fold_file)\n print('audiofilename', audiofilename)\n print('midifilename', midifilename)\n sio = collect_input_output(device, model, loader, n_samples)\n\n fold = os.path.basename(fold_file)\n start = 100\n end = 150\n\n adjustments = dict(\n left=0.02,\n right=0.915,\n bottom=0.07,\n wspace=0.38,\n hspace=0.18\n )\n\n ##########################################################################\n fig = plot_input_output(\n '\\hat{\\mathbf{x}} = f_{\\\\theta}^{-1}([\\hat{\\mathbf{z}}; \\hat{\\mathbf{yz}}_{pad}; \\hat{\\mathbf{y}}])',\n '\\mathbf{x}',\n '\\hat{\\mathbf{y}}',\n '\\hat{\\mathbf{z}}',\n '\\hat{\\mathbf{x}}',\n sio['samples_x_true'][0, start:end, :],\n sio['samples_y_pred'][0, start:end, :],\n sio['samples_z_pred'][0, start:end, :],\n sio['samples_x_invs'][0, start:end, :],\n rcParams['figure.figsize']\n )\n\n fig_filename = os.path.join(\n plot_output_directory,\n 'z_hat_pad_y_hat_input_output_{}.pdf'.format(fold)\n )\n fig.subplots_adjust(**adjustments)\n fig.savefig(fig_filename)\n plt.close(fig)\n\n ##########################################################################\n fig = plot_input_output(\n '\\hat{\\mathbf{x}}^{(0)} = f_{\\\\theta}^{-1}([\\hat{\\mathbf{z}}; \\mathbf{0}; \\hat{\\mathbf{y}}])',\n '\\mathbf{x}',\n '\\hat{\\mathbf{y}}',\n '\\hat{\\mathbf{z}}',\n '\\hat{\\mathbf{x}}^{(0)}',\n sio['samples_x_true'][0, start:end, :],\n sio['samples_y_pred'][0, start:end, :],\n sio['samples_z_pred'][0, start:end, :],\n sio['samples_x_zepa'][0, start:end, :],\n rcParams['figure.figsize']\n )\n\n fig_filename = os.path.join(\n plot_output_directory,\n 'z_hat_zero_y_hat_input_output_{}.pdf'.format(fold)\n )\n fig.subplots_adjust(**adjustments)\n fig.savefig(fig_filename)\n plt.close()\n\n ##########################################################################\n fig = plot_input_output(\n '\\mathbf{x}_{sam} = f_{\\\\theta}^{-1}([\\mathbf{z}; \\mathbf{0}; \\mathbf{y}])',\n '\\mathbf{x}',\n '\\hat{\\mathbf{y}}',\n '\\hat{\\mathbf{z}}',\n '\\mathbf{x}_{sam}',\n sio['samples_x_true'][0, start:end, :],\n sio['samples_y_true'][0, start:end, :],\n sio['samples_z_samp'][0, start:end, :],\n sio['samples_x_samp'][0, start:end, :],\n rcParams['figure.figsize']\n )\n\n fig_filename = os.path.join(\n plot_output_directory,\n 'z_samp_zero_y_true_input_output_{}.pdf'.format(fold)\n )\n fig.subplots_adjust(**adjustments)\n fig.savefig(fig_filename)\n plt.close()\n\n\ndef get_data_loaders(direction,\n base_directory,\n fold_file,\n instrument_filename,\n context,\n audio_options,\n batch_size):\n\n print('-' * 30)\n print('getting data loaders:')\n print('direction', direction)\n print('base_directory', base_directory)\n print('fold_file', fold_file)\n print('instrument_filename', instrument_filename)\n\n clazz = Spec2MidiDataset\n\n datasets = get_dataset_individually(\n base_directory,\n fold_file,\n instrument_filename,\n context,\n audio_options,\n clazz\n )\n loaders = []\n for dataset in datasets:\n audiofilename = dataset.audiofilename\n midifilename = 
dataset.midifilename\n dataset = SqueezingDataset(dataset)\n print('len(dataset)', len(dataset))\n\n sampler = SequentialSampler(dataset)\n\n loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n drop_last=True\n )\n loaders.append((fold_file, audiofilename, midifilename, loader))\n\n return loaders\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('checkpoint')\n parser.add_argument('plot_output_directory')\n parser.add_argument('--n_samples', type=int, default=1)\n args = parser.parse_args()\n batch_size = 8\n direction = 'spec2labels'\n print('direction', direction)\n\n utils.ensure_directory_exists(args.plot_output_directory)\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n audio_options = dict(\n spectrogram_type='LogarithmicFilteredSpectrogram',\n filterbank='LogarithmicFilterbank',\n num_channels=1,\n sample_rate=44100,\n frame_size=4096,\n fft_size=4096,\n hop_size=441 * 4, # 25 fps\n num_bands=24,\n fmin=30,\n fmax=10000.0,\n fref=440.0,\n norm_filters=True,\n unique_filters=True,\n circular_shift=False,\n add=1.\n )\n context = dict(\n frame_size=1,\n hop_size=1,\n origin='center'\n )\n base_directory = './data/maps_piano/data'\n\n print('loading checkpoint')\n checkpoint = torch.load(args.checkpoint)\n model = ReversibleModel(\n device=device,\n batch_size=batch_size,\n depth=5,\n ndim_tot=256,\n ndim_x=144,\n ndim_y=185,\n ndim_z=9,\n clamp=2,\n zeros_noise_scale=3e-2, # very magic, much hack!\n y_noise_scale=3e-2\n )\n # print('model', model)\n model.to(device)\n model.load_state_dict(checkpoint)\n\n instrument_filename = './splits/maps-individual-tracks/instruments'\n fold_base = './splits/maps-individual-tracks'\n fold_filenames = [\n 'train/MAPS_MUS-chpn-p10_AkPnStgb',\n 'test/MAPS_MUS-chpn-p14_ENSTDkAm',\n 'test/MAPS_MUS-chpn-p19_ENSTDkCl'\n ]\n fold_files = []\n for fold_filename in fold_filenames:\n fold_files.append(os.path.join(fold_base, fold_filename))\n\n for fold_file in fold_files:\n plot_fold(\n direction=direction,\n base_directory=base_directory,\n instrument_filename=instrument_filename,\n context=context,\n audio_options=audio_options,\n batch_size=batch_size,\n device=device,\n model=model,\n fold_file=fold_file,\n n_samples=args.n_samples,\n plot_output_directory=args.plot_output_directory\n )\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"torch.zeros",
"torch.load",
"numpy.stack",
"torch.utils.data.dataloader.DataLoader",
"matplotlib.pyplot.close",
"torch.cuda.is_available",
"torch.utils.data.sampler.SequentialSampler",
"numpy.vstack"
]
]
|
Hide927/UDTL | [
"45c256f69c7f0bf06d8b9505834edf991d4484d4"
]
| [
"datasets/CWRUFFT.py"
]
| [
"import os\nfrom scipy.io import loadmat\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom sklearn.model_selection import train_test_split\nfrom datasets.SequenceDatasets import dataset\nfrom datasets.sequence_aug import *\nfrom tqdm import tqdm\n\n#Digital data was collected at 12,000 samples per second\nsignal_size = 1024\n\ndataname= {0:[\"97.mat\",\"105.mat\", \"118.mat\", \"130.mat\", \"169.mat\", \"185.mat\", \"197.mat\", \"209.mat\", \"222.mat\",\"234.mat\"], # 1797rpm\n 1:[\"98.mat\",\"106.mat\", \"119.mat\", \"131.mat\", \"170.mat\", \"186.mat\", \"198.mat\", \"210.mat\", \"223.mat\",\"235.mat\"], # 1772rpm\n 2:[\"99.mat\",\"107.mat\", \"120.mat\", \"132.mat\", \"171.mat\", \"187.mat\", \"199.mat\", \"211.mat\", \"224.mat\",\"236.mat\"], # 1750rpm\n 3:[\"100.mat\",\"108.mat\", \"121.mat\",\"133.mat\", \"172.mat\", \"188.mat\", \"200.mat\", \"212.mat\", \"225.mat\",\"237.mat\"]} # 1730rpm\n\ndatasetname = [\"12k Drive End Bearing Fault Data\", \"12k Fan End Bearing Fault Data\", \"48k Drive End Bearing Fault Data\",\n \"Normal Baseline Data\"]\naxis = [\"_DE_time\", \"_FE_time\", \"_BA_time\"]\n\nlabel = [i for i in range(0, 10)]\n\ndef get_files(root, N):\n '''\n This function is used to generate the final training set and test set.\n root:The location of the data set\n '''\n data = []\n lab =[]\n for k in range(len(N)):\n for n in tqdm(range(len(dataname[N[k]]))):\n if n==0:\n path1 =os.path.join(root,datasetname[3], dataname[N[k]][n])\n else:\n path1 = os.path.join(root,datasetname[0], dataname[N[k]][n])\n data1, lab1 = data_load(path1,dataname[N[k]][n],label=label[n])\n data += data1\n lab +=lab1\n\n return [data, lab]\n\n\ndef data_load(filename, axisname, label):\n '''\n This function is mainly used to generate test data and training data.\n filename:Data location\n axisname:Select which channel's data,---->\"_DE_time\",\"_FE_time\",\"_BA_time\"\n '''\n datanumber = axisname.split(\".\")\n if eval(datanumber[0]) < 100:\n realaxis = \"X0\" + datanumber[0] + axis[0]\n else:\n realaxis = \"X\" + datanumber[0] + axis[0]\n fl = loadmat(filename)[realaxis]\n fl = fl.reshape(-1,)\n data = []\n lab = []\n start, end = 0, signal_size\n while end <= fl.shape[0]:\n x = fl[start:end]\n x = np.fft.fft(x)\n x = np.abs(x) / len(x)\n x = x[range(int(x.shape[0] / 2))]\n x = x.reshape(-1,1)\n data.append(x)\n lab.append(label)\n start += signal_size\n end += signal_size\n\n return data, lab\n\n#--------------------------------------------------------------------------------------------------------------------\nclass CWRUFFT(object):\n num_classes = 10\n inputchannel = 1\n def __init__(self, data_dir, transfer_task, normlizetype=\"0-1\"):\n self.data_dir = data_dir\n self.source_N = transfer_task[0]\n self.target_N = transfer_task[1]\n self.normlizetype = normlizetype\n self.data_transforms = {\n 'train': Compose([\n Reshape(),\n Normalize(self.normlizetype),\n # RandomAddGaussian(),\n # RandomScale(),\n # RandomStretch(),\n # RandomCrop(),\n Retype(),\n # Scale(1)\n ]),\n 'val': Compose([\n Reshape(),\n Normalize(self.normlizetype),\n Retype(),\n # Scale(1)\n ])\n }\n\n def data_split(self, transfer_learning=True):\n if transfer_learning:\n # get source train and val\n list_data = get_files(self.data_dir, self.source_N)\n data_pd = pd.DataFrame({\"data\": list_data[0], \"label\": list_data[1]})\n train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd[\"label\"])\n source_train = dataset(list_data=train_pd, 
transform=self.data_transforms['train'])\n source_val = dataset(list_data=val_pd, transform=self.data_transforms['val'])\n\n # get target train and val\n list_data = get_files(self.data_dir, self.target_N)\n data_pd = pd.DataFrame({\"data\": list_data[0], \"label\": list_data[1]})\n train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd[\"label\"])\n target_train = dataset(list_data=train_pd, transform=self.data_transforms['train'])\n target_val = dataset(list_data=val_pd, transform=self.data_transforms['val'])\n return source_train, source_val, target_train, target_val\n else:\n #get source train and val\n list_data = get_files(self.data_dir, self.source_N)\n data_pd = pd.DataFrame({\"data\": list_data[0], \"label\": list_data[1]})\n train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd[\"label\"])\n source_train = dataset(list_data=train_pd, transform=self.data_transforms['train'])\n source_val = dataset(list_data=val_pd, transform=self.data_transforms['val'])\n\n # get target train and val\n list_data = get_files(self.data_dir, self.target_N)\n data_pd = pd.DataFrame({\"data\": list_data[0], \"label\": list_data[1]})\n target_val = dataset(list_data=data_pd, transform=self.data_transforms['val'])\n return source_train, source_val, target_val\n\n\n\"\"\"\n def data_split(self):\n\n\"\"\""
]
| [
[
"numpy.abs",
"numpy.fft.fft",
"scipy.io.loadmat",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame"
]
]
|
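data_load above turns each 1024-sample window into a half-spectrum magnitude vector. The transform in isolation, on a synthetic 60 Hz tone at the dataset's 12 kHz sampling rate:

import numpy as np

signal_size = 1024
fl = np.sin(2 * np.pi * 60 * np.arange(signal_size) / 12000)  # 60 Hz tone, 12 kHz sampling

x = np.fft.fft(fl)
x = np.abs(x) / len(x)              # normalized magnitude spectrum
x = x[range(int(x.shape[0] / 2))]   # keep positive frequencies only
x = x.reshape(-1, 1)
print(x.shape)             # (512, 1): one feature vector per window
print(int(np.argmax(x)))   # 5: nearest bin to 60 Hz * 1024 / 12000 = 5.12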
Tommy-Moffat/pyquil | [
"bc6c0462b11117fe5758b899e10f19b8190b2d96"
]
| [
"pyquil/device/tests/test_device.py"
]
| [
"import networkx as nx\nimport numpy as np\nimport pytest\n\nfrom pyquil.device import (\n Device,\n ISA,\n Qubit,\n Edge,\n Specs,\n QubitSpecs,\n EdgeSpecs,\n THETA,\n gates_in_isa,\n isa_from_graph,\n isa_to_graph,\n NxDevice,\n)\nfrom pyquil.noise import NoiseModel, KrausModel\nfrom pyquil.gates import RZ, RX, I, CZ, ISWAP, CPHASE\nfrom collections import OrderedDict\n\nDEVICE_FIXTURE_NAME = \"mixed_architecture_chip\"\n\n\[email protected]\ndef kraus_model_I_dict():\n return {\n \"gate\": \"I\",\n \"fidelity\": 1.0,\n \"kraus_ops\": [[[[1.0]], [[1.0]]]],\n \"targets\": (0, 1),\n \"params\": (5.0,),\n }\n\n\[email protected]\ndef kraus_model_RX90_dict():\n return {\n \"gate\": \"RX\",\n \"fidelity\": 1.0,\n \"kraus_ops\": [[[[1.0]], [[1.0]]]],\n \"targets\": (0,),\n \"params\": (np.pi / 2.0,),\n }\n\n\ndef test_isa(isa_dict):\n isa = ISA.from_dict(isa_dict)\n assert isa == ISA(\n qubits=[\n Qubit(id=0, type=\"Xhalves\", dead=False),\n Qubit(id=1, type=\"Xhalves\", dead=False),\n Qubit(id=2, type=\"Xhalves\", dead=False),\n Qubit(id=3, type=\"Xhalves\", dead=True),\n ],\n edges=[\n Edge(targets=(0, 1), type=\"CZ\", dead=False),\n Edge(targets=(0, 2), type=\"CPHASE\", dead=False),\n Edge(targets=(0, 3), type=\"CZ\", dead=True),\n Edge(targets=(1, 2), type=\"ISWAP\", dead=False),\n ],\n )\n assert isa == ISA.from_dict(isa.to_dict())\n\n\ndef test_specs(specs_dict):\n specs = Specs.from_dict(specs_dict)\n assert specs == Specs(\n qubits_specs=[\n QubitSpecs(\n id=0,\n f1QRB=0.99,\n f1QRB_std_err=0.01,\n f1Q_simultaneous_RB=0.98,\n f1Q_simultaneous_RB_std_err=0.02,\n fRO=0.93,\n T1=20e-6,\n T2=15e-6,\n fActiveReset=None,\n ),\n QubitSpecs(\n id=1,\n f1QRB=0.989,\n f1QRB_std_err=0.011,\n f1Q_simultaneous_RB=0.979,\n f1Q_simultaneous_RB_std_err=0.021,\n fRO=0.92,\n T1=19e-6,\n T2=12e-6,\n fActiveReset=None,\n ),\n QubitSpecs(\n id=2,\n f1QRB=0.983,\n f1QRB_std_err=0.017,\n f1Q_simultaneous_RB=0.973,\n f1Q_simultaneous_RB_std_err=0.027,\n fRO=0.95,\n T1=21e-6,\n T2=16e-6,\n fActiveReset=None,\n ),\n QubitSpecs(\n id=3,\n f1QRB=0.988,\n f1QRB_std_err=0.012,\n f1Q_simultaneous_RB=0.978,\n f1Q_simultaneous_RB_std_err=0.022,\n fRO=0.94,\n T1=18e-6,\n T2=11e-6,\n fActiveReset=None,\n ),\n ],\n edges_specs=[\n EdgeSpecs(\n targets=(0, 1),\n fBellState=0.90,\n fCZ=0.89,\n fCZ_std_err=0.01,\n fCPHASE=0.88,\n fISWAP=None,\n fXY=None,\n fISWAP_std_err=None,\n fXY_std_err=None,\n fCPHASE_std_err=None,\n ),\n EdgeSpecs(\n targets=(0, 2),\n fBellState=0.92,\n fCZ=0.91,\n fCZ_std_err=0.20,\n fCPHASE=0.90,\n fISWAP=None,\n fXY=None,\n fISWAP_std_err=None,\n fXY_std_err=None,\n fCPHASE_std_err=None,\n ),\n EdgeSpecs(\n targets=(0, 3),\n fBellState=0.89,\n fCZ=0.88,\n fCZ_std_err=0.03,\n fCPHASE=0.87,\n fISWAP=None,\n fXY=None,\n fISWAP_std_err=None,\n fXY_std_err=None,\n fCPHASE_std_err=None,\n ),\n EdgeSpecs(\n targets=(1, 2),\n fBellState=0.91,\n fCZ=0.90,\n fCZ_std_err=0.12,\n fCPHASE=0.89,\n fISWAP=None,\n fXY=None,\n fISWAP_std_err=None,\n fXY_std_err=None,\n fCPHASE_std_err=None,\n ),\n ],\n )\n\n assert specs == Specs.from_dict(specs.to_dict())\n\n assert specs.f1QRBs() == {0: 0.99, 1: 0.989, 2: 0.983, 3: 0.988}\n assert specs.f1Q_simultaneous_RBs() == {0: 0.98, 1: 0.979, 2: 0.973, 3: 0.978}\n assert specs.fROs() == {0: 0.93, 1: 0.92, 2: 0.95, 3: 0.94}\n assert specs.T1s() == {0: 20e-6, 1: 19e-6, 2: 21e-6, 3: 18e-6}\n assert specs.T2s() == {0: 15e-6, 1: 12e-6, 2: 16e-6, 3: 11e-6}\n\n with pytest.warns(DeprecationWarning): # soon to be removed\n assert specs.fBellStates() == {(0, 1): 0.90, (0, 2): 0.92, 
(0, 3): 0.89, (1, 2): 0.91}\n assert specs.fCZs() == {(0, 1): 0.89, (0, 2): 0.91, (0, 3): 0.88, (1, 2): 0.90}\n assert specs.fCZ_std_errs() == {(0, 1): 0.01, (0, 2): 0.20, (0, 3): 0.03, (1, 2): 0.12}\n with pytest.warns(DeprecationWarning): # soon to be removed\n assert specs.fCPHASEs() == {(0, 1): 0.88, (0, 2): 0.90, (0, 3): 0.87, (1, 2): 0.89}\n\n\ndef test_kraus_model(kraus_model_I_dict):\n km = KrausModel.from_dict(kraus_model_I_dict)\n assert km == KrausModel(\n gate=kraus_model_I_dict[\"gate\"],\n params=kraus_model_I_dict[\"params\"],\n targets=kraus_model_I_dict[\"targets\"],\n kraus_ops=[\n KrausModel.unpack_kraus_matrix(kraus_op) for kraus_op in kraus_model_I_dict[\"kraus_ops\"]\n ],\n fidelity=kraus_model_I_dict[\"fidelity\"],\n )\n d = km.to_dict()\n assert d == OrderedDict(\n [\n (\"gate\", km.gate),\n (\"params\", km.params),\n (\"targets\", (0, 1)),\n (\"kraus_ops\", [[[[1.0]], [[1.0]]]]),\n (\"fidelity\", 1.0),\n ]\n )\n\n\ndef test_noise_model(kraus_model_I_dict, kraus_model_RX90_dict):\n noise_model_dict = {\n \"gates\": [kraus_model_I_dict, kraus_model_RX90_dict],\n \"assignment_probs\": {\"1\": [[1.0, 0.0], [0.0, 1.0]], \"0\": [[1.0, 0.0], [0.0, 1.0]]},\n }\n\n nm = NoiseModel.from_dict(noise_model_dict)\n km1 = KrausModel.from_dict(kraus_model_I_dict)\n km2 = KrausModel.from_dict(kraus_model_RX90_dict)\n assert nm == NoiseModel(gates=[km1, km2], assignment_probs={0: np.eye(2), 1: np.eye(2)})\n assert nm.gates_by_name(\"I\") == [km1]\n assert nm.gates_by_name(\"RX\") == [km2]\n assert nm.to_dict() == noise_model_dict\n\n\ndef test_device(isa_dict, noise_model_dict):\n device_raw = {\n \"isa\": isa_dict,\n \"noise_model\": noise_model_dict,\n \"is_online\": True,\n \"is_retuning\": False,\n }\n\n device = Device(DEVICE_FIXTURE_NAME, device_raw)\n assert device.name == DEVICE_FIXTURE_NAME\n\n isa = ISA.from_dict(isa_dict)\n noise_model = NoiseModel.from_dict(noise_model_dict)\n # Device.isa is deprecated, but seemingly this is what we want here\n with pytest.warns(DeprecationWarning):\n assert isinstance(device.isa, ISA)\n assert device.isa == isa\n assert isinstance(device.noise_model, NoiseModel)\n assert device.noise_model == noise_model\n\n\ndef test_gates_in_isa(isa_dict):\n isa = ISA.from_dict(isa_dict)\n gates = gates_in_isa(isa)\n for q in [0, 1, 2]:\n for g in [\n I(q),\n RX(np.pi / 2, q),\n RX(-np.pi / 2, q),\n RX(np.pi, q),\n RX(-np.pi, q),\n RZ(THETA, q),\n ]:\n assert g in gates\n\n assert CZ(0, 1) in gates\n assert CZ(1, 0) in gates\n assert ISWAP(1, 2) in gates\n assert ISWAP(2, 1) in gates\n assert CPHASE(THETA, 2, 0) in gates\n assert CPHASE(THETA, 0, 2) in gates\n\n\ndef test_isa_from_graph():\n fc = nx.complete_graph(3)\n isa = isa_from_graph(fc)\n isad = isa.to_dict()\n\n assert set(isad.keys()) == {\"1Q\", \"2Q\"}\n assert sorted(int(q) for q in isad[\"1Q\"].keys()) == list(range(3))\n for v in isad[\"1Q\"].values():\n assert v == {}\n\n assert sorted(isad[\"2Q\"]) == [\"0-1\", \"0-2\", \"1-2\"]\n for v in isad[\"2Q\"].values():\n assert v == {}\n\n\ndef test_isa_from_graph_order():\n # since node 16 appears first, even though we ask for the edge (15,16) the networkx internal\n # representation will have it as (16,15)\n fc = nx.from_edgelist([(16, 17), (15, 16)])\n isa = isa_from_graph(fc)\n isad = isa.to_dict()\n for k in isad[\"2Q\"]:\n q1, q2 = k.split(\"-\")\n assert q1 < q2\n\n\ndef test_isa_from_graph_cphase():\n fc = nx.complete_graph(3)\n isa = isa_from_graph(fc, twoq_type=\"CPHASE\")\n isad = isa.to_dict()\n\n assert set(isad.keys()) == 
{\"1Q\", \"2Q\"}\n assert sorted(int(q) for q in isad[\"1Q\"].keys()) == list(range(3))\n for v in isad[\"1Q\"].values():\n assert v == {}\n\n assert sorted(isad[\"2Q\"]) == [\"0-1\", \"0-2\", \"1-2\"]\n for v in isad[\"2Q\"].values():\n assert v == {\"type\": \"CPHASE\"}\n\n\ndef test_isa_to_graph(isa_dict):\n graph = isa_to_graph(ISA.from_dict(isa_dict))\n should_be = nx.from_edgelist([(0, 1), (1, 2), (0, 2)])\n assert nx.is_isomorphic(graph, should_be)\n\n\ndef test_NxDevice(isa_dict, noise_model_dict):\n graph = isa_to_graph(ISA.from_dict(isa_dict))\n nxdev = NxDevice(graph)\n\n device_raw = {\n \"isa\": isa_dict,\n \"noise_model\": noise_model_dict,\n \"is_online\": True,\n \"is_retuning\": False,\n }\n dev = Device(DEVICE_FIXTURE_NAME, device_raw)\n\n nx.is_isomorphic(nxdev.qubit_topology(), dev.qubit_topology())\n isa = nxdev.get_isa()\n assert isa.qubits[0].type == \"Xhalves\"\n"
]
| [
[
"numpy.eye"
]
]
|
rober5566a/NTUT_109-2_MVA_Final-Project | [
"d18494760750efae1ff0810dcaa281a03d0827c0"
]
| [
"src/application_process/Model/class4_DIP.py"
]
| [
"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport numpy as np\r\nfrom Model.BoundaryDescriptor import get_contours_binary, calc_contour_feature, draw_bbox\r\n\r\n\r\ndef get_canny(img, isShow=True):\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n blurred = cv2.GaussianBlur(gray, (5, 5), 0)\r\n canny = cv2.Canny(blurred, 70, 150)\r\n result = np.hstack([gray, blurred, canny])\r\n return canny\r\n\r\n\r\ndef get_contours(img, isShow=True):\r\n imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n ret, thresh = cv2.threshold(imgray, 92, 255, 0)\r\n num_white_extreme = len(np.where(imgray > 200)[0])\r\n num_black_extreme = len(np.where(imgray < 25)[0])\r\n\r\n if np.mean(imgray) > 127 or num_white_extreme < 50:\r\n thresh_white = 255 - thresh\r\n else:\r\n thresh_white = thresh\r\n _, contours, hierarchy = cv2.findContours(\r\n thresh_white, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n img_contours = cv2.drawContours(img.copy(), contours, -1, (0, 0, 255), -1)\r\n return img_contours\r\n\r\n\r\ndef get_c4_img(img, position_ls):\r\n for position in position_ls:\r\n x, y, w, h = position\r\n crop_img = img[y:y+h, x:x+w]\r\n img_contours = get_contours(crop_img, isShow=False)\r\n # cv2.imshow(\"Contours_img\", img_contours)\r\n # cv2.waitKey(0)\r\n img[y:y+h, x:x+w] = img_contours # 指定位置填充,大小要一样才能填充\r\n # cv2.imshow(\"Merge\", img_raw)\r\n return img\r\n"
]
| [
[
"numpy.hstack",
"numpy.where",
"numpy.mean"
]
]
|
trisct/AtlasNet | [
"b063fb635ed6ec0304b4a60e41598c214266f492"
]
| [
"training/metro.py"
]
| [
"import argparse\nimport numpy as np\nimport pymesh\nfrom os.path import exists\nimport os\nimport subprocess\nfrom shutil import copy\n\n\"\"\"\n Author : Thibault Groueix 01.11.2019\n\"\"\"\n\n\ndef metro(path1, path2, metro='./auxiliary/metro_sources/build/metro'):\n \"\"\"\n Run the metro compiled program on two meshes and get the output.\n :param path1: mesh 1\n :param path2: mesh 2\n :param metro: path to metro\n :return: metro(mesh 1, mesh 2) [float]\n \"\"\"\n\n print(f\"calculing {path1}\")\n cmd = f\"{metro} {path1} {path2}\"\n returned_output = subprocess.check_output(cmd, shell=True)\n returned_output = returned_output.decode(\"utf-8\")\n location = returned_output.find(\"Hausdorff\")\n returned_output = returned_output[location:location + 40]\n distance = float(returned_output.split(\" \")[2])\n print(f\"calculing {path1} Done {distance}!\")\n\n return distance\n\n\ndef isolate_files():\n \"\"\"\n Utility fonction to generate the metro_file archive. Useless to all users but the author.\n \"\"\"\n with open('./dataset/data/metro_files/files-metro.txt', 'r') as file:\n files = file.read().split('\\n')\n for file in files:\n if file[-3:] == \"ply\":\n cat = file.split('/')[0]\n name = file.split('/')[1][:-4]\n path_points = '/'.join(['.', 'dataset', 'data', 'ShapeNetV1PointCloud', cat, name + '.points.ply.npy'])\n path_png = '/'.join(['.', 'dataset', 'data', 'ShapeNetV1Renderings', cat, name, \"rendering\", '00.png'])\n\n path_obj = '/'.join(['', 'home', 'thibault', 'hdd', 'data', 'ShapeNetCore.v1', cat, name, 'model.obj'])\n mesh = pymesh.load_mesh(path_obj)\n points = np.load((path_points))\n if not exists('/'.join(['.', 'dataset', 'data', 'metro_files', cat])):\n os.mkdir('/'.join(['.', 'dataset', 'data', 'metro_files', cat]))\n\n pymesh.save_mesh('/'.join(['.', 'dataset', 'data', 'metro_files', cat, name + '.ply']), mesh, ascii=True)\n np.save('/'.join(['.', 'dataset', 'data', 'metro_files', cat, name + '.npy']), points)\n copy(path_png, '/'.join(['.', 'dataset', 'data', 'metro_files', cat, name + '.png']))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--path1', help=\"Input file\", required=True)\n parser.add_argument('--path2', help=\"Input file\", required=True)\n parser.add_argument('--metro', type=str, help='Path to the metro executable',\n default='./metro_sources/build/metro')\n\n args = parser.parse_args()\n return metro(args.path1, args.path2, args.metro)\n\n\nif __name__ == '__main__':\n a = isolate_files()\n print(a)\n"
]
| [
[
"numpy.load"
]
]
|
romainloiseau/deep-linear-shapes | [
"faed8e14a8f66ab1c780972f616543552295cb1e"
]
| [
"dlm/viz/gif.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import animation, rc\nfrom IPython.display import display\n\nimport torch\n\nfrom .tools import prepare_ax, prepare_cloud, get_colorscale\n\nfrom ..global_variables import CMAP, PLOT_3D_ALPHA\nfrom ..global_variables import DEFAULT_3D_VIEW, PLOT_MAX_3D_POINTS\n\ndef print_gif(\n point_cloud,\n name = None,\n frames = 60,\n colorscale = None,\n cmap = None):\n \"\"\"\n Prints a point cloud as a .gif\n \n Inspired from:\n http://louistiao.me/posts/notebooks/save-matplotlib-animations-as-gifs/\n \"\"\"\n \n point_cloud = prepare_cloud(point_cloud)\n \n if colorscale is not None:\n assert len(colorscale) == point_cloud.shape[-1], \"Should provide a colorshape of the good lenght\"\n else:\n colorscale = get_colorscale(point_cloud)[0]\n \n rc('animation', html='html5')\n \n fig = plt.figure(figsize = (2, 2))\n ax = fig.add_subplot(1, 1, 1, projection = \"3d\")\n fig.subplots_adjust(0, 0, 1, 1)\n prepare_ax(ax)\n \n plot = ax.scatter(point_cloud[0], point_cloud[2], point_cloud[1],\n c = colorscale, alpha = PLOT_3D_ALPHA,\n cmap = plt.get_cmap(CMAP) if cmap is None else cmap,\n vmin=0, vmax=1)\n \n \n # initialization function: plot the background of each frame\n def init():\n return ()\n\n # animation function. This is called sequentially\n def animate(i):\n ax.view_init(DEFAULT_3D_VIEW[0], DEFAULT_3D_VIEW[1] + 6 * i * 60 / frames)\n ax.margins(x=-.49, y=-.49)\n return ()\n \n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=frames, interval=40,\n blit=True, repeat=True)\n \n plt.close(fig)\n if name is not None:\n anim.save(f'{name}.gif', writer='imagemagick', fps=15 * frames / 60.)\n else:\n display(anim)\n \ndef print_deformed_gif(\n point_cloud,\n deformation,\n name = None,\n scales = None,\n colorscale = None,\n cmap = None):\n \"\"\"\n Prints a deformed point cloud as a .gif\n \n Inspired from:\n http://louistiao.me/posts/notebooks/save-matplotlib-animations-as-gifs/\n \"\"\"\n \n point_cloud, deformation = prepare_cloud(point_cloud, deformation)\n \n if colorscale is not None:\n assert len(colorscale) == point_cloud.shape[-1], \"Should provide a colorshape of the good lenght\"\n else:\n colorscale = get_colorscale(point_cloud)[0]\n \n if scales is None:\n absscale = np.max(np.abs(deformation)) \n absscale = 10**np.floor(np.log10(absscale))\n scales = np.arange(-.2, .21, .01) / absscale\n \n scales = list(scales) + list(scales[::-1][1:-1])\n \n rc('animation', html='html5')\n \n fig = plt.figure(figsize = (2, 2))\n ax = fig.add_subplot(1, 1, 1, projection = \"3d\")\n \n fig.subplots_adjust(0, 0, 1, 1)\n prepare_ax(ax)\n plot = ax.scatter([], [], [])\n legend = ax.text2D(0.05, 0.90, \"\", transform=ax.transAxes)\n \n # initialization function: plot the background of each frame\n def init():\n return (plot, legend)\n\n # animation function. 
This is called sequentially\n def animate(i):\n if len(deformation.shape) == 2:\n d = point_cloud + scales[i] * deformation\n else:\n d = point_cloud + (np.expand_dims(scales[i], (-1, -2)) * deformation).sum(0)\n colorscale = get_colorscale(d)[0]\n ax.clear()\n prepare_ax(ax) \n plot = ax.scatter(d[0], d[2], d[1], alpha = PLOT_3D_ALPHA,\n c = colorscale,\n cmap = plt.get_cmap(CMAP) if cmap is None else cmap,\n vmin=0, vmax=1)\n #legend = ax.text2D(0.05, 0.90, \"intensity = {:.2f}\".format(scales[i]), transform=ax.transAxes)\n return (plot, legend)\n\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=len(scales), interval=40,\n blit=True, repeat=True)\n \n plt.close(fig)\n if name is not None:\n anim.save(f'{name}.gif', writer='imagemagick', fps=int(len(scales) / 2.))\n else:\n display(anim)\n \nif __name__==\"__main__\":\n \n print_gif(2 * np.random.random((3, 5000)) - 1)\n \n print_deformed_gif(2 * np.random.random((3, 5000)) - 1,\n 2 * np.random.random((3, 5000)) - 1)"
]
| [
[
"numpy.expand_dims",
"numpy.random.random",
"numpy.abs",
"numpy.arange",
"matplotlib.pyplot.get_cmap",
"numpy.log10",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.close",
"matplotlib.rc",
"matplotlib.pyplot.figure"
]
]
|
NarrativeApp/tvm | [
"c7ddb4134cc52916a1931439505a20135ac156b8"
]
| [
"tests/python/relay/test_vm.py"
]
| [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport numpy as np\nimport pytest\nimport time\nfrom unittest.mock import patch\n\nimport tvm\nfrom tvm import runtime\nfrom tvm import relay, IRModule\nfrom tvm.relay.backend import vm\nfrom tvm.relay.scope_builder import ScopeBuilder\nfrom tvm.relay.prelude import Prelude\nfrom tvm.relay.loops import while_loop\nfrom tvm.relay import testing\nfrom tvm.contrib import utils\nfrom tvm import rpc\nimport tvm.testing\nfrom tvm.relay.transform import InferType\nfrom tvm.relay.testing import mlp\nfrom tvm.relay.dataflow_pattern import wildcard, is_op\nfrom tvm.relay.backend.vm import VMCompiler\n\n\ndef check_result(target, dev, args, expected_result, mod=None):\n \"\"\"\n Check that evaluating `expr` applied to the arguments produces\n `result` on Relay VM.\n\n Parameters\n ----------\n args: list of Expr\n The arguments to supply the expr.\n\n expected_result:\n The expected result of running the expression.\n \"\"\"\n rts_result = relay.create_executor(\"vm\", device=dev, target=target, mod=mod).evaluate()(*args)\n tvm.testing.assert_allclose(expected_result, rts_result.numpy())\n\n\ndef veval(f, *args, device=tvm.cpu(), target=\"llvm\"):\n if isinstance(f, relay.Expr):\n mod = tvm.IRModule()\n mod[\"main\"] = f\n else:\n assert isinstance(f, tvm.IRModule), \"expected expression or module\"\n mod = f\n exe = relay.vm.compile(mod, target)\n vm = runtime.vm.VirtualMachine(exe, device)\n return vm.invoke(\"main\", *args)\n\n\ndef vmobj_to_list(o):\n if isinstance(o, tvm.nd.NDArray):\n return [o.numpy().tolist()]\n elif isinstance(o, tvm.runtime.container.ADT):\n result = []\n for f in o:\n result.extend(vmobj_to_list(f))\n return result\n else:\n raise RuntimeError(\"Unknown object type: %s\" % type(o))\n\n\ndef test_split(target, dev):\n x = relay.var(\"x\", shape=(12,))\n y = relay.split(x, 3, axis=0).astuple()\n f = relay.Function([x], y)\n\n x_data = np.random.rand(\n 12,\n ).astype(\"float32\")\n ref_res = np.split(x_data, 3, axis=0)\n res = veval(f, x_data, device=dev, target=target)\n for i in range(3):\n tvm.testing.assert_allclose(res[i].numpy(), ref_res[i])\n\n\ndef test_split_no_fuse(target, dev):\n x = relay.var(\"x\", shape=(12,))\n y = relay.split(x, 3, axis=0).astuple()\n z = relay.concatenate([relay.TupleGetItem(y, 0)], axis=0)\n z = relay.annotation.stop_fusion(z)\n f = relay.Function([x], z)\n x_data = np.random.rand(\n 12,\n ).astype(\"float32\")\n\n res = veval(f, x_data, device=dev, target=target)\n tvm.testing.assert_allclose(res.numpy(), np.split(x_data, 3, axis=0)[0])\n\n\ndef test_id(target, dev):\n x = relay.var(\"x\", shape=(10, 10), dtype=\"float64\")\n f = relay.Function([x], x)\n x_data = np.random.rand(10, 10).astype(\"float64\")\n mod = tvm.IRModule()\n mod[\"main\"] = f\n 
check_result(target, dev, [x_data], x_data, mod=mod)\n\n\ndef test_op(target, dev):\n x = relay.var(\"x\", shape=(10, 10))\n f = relay.Function([x], x + x)\n x_data = np.random.rand(10, 10).astype(\"float32\")\n mod = tvm.IRModule()\n mod[\"main\"] = f\n check_result(target, dev, [x_data], 2 * x_data, mod=mod)\n\n\ndef any(x):\n x = relay.op.nn.batch_flatten(x)\n return relay.op.min(x, axis=[0, 1])\n\n\[email protected]_failing_targets(\"vulkan\")\ndef test_cond(target, dev):\n x = relay.var(\"x\", shape=(10, 10))\n y = relay.var(\"y\", shape=(10, 10))\n # f = relay.Function([x, y], relay.op.equal(x, y))\n f = relay.Function([x, y], any(relay.op.equal(x, y)))\n x_data = np.random.rand(10, 10).astype(\"float32\")\n y_data = np.random.rand(10, 10).astype(\"float32\")\n\n mod = tvm.IRModule()\n mod[\"main\"] = f\n # same\n check_result(target, dev, [x_data, x_data], True, mod=mod)\n\n # diff\n check_result(target, dev, [x_data, y_data], False, mod=mod)\n\n\[email protected]_failing_targets(\"vulkan\")\ndef test_simple_if(target, dev):\n x = relay.var(\"x\", shape=(10, 10))\n y = relay.var(\"y\", shape=(10, 10))\n f = relay.Function([x, y], relay.If(any(relay.op.equal(x, y)), x, y))\n x_data = np.random.rand(10, 10).astype(\"float32\")\n y_data = np.random.rand(10, 10).astype(\"float32\")\n\n mod = tvm.IRModule()\n mod[\"main\"] = f\n # same\n check_result(target, dev, [x_data, x_data], x_data, mod=mod)\n\n # diff\n check_result(target, dev, [x_data, y_data], y_data, mod=mod)\n\n\[email protected]_targets(\"llvm\")\ndef test_multiple_ifs(target, dev):\n mod = tvm.IRModule({})\n b = relay.var(\"b\")\n v0 = relay.var(\"v0\")\n v1 = relay.var(\"v1\")\n v2 = relay.var(\"v2\")\n v3 = relay.var(\"v3\")\n out = relay.Tuple([v2, v3])\n out = relay.Let(v3, relay.If(b, v1, v0), out)\n out = relay.Let(v2, relay.If(b, v0, v1), out)\n out = relay.Let(v1, relay.Tuple([relay.const(1)]), out)\n out = relay.Let(v0, relay.Tuple([relay.const(0)]), out)\n fn = relay.Function([b], out)\n mod[\"main\"] = fn\n func = relay.create_executor(device=dev, mod=mod, kind=\"vm\").evaluate()\n res = vmobj_to_list(func(False))\n assert res == [1, 0]\n\n\ndef test_unused_function(target, dev):\n cond = relay.const(True)\n mod = tvm.IRModule()\n then_name = relay.GlobalVar(\"times_2\")\n # define unused function\n else_name = relay.GlobalVar(\"times_3\")\n t1 = relay.TensorType((2, 2), dtype=\"float32\")\n x1 = relay.var(\"x1\", t1, dtype=\"float32\")\n x2 = relay.var(\"x2\", t1, dtype=\"float32\")\n f2 = relay.multiply(x1, relay.const(2.0))\n f3 = relay.multiply(x2, relay.const(3.0))\n mod[then_name] = relay.Function([x1], f2)\n mod[else_name] = relay.Function([x2], f3)\n mod = InferType()(mod)\n x3 = relay.var(\"x3\", t1, dtype=\"float32\")\n # put unused function in else branch\n f = relay.If(cond, then_name(x3), else_name(x3))\n mod[\"main\"] = relay.Function([x3], f)\n x_data = np.random.rand(2, 2).astype(\"float32\")\n y_data = x_data * 2\n\n check_result(target, dev, [x_data], y_data, mod=mod)\n\n\ndef test_simple_call(target, dev):\n mod = tvm.IRModule({})\n sum_up = relay.GlobalVar(\"sum_up\")\n i = relay.var(\"i\", shape=[], dtype=\"int32\")\n sb = ScopeBuilder()\n sb.ret(i)\n func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], \"int32\"))\n mod[sum_up] = func\n i_data = np.array(0, dtype=\"int32\")\n iarg = relay.var(\"iarg\", shape=[], dtype=\"int32\")\n mod[\"main\"] = relay.Function([iarg], sum_up(iarg))\n check_result(target, dev, [i_data], i_data, mod=mod)\n\n\ndef test_count_loop(target, dev):\n 
mod = tvm.IRModule({})\n sum_up = relay.GlobalVar(\"sum_up\")\n i = relay.var(\"i\", shape=[], dtype=\"int32\")\n sb = ScopeBuilder()\n with sb.if_scope(relay.equal(i, relay.const(0, dtype=\"int32\"))):\n sb.ret(i)\n with sb.else_scope():\n one_less = relay.subtract(i, relay.const(1, dtype=\"int32\"))\n rec_call = relay.Call(sum_up, [one_less])\n sb.ret(relay.add(rec_call, i))\n func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], \"int32\"))\n mod[sum_up] = func\n i_data = np.array(0, dtype=\"int32\")\n iarg = relay.var(\"i\", shape=[], dtype=\"int32\")\n mod[\"main\"] = relay.Function([iarg], sum_up(iarg))\n result = veval(mod, i_data, device=dev, target=target)\n tvm.testing.assert_allclose(result.numpy(), i_data)\n check_result(target, dev, [i_data], i_data, mod=mod)\n\n\ndef test_sum_loop(target, dev):\n mod = tvm.IRModule({})\n sum_up = relay.GlobalVar(\"sum_up\")\n i = relay.var(\"i\", shape=[], dtype=\"int32\")\n accum = relay.var(\"accum\", shape=[], dtype=\"int32\")\n sb = ScopeBuilder()\n with sb.if_scope(relay.equal(i, relay.const(0, \"int32\"))):\n sb.ret(accum)\n with sb.else_scope():\n one_less = relay.subtract(i, relay.const(1, \"int32\"))\n new_accum = relay.add(accum, i)\n sb.ret(relay.Call(sum_up, [one_less, new_accum]))\n func = relay.Function([i, accum], sb.get())\n mod[sum_up] = func\n mod = relay.transform.InferType()(mod)\n loop_bound = 0\n i_data = np.array(loop_bound, dtype=\"int32\")\n accum_data = np.array(0, dtype=\"int32\")\n iarg = relay.var(\"i\", shape=[], dtype=\"int32\")\n aarg = relay.var(\"accum\", shape=[], dtype=\"int32\")\n mod[\"main\"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))\n check_result(target, dev, [i_data, accum_data], sum(range(1, loop_bound + 1)), mod=mod)\n\n\ndef test_tuple_fst(target, dev):\n ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])\n tup = relay.var(\"tup\", type_annotation=ttype)\n f = relay.Function([tup], relay.TupleGetItem(tup, 0))\n i_data = np.random.rand(41).astype(\"float32\")\n j_data = np.random.rand(10).astype(\"float32\")\n mod = tvm.IRModule()\n mod[\"main\"] = f\n check_result(target, dev, [(i_data, j_data)], i_data, mod=mod)\n\n\ndef test_tuple_second(target, dev):\n ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])\n tup = relay.var(\"tup\", type_annotation=ttype)\n f = relay.Function([tup], relay.TupleGetItem(tup, 1))\n i_data = np.random.rand(41).astype(\"float32\")\n j_data = np.random.rand(10).astype(\"float32\")\n mod = tvm.IRModule()\n mod[\"main\"] = f\n check_result(target, dev, [(i_data, j_data)], j_data, mod=mod)\n\n\ndef test_list_constructor(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n l, cons, nil = mod.get_type(\"List\")\n\n one2 = cons(relay.const(1), nil())\n one3 = cons(relay.const(2), one2)\n one4 = cons(relay.const(3), one3)\n f = relay.Function([], one4)\n\n mod[\"main\"] = f\n\n result = veval(mod, device=dev, target=target)\n assert len(result) == 2\n assert len(result[1]) == 2\n\n obj = vmobj_to_list(result)\n tvm.testing.assert_allclose(obj, np.array([3, 2, 1]))\n\n\ndef test_let_tensor(target, dev):\n sb = relay.ScopeBuilder()\n shape = (1,)\n x = relay.var(\"x\", shape=shape, dtype=\"float32\")\n x1 = relay.var(\"x1\", shape=shape, dtype=\"float32\")\n\n x1 = sb.let(x1, x)\n xplusone = x1 + relay.const(42.0, \"float32\")\n sb.ret(xplusone)\n body = sb.get()\n\n f = relay.Function([x], body)\n\n x_data = np.random.rand(*shape).astype(\"float32\")\n mod = tvm.IRModule()\n mod[\"main\"] = f\n 
check_result(target, dev, [x_data], x_data + 42.0, mod=mod)\n\n\ndef test_let_scalar(target, dev):\n sb = relay.ScopeBuilder()\n\n x = relay.var(\"x\", \"float32\")\n x1 = sb.let(\"x1\", x)\n xplusone = x1 + relay.const(42.0, \"float32\")\n sb.ret(xplusone)\n body = sb.get()\n\n f = relay.Function([x], body)\n\n x_data = np.array(np.random.rand()).astype(\"float32\")\n mod = tvm.IRModule()\n mod[\"main\"] = f\n check_result(target, dev, [x_data], x_data + 42.0, mod=mod)\n\n\ndef test_compose(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n compose = p.compose\n\n # add_one = fun x -> x + 1\n sb = relay.ScopeBuilder()\n x = relay.var(\"x\", \"float32\")\n x1 = sb.let(\"x1\", x)\n xplusone = x1 + relay.const(1.0, \"float32\")\n sb.ret(xplusone)\n body = sb.get()\n add_one = relay.GlobalVar(\"add_one\")\n add_one_func = relay.Function([x], body)\n\n # add_two = compose(add_one, add_one)\n sb = relay.ScopeBuilder()\n y = relay.var(\"y\", \"float32\")\n add_two_func = sb.let(\"add_two\", compose(add_one_func, add_one_func))\n add_two_res = add_two_func(y)\n sb.ret(add_two_res)\n add_two_body = sb.get()\n\n mod[add_one] = add_one_func\n\n f = relay.Function([y], add_two_body)\n mod[\"main\"] = f\n\n x_data = np.array(np.random.rand()).astype(\"float32\")\n result = veval(mod, [x_data], device=dev, target=target)\n tvm.testing.assert_allclose(result.numpy(), x_data + 2.0)\n\n\ndef test_list_hd(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n l, cons, nil = mod.get_type(\"List\")\n hd = mod.get_global_var(\"hd\")\n\n one2 = cons(relay.const(1), nil())\n one3 = cons(relay.const(2), one2)\n one4 = cons(relay.const(3), one3)\n three = hd(one4)\n f = relay.Function([], three)\n\n mod[\"main\"] = f\n\n result = veval(mod, device=dev, target=target)\n tvm.testing.assert_allclose(result.numpy(), 3)\n\n\ndef test_list_tl_empty_list(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n l, cons, nil = mod.get_type(\"List\")\n tl = mod.get_global_var(\"tl\")\n\n f = relay.Function([], tl(nil()))\n\n mod[\"main\"] = f\n\n with pytest.raises(tvm.error.TVMError):\n result = veval(mod, device=dev, target=target)\n\n\ndef test_list_tl(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n l, cons, nil = mod.get_type(\"List\")\n tl = mod.get_global_var(\"tl\")\n\n one2 = cons(relay.const(1), nil())\n one3 = cons(relay.const(2), one2)\n one4 = cons(relay.const(3), one3)\n\n f = relay.Function([], tl(one4))\n\n mod[\"main\"] = f\n\n result = veval(mod, device=dev, target=target)\n tvm.testing.assert_allclose(vmobj_to_list(result), np.array([2, 1]))\n\n\ndef test_list_nth(target, dev):\n expected = list(range(10))\n\n for i in range(len(expected)):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n _, cons, nil = mod.get_type(\"List\")\n nth = mod.get_global_var(\"nth\")\n\n l = nil()\n for i in reversed(expected):\n l = cons(relay.const(i), l)\n\n f = relay.Function([], nth(l, relay.const(i)))\n mod[\"main\"] = f\n result = veval(mod, device=dev, target=target)\n tvm.testing.assert_allclose(result.numpy(), expected[i])\n\n\ndef test_list_update(target, dev):\n expected = list(range(10))\n\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n _, cons, nil = mod.get_type(\"List\")\n update = mod.get_global_var(\"update\")\n\n l = nil()\n # create zero initialized list\n for i in range(len(expected)):\n l = cons(relay.const(0), l)\n\n # set value\n for i, v in enumerate(expected):\n l = update(l, relay.const(i), relay.const(v))\n\n f = relay.Function([], l)\n mod[\"main\"] = f\n result = veval(mod, 
device=dev, target=target)\n tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected))\n\n\ndef test_list_length(target, dev):\n expected = list(range(10))\n\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n _, cons, nil = mod.get_type(\"List\")\n length = mod.get_global_var(\"length\")\n\n l = nil()\n # create zero initialized list\n for _ in range(len(expected)):\n l = cons(relay.const(0), l)\n\n l = length(l)\n\n f = relay.Function([], l)\n mod[\"main\"] = f\n result = veval(mod, device=dev, target=target)\n tvm.testing.assert_allclose(result.numpy(), 10)\n\n\ndef test_list_map(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n x = relay.var(\"x\", \"int32\")\n add_one_func = relay.Function([x], relay.const(1) + x)\n\n _, cons, nil = mod.get_type(\"List\")\n map = mod.get_global_var(\"map\")\n\n l = cons(relay.const(2), cons(relay.const(1), nil()))\n\n f = relay.Function([], map(add_one_func, l))\n mod[\"main\"] = f\n result = veval(mod, device=dev, target=target)\n tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2]))\n\n\ndef test_list_foldl(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n _, cons, nil = mod.get_type(\"List\")\n foldl = mod.get_global_var(\"foldl\")\n\n x = relay.var(\"x\")\n y = relay.var(\"y\")\n rev_dup_func = relay.Function([y, x], cons(x, cons(x, y)))\n\n l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))\n f = relay.Function([], foldl(rev_dup_func, nil(), l))\n mod[\"main\"] = f\n result = veval(mod, device=dev, target=target)\n tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 3, 2, 2, 1, 1]))\n\n\ndef test_list_foldr(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n _, cons, nil = mod.get_type(\"List\")\n foldr = mod.get_global_var(\"foldr\")\n\n x = relay.var(\"x\")\n y = relay.var(\"y\")\n identity_func = relay.Function([x, y], cons(x, y))\n\n l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))\n f = relay.Function([], foldr(identity_func, nil(), l))\n mod[\"main\"] = f\n result = veval(mod, device=dev, target=target)\n tvm.testing.assert_allclose(vmobj_to_list(result), np.array([1, 2, 3]))\n\n\ndef test_list_sum(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n _, cons, nil = mod.get_type(\"List\")\n sum = mod.get_global_var(\"sum\")\n\n l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))\n f = relay.Function([], sum(l))\n mod[\"main\"] = f\n result = veval(mod, device=dev, target=target)\n tvm.testing.assert_allclose(result.numpy(), 6)\n\n\ndef test_list_filter(target, dev):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n _, cons, nil = mod.get_type(\"List\")\n filter = mod.get_global_var(\"filter\")\n\n x = relay.var(\"x\", \"int32\")\n greater_than_one = relay.Function([x], x > relay.const(1))\n l = cons(\n relay.const(1),\n cons(\n relay.const(3), cons(relay.const(1), cons(relay.const(5), cons(relay.const(1), nil())))\n ),\n )\n f = relay.Function([], filter(greater_than_one, l))\n mod[\"main\"] = f\n result = veval(mod, device=dev, target=target)\n tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 5]))\n\n\ndef test_closure(target, dev):\n x = relay.var(\"x\", shape=())\n y = relay.var(\"y\", shape=())\n f = relay.Function([x], x + y)\n ff = relay.Function([y], f)\n clo = ff(relay.const(1.0))\n main = clo(relay.const(2.0))\n res = veval(main, device=dev, target=target)\n tvm.testing.assert_allclose(res.numpy(), 3.0)\n\n\ndef test_add_op_scalar(target, dev):\n \"\"\"\n test_add_op_scalar:\n fn 
(x, y) {\n return x + y;\n }\n \"\"\"\n mod = tvm.IRModule()\n x = relay.var(\"x\", shape=()) # Default to float32\n y = relay.var(\"y\", shape=()) # Default to float32\n func = relay.Function([x, y], relay.op.add(x, y))\n x_y_data = [\n (np.array(10.0, dtype=\"float32\"), np.array(1.0, dtype=\"float32\")),\n (np.float32(10.0), np.float32(1.0)),\n (10.0, 1.0),\n ]\n for (x_data, y_data) in x_y_data:\n mod[\"main\"] = func\n check_result(target, dev, [x_data, y_data], x_data + y_data, mod=mod)\n\n\ndef test_add_op_scalar_int(target, dev):\n \"\"\"\n test_add_op_scalar_int:\n fn (x, y) {\n return x + y;\n }\n \"\"\"\n mod = tvm.IRModule()\n x = relay.var(\"x\", shape=(), dtype=\"int32\")\n y = relay.var(\"y\", shape=(), dtype=\"int32\")\n func = relay.Function([x, y], relay.op.add(x, y))\n x_y_data = [\n (np.array(10.0, dtype=\"int32\"), np.array(1.0, dtype=\"int32\")),\n (np.int32(10), np.int32(1)),\n (10, 1),\n ]\n for (x_data, y_data) in x_y_data:\n mod[\"main\"] = func\n check_result(target, dev, [x_data, y_data], x_data + y_data, mod=mod)\n\n\ndef test_add_op_tensor(target, dev):\n \"\"\"\n test_add_op_tensor:\n fn (x, y) {\n return x + y;\n }\n \"\"\"\n mod = tvm.IRModule()\n x = relay.var(\"x\", shape=(10, 5))\n y = relay.var(\"y\", shape=(10, 5))\n func = relay.Function([x, y], relay.op.add(x, y))\n x_data = np.random.rand(10, 5).astype(\"float32\")\n y_data = np.random.rand(10, 5).astype(\"float32\")\n mod[\"main\"] = func\n check_result(target, dev, [x_data, y_data], x_data + y_data, mod=mod)\n\n\ndef test_add_op_broadcast(target, dev):\n \"\"\"\n test_add_op_broadcast:\n fn (x, y) {\n return x + y;\n }\n \"\"\"\n mod = tvm.IRModule()\n x = relay.var(\"x\", shape=(10, 5))\n y = relay.var(\"y\", shape=(1, 5))\n func = relay.Function([x, y], relay.op.add(x, y))\n x_data = np.random.rand(10, 5).astype(\"float32\")\n y_data = np.random.rand(1, 5).astype(\"float32\")\n mod[\"main\"] = func\n check_result(target, dev, [x_data, y_data], x_data + y_data, mod=mod)\n\n\ndef test_vm_optimize_dynamic():\n dtype = \"float32\"\n x = relay.var(\"x\", shape=(relay.Any(), relay.Any()), dtype=dtype)\n y = relay.var(\"y\", shape=(relay.Any(), relay.Any()), dtype=dtype)\n mod = tvm.IRModule()\n mod[\"main\"] = relay.Function([x, y], relay.add(x, y))\n comp = relay.vm.VMCompiler()\n opt_mod, _ = comp.optimize(mod, target=\"llvm\")\n assert \"shape_func\" in opt_mod.astext(False)\n\n\ndef test_vm_optimize():\n mod, params = testing.synthetic.get_workload()\n comp = relay.vm.VMCompiler()\n opt_mod, _ = comp.optimize(mod, target=\"llvm\", params=params)\n\n free_vars = relay.analysis.free_vars(opt_mod[\"main\"].body)\n # Paremeters should all be bound, so the only free var is data\n assert len(free_vars) == 1\n\n\ndef test_loop_free_var(target, dev):\n x = relay.var(\"x\", shape=(), dtype=\"int32\")\n i = relay.var(\"i\", shape=(), dtype=\"int32\")\n s = relay.var(\"s\", shape=(), dtype=\"int32\")\n\n def cond(i, _):\n return i < relay.const(10, dtype=\"int32\")\n\n def body_no_free_var(i, acc):\n incr = relay.const(1, \"int32\")\n return i + incr, acc + i\n\n def body_with_free_var(i, acc):\n incr = relay.const(1, \"int32\")\n return i + incr, acc + x\n\n for args, body, expected in zip([[], [1]], [body_no_free_var, body_with_free_var], [45, 10]):\n loop = while_loop(cond, [i, s], body)\n tup = loop(relay.const(0, dtype=\"int32\"), relay.zeros(shape=(), dtype=\"int32\"))\n ret = relay.TupleGetItem(tup, 1)\n mod = tvm.IRModule()\n mod[\"main\"] = relay.Function(relay.analysis.free_vars(ret), ret)\n 
check_result(target, dev, args, expected, mod=mod)\n\n\ndef test_vm_reshape_tensor(target, dev):\n x_np = np.random.uniform(size=(8, 16)).astype(\"float32\")\n x = relay.var(\"x\", shape=(8, 16), dtype=\"float32\")\n y = relay.reshape(x, [-1, 4, 8])\n mod = tvm.IRModule()\n mod[\"main\"] = relay.Function([x], y)\n with tvm.transform.PassContext(opt_level=3):\n exec = relay.vm.compile(mod, \"llvm\")\n assert \"reshape_tensor\" in exec.bytecode\n check_result(target, dev, [x_np], x_np.reshape([4, 4, 8]), mod)\n\n x = relay.var(\"x\", shape=(8, 16), dtype=\"float32\")\n y = relay.reshape(x, [16, -1])\n y = relay.reverse_reshape(y, [-1, 4, 0])\n mod = tvm.IRModule()\n mod[\"main\"] = relay.Function([x], y)\n with tvm.transform.PassContext(opt_level=3):\n exec = relay.vm.compile(mod, \"llvm\")\n assert exec.bytecode.count(\"reshape_tensor\") == 1\n check_result(target, dev, [x_np], x_np.reshape([4, 4, 8]), mod)\n\n # reshape with symbolic/any shape\n for n in [tvm.tir.Any(), tvm.te.size_var(\"n\")]:\n x = relay.var(\"x\", shape=(n, 16), dtype=\"float32\")\n y = relay.reshape(x, [-1, 4])\n y = relay.reshape(y, [0, 2, -1])\n mod = tvm.IRModule()\n mod[\"main\"] = relay.Function([x], y)\n with tvm.transform.PassContext(opt_level=3):\n exec = relay.vm.compile(mod, \"llvm\")\n assert exec.bytecode.count(\"reshape_tensor\") == 1\n check_result(target, dev, [x_np], x_np.reshape([32, 2, 2]), mod)\n\n # dyn.reshape\n x = relay.var(\"x\", shape=(8, 16), dtype=\"float32\")\n y = relay.var(\"y\", shape=(3,), dtype=\"int32\")\n z = relay.reshape(x, [-1, 4, 8])\n z = relay.reshape(z, y)\n mod = tvm.IRModule()\n mod[\"main\"] = relay.Function([x, y], z)\n with tvm.transform.PassContext(opt_level=3):\n exec = relay.vm.compile(mod, \"llvm\")\n assert exec.bytecode.count(\"reshape_tensor\") == 2\n assert \"reshape_tensor\" in exec.bytecode\n y_np = np.array([8, 2, 8]).astype(\"int32\")\n check_result(target, dev, [x_np, y_np], x_np.reshape([8, 2, 8]), mod)\n\n\ndef test_vm_reshape_and_copy(target, dev):\n \"\"\"Make sure the compiler notices the reshape result shape is a literal and can use\n the immediate-mode alloc_tensor instruction instead of alloc_tensor_reg.\"\"\"\n x_np = np.random.uniform(size=(1, 1)).astype(\"float32\")\n x = relay.var(\"x\", shape=(1, 1), dtype=\"float32\")\n mod = tvm.IRModule.from_expr(relay.Function([x], relay.copy(relay.reshape(x, [0, 1]))))\n with tvm.transform.PassContext(opt_level=3):\n exec = relay.vm.compile(mod, \"llvm\")\n assert \"alloc_tensor\" in exec.bytecode\n assert not \"alloc_tensor_reg\" in exec.bytecode\n check_result(target, dev, [x_np], x_np.reshape([1, 1]), mod)\n\n\ndef test_vm_reshape_tuple(target, dev, x_shape=(1, 4, 2), y_shape=(1, 2, 10)):\n tup = relay.var(\n \"tup\",\n type_annotation=relay.TupleType([relay.TensorType(x_shape), relay.TensorType(y_shape)]),\n )\n out = relay.reshape(relay.TupleGetItem(tup, 0), (1, -1))\n f = relay.Function([tup], out)\n\n x_data = np.random.uniform(size=x_shape).astype(\"float32\")\n y_data = np.random.uniform(size=y_shape).astype(\"float32\")\n\n res = veval(f, (x_data, y_data), device=dev, target=target)\n tvm.testing.assert_allclose(res.numpy(), np.reshape(x_data, (1, -1)))\n\n\ndef test_constant_shape_with_external_codegen():\n @tvm.register_func(\"relay.ext.test1\")\n def relay_ext_test(func):\n return None\n\n mod = tvm.IRModule()\n shape = (relay.Any(), 25)\n dtype = \"float32\"\n\n # external function\n x = relay.var(\"x\", shape=shape, dtype=dtype)\n weight = relay.const(np.random.rand(5, 25).astype(\"float32\"), 
dtype=\"float32\")\n out = relay.nn.dense(x, weight)\n f1 = relay.Function([x], out)\n f1 = f1.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n f1 = f1.with_attr(\"Inline\", tvm.tir.IntImm(\"int32\", 1))\n f1 = f1.with_attr(\"Compiler\", \"test1\")\n f1 = f1.with_attr(\"global_symbol\", \"f1\")\n glb_f1 = relay.GlobalVar(\"f1\")\n mod[glb_f1] = f1\n mod = relay.transform.InferType()(mod)\n\n # Main function\n x = relay.var(\"x\", shape=shape, dtype=dtype)\n mod[\"main\"] = relay.Function([x], glb_f1(x))\n comp = relay.vm.VMCompiler()\n opt_mod, _ = comp.optimize(mod, target=\"llvm\")\n assert \"shape_func\" in opt_mod.astext(False)\n\n\ndef test_vm_rpc():\n \"\"\"\n This test checks to make sure you can export a VMExecutable,\n upload it to a remote machine using RPC and then execute it\n on the other machine.\n \"\"\"\n target = tvm.target.Target(\"llvm --host=llvm\")\n\n # Build a IRModule.\n x = relay.var(\"x\", shape=(10, 1))\n f = relay.Function([x], x + x)\n mod = IRModule.from_expr(f)\n\n # Compile to VMExecutable.\n vm_exec = vm.compile(mod, target=target)\n\n # Export to Disk\n temp = utils.tempdir()\n path = temp.relpath(\"vm_library.so\")\n vm_exec.mod.export_library(path)\n\n # Use local rpc server for testing.\n # Server must use popen so it doesn't inherit the current process state. It\n # will crash otherwise.\n def check_remote(server):\n remote = rpc.connect(server.host, server.port, session_timeout=10)\n\n # Upload the serialized Executable.\n remote.upload(path)\n # Get a handle to remote Executable.\n rexec = remote.load_module(\"vm_library.so\")\n\n device = remote.cpu()\n # Build a VM out of the executable and context.\n vm_factory = runtime.vm.VirtualMachine(rexec, device)\n np_input = np.random.uniform(size=(10, 1)).astype(\"float32\")\n input_tensor = tvm.nd.array(np_input, device)\n # Invoke its \"main\" function.\n out = vm_factory.invoke(\"main\", input_tensor)\n # Check the result.\n np.testing.assert_allclose(out.numpy(), np_input + np_input)\n\n check_remote(rpc.Server(\"127.0.0.1\"))\n\n\ndef test_get_output_single():\n target = tvm.target.Target(\"llvm\")\n\n # Build a IRModule.\n x = relay.var(\"x\", shape=(10,))\n f = relay.Function([x], x + x)\n mod = IRModule.from_expr(f)\n\n # Compile to VMExecutable.\n vm_exec = vm.compile(mod, target=target)\n vm_factory = runtime.vm.VirtualMachine(vm_exec, tvm.cpu())\n inp = np.ones(10, dtype=\"float32\")\n vm_factory.invoke_stateful(\"main\", inp)\n outputs = vm_factory.get_outputs()\n assert len(outputs) == 1\n np.testing.assert_allclose(outputs[0].numpy(), inp + inp)\n\n\[email protected]_targets(\"llvm\")\ndef test_get_output_multiple(target, dev):\n # Build a IRModule.\n x = relay.var(\"x\", shape=(10,))\n f = relay.Function([x], relay.Tuple([x + x, x]))\n mod = IRModule.from_expr(f)\n\n # Compile to VMExecutable.\n vm_exec = vm.compile(mod, target=target)\n vm_factory = runtime.vm.VirtualMachine(vm_exec, dev)\n inp = np.ones(10, dtype=\"float32\")\n vm_factory.invoke_stateful(\"main\", inp)\n outputs = vm_factory.get_outputs()\n assert len(outputs) == 2\n np.testing.assert_allclose(outputs[0].numpy(), inp + inp)\n np.testing.assert_allclose(outputs[1].numpy(), inp)\n\n\[email protected]_targets(\"llvm\")\ndef test_get_input_index(target, dev):\n # Build a IRModule.\n data_0, data_1 = [\"d1\", \"d2\"]\n x, y = [relay.var(c, shape=(10,)) for c in [data_0, data_1]]\n f = relay.Function([x, y], x + y)\n mod = IRModule.from_expr(f)\n\n # Compile to VMExecutable.\n vm_exec = vm.compile(mod, target=target)\n 
vm_factory = runtime.vm.VirtualMachine(vm_exec, dev)\n assert vm_factory.get_input_index(data_1) == 1\n assert vm_factory.get_input_index(data_0) == 0\n assert vm_factory.get_input_index(\"invalid\") == -1\n\n\[email protected]_targets(\"llvm\")\ndef test_benchmark(target, dev):\n mod, params = mlp.get_workload(1)\n lib = vm.compile(mod, target=target, params=params)\n exe = runtime.vm.VirtualMachine(lib, tvm.cpu())\n data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype(\"float32\"))\n result = exe.benchmark(tvm.cpu(), data, func_name=\"main\", repeat=2, number=1)\n assert result.mean == result.median\n assert result.mean > 0\n assert len(result.results) == 2\n\n with patch.object(\n tvm.runtime.module.Module,\n \"time_evaluator\",\n return_value=lambda x: tvm.runtime.module.BenchmarkResult([1, 2, 2, 5]),\n ) as method:\n result = exe.benchmark(dev, data, func_name=\"main\", repeat=2, number=1)\n assert result.mean == 2.5\n assert result.median == 2.0\n assert result.max == 5\n assert result.min == 1\n assert result.std == 1.5\n\n\ndef test_benchmark_end_to_end(target, dev):\n mod, params = mlp.get_workload(1)\n lib = vm.compile(mod, target=target, params=params)\n exe = runtime.vm.VirtualMachine(lib, dev)\n data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype(\"float32\"), device=dev)\n result = exe.benchmark(dev, data, func_name=\"main\", repeat=2, number=1, end_to_end=True)\n assert result.mean > 0\n\n\[email protected]_cuda\ndef test_benchmark_end_to_end_rpc():\n server = rpc.Server(\"127.0.0.1\")\n remote = rpc.connect(server.host, server.port)\n\n mod, params = mlp.get_workload(1)\n lib = vm.compile(mod, target=\"cuda\", params=params)\n\n temp = utils.tempdir()\n path = temp.relpath(\"vm_library.so\")\n lib.mod.export_library(path)\n remote.upload(path)\n rlib = remote.load_module(\"vm_library.so\")\n\n exe = runtime.vm.VirtualMachine(rlib, remote.device(\"cuda\"))\n data = tvm.nd.array(\n np.random.rand(1, 1, 28, 28).astype(\"float32\"), device=remote.device(\"cuda\")\n )\n result = exe.benchmark(\n remote.device(\"cuda\"), data=data, func_name=\"main\", repeat=2, number=1, end_to_end=True\n )\n assert result.mean > 0\n\n\ndef test_shape_func_nested_function():\n @tvm.register_func(\"relay.ext.test2\")\n def relay_ext_test(func):\n return None\n\n data_shape = (relay.Any(), 16)\n weight_shape = (relay.Any(), 16)\n\n dense = relay.nn.dense(\n relay.var(\"data\", shape=data_shape), relay.var(\"weight\", shape=weight_shape)\n )\n mod = tvm.IRModule.from_expr(dense)\n\n patterns = [(\"test.dense\", is_op(\"nn.dense\")(wildcard(), wildcard()))]\n passes = tvm.transform.Sequential(\n [\n relay.transform.MergeComposite(patterns),\n relay.transform.AnnotateTarget([\"test2\"]),\n relay.transform.PartitionGraph(),\n ]\n )\n\n mod = passes(mod)\n\n compiler = VMCompiler()\n compiler.lower(mod, \"llvm\")\n\n\[email protected]_cuda\ndef test_storage_size_and_offset_on_cpu():\n \"\"\"Tests allocations place sizes and offsets on the CPU host even if the rest\n of the computation is on a different device type.\"\"\"\n\n # TODO(mbs): Better would be to test ManifestAlloc independently.\n # And/or move this to C++ and test the VM executable in it's C++ instead of\n # pretty-printed form.\n\n # CPU = device type 1\n # GPU = device type 2\n def input():\n return tvm.parser.fromtext(\n \"\"\"\n #[version = \"0.0.5\"]\n def @main(%a: Tensor[(5, 7), float32],\n param_device_types=[2], result_device_type=2) {\n add(%a, %a)\n }\n \"\"\"\n )\n\n exe = relay.vm.compile(\n input(),\n 
tvm.target.Target(\"cuda\"),\n )\n\n # This program needs two constants:\n # - The size of the tensor's storage (first arg) to alloc_storage\n # - The offset of the tensor within the storage (second arg) to alloc_tensor\n # Both should be on the CPU\n assert \"VirtualDevice[0]: device type 1\" in exe.virtual_devices\n assert \"Const[0]: has shape int64[] on device index 0\" in exe.constants\n assert \"Const[1]: has shape int64[] on device index 0\" in exe.constants\n\n\[email protected]_cuda\ndef test_reshape_shape_on_cpu():\n \"\"\"Tests the argument to a reshape places the shape on the CPU host even if the rest\n of the computation is on a different device type.\"\"\"\n\n # TODO(mbs): Better would be to test ManifestAlloc independently.\n # And/or move this to C++ and test the VM executable in it's C++ instead of\n # pretty-printed form.\n\n # CPU = device type 1\n # GPU = device type 2\n def input():\n return tvm.parser.fromtext(\n \"\"\"\n #[version = \"0.0.5\"]\n def @main(%x: Tensor[(2, 8), float32],\n param_device_types=[2], result_device_type=2) {\n reshape(%x, newshape=[2, 4, 2])\n }\n \"\"\"\n )\n\n exe = relay.vm.compile(\n input(),\n tvm.target.Target(\"cuda\"),\n )\n\n # The newshape annotation should have been turned into a constant on the CPU.\n assert \"VirtualDevice[0]: device type 1\" in exe.virtual_devices\n assert \"Const[0]: has shape int64[3] on device index 0\" in exe.constants\n\n\[email protected]_cuda\ndef test_multi_targets():\n # Build an IRModule.\n n = 10\n x = relay.var(\"x\", shape=(n,))\n y = relay.var(\"y\", shape=(n,))\n z = relay.var(\"z\", shape=(n,))\n f = relay.Function([x, y, z], x + relay.op.annotation.on_device(y + z, tvm.cpu()))\n mod = IRModule.from_expr(f)\n\n # Compile to VMExecutable.\n with tvm.transform.PassContext(\n opt_level=3, config={\"relay.fallback_device_type\": tvm.cuda().device_type}\n ):\n exe = relay.vm.compile(\n mod, target={\"cpu\": tvm.target.Target(\"llvm\"), \"cuda\": tvm.target.Target(\"cuda\")}\n )\n\n # Run\n vm = runtime.vm.VirtualMachine(exe, [tvm.cuda(), tvm.cpu()])\n x_data = np.random.rand(\n n,\n ).astype(\"float32\")\n y_data = np.random.rand(\n n,\n ).astype(\"float32\")\n z_data = np.random.rand(\n n,\n ).astype(\"float32\")\n actual_result = vm.invoke(\"main\", x_data, y_data, z_data)\n\n # Test\n expected_result = x_data + y_data + z_data\n tvm.testing.assert_allclose(actual_result.numpy(), expected_result)\n\n\ndef test_large_constants():\n \"\"\"Large constants can be serialized outside of executable\"\"\"\n target = tvm.target.Target(\"llvm\")\n dev = tvm.cpu()\n\n # fn(x) { add(x, <large constant>) }\n x = relay.var(\"x\", shape=(1000, 1000))\n const_data = np.random.rand(1000, 1000).astype(\"float32\")\n const = relay.const(const_data, dtype=\"float32\")\n func = relay.Function([x], relay.op.add(x, const))\n mod = tvm.IRModule.from_expr(func)\n\n # Compile to executable.\n vm_exec = vm.compile(mod, target=target)\n\n # Save to constants and library files\n temp = utils.tempdir()\n path_consts = temp.relpath(\"consts\")\n vm_exec.move_late_bound_consts(path_consts, byte_limit=256)\n path_dso = temp.relpath(\"lib.so\")\n vm_exec.mod.export_library(path_dso)\n\n # Load library files and constants\n mod = runtime.load_module(path_dso)\n mod[\"load_late_bound_consts\"](path_consts)\n\n # Test main\n x_data = np.random.rand(1000, 1000).astype(\"float32\")\n the_vm = runtime.vm.VirtualMachine(mod, dev)\n actual = the_vm.invoke(\"main\", x_data)\n expected = x_data + const_data\n 
tvm.testing.assert_allclose(expected, actual.numpy())\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([__file__] + sys.argv[1:]))\n"
]
| [
[
"numpy.split",
"numpy.reshape",
"numpy.int32",
"numpy.ones",
"numpy.random.rand",
"numpy.float32",
"numpy.random.uniform",
"numpy.array"
]
]
|
artidoro/gpt-2-output-dataset | [
"00651e362aa61d65efa7a0495b231bd6a0af395c"
]
| [
"data_loading.py"
]
| [
"import os\nimport json\n\nimport numpy as np\nfrom sklearn.utils import shuffle\nimport pandas as pd\n\n\ndef _load_split(data_dir, source, split, n=np.inf, start_at=0):\n path = os.path.join(data_dir, f'{source}.{split}.jsonl')\n if not os.path.exists(path):\n path = os.path.join(data_dir, f'{source}.jsonl')\n assert os.path.exists(path), f'source {source} not found.'\n texts = []\n for i, line in enumerate(open(path)):\n if i < start_at:\n continue\n if i >= start_at + n:\n break\n d = json.loads(line)\n if 'text' in d:\n texts.append(d['text'])\n elif 'article' in d:\n texts.append(d['article'])\n return texts\n\ndef _load_split_grover(data_dir, source, split, n=np.inf):\n path = os.path.join(data_dir, f'{source}.{split}.jsonl')\n if not os.path.exists(path):\n path = os.path.join(data_dir, f'{source}.jsonl')\n assert os.path.exists(path), f'source {path} not found.'\n texts = []\n labels = []\n for i, line in enumerate(open(path)):\n if i >= n:\n break\n elt = json.loads(line)\n if 'text' in elt:\n if len(elt['text'].split()) > 0:\n texts.append(elt['text'])\n else:\n continue\n elif 'article' in elt:\n if len(elt['article'].split()) > 0:\n texts.append(elt['article'])\n else:\n continue\n labels.append(elt['label'] == 'machine' or elt['label'] == 'generated')\n return texts, labels\n\ndef _load_split_hackathon(data_dir, source, split, n=np.inf):\n with open(os.path.join('/home/apagnoni/apagnoni/data/challenge/', f'{split}_{source}.json')) as data_file:\n data = json.load(data_file)\n texts = [elt['text'] for elt in data]\n labels = [int('gen' in source)] * len(texts)\n return texts, labels\n\ndef load_split(data_dir, sources, split, n=np.inf):\n texts = []\n labels = []\n webtext_read = 0\n webtext_store = []\n for source in sources.split(';'):\n if len(source) == 0:\n continue\n if not ('generator' in source or 'gpt2' in source or 'gptneo' in source) and (('gen' in source) or ('real' in source)):\n t, l = _load_split_hackathon(data_dir, source, split, n)\n elif not ('generator' in source or 'gpt2' in source or 'gptneo' in source):\n webtext = _load_split(data_dir, 'webtext', split, n=n//2, start_at=webtext_read)\n gen = _load_split(data_dir, source, split, n=n//2)\n t = webtext+gen\n l = [0]*len(webtext)+[1]*len(gen)\n webtext_read += len(webtext)\n webtext_store += webtext\n else:\n t, l = _load_split_grover(data_dir, source, split, n)\n print(f'Loaded {len(t)} datapoints from {source}.')\n texts += t\n labels += l\n \n # for i, text in enumerate(texts):\n # for key in ['Article: ', 'Body: ', 'Abstract: ']:\n # if key in text:\n # texts[i] = text.split(key)[-1]\n\n data = {\n 'text': texts,\n 'labels': labels\n }\n df = shuffle(pd.DataFrame(data=data))\n\n return df"
]
| [
[
"pandas.DataFrame"
]
]
|
IRISMeister/iris-pygw-blueprint | [
"083372421d54a277b55bca7be766d303e4a7abb5"
]
| [
"python/src/PersonDemo.py"
]
| [
"from Company import Company\nfrom math import cos,sin,tan\nimport numpy as np\n\nclass Person:\n\n def __init__(self):\n self._name = \"Tom\"\n self._age = 5\n self.company = Company()\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, newName):\n self._name = newName\n\n @property\n def age(self):\n return str(self._age)\n\n @age.setter\n def age(self, newAge):\n self._age = newAge\n\n def displayPerson(self):\n arr1 = np.array([-3.7, -1.2, 0.5, 4.5])\n return \"name: \" + self.name + \", age: \" + self.age + \",\" + self.company.displayEmployer() + \",\" + \"{0}\".format(cos(1)) + \",\" + str(arr1[0])\n\n\n"
]
| [
[
"numpy.array"
]
]
|
dtabb73/Zorbit-Analyzer | [
"a8ea8e9a11fe93a2a2a9de62b9ca404ff2259c53"
]
| [
"202102115-icechia.py"
]
| [
"import numpy as np\r\nimport pandas as pd\r\nfrom io import StringIO\r\nimport re\r\nimport csv\r\nfrom csv import reader, writer\r\nimport sys\r\nimport os\r\nimport glob\r\nimport fnmatch\r\nfrom os import path\r\nimport matplotlib\r\nfrom matplotlib import pyplot as plt\r\n\r\nprint(\"You are using Zorbit Analyzer v0.1\")\r\ndirectory_path = input(\"Please enter the path to the directory of your files. All files should be in the same location: \") #Asks users for path\r\nos.chdir(directory_path)\r\nx = input('Input your Interproscan output gff3 file(s):') #Asks users for gff3 input\r\nif \"*\" in x: #Handles the case of *.gff3\r\n gff3_input = glob.glob(\"*.gff3\")\r\nelse: \r\n y = re.sub('[|; ]', ', ', x) #Substitutes possible gff3 file delimeters with commas\r\n gff3_input = re.split(', ', y) #Splits gff3 input into a list\r\nfor i in gff3_input:\r\n if os.path.exists(i): #Checks existence of gff3 file\r\n pass\r\n else:\r\n print(\"There does not seem to be a file by that name. Please check your path/filename and try again\")\r\n sys.exit()\r\nfasta_input = input('Input your fasta file:') #Asks users for fasta input file\r\nif os.path.exists(fasta_input): #Checks existence of fasta input file\r\n pass\r\nelse:\r\n print(\"There does not seem to be a file by that name. Please check your path/filename and try again\")\r\n sys.exit()\r\nif fnmatch.fnmatch(fasta_input, '*fastq*'):\r\n print(\"Zorbit Analyzer is not specifically constructed to handle fastq files but will try. If errors convert to fasta format\")\r\northo_input = input ('Input your ProteinOrtho output file:') #Asks users for ProteinOrtho input\r\nif os.path.exists(ortho_input): #Checks existence of ProteinOrtho input\r\n pass\r\nelse:\r\n print(\"There does not seem to be a file by that name. Please check your path/filename and try again\")\r\n sys.exit()\r\northo_input_file_name = input ('Input your ProteinOrtho input file name (faa). Leave blank if unknown though will run slower:') #Asks users for ProteinOrtho output file\r\nwhile True: \r\n file_to_write = input('Input your desired ZorbitAnalyzer output file name: ') #Asks users for output file\r\n if file_to_write != '': #Checks to see if user entered a file name\r\n break\r\n else:\r\n print(\"You did not enter an output file name\") #Repeatedly asks for output file name if not given\r\n continue\r\nChoice = ['yes', 'y', 'no', 'n']\r\nflag = True\r\nwhile flag is True:\r\n exclusion_flag = input(\"Would you like to exclude sequences that do not have either Interproscan or ProteinOrtho hits? 
(Yes/No) \").lower()\r\n for i in Choice:\r\n if exclusion_flag.startswith(i):\r\n flag = False\r\n break\r\n else: \r\n continue\r\nif exclusion_flag.startswith('y'):\r\n exclusion_flag = 1\r\nelse:\r\n exclusion_flag = 0\r\nprint(\"Analyzing files\") #Lets user know input portion has completed\r\n\r\npdortho = pd.read_csv(ortho_input, \"/t\", engine=\"python\") #Creates ProteinOrtho pd\r\ntest_file = 'test.txt'\r\ntest2_file = 'test2.txt'\r\ntest3_file = 'test3.txt'\r\n\r\n#Testing open/closing files\r\ndef try_file(input_file): #Defining function that creates/opens user output file and truncates it before closing it\r\n try: \r\n open(input_file, 'w+').close()\r\n except IOError:\r\n print(\"Unable to open output file\")\r\ntry_file('file_to_write.txt') #Creates/opens output file and truncates it before closing it\r\ntry_file('test.txt') #Creates/opens test file and truncates it before closing it\r\ntry_file('gff3_file_to_write.txt') #Creates/opens gff3 output file and truncates it before closing it\r\ntry_file('gff3_statsfile_to_write.txt') #Creates/opens gff3 output file and truncates it before closing i\r\ntry_file('fasta_file_to_write.txt') #Creates/opens fasta output file and truncates it before closing it\r\ntry_file('ortho_file_to_write.txt') #Creates/opens ProteinOrtho output file and truncates it before closing it\r\ntry_file('ortho_file_to_write2.txt') #Creates/opens a second ProteinOrtho output file and truncates it before closing it\r\ntry_file('zorbit_statistics.txt') #Creates/opens a statistics file and truncates it before closing it\r\n\r\n#Defining variables for later use\r\nfasta_file_to_write = 'fasta_file_to_write.txt' #Defining the interim fasta file to write\r\ngff3_file_to_write = 'gff3_file_to_write.txt' #Defining the interim gff3 file to write\r\ngff3_statsfile_to_write = 'gff3_statsfile_to_write.txt'\r\northo_file_to_write = 'ortho_file_to_write.txt' #Defining the interim Protein Ortho file to write\r\nzorbit_statistics = 'zorbit_statistics.txt' #Defining the Zorbit Statistics variable\r\nstring_to_remove1 = '##' #Removes header and gene introduction lines\r\nstring_to_remove2 = 'polypeptide' #Removes redundant polypeptide line\r\nstring_to_remove3 = 'MobiDBLite' #Removes results from MobiDBLite database\r\nstring_to_end = '##FASTA' #Sets end of file as the start of the fasta/code part of gff3 files\r\n\r\n#fasta\r\nfasta_file = None\r\nfastq_file = None\r\nfasta_type = \"amino_acid\"\r\nfastq_start_character = '@'\r\nfasta_start_character = '>' #Setting start character for fasta information line\r\nfastq_third_line_character ='+'\r\nfna_type = \"fna\"\r\nif fna_type in fasta_input:\r\n fasta_type = \"nucleotide\"\r\nwith open(fasta_input, 'r') as fasta: #Opening fasta input file to read\r\n for line in fasta: #reading lines in fasta file\r\n if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines\r\n fasta_file = fasta_input\r\n break\r\n elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)\r\n fastq_file = fasta_input\r\n fasta_type = \"nucleotide\"\r\n break\r\n else:\r\n print(\"The fasta input file does not seem to have typical fasta or fastq format\")\r\n sys.exit()\r\nif fasta_file is not None: #Checking to see if fasta input was fasta file (should not be empty)\r\n print(\"Working on fasta file\")\r\n with open(fasta_input, 'r') as fasta: #Opening fasta input file to read\r\n with open(fasta_file_to_write, 'a') as f: #Opens the output file to append\r\n for line in fasta: 
#reading lines in fasta file\r\n if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines\r\n fasta_nostart = re.sub('>', '\\n', line) #Removing > symbol and replacing with carriage return from each occurrence\r\n fasta_nospace = ', '.join(fasta_nostart.rsplit('\\n',1)) #Removes carriage return (before aa or na code) and replaces with comma\r\n fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma\r\n f.write(fasta_csv) #Writes output to file\r\n else:\r\n if not line.isspace(): #Will not write blank lines\r\n sequence_no_carriage = re.sub('\\n', '', line) #Removes carriage return from before the sequence data\r\n sequence_no_line_break = re.sub('\\r', '', sequence_no_carriage) #Removes line break from before the sequence data\r\n f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns\r\n else:\r\n continue\r\nelif fastq_file is not None: #Checking to see if fasta input was fastq file (should not be empty)\r\n print(\"Working on fastq file\")\r\n with open(fasta_input, 'r', encoding=\"latin-1\") as fasta: #Opening fasta input file to read\r\n with open(fasta_file_to_write, 'a', encoding=\"latin-1\") as f: #Opens the output file to append\r\n for i, line in enumerate(fasta): #reading lines in fasta file\r\n if i == 0: # Dealing with first line differently (no line break)\r\n fasta_nostart = re.sub('@', '', line) #Removing @ symbol from each occurrence and replaces with nothing\r\n fasta_nospace = ', '.join(fasta_nostart.rsplit('\\n',1)) #Removes carriage return (before aa or na code) and replaces with comma\r\n fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma\r\n f.write(fasta_csv) #Writes output to file\r\n elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)\r\n fasta_nostart = re.sub('@', '\\n', line) #Removing @ symbol from each occurrence and replaces with carriage return\r\n fasta_nospace = ', '.join(fasta_nostart.rsplit('\\n',1)) #Removes carriage return (before aa or na code) and replaces with comma\r\n fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma\r\n f.write(fasta_csv) #Writes output to file\r\n elif i % 4 == 1: #Writing line 2/4 (sequence file) to output file\r\n sequence_no_carriage = re.sub('\\n', '', line) #Removes carriage return from before the sequence data\r\n sequence_no_line_break = re.sub('\\r', '', sequence_no_carriage) #Removes line break from before the sequence data\r\n f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns\r\n else:\r\n pass\r\nelse:\r\n print(\"The input file does not seem to be in typical fasta or fastq format. 
Please check and try again\") #Ending if atypical fasta/fastq format\r\n sys.exit()\r\n\r\nfor i in gff3_input: #Cleaning up gff3 file prior to conversion to dataframe\r\n with open(i, 'r') as stack:\r\n with open(gff3_file_to_write, 'a') as f:\r\n for line in stack:\r\n if string_to_end in line: #Closes file at the start of the sequence data without including\r\n f.close()\r\n break\r\n elif string_to_remove1 in line: #Removing header and gene introduction lines (if present)\r\n continue\r\n elif string_to_remove2 in line: #Removing polypeptide line (if present)\r\n continue\r\n elif string_to_remove3 in line: #Removing MobiDBLite database (if present)\r\n continue\r\n else:\r\n f.write(line) \r\nfor i in gff3_input: #Saving unedited gff3 input into file for statistics purposes later\r\n with open(i, 'r') as stack:\r\n with open(gff3_statsfile_to_write, 'a') as f:\r\n for line in stack:\r\n if string_to_end in line: #Closes file at the start of the sequence data without including\r\n f.close()\r\n break\r\n elif string_to_remove1 in line: #Removing header and gene introduction lines (if present)\r\n continue\r\n else:\r\n f.write(line) \r\nfasta_column_names = ['SeqID', 'Information', 'Sequence'] #Defining the list of fasta column names to pass to the dataframe\r\nfastapd = pd.read_csv(fasta_file_to_write, names=fasta_column_names, engine = \"python\", header=None) #Creating a Pandas dataframe from the fasta output csv\r\nSeqID_list = fastapd[\"SeqID\"].tolist() #Saving contents of the SeqID column to a list\r\nfasta_row_number = len(fastapd) #Counting the number of rows in the fasta dataframe for the statistics output\r\nwith open(zorbit_statistics, 'a') as f:\r\n f.write(\"The number of sequences in the fasta is \" + str(fasta_row_number) + \"\\n\")\r\n\r\n\r\n#Start orthopd\r\nprint(\"Working on ProteinOrtho dataframe\")\r\northopd = pd.read_csv(ortho_input, sep='\\t', engine=\"python\", na_values=\"*\") #Creates a Pandas dataframe from ProteinOrtho input csv\r\northo_column_names = list(orthopd.columns)\r\n#Defining the SeqID column\r\nif ortho_input_file_name != \"\":\r\n orthopd.columns = [\"SeqID\" if col.startswith(ortho_input_file_name) else col for col in orthopd.columns] #Renaming the fasta input column in ProteinOrtho dataframe to SeqID to match other dataframes\r\nelse: pass\r\n#Attempting to identify which column corresponds to the input fasta\r\nfasta_input_split = fasta_input.split('.', 1)[0] #Trying to delete file handle from the fasta input file in case there was .fasta versus .faa, etc\r\northopd_pruned = orthopd.drop(columns=['# Species', 'Genes', 'Alg.-Conn.']) #Creating a new dataframe without the first three columns which will always have data in each row in order to id longest column\r\nif orthopd.columns.astype(str).str.contains(\"SeqID\").any(): #Checking to see if fasta input file name is in the ProteinOrtho column name list\r\n print(\"Found fasta Sequence ID column in ProteinOrtho file\")\r\nelse:\r\n print(\"Trying to find fasta file in ProteinOrtho file through other means\")\r\n orthopd.columns = [\"SeqID\" if col.startswith(fasta_input_split) else col for col in orthopd.columns] #Using the input fasta file name as a guess for the faa file name\r\n if orthopd.columns.astype(str).str.contains(\"SeqID\").any(): #Breaks loops if the column name has been found/replaced\r\n print(\"Found fasta Sequence ID column in ProteinOrtho file\") \r\n else: \r\n print(\"Attempting another way of identifying fasta file column. 
This may take some time\")\r\n orthopd_fasta_column_name = orthopd_pruned.count().idxmax() #Finding column with the least number of NaN which is likely the input fasta\r\n for l in SeqID_list: #Searching to see if any values from the fastapd SeqID column (l) are in the putative SeqID ProteinOrtho column\r\n if orthopd[orthopd_fasta_column_name].astype(str).str.contains(l).any(): \r\n orthopd.rename(columns=lambda x: x.replace(orthopd_fasta_column_name, \"SeqID\"), inplace=True) #Renaming the ProteinOrtho column with fasta sequence names as SeqID\r\n break\r\n else:\r\n print(\"Final method to identify fasta file column. This may take hours\")\r\n orthopd = orthopd.drop(orthopd[(orthopd['Genes'] == 1)].index) #Gets rid of rows with just a single gene found in order to speed up full frame search\r\n for l in SeqID_list: #Searching to see if any values from the fastapd SeqID column (l) are in the ProteinOrtho dataframe\r\n for i in orthopd.columns:\r\n if orthopd[i].astype(str).str.contains(l).any():\r\n orthopd.rename(columns=lambda x: x.replace(i, \"SeqID\"), inplace=True) #Renaming the ProteinOrtho column with fasta sequence names as SeqID\r\n break\r\northopd = orthopd.drop(orthopd[(orthopd['SeqID'].isna())].index)#Removing SeqID rows with NaN\r\n#Splitting the duplicated entries in the SeqID column and making new rows with a SeqID member on each but with same data otherwise\r\ndef pir2(df, c): #Defining function to split the SeqID column at each comma and place one of each split value onto a new, otherwise duplicated row\r\n colc = df[c].astype(str).str.split(',')\r\n clst = colc.values.astype(object).tolist()\r\n lens = [len(l) for l in clst]\r\n j = df.columns.get_loc(c)\r\n v = df.values\r\n n, m = v.shape\r\n r = np.arange(n).repeat(lens)\r\n return pd.DataFrame(\r\n np.column_stack([v[r, 0:j], np.concatenate(clst), v[r, j+1:]]),\r\n columns=orthopd.columns\r\n )\r\n\r\northopd3 = pir2(orthopd, \"SeqID\") #Running column split function on the SeqID column on orthopd\r\nprint(\"Beginning data analysis on the ProteinOrtho dataframe\")\r\n\r\n#Graph Algebraic Connectivity\r\northopd_algconn_nozero = orthopd3[orthopd3['Alg.-Conn.'] != 0] #Removing zero and one counts in orthopd for graph\r\northopd_algconn_noone = orthopd_algconn_nozero[orthopd_algconn_nozero['Alg.-Conn.'] != 1] #Getting the count of each Alg.Conn in the gff3 dataframe\r\northopd_algconn_noone['Alg.-Conn.'].plot.hist(grid=True, bins=100, \r\n color='#607c8e')\r\nplt.title('Distribution of Algebraic Connectivity without Unity')\r\nplt.xlabel('Degree of Connectivity')\r\nplt.ylabel('Number of Genes with Degree of Connectivity')\r\nplt.tight_layout()\r\nplt.savefig(\"ProteinOrtho_AlgConn_graph_noone.png\")#Saving graph to file\r\nplt.clf()\r\northopd_algconn_nozero['Alg.-Conn.'].plot.hist(grid=True, bins=100, \r\n color='#607c8e')\r\nplt.title('Distribution of Algebraic Connectivity')\r\nplt.xlabel('Degree of Connectivity')\r\nplt.ylabel('Number of Genes with Degree of Connectivity')\r\nplt.tight_layout()\r\nplt.savefig(\"ProteinOrtho_AlgConn_graph.png\")#Saving graph to file\r\nplt.clf()\r\n\r\n#Graph Gene Counts\r\northopd_gene_count_values = orthopd3['Genes'].value_counts() #Getting the count of each database in the gff3 dataframe\r\northopd_gene_count_values.plot(kind='bar') #Graphing the database counts\r\nplt.title('Graph of Gene Counts')\r\nplt.xlabel('Number of Shared transcripts')\r\nplt.ylabel('Number of Genes with same frequency')\r\nplt.tight_layout()\r\nplt.savefig(\"ProteinOrtho_gene_graph.png\")#Saving graph 
to file\r\nplt.clf()\r\n\r\n\r\n#Start gff3pd\r\nprint(\"Working on gff3 dataframe\")\r\ngff3pd_column_names = ['SeqID', 'Database', 'Match type', 'Start', 'Stop', 'Score', 'Strand', 'Phase', 'Match information'] #Renaming static gff3 columns\r\nstatsgff3pd = pd.read_csv(gff3_statsfile_to_write, sep='\\t', names=gff3pd_column_names, header=None, engine=\"python\") #Creating a dataframe for gff3 stats\r\ngff3pd_original_row_number = len(statsgff3pd) #Counting the number of rows in the original gff3pd dataframe for the statistics output\r\nwith open(zorbit_statistics, 'a') as f: #Writing the number of rows in the original gff3pd dataframe to the statistics output\r\n f.write(\"The number of sequences in the original gff3 file is \" + str(gff3pd_original_row_number) + \"\\n\")\r\ngff3pd = pd.read_csv(gff3_file_to_write, sep='\\t', names=gff3pd_column_names, header=None, engine = \"python\") #Creating a Pandas dataframe from the gff3 output csv\r\ngff3pd_row_number = len(gff3pd) #Counting the number of rows in the final gff3 file dataframe for the statistics output\r\ngff3pd_max_score = gff3pd['Score'].max() #Finding maximum value in Score column of gff3 dataframe\r\ngff3pd_without_null = gff3pd[gff3pd['Score'] != \".\"] #Finding minimum value in Score column of gff3 dataframe\r\ngff3pd_without_null_or_zero = gff3pd_without_null[gff3pd_without_null['Score'] != 0.0]\r\ngff3pd_min_score = gff3pd_without_null_or_zero['Score'].min() \r\nstatsgff3pd_without_null = statsgff3pd[statsgff3pd['Score'] != \".\"]\r\nstatsgff3pd_max_score = statsgff3pd_without_null['Score'].max()\r\nwith open(zorbit_statistics, 'a') as f:\r\n f.write(\"The number of sequences in the gff3 file after removal of MobiDBLite and duplicates is \" + str(gff3pd_row_number) + \"\\n\") #Adding cleaned gff3 stastitics to file\r\n f.write(\"The range of quality scores for the gff3 file range from \" + str(gff3pd_min_score) + \" to \" + str(gff3pd_max_score) + \"\\n\")#Adding range of scores to statistics file\r\n f.write(\"The maximum quality score for the original gff3 file is \" + str(statsgff3pd_max_score) + \"\\n\")\r\n\r\n#Graph database distribution\r\ngff3pd_database_count_values = gff3pd['Database'].value_counts() #Getting the count of each database in the gff3 dataframe\r\ngff3pd_database_count_values.plot(kind='bar') #Graphing the database counts\r\nplt.title('Distribution of Database hits')\r\nplt.xlabel('Database name')\r\nplt.ylabel('Number of Database hits')\r\nplt.tight_layout()\r\nplt.savefig(\"Gff3_database_graph.png\")#Saving graph to file\r\nplt.clf()\r\n\r\n#Preparing dataframes for merging\r\nprint(\"Preparing dataframes for merge\")\r\ngff3pd['SeqID'] = gff3pd['SeqID'].astype(str) #Setting column type as string\r\northopd3['SeqID'] = orthopd3['SeqID'].astype(str) #Setting column type as string\r\nfastapd['SeqID'] = fastapd['SeqID'].astype(str) #Setting column type as string\r\n\r\n#Dealing with fna versus faa\r\nprotein_flag = 0\r\nif fasta_type == \"nucleotide\": #Checking to see if the fasta_type is nucleotide\r\n gff3pd_split = gff3pd['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match\r\n gff3pd['SeqID'] = gff3pd_split[0] #Setting the gff3 SeqID column as the split column\r\n orthopd_split = orthopd3['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match\r\n orthopd['SeqID'] = orthopd_split[0] #Setting the ProteinOrtho SeqID column as the split column\r\nelse:\r\n #Pulling out reading frame 
information\r\n protein_flag = 1\r\n gff3pd['SeqID2'] = gff3pd['SeqID']\r\n gff3pd_split = gff3pd['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra number after the fasta SeqID \r\n gff3pd['SeqID2'] = gff3pd_split[0] #Setting the gff3 SeqID column as the split column\r\n gff3pd_split = gff3pd['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out\r\n gff3pd['SeqID2'] = gff3pd_split[0] #Setting the gff3 SeqID column\r\n gff3pd['Reading_Frame'] = gff3pd_split[1] #Setting the gff3 Frame column\r\n gff3pd = gff3pd.drop(['SeqID2'], axis=1)\r\n orthopd3['SeqID2'] = orthopd3['SeqID']\r\n orthopd_split = orthopd3['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match\r\n orthopd3['SeqID2'] = orthopd_split[0] #Setting the ProteinOrtho SeqID column as the split column\r\n orthopd_split = orthopd3['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out\r\n orthopd3['SeqID2'] = orthopd_split[0] #Setting the orthopd SeqID column\r\n orthopd3['Reading_Frame'] = orthopd_split[1] #Setting the gff3 Frame column\r\n orthopd = orthopd3.drop(['SeqID2'], axis=1)\r\n \r\n\r\n#Merging\r\nprint(\"Combining dataframes\") \r\ngff3_ortho_merge = pd.merge(orthopd, gff3pd, how='outer', on=['SeqID']) #Merging the ProteinOrtho and interproscan dataframes\r\nall_merge = pd.merge(gff3_ortho_merge, fastapd, how='outer', on=['SeqID']) #Merging the fasta dataframe with the combined ProteinOrtho/Interproscan dataframes\r\n\r\n\r\n\r\n#Adding marks to merged dataframe to make fasta\r\nall_merge['SeqID'] = all_merge['SeqID'].apply(lambda x: f'>{x}') #Placing > at the beginning of each new line and a tab at the end of SeqID\r\nall_merge['Sequence'] = all_merge['Sequence'].apply(lambda x: f'\\n{x}') #Placing a new line before the Sequence data\r\nall_merge = all_merge[ ['SeqID'] + [ col for col in all_merge.columns if col != 'SeqID' ] ] #Moving SeqID to the far left of the dataframe\r\nall_merge = all_merge[ [ col for col in all_merge.columns if col != 'Sequence' ] + ['Sequence'] ] #Moving Sequence to the far right of the dataframe\r\n\r\n#Statistics on the merged dataframe\r\nall_merge_both = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] == 1))].index)\r\nall_merge_neither = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] !=1))].index)\r\nall_merge_just_ortho = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] == 1))].index)\r\nall_merge_just_inter = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] !=1))].index)\r\n\r\n\r\nall_merge_all = len(pd.unique(all_merge['SeqID'])) #Calculating the number of unique sequences\r\nall_merge_both = len(pd.unique(all_merge_both['SeqID'])) #Calculating unique sequences with both interproscan and proteinortho hits\r\nall_merge_neither = len(pd.unique(all_merge_neither['SeqID'])) #Calculating unique sequences without interproscan or proteinortho hits\r\nall_merge_just_ortho = len(pd.unique(all_merge_just_ortho['SeqID'])) #Calculating unique sequences with proteinortho but not interproscan hits\r\nall_merge_just_inter = len(pd.unique(all_merge_just_inter['SeqID'])) #Calculating unique sequences with interproscan but no proteinortho hits\r\n\r\n#Writing merged dataframe statistics to file\r\nwith open(zorbit_statistics, 'a') as f:\r\n f.write(\"The total number of unique sequences is: \" + str(all_merge_all) + \"\\n\")\r\n f.write(\"The number of unique sequences 
with both ProteinOrtho and Interproscan hits is: \" + str(all_merge_both) + \"\\n\")\r\n f.write(\"The number of unique sequences with neither ProteinOrtho nor Interproscan hits is: \" + str(all_merge_neither) + \"\\n\")\r\n f.write(\"The number of unique sequences with only ProteinOrtho and not Interproscan hits is: \" + str(all_merge_just_ortho) + \"\\n\")\r\n f.write(\"The number of unique sequences with only Interproscan and not ProteinOrtho hits is: \" + str(all_merge_just_inter) + \"\\n\")\r\n\r\n#Excluding based on user preference\r\nif exclusion_flag == 1:\r\n all_merge = all_merge.drop(all_merge[((all_merge['Genes'] == 1) & (all_merge['Database'].isna()))].index)\r\n print(\"Dropped rows\")\r\n #Statistics on the merged dataframe\r\n all_merge_both = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] == 1))].index)\r\n all_merge_neither = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] !=1))].index)\r\n all_merge_just_ortho = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] == 1))].index)\r\n all_merge_just_inter = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] !=1))].index)\r\n all_merge_all = len(pd.unique(all_merge['SeqID'])) #Calculating the number of unique sequences\r\n all_merge_both = len(pd.unique(all_merge_both['SeqID'])) #Calculating unique sequences with both interproscan and proteinortho hits\r\n all_merge_neither = len(pd.unique(all_merge_neither['SeqID'])) #Calculating unique sequences without interproscan or proteinortho hits\r\n all_merge_just_ortho = len(pd.unique(all_merge_just_ortho['SeqID'])) #Calculating unique sequences with proteinortho but not interproscan hits\r\n all_merge_just_inter = len(pd.unique(all_merge_just_inter['SeqID'])) #Calculating unique sequences with interproscan but no proteinortho hits\r\n #Writing merged dataframe statistics to file\r\n with open(zorbit_statistics, 'a') as f:\r\n f.write(\"The total number of unique sequences (after dropping) is: \" + str(all_merge_all) + \"\\n\")\r\n f.write(\"The number of unique sequences with both ProteinOrtho and Interproscan hits (after dropping) is: \" + str(all_merge_both) + \"\\n\")\r\n f.write(\"The number of unique sequences with neither ProteinOrtho nor Interproscan hits (after dropping) is: \" + str(all_merge_neither) + \"\\n\")\r\n f.write(\"The number of unique sequences with only ProteinOrtho and not Interproscan hits (after dropping) is: \" + str(all_merge_just_ortho) + \"\\n\")\r\n f.write(\"The number of unique sequences with only Interproscan and not ProteinOrtho hits (after dropping) is: \" + str(all_merge_just_inter) + \"\\n\")\r\nelse:\r\n pass\r\n\r\nif protein_flag == 1: #Taking care of aa sequence fastas\r\n #Plotting Frame data as a histogram\r\n all_merge = all_merge.drop(['Reading_Frame_y'], axis=1)\r\n all_merge = all_merge.rename(columns = {\"Reading_Frame_x\": \"Reading_Frame\"})\r\n all_merge['Reading_Frame'] = all_merge['Reading_Frame'].astype(str) #Setting column type as string\r\n all_merge_frame_counts = all_merge['Reading_Frame'].value_counts() #Getting the count of Frames in the all_merge dataframe\r\n all_merge_frame_counts.plot(kind='bar') #Graphing the frame counts\r\n plt.title('Distribution of Frame hits')\r\n plt.xlabel('Frame number')\r\n plt.ylabel('Number of Frame hits')\r\n plt.tight_layout()\r\n plt.savefig(\"Reading_frame.png\")#Saving graph to file\r\n plt.clf()\r\n #Calculating number of duplicated, multi-frame sequences there are\r\n 
all_merge_duplicates = all_merge\r\n all_merge_split = all_merge['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match\r\n all_merge_duplicates['SeqID'] = all_merge_split[0] #Setting the ProteinOrtho SeqID column as the split column\r\n all_merge_two_columns = all_merge_duplicates[['SeqID', 'Reading_Frame']]\r\n all_merge_two_columns.to_csv(test2_file, sep='\t', index=False, header=False, quoting=csv.QUOTE_NONE, escapechar=\"\\\\\")\r\n all_merge_unique_sequence_count = len(pd.unique(all_merge_two_columns['SeqID']))\r\n quotient = all_merge_unique_sequence_count/all_merge_all\r\n percentage = quotient*100\r\n with open(zorbit_statistics, 'a') as f:\r\n f.write(\"The number of unique sequences is: \" + str(all_merge_unique_sequence_count) + \"\\n\")\r\n f.write(\"The percentage of unique sequences is: \" + str(percentage) + \"%\" + \"\\n\")\r\n\r\n\r\n#Writing to file and cleaning up\r\nall_merge.to_csv(test_file, sep='\t', index=False, header=False, quoting=csv.QUOTE_NONE, escapechar=\"\\\\\") #Printing final dataframe to csv file with tab delimiters and no index or header\r\nwith open('test.txt', 'r') as infile, \\\r\n open(file_to_write, 'w') as outfile:\r\n data = infile.read()\r\n data = data.replace(\"\\\\\", \"\")\r\n outfile.write(data)\r\n\r\n \r\n\r\nos.remove(\"test.txt\") #Cleaning up files\r\nos.remove(\"gff3_statsfile_to_write.txt\")\r\nos.remove(\"fasta_file_to_write.txt\")\r\nos.remove(\"gff3_file_to_write.txt\")\r\nos.remove(\"file_to_write.txt\")\r\nos.remove(\"ortho_file_to_write.txt\")\r\nos.remove(\"ortho_file_to_write2.txt\")\r\nprint(\"Zorbit Analysis Complete\")\r\ninput(\"Press Enter to exit program\")\r\n"
]
| [
[
"pandas.merge",
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"matplotlib.pyplot.clf",
"pandas.unique",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
]
|
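The pir2 helper in the entry above duplicates each ProteinOrtho row once per comma-separated SeqID value using raw NumPy index arithmetic. A minimal sketch of the same row-splitting idea, assuming a pandas recent enough (0.25+) to provide DataFrame.explode; the sample frame below is hypothetical, not data from the script:

import pandas as pd

# Hypothetical frame mimicking a ProteinOrtho row whose SeqID cell
# holds several comma-separated sequence names.
df = pd.DataFrame({
    "SeqID": ["seq1,seq2,seq3", "seq4"],
    "Genes": [3, 1],
})

# Split each cell into a list, then emit one otherwise-identical row per
# element -- the same effect pir2 achieves with np.arange(n).repeat(lens).
exploded = df.assign(SeqID=df["SeqID"].str.split(",")).explode("SeqID")
print(exploded)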
francisr/TorchPQ | [
"cb851053ac01a64b9fa374c9985c354f6249a628"
]
| [
"torchpq/kmeans/kernels/CustomKernel.py"
]
| [
"import cupy as cp\nimport torch\n\[email protected](for_each_device=True)\ndef cunnex(func_name, func_body):\n return cp.cuda.compile_with_cache(func_body).get_function(func_name)\n # return cp.cuda.compile_with_cache(globals()[strFunction]).get_function(strFunction)\n\nclass Stream:\n def __init__(self, ptr):\n self.ptr = ptr\n \nclass CustomKernel:\n def __init__(self):\n self._use_torch_in_cupy_malloc()\n self.stream = Stream(torch.cuda.current_stream().cuda_stream)\n \n @staticmethod\n def _torch_alloc(size):\n device = cp.cuda.Device().id\n tensor = torch.empty(size, dtype=torch.uint8, device=device)\n return cp.cuda.MemoryPointer(\n cp.cuda.UnownedMemory(tensor.data_ptr(), size, tensor), 0)\n\n def _use_torch_in_cupy_malloc(self):\n cp.cuda.set_allocator(self._torch_alloc)\n\n def _compile_kernel_str(\n self,\n kernel,\n name,\n options=(),\n backend=\"nvrtc\",\n max_dynamic_smem=None\n ):\n fn = cp.RawKernel(\n kernel,\n name,\n options=options,\n backend=backend,\n )\n if max_dynamic_smem:\n fn.max_dynamic_shared_size_bytes = max_dynamic_smem\n return fn"
]
| [
[
"torch.cuda.current_stream",
"torch.empty"
]
]
|
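A hedged usage sketch for the _compile_kernel_str helper in the entry above, which wraps cp.RawKernel with the nvrtc backend. The SAXPY kernel source, its name, and the launch shapes here are illustrative, and a CUDA device with CuPy installed is assumed:

import cupy as cp

# Illustrative raw kernel: y = a * x + y over flat float32 arrays.
saxpy_src = r'''
extern "C" __global__
void saxpy(const float a, const float* x, float* y, const int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}
'''

# The same call _compile_kernel_str makes internally.
saxpy = cp.RawKernel(saxpy_src, "saxpy", options=(), backend="nvrtc")

n = 1 << 20
x = cp.arange(n, dtype=cp.float32)
y = cp.ones(n, dtype=cp.float32)
threads = 256
blocks = (n + threads - 1) // threads
saxpy((blocks,), (threads,), (cp.float32(2.0), x, y, cp.int32(n)))  # grid, block, args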
amueller/scikit-image | [
"a8bfc5c5814a3c7fe363cfcad0c68b935706cd3c"
]
| [
"skimage/measure/tests/test_find_contours.py"
]
| [
"import numpy as np\nfrom numpy.testing import *\n\nfrom skimage.measure import find_contours\n\na = np.ones((8, 8), dtype=np.float32)\na[1:-1, 1] = 0\na[1, 1:-1] = 0\n\n## array([[ 1., 1., 1., 1., 1., 1., 1., 1.],\n## [ 1., 0., 0., 0., 0., 0., 0., 1.],\n## [ 1., 0., 1., 1., 1., 1., 1., 1.],\n## [ 1., 0., 1., 1., 1., 1., 1., 1.],\n## [ 1., 0., 1., 1., 1., 1., 1., 1.],\n## [ 1., 0., 1., 1., 1., 1., 1., 1.],\n## [ 1., 0., 1., 1., 1., 1., 1., 1.],\n## [ 1., 1., 1., 1., 1., 1., 1., 1.]], dtype=float32)\n\nx, y = np.mgrid[-1:1:5j, -1:1:5j]\nr = np.sqrt(x ** 2 + y ** 2)\n\n\ndef test_binary():\n contours = find_contours(a, 0.5)\n assert len(contours) == 1\n assert_array_equal(contours[0],\n [[ 6. , 1.5],\n [ 5. , 1.5],\n [ 4. , 1.5],\n [ 3. , 1.5],\n [ 2. , 1.5],\n [ 1.5, 2. ],\n [ 1.5, 3. ],\n [ 1.5, 4. ],\n [ 1.5, 5. ],\n [ 1.5, 6. ],\n [ 1. , 6.5],\n [ 0.5, 6. ],\n [ 0.5, 5. ],\n [ 0.5, 4. ],\n [ 0.5, 3. ],\n [ 0.5, 2. ],\n [ 0.5, 1. ],\n [ 1. , 0.5],\n [ 2. , 0.5],\n [ 3. , 0.5],\n [ 4. , 0.5],\n [ 5. , 0.5],\n [ 6. , 0.5],\n [ 6.5, 1. ],\n [ 6. , 1.5]])\n\ndef test_float():\n contours = find_contours(r, 0.5)\n assert len(contours) == 1\n assert_array_equal(contours[0],\n [[ 2., 3.],\n [ 1., 2.],\n [ 2., 1.],\n [ 3., 2.],\n [ 2., 3.]])\n\n\n \nif __name__ == '__main__':\n from numpy.testing import run_module_suite\n run_module_suite()\n"
]
| [
[
"numpy.testing.run_module_suite",
"numpy.sqrt",
"numpy.ones"
]
]
|
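As context for the tests above, a minimal sketch of the API they exercise: skimage.measure.find_contours returns a list of (N, 2) arrays of (row, column) coordinates tracing each iso-valued contour. The toy image below is made up for illustration:

import numpy as np
from skimage.measure import find_contours

# A bright square on a dark background; the 0.5 level traces its border.
image = np.zeros((10, 10), dtype=np.float32)
image[3:7, 3:7] = 1.0

contours = find_contours(image, 0.5)
for contour in contours:
    print(contour.shape)  # each contour is an (N, 2) array of (row, col) points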
felixitous/snowflake-connector-python | [
"e4a1304246d7ed8ba42e3f6fbe34badf8e6990ad"
]
| [
"setup.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.\n#\nfrom codecs import open\nfrom os import path\nimport os\nimport sys\nfrom sys import platform\nfrom shutil import copy\nimport glob\n\nfrom setuptools import setup, Extension\n\nTHIS_DIR = path.dirname(path.realpath(__file__))\n\ntry:\n from generated_version import VERSION\nexcept:\n from version import VERSION\nversion = '.'.join([str(v) for v in VERSION if v is not None])\n\nwith open(path.join(THIS_DIR, 'DESCRIPTION.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\n\n# Parse command line flags\noptions = {k: 'OFF' for k in ['--opt', '--debug']}\nfor flag in options.keys():\n if flag in sys.argv:\n options[flag] = 'ON'\n sys.argv.remove(flag)\n\nextensions = None\ncmd_class = {}\n\nisBuildExtEnabled = (os.getenv('ENABLE_EXT_MODULES', 'false')).lower()\n\nif isBuildExtEnabled == 'true':\n from Cython.Distutils import build_ext\n from Cython.Build import cythonize\n import os\n import pyarrow\n import numpy\n\n extensions = cythonize(\n [\n Extension(name='snowflake.connector.arrow_iterator', sources=['arrow_iterator.pyx']),\n Extension(name='snowflake.connector.arrow_result', sources=['arrow_result.pyx'])\n ],\n build_dir=os.path.join('build', 'cython'))\n\n class MyBuildExt(build_ext):\n\n def build_extension(self, ext):\n current_dir = os.getcwd()\n\n if ext.name == 'snowflake.connector.arrow_iterator':\n self._copy_arrow_lib()\n\n ext.sources += ['cpp/ArrowIterator/CArrowIterator.cpp',\n 'cpp/ArrowIterator/CArrowChunkIterator.cpp',\n 'cpp/ArrowIterator/CArrowTableIterator.cpp',\n 'cpp/ArrowIterator/SnowflakeType.cpp',\n 'cpp/ArrowIterator/BinaryConverter.cpp',\n 'cpp/ArrowIterator/BooleanConverter.cpp',\n 'cpp/ArrowIterator/DecimalConverter.cpp',\n 'cpp/ArrowIterator/DateConverter.cpp',\n 'cpp/ArrowIterator/FloatConverter.cpp',\n 'cpp/ArrowIterator/IntConverter.cpp',\n 'cpp/ArrowIterator/StringConverter.cpp',\n 'cpp/ArrowIterator/TimeConverter.cpp',\n 'cpp/ArrowIterator/TimeStampConverter.cpp',\n 'cpp/ArrowIterator/Python/Common.cpp',\n 'cpp/ArrowIterator/Python/Helpers.cpp',\n 'cpp/ArrowIterator/Util/time.cpp',\n 'cpp/Logging/logging.cpp']\n ext.include_dirs.append('cpp/ArrowIterator/')\n ext.include_dirs.append('cpp/Logging')\n\n if platform == 'win32':\n ext.include_dirs.append(pyarrow.get_include())\n ext.include_dirs.append(numpy.get_include())\n elif self._is_unix():\n ext.extra_compile_args.append('-isystem' + pyarrow.get_include())\n ext.extra_compile_args.append('-isystem' + numpy.get_include())\n ext.extra_compile_args.append('-std=c++11')\n ext.extra_compile_args.append('-D_GLIBCXX_USE_CXX11_ABI=0')\n\n ext.library_dirs.append(os.path.join(current_dir, self.build_lib, 'snowflake', 'connector'))\n ext.extra_link_args += self._get_arrow_lib_as_linker_input()\n\n if self._is_unix():\n ext.extra_link_args += ['-Wl,-rpath,$ORIGIN']\n\n build_ext.build_extension(self, ext)\n\n def _is_unix(self):\n return platform.startswith('linux') or platform == 'darwin'\n\n def _get_arrow_lib_dir(self):\n return pyarrow.get_library_dirs()[0]\n\n def _copy_arrow_lib(self):\n arrow_lib = self._get_libs_to_copy()\n\n for lib in arrow_lib:\n lib_pattern = self._get_pyarrow_lib_pattern(lib)\n source = glob.glob(lib_pattern)[0]\n copy(source, os.path.join(self.build_lib, 'snowflake', 'connector'))\n\n def _get_arrow_lib_as_linker_input(self):\n arrow_lib = pyarrow.get_libraries()\n link_lib = []\n for lib in arrow_lib:\n lib_pattern = 
self._get_pyarrow_lib_pattern(lib)\n source = glob.glob(lib_pattern)[0]\n link_lib.append(source)\n\n return link_lib\n\n def _get_libs_to_copy(self):\n if self._is_unix():\n return pyarrow.get_libraries() + \\\n ['arrow_flight', 'arrow_boost_regex', 'arrow_boost_system', 'arrow_boost_filesystem']\n elif platform == 'win32':\n return pyarrow.get_libraries() + ['arrow_flight']\n else:\n raise RuntimeError('Building on platform {} is not supported yet.'.format(platform))\n\n def _get_pyarrow_lib_pattern(self, lib_name):\n if platform.startswith('linux'):\n return '{}/lib{}.so.*'.format(self._get_arrow_lib_dir(), lib_name)\n elif platform == 'darwin':\n return '{}/lib{}*dylib'.format(self._get_arrow_lib_dir(), lib_name)\n elif platform == 'win32':\n return '{}\\\\{}.lib'.format(self._get_arrow_lib_dir(), lib_name)\n else:\n raise RuntimeError('Building on platform {} is not supported yet.'.format(platform))\n\n cmd_class = {\n \"build_ext\": MyBuildExt\n }\n\nsetup(\n name='snowflake-connector-python',\n version=version,\n description=u\"Snowflake Connector for Python\",\n ext_modules=extensions,\n cmdclass=cmd_class,\n long_description=long_description,\n author='Snowflake, Inc',\n author_email='[email protected]',\n license='Apache License, Version 2.0',\n keywords=\"Snowflake db database cloud analytics warehouse\",\n url='https://www.snowflake.com/',\n download_url='https://www.snowflake.com/',\n use_2to3=False,\n\n # NOTE: Python 3.4 will be dropped within one month.\n python_requires='>=2.7.9,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n\n install_requires=[\n 'azure-common<2.0.0',\n 'azure-storage-blob<12.0.0',\n 'boto3>=1.4.4,<1.11.0',\n 'botocore>=1.5.0,<1.14.0',\n 'requests<2.23.0',\n 'urllib3>=1.20,<1.26.0',\n 'certifi<2021.0.0',\n 'future<1.0.0',\n 'six<2.0.0',\n 'pytz<2021.0',\n 'pycryptodomex>=3.2,!=3.5.0,<4.0.0',\n 'pyOpenSSL>=16.2.0,<21.0.0',\n 'cffi>=1.9,<1.14',\n 'cryptography>=1.8.2,<3.0.0',\n 'ijson<3.0.0',\n 'pyjwt<2.0.0',\n 'idna<3.0.0',\n 'oscrypto<2.0.0',\n 'asn1crypto>0.24.0,<2.0.0',\n 'pyasn1>=0.4.0,<0.5.0;python_version<\"3.0\"',\n 'pyasn1-modules>=0.2.0,<0.3.0;python_version<\"3.0\"',\n 'enum34;python_version<\"3.4\"',\n ],\n\n namespace_packages=['snowflake'],\n packages=[\n 'snowflake.connector',\n 'snowflake.connector.tool',\n ],\n package_dir={\n 'snowflake.connector': '.',\n 'snowflake.connector.tool': 'tool',\n },\n package_data={\n 'snowflake.connector': ['*.pem', '*.json', '*.rst', 'LICENSE.txt'],\n },\n\n entry_points={\n 'console_scripts': [\n 'snowflake-dump-ocsp-response = '\n 'snowflake.connector.tool.dump_ocsp_response:main',\n 'snowflake-dump-ocsp-response-cache = '\n 'snowflake.connector.tool.dump_ocsp_response_cache:main',\n 'snowflake-dump-certs = '\n 'snowflake.connector.tool.dump_certs:main',\n 'snowflake-export-certs = '\n 'snowflake.connector.tool.export_certs:main',\n ],\n },\n extras_require={\n \"secure-local-storage\": [\n 'keyring!=16.1.0'\n ],\n \"pandas\": [\n 'pyarrow>=0.15.1,<0.16.0;python_version>\"3.4\"',\n 'pandas==0.24.2;python_version==\"2.7\" or python_version==\"3.5\"',\n 'pandas<1.0.0;python_version>\"3.5\"',\n ],\n \"development\": [\n 'pytest==4.6.6', # Last Python 2.7 supported version\n 'pytest-cov',\n 'pytest-rerunfailures',\n 'pytest-timeout',\n 'coverage',\n 'pexpect',\n 'mock',\n 'pytz',\n 'pytzdata',\n 'Cython',\n 'pendulum',\n 'more-itertools==4.3.0;python_version==\"2.7\"', # Last Python 2.7 supported version\n 'more-itertools;python_version!=\"2.7\"',\n 'numpy==1.16.5;python_version==\"2.7\"', # Last Python 2.7 supported 
version\n 'numpy;python_version!=\"2.7\"',\n ],\n },\n\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'Environment :: Console',\n 'Environment :: Other Environment',\n\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: OS Independent',\n\n 'Programming Language :: SQL',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n\n 'Topic :: Database',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n ],\n)\n"
]
| [
[
"numpy.get_include"
]
]
|
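The MyBuildExt class in the setup.py above assembles include paths and linker inputs from pyarrow's and numpy's build-introspection helpers. A small sketch of those calls, assuming pyarrow and numpy are installed; the exact paths and library names vary by platform and pyarrow build:

import numpy
import pyarrow

print(pyarrow.get_include())       # C++ headers bundled with the pyarrow wheel
print(numpy.get_include())         # NumPy headers, appended alongside them
print(pyarrow.get_libraries())     # library names to link, e.g. ['arrow', 'arrow_python']
print(pyarrow.get_library_dirs())  # directories that hold the libarrow* binaries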
yuxiangsun/RTFNet | [
"7dc43e6af8b6bfb2ebd2248dd9606f548e017b66"
]
| [
"run_demo.py"
]
| [
"# By Yuxiang Sun, Dec. 14, 2020\n# Email: [email protected]\n\nimport os, argparse, time, datetime, sys, shutil, stat, torch\nimport numpy as np \nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom util.MF_dataset import MF_dataset \nfrom util.util import compute_results, visualize\nfrom sklearn.metrics import confusion_matrix\nfrom scipy.io import savemat \nfrom model import RTFNet\n\n#############################################################################################\nparser = argparse.ArgumentParser(description='Test with pytorch')\n#############################################################################################\nparser.add_argument('--model_name', '-m', type=str, default='RTFNet')\nparser.add_argument('--weight_name', '-w', type=str, default='RTFNet_152') # RTFNet_152, RTFNet_50, please change the number of layers in the network file\nparser.add_argument('--file_name', '-f', type=str, default='final.pth')\nparser.add_argument('--dataset_split', '-d', type=str, default='test') # test, test_day, test_night\nparser.add_argument('--gpu', '-g', type=int, default=0)\n#############################################################################################\nparser.add_argument('--img_height', '-ih', type=int, default=480) \nparser.add_argument('--img_width', '-iw', type=int, default=640) \nparser.add_argument('--num_workers', '-j', type=int, default=16)\nparser.add_argument('--n_class', '-nc', type=int, default=9)\nparser.add_argument('--data_dir', '-dr', type=str, default='./dataset/')\nparser.add_argument('--model_dir', '-wd', type=str, default='./weights_backup/')\nargs = parser.parse_args()\n#############################################################################################\n \nif __name__ == '__main__':\n \n torch.cuda.set_device(args.gpu)\n print(\"\\nthe pytorch version:\", torch.__version__)\n print(\"the gpu count:\", torch.cuda.device_count())\n print(\"the current used gpu:\", torch.cuda.current_device(), '\\n')\n\n # prepare save direcotry\n if os.path.exists(\"./runs\"):\n print(\"previous \\\"./runs\\\" folder exist, will delete this folder\")\n shutil.rmtree(\"./runs\")\n os.makedirs(\"./runs\")\n os.chmod(\"./runs\", stat.S_IRWXO) # allow the folder created by docker read, written, and execuated by local machine\n model_dir = os.path.join(args.model_dir, args.weight_name)\n if os.path.exists(model_dir) is False:\n sys.exit(\"the %s does not exit.\" %(model_dir))\n model_file = os.path.join(model_dir, args.file_name)\n if os.path.exists(model_file) is True:\n print('use the final model file.')\n else:\n sys.exit('no model file found.') \n print('testing %s: %s on GPU #%d with pytorch' % (args.model_name, args.weight_name, args.gpu))\n \n conf_total = np.zeros((args.n_class, args.n_class))\n model = eval(args.model_name)(n_class=args.n_class)\n if args.gpu >= 0: model.cuda(args.gpu)\n print('loading model file %s... 
' % model_file)\n pretrained_weight = torch.load(model_file, map_location = lambda storage, loc: storage.cuda(args.gpu))\n own_state = model.state_dict()\n for name, param in pretrained_weight.items():\n if name not in own_state:\n continue\n own_state[name].copy_(param) \n print('done!')\n\n batch_size = 1\n test_dataset = MF_dataset(data_dir=args.data_dir, split=args.dataset_split, input_h=args.img_height, input_w=args.img_width)\n test_loader = DataLoader(\n dataset = test_dataset,\n batch_size = batch_size,\n shuffle = False,\n num_workers = args.num_workers,\n pin_memory = True,\n drop_last = False\n )\n ave_time_cost = 0.0\n\n model.eval()\n with torch.no_grad():\n for it, (images, labels, names) in enumerate(test_loader):\n images = Variable(images).cuda(args.gpu)\n labels = Variable(labels).cuda(args.gpu)\n start_time = time.time()\n logits = model(images) # logits.size(): mini_batch*num_class*480*640\n end_time = time.time()\n if it>=5: # # ignore the first 5 frames\n ave_time_cost += (end_time-start_time)\n # convert tensor to numpy 1d array\n label = labels.cpu().numpy().squeeze().flatten()\n prediction = logits.argmax(1).cpu().numpy().squeeze().flatten() # prediction and label are both 1-d array, size: minibatch*640*480\n # generate confusion matrix frame-by-frame\n conf = confusion_matrix(y_true=label, y_pred=prediction, labels=[0,1,2,3,4,5,6,7,8]) # conf is an n_class*n_class matrix, vertical axis: groundtruth, horizontal axis: prediction\n conf_total += conf\n # save demo images\n visualize(image_name=names, predictions=logits.argmax(1), weight_name=args.weight_name)\n print(\"%s, %s, frame %d/%d, %s, time cost: %.2f ms, demo result saved.\"\n %(args.model_name, args.weight_name, it+1, len(test_loader), names, (end_time-start_time)*1000))\n \n precision_per_class, recall_per_class, iou_per_class = compute_results(conf_total)\n conf_total_matfile = os.path.join(\"./runs\", 'conf_'+args.weight_name+'.mat')\n savemat(conf_total_matfile, {'conf': conf_total}) # 'conf' is the variable name when loaded in Matlab\n \n print('\\n###########################################################################')\n print('\\n%s: %s test results (with batch size %d) on %s using %s:' %(args.model_name, args.weight_name, batch_size, datetime.date.today(), torch.cuda.get_device_name(args.gpu))) \n print('\\n* the tested dataset name: %s' % args.dataset_split)\n print('* the tested image count: %d' % len(test_loader))\n print('* the tested image size: %d*%d' %(args.img_height, args.img_width)) \n print('* the weight name: %s' %args.weight_name) \n print('* the file name: %s' %args.file_name) \n print(\"* recall per class: \\n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f\" \\\n %(recall_per_class[0], recall_per_class[1], recall_per_class[2], recall_per_class[3], recall_per_class[4], recall_per_class[5], recall_per_class[6], recall_per_class[7], recall_per_class[8]))\n print(\"* iou per class: \\n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f\" \\\n %(iou_per_class[0], iou_per_class[1], iou_per_class[2], iou_per_class[3], iou_per_class[4], iou_per_class[5], iou_per_class[6], iou_per_class[7], iou_per_class[8])) \n print(\"\\n* average values (np.mean(x)): \\n recall: %.6f, iou: %.6f\" \\\n %(recall_per_class.mean(), iou_per_class.mean()))\n print(\"* average values (np.mean(np.nan_to_num(x))): \\n recall: %.6f, iou: %.6f\" \\\n 
%(np.mean(np.nan_to_num(recall_per_class)), np.mean(np.nan_to_num(iou_per_class))))\n print('\\n* the average time cost per frame (with batch size %d): %.2f ms, namely, the inference speed is %.2f fps' %(batch_size, ave_time_cost*1000/(len(test_loader)-5), 1.0/(ave_time_cost/(len(test_loader)-5)))) # ignore the first 5 frames\n #print('\\n* the total confusion matrix: ') \n #np.set_printoptions(precision=8, threshold=np.inf, linewidth=np.inf, suppress=True)\n #print(conf_total)\n print('\\n###########################################################################')\n"
]
| [
[
"torch.cuda.set_device",
"torch.cuda.current_device",
"torch.utils.data.DataLoader",
"sklearn.metrics.confusion_matrix",
"torch.cuda.get_device_name",
"numpy.nan_to_num",
"torch.no_grad",
"scipy.io.savemat",
"torch.cuda.device_count",
"numpy.zeros",
"torch.autograd.Variable"
]
]
|
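run_demo.py above restores weights by copying only the checkpoint entries whose names still exist in the model, which tolerates architecture drift between checkpoint and code. A self-contained sketch of that partial-loading pattern; the two-layer model and checkpoint keys are stand-ins, not RTFNet:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

# Pretend checkpoint: one key matching the model, one stale key to skip.
pretrained_weight = {
    "0.weight": torch.zeros(8, 4),
    "old_layer.weight": torch.zeros(3, 3),
}

own_state = model.state_dict()
for name, param in pretrained_weight.items():
    if name not in own_state:
        continue  # silently skip keys the current model no longer defines
    own_state[name].copy_(param)  # in-place copy keeps the model's dtype/device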
tjunderhill/beam | [
"1eaa8694528f44c8254992ad30da98aa8fb999cf"
]
| [
"sdks/python/apache_beam/dataframe/frames.py"
]
| [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Analogs for :class:`pandas.DataFrame` and :class:`pandas.Series`:\n:class:`DeferredDataFrame` and :class:`DeferredSeries`.\n\nThese classes are effectively wrappers around a `schema-aware`_\n:class:`~apache_beam.pvalue.PCollection` that provide a set of operations\ncompatible with the `pandas`_ API.\n\nNote that we aim for the Beam DataFrame API to be completely compatible with\nthe pandas API, but there are some features that are currently unimplemented\nfor various reasons. Pay particular attention to the **'Differences from\npandas'** section for each operation to understand where we diverge.\n\n.. _schema-aware:\n https://beam.apache.org/documentation/programming-guide/#what-is-a-schema\n.. _pandas:\n https://pandas.pydata.org/\n\"\"\"\n\nimport collections\nimport inspect\nimport itertools\nimport math\nimport re\nimport warnings\nfrom typing import List\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.groupby.generic import DataFrameGroupBy\n\nfrom apache_beam.dataframe import expressions\nfrom apache_beam.dataframe import frame_base\nfrom apache_beam.dataframe import io\nfrom apache_beam.dataframe import partitionings\n\n__all__ = [\n 'DeferredSeries',\n 'DeferredDataFrame',\n]\n\n\ndef populate_not_implemented(pd_type):\n def wrapper(deferred_type):\n for attr in dir(pd_type):\n # Don't auto-define hidden methods or dunders\n if attr.startswith('_'):\n continue\n if not hasattr(deferred_type, attr):\n pd_value = getattr(pd_type, attr)\n if isinstance(pd_value, property) or inspect.isclass(pd_value):\n # Some of the properties on pandas types (cat, dt, sparse), are\n # actually attributes with class values, not properties\n setattr(\n deferred_type,\n attr,\n property(frame_base.not_implemented_method(attr)))\n elif callable(pd_value):\n setattr(deferred_type, attr, frame_base.not_implemented_method(attr))\n return deferred_type\n\n return wrapper\n\n\nclass DeferredDataFrameOrSeries(frame_base.DeferredFrame):\n\n __array__ = frame_base.wont_implement_method(\n pd.Series, '__array__', reason=\"non-deferred-result\")\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n @frame_base.maybe_inplace\n def drop(self, labels, axis, index, columns, errors, **kwargs):\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and 'index'/'columns'\")\n if axis in (0, 'index'):\n index = labels\n columns = None\n elif axis in (1, 'columns'):\n index = None\n columns = labels\n else:\n raise ValueError(\n \"axis must be one of (0, 1, 'index', 'columns'), \"\n \"got '%s'\" % axis)\n\n if columns is not None:\n # Compute the proxy based on just the columns that are 
dropped.\n proxy = self._expr.proxy().drop(columns=columns, errors=errors)\n else:\n proxy = self._expr.proxy()\n\n if index is not None and errors == 'raise':\n # In order to raise an error about missing index values, we'll\n # need to collect the entire dataframe.\n requires = partitionings.Singleton(\n reason=(\n \"drop(errors='raise', axis='index') is not currently \"\n \"parallelizable. This requires collecting all data on a single \"\n f\"node in order to detect if one of {index!r} is missing.\"))\n else:\n requires = partitionings.Arbitrary()\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'drop',\n lambda df: df.drop(\n axis=axis,\n index=index,\n columns=columns,\n errors=errors,\n **kwargs), [self._expr],\n proxy=proxy,\n requires_partition_by=requires))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def droplevel(self, level, axis):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'droplevel',\n lambda df: df.droplevel(level, axis=axis), [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary()\n if axis in (1, 'column') else partitionings.Singleton()))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n @frame_base.maybe_inplace\n def fillna(self, value, method, axis, limit, **kwargs):\n # Default value is None, but is overriden with index.\n axis = axis or 'index'\n\n if axis in (0, 'index'):\n if method is not None:\n raise frame_base.WontImplementError(\n f\"fillna(method={method!r}, axis={axis!r}) is not supported \"\n \"because it is order-sensitive. Only fillna(method=None) is \"\n f\"supported with axis={axis!r}.\",\n reason=\"order-sensitive\")\n if limit is not None:\n raise frame_base.WontImplementError(\n f\"fillna(limit={method!r}, axis={axis!r}) is not supported because \"\n \"it is order-sensitive. 
Only fillna(limit=None) is supported with \"\n f\"axis={axis!r}.\",\n reason=\"order-sensitive\")\n\n if isinstance(value, frame_base.DeferredBase):\n value_expr = value._expr\n else:\n value_expr = expressions.ConstantExpression(value)\n\n return frame_base.DeferredFrame.wrap(\n # yapf: disable\n expressions.ComputedExpression(\n 'fillna',\n lambda df,\n value: df.fillna(\n value, method=method, axis=axis, limit=limit, **kwargs),\n [self._expr, value_expr],\n preserves_partition_by=partitionings.Arbitrary(),\n requires_partition_by=partitionings.Arbitrary()))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def ffill(self, **kwargs):\n return self.fillna(method='ffill', **kwargs)\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def bfill(self, **kwargs):\n return self.fillna(method='bfill', **kwargs)\n\n pad = ffill\n backfill = bfill\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def groupby(self, by, level, axis, as_index, group_keys, **kwargs):\n if not as_index:\n raise NotImplementedError('groupby(as_index=False)')\n if not group_keys:\n raise NotImplementedError('groupby(group_keys=False)')\n\n if axis in (1, 'columns'):\n return _DeferredGroupByCols(\n expressions.ComputedExpression(\n 'groupbycols',\n lambda df: df.groupby(by, axis=axis, **kwargs), [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary()))\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n\n elif level is not None:\n if isinstance(level, (list, tuple)):\n grouping_indexes = level\n else:\n grouping_indexes = [level]\n\n grouping_columns = []\n\n index = self._expr.proxy().index\n\n # Translate to level numbers only\n grouping_indexes = [\n l if isinstance(l, int) else index.names.index(l)\n for l in grouping_indexes\n ]\n\n if index.nlevels == 1:\n to_group_with_index = self._expr\n to_group = self._expr\n else:\n levels_to_drop = [\n i for i in range(index.nlevels) if i not in grouping_indexes\n ]\n\n # Reorder so the grouped indexes are first\n to_group_with_index = self.reorder_levels(\n grouping_indexes + levels_to_drop)\n\n grouping_indexes = list(range(len(grouping_indexes)))\n levels_to_drop = list(range(len(grouping_indexes), index.nlevels))\n if levels_to_drop:\n to_group = to_group_with_index.droplevel(levels_to_drop)._expr\n else:\n to_group = to_group_with_index._expr\n to_group_with_index = to_group_with_index._expr\n\n elif callable(by):\n\n def map_index(df):\n df = df.copy()\n df.index = df.index.map(by)\n return df\n\n to_group = expressions.ComputedExpression(\n 'map_index',\n map_index, [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton())\n\n orig_nlevels = self._expr.proxy().index.nlevels\n to_group_with_index = expressions.ComputedExpression(\n 'map_index_keep_orig',\n lambda df: df.set_index([df.index.map(by), df.index]),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n # Partitioning by the original indexes is preserved\n preserves_partition_by=partitionings.Index(\n list(range(1, orig_nlevels + 1))))\n\n grouping_columns = []\n # The index we need to group by is the last one\n grouping_indexes = [0]\n\n elif isinstance(by, DeferredSeries):\n\n raise NotImplementedError(\n \"grouping by a Series is not yet implemented. 
You can group by a \"\n \"DataFrame column by specifying its name.\")\n\n elif isinstance(by, np.ndarray):\n raise frame_base.WontImplementError(\n \"Grouping by a concrete ndarray is order sensitive.\",\n reason=\"order-sensitive\")\n\n elif isinstance(self, DeferredDataFrame):\n if not isinstance(by, list):\n by = [by]\n # Find the columns that we need to move into the index so we can group by\n # them\n column_names = self._expr.proxy().columns\n grouping_columns = list(set(by).intersection(column_names))\n index_names = self._expr.proxy().index.names\n for label in by:\n if label not in index_names and label not in self._expr.proxy().columns:\n raise KeyError(label)\n grouping_indexes = list(set(by).intersection(index_names))\n\n if grouping_indexes:\n if set(by) == set(index_names):\n to_group = self._expr\n elif set(by).issubset(index_names):\n to_group = self.droplevel(index_names.difference(by))._expr\n else:\n to_group = self.reset_index(grouping_indexes).set_index(by)._expr\n else:\n to_group = self.set_index(by)._expr\n\n if grouping_columns:\n # TODO(BEAM-11711): It should be possible to do this without creating an\n # expression manually, by using DeferredDataFrame.set_index, i.e.:\n # to_group_with_index = self.set_index([self.index] +\n # grouping_columns)._expr\n to_group_with_index = expressions.ComputedExpression(\n 'move_grouped_columns_to_index',\n lambda df: df.set_index([df.index] + grouping_columns),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Index(\n list(range(self._expr.proxy().index.nlevels))))\n else:\n to_group_with_index = self._expr\n\n else:\n raise NotImplementedError(by)\n\n return DeferredGroupBy(\n expressions.ComputedExpression(\n 'groupbyindex',\n lambda df: df.groupby(\n level=list(range(df.index.nlevels)), **kwargs), [to_group],\n requires_partition_by=partitionings.Index(),\n preserves_partition_by=partitionings.Arbitrary()),\n kwargs,\n to_group,\n to_group_with_index,\n grouping_columns=grouping_columns,\n grouping_indexes=grouping_indexes)\n\n abs = frame_base._elementwise_method('abs', base=pd.core.generic.NDFrame)\n astype = frame_base._elementwise_method(\n 'astype', base=pd.core.generic.NDFrame)\n copy = frame_base._elementwise_method('copy', base=pd.core.generic.NDFrame)\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def tz_localize(self, ambiguous, **kwargs):\n if isinstance(ambiguous, np.ndarray):\n raise frame_base.WontImplementError(\n \"tz_localize(ambiguous=ndarray) is not supported because it makes \"\n \"this operation sensitive to the order of the data. 
Please use a \"\n \"DeferredSeries instead.\",\n reason=\"order-sensitive\")\n elif isinstance(ambiguous, frame_base.DeferredFrame):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'tz_localize',\n lambda df,\n ambiguous: df.tz_localize(ambiguous=ambiguous, **kwargs),\n [self._expr, ambiguous._expr],\n requires_partition_by=partitionings.Index(),\n preserves_partition_by=partitionings.Singleton()))\n elif ambiguous == 'infer':\n # infer attempts to infer based on the order of the timestamps\n raise frame_base.WontImplementError(\n f\"tz_localize(ambiguous={ambiguous!r}) is not allowed because it \"\n \"makes this operation sensitive to the order of the data.\",\n reason=\"order-sensitive\")\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'tz_localize',\n lambda df: df.tz_localize(ambiguous=ambiguous, **kwargs),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton()))\n\n @property\n def size(self):\n sizes = expressions.ComputedExpression(\n 'get_sizes',\n # Wrap scalar results in a Series for easier concatenation later\n lambda df: pd.Series(df.size),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton())\n\n with expressions.allow_non_parallel_operations(True):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'sum_sizes',\n lambda sizes: sizes.sum(), [sizes],\n requires_partition_by=partitionings.Singleton(),\n preserves_partition_by=partitionings.Singleton()))\n\n @property\n def empty(self):\n empties = expressions.ComputedExpression(\n 'get_empties',\n # Wrap scalar results in a Series for easier concatenation later\n lambda df: pd.Series(df.empty),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton())\n\n with expressions.allow_non_parallel_operations(True):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'check_all_empty',\n lambda empties: empties.all(), [empties],\n requires_partition_by=partitionings.Singleton(),\n preserves_partition_by=partitionings.Singleton()))\n\n def bool(self):\n # Will throw if any partition has >1 element\n bools = expressions.ComputedExpression(\n 'get_bools',\n # Wrap scalar results in a Series for easier concatenation later\n lambda df: pd.Series([], dtype=bool)\n if df.empty else pd.Series([df.bool()]),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton())\n\n with expressions.allow_non_parallel_operations(True):\n # Will throw if overall dataset has != 1 element\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'combine_all_bools',\n lambda bools: bools.bool(), [bools],\n proxy=bool(),\n requires_partition_by=partitionings.Singleton(),\n preserves_partition_by=partitionings.Singleton()))\n\n def equals(self, other):\n intermediate = expressions.ComputedExpression(\n 'equals_partitioned',\n # Wrap scalar results in a Series for easier concatenation later\n lambda df,\n other: pd.Series(df.equals(other)),\n [self._expr, other._expr],\n requires_partition_by=partitionings.Index(),\n preserves_partition_by=partitionings.Singleton())\n\n with expressions.allow_non_parallel_operations(True):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'aggregate_equals',\n lambda df: df.all(), [intermediate],\n 
requires_partition_by=partitionings.Singleton(),\n preserves_partition_by=partitionings.Singleton()))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def sort_values(self, axis, **kwargs):\n \"\"\"``sort_values`` is not implemented.\n\n It is not implemented for ``axis=index`` because it imposes an ordering on\n the dataset, and we cannot guarantee it will be maintained (see\n https://s.apache.org/dataframe-order-sensitive-operations).\n\n It is not implemented for ``axis=columns`` because it makes the order of\n the columns depend on the data (see\n https://s.apache.org/dataframe-non-deferred-column-names).\"\"\"\n if axis in (0, 'index'):\n # axis=index imposes an ordering on the DataFrame rows which we do not\n # support\n raise frame_base.WontImplementError(\n \"sort_values(axis=index) is not supported because it imposes an \"\n \"ordering on the dataset which we cannot guarantee will be \"\n \"preserved.\",\n reason=\"order-sensitive\")\n else:\n # axis=columns will reorder the columns based on the data\n raise frame_base.WontImplementError(\n \"sort_values(axis=columns) is not supported because the order of the \"\n \"columns in the result depends on the data.\",\n reason=\"non-deferred-columns\")\n\n @frame_base.with_docs_from(pd.DataFrame)\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n @frame_base.maybe_inplace\n def sort_index(self, axis, **kwargs):\n \"\"\"``axis=index`` is not allowed because it imposes an ordering on the\n dataset, and we cannot guarantee it will be maintained (see\n https://s.apache.org/dataframe-order-sensitive-operations). Only\n ``axis=columns`` is allowed.\"\"\"\n if axis in (0, 'index'):\n # axis=rows imposes an ordering on the DataFrame which we do not support\n raise frame_base.WontImplementError(\n \"sort_index(axis=index) is not supported because it imposes an \"\n \"ordering on the dataset which we cannot guarantee will be \"\n \"preserved.\",\n reason=\"order-sensitive\")\n\n # axis=columns reorders the columns by name\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'sort_index',\n lambda df: df.sort_index(axis, **kwargs),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary(),\n ))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n @frame_base.maybe_inplace\n def where(self, cond, other, errors, **kwargs):\n requires = partitionings.Arbitrary()\n deferred_args = {}\n actual_args = {}\n\n # TODO(bhulette): This is very similar to the logic in\n # frame_base.elementwise_method, can we unify it?\n if isinstance(cond, frame_base.DeferredFrame):\n deferred_args['cond'] = cond\n requires = partitionings.Index()\n else:\n actual_args['cond'] = cond\n\n if isinstance(other, frame_base.DeferredFrame):\n deferred_args['other'] = other\n requires = partitionings.Index()\n else:\n actual_args['other'] = other\n\n if errors == \"ignore\":\n # We need all data in order to ignore errors and propagate the original\n # data.\n requires = partitionings.Singleton(\n reason=(\n f\"where(errors={errors!r}) is currently not parallelizable, \"\n \"because all data must be collected on one node to determine if \"\n \"the original data should be propagated instead.\"))\n\n actual_args['errors'] = errors\n\n def where_execution(df, *args):\n runtime_values = {\n name: value\n for (name, value) in zip(deferred_args.keys(), args)\n }\n return 
df.where(**runtime_values, **actual_args, **kwargs)\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n \"where\",\n where_execution,\n [self._expr] + [df._expr for df in deferred_args.values()],\n requires_partition_by=requires,\n preserves_partition_by=partitionings.Index(),\n ))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n @frame_base.maybe_inplace\n def mask(self, cond, **kwargs):\n return self.where(~cond, **kwargs)\n\n @property\n def dtype(self):\n return self._expr.proxy().dtype\n\n isin = frame_base._elementwise_method('isin', base=pd.DataFrame)\n\n @property\n def ndim(self):\n return self._expr.proxy().ndim\n\n def _get_index(self):\n return _DeferredIndex(self)\n\n index = property(\n _get_index, frame_base.not_implemented_method('index (setter)'))\n\n hist = frame_base.wont_implement_method(\n pd.DataFrame, 'hist', reason=\"plotting-tools\")\n\n attrs = property(\n frame_base.wont_implement_method(\n pd.DataFrame, 'attrs', reason='experimental'))\n\n reorder_levels = frame_base._proxy_method(\n 'reorder_levels',\n base=pd.DataFrame,\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton())\n\n\n@populate_not_implemented(pd.Series)\n@frame_base.DeferredFrame._register_for(pd.Series)\nclass DeferredSeries(DeferredDataFrameOrSeries):\n @property\n def name(self):\n return self._expr.proxy().name\n\n @name.setter\n def name(self, value):\n def fn(s):\n s = s.copy()\n s.name = value\n return s\n\n self._expr = expressions.ComputedExpression(\n 'series_set_name',\n fn, [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary())\n\n @property\n def dtype(self):\n return self._expr.proxy().dtype\n\n dtypes = dtype\n\n def __getitem__(self, key):\n if _is_null_slice(key) or key is Ellipsis:\n return self\n\n elif (isinstance(key, int) or _is_integer_slice(key)\n ) and self._expr.proxy().index._should_fallback_to_positional():\n raise frame_base.WontImplementError(\n \"Accessing an item by an integer key is order sensitive for this \"\n \"Series.\",\n reason=\"order-sensitive\")\n\n elif isinstance(key, slice) or callable(key):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n # yapf: disable\n 'getitem',\n lambda df: df[key],\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary()))\n\n elif isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool:\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n # yapf: disable\n 'getitem',\n lambda df,\n indexer: df[indexer],\n [self._expr, key._expr],\n requires_partition_by=partitionings.Index(),\n preserves_partition_by=partitionings.Arbitrary()))\n\n elif pd.core.series.is_iterator(key) or pd.core.common.is_bool_indexer(key):\n raise frame_base.WontImplementError(\n \"Accessing a DeferredSeries with an iterator is sensitive to the \"\n \"order of the data.\",\n reason=\"order-sensitive\")\n\n else:\n # We could consider returning a deferred scalar, but that might\n # be more surprising than a clear error.\n raise frame_base.WontImplementError(\n f\"Indexing a series with key of type {type(key)} is not supported \"\n \"because it produces a non-deferred result.\",\n reason=\"non-deferred-result\")\n\n def keys(self):\n return self.index\n\n @frame_base.args_to_kwargs(pd.Series)\n @frame_base.populate_defaults(pd.Series)\n def append(self, to_append, 
ignore_index, verify_integrity, **kwargs):\n if not isinstance(to_append, DeferredSeries):\n raise frame_base.WontImplementError(\n \"append() only accepts DeferredSeries instances, received \" +\n str(type(to_append)))\n if ignore_index:\n raise frame_base.WontImplementError(\n \"append(ignore_index=True) is order sensitive because it requires \"\n \"generating a new index based on the order of the data.\",\n reason=\"order-sensitive\")\n\n if verify_integrity:\n # We can verify the index is non-unique within index partitioned data.\n requires = partitionings.Index()\n else:\n requires = partitionings.Arbitrary()\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'append',\n lambda s,\n to_append: s.append(\n to_append, verify_integrity=verify_integrity, **kwargs),\n [self._expr, to_append._expr],\n requires_partition_by=requires,\n preserves_partition_by=partitionings.Arbitrary()))\n\n @frame_base.with_docs_from(pd.Series)\n @frame_base.args_to_kwargs(pd.Series)\n @frame_base.populate_defaults(pd.Series)\n def align(self, other, join, axis, level, method, **kwargs):\n \"\"\"Aligning per-level is not yet supported. Only the default,\n ``level=None``, is allowed.\n\n Filling NaN values via ``method`` is not supported, because it is\n sensitive to the order of the data\n (see https://s.apache.org/dataframe-order-sensitive-operations). Only the\n default, ``method=None``, is allowed.\n \"\"\"\n if level is not None:\n raise NotImplementedError('per-level align')\n if method is not None:\n raise frame_base.WontImplementError(\n f\"align(method={method!r}) is not supported because it is \"\n \"order sensitive. Only align(method=None) is supported.\",\n reason=\"order-sensitive\")\n # We're using pd.concat here as expressions don't yet support\n # multiple return values.\n aligned = frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'align',\n lambda x,\n y: pd.concat([x, y], axis=1, join='inner'),\n [self._expr, other._expr],\n requires_partition_by=partitionings.Index(),\n preserves_partition_by=partitionings.Arbitrary()))\n return aligned.iloc[:, 0], aligned.iloc[:, 1]\n\n array = property(\n frame_base.wont_implement_method(\n pd.Series, 'array', reason=\"non-deferred-result\"))\n\n ravel = frame_base.wont_implement_method(\n pd.Series, 'ravel', reason=\"non-deferred-result\")\n\n rename = frame_base._elementwise_method('rename', base=pd.Series)\n between = frame_base._elementwise_method('between', base=pd.Series)\n\n add_suffix = frame_base._proxy_method(\n 'add_suffix',\n base=pd.DataFrame,\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton())\n add_prefix = frame_base._proxy_method(\n 'add_prefix',\n base=pd.DataFrame,\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton())\n\n def dot(self, other):\n left = self._expr\n if isinstance(other, DeferredSeries):\n right = expressions.ComputedExpression(\n 'to_dataframe',\n pd.DataFrame, [other._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary())\n right_is_series = True\n elif isinstance(other, DeferredDataFrame):\n right = other._expr\n right_is_series = False\n else:\n raise frame_base.WontImplementError(\n \"other must be a DeferredDataFrame or DeferredSeries instance. \"\n \"Passing a concrete list or numpy array is not supported. 
Those \"\n \"types have no index and must be joined based on the order of the \"\n \"data.\",\n reason=\"order-sensitive\")\n\n dots = expressions.ComputedExpression(\n 'dot',\n # Transpose so we can sum across rows.\n (lambda left, right: pd.DataFrame(left @ right).T),\n [left, right],\n requires_partition_by=partitionings.Index())\n with expressions.allow_non_parallel_operations(True):\n sums = expressions.ComputedExpression(\n 'sum',\n lambda dots: dots.sum(), #\n [dots],\n requires_partition_by=partitionings.Singleton())\n\n if right_is_series:\n result = expressions.ComputedExpression(\n 'extract',\n lambda df: df[0], [sums],\n requires_partition_by=partitionings.Singleton())\n else:\n result = sums\n return frame_base.DeferredFrame.wrap(result)\n\n __matmul__ = dot\n\n @frame_base.with_docs_from(pd.Series)\n def std(self, *args, **kwargs):\n # Compute variance (deferred scalar) with same args, then sqrt it\n return self.var(*args, **kwargs).apply(lambda var: math.sqrt(var))\n\n @frame_base.with_docs_from(pd.Series)\n @frame_base.args_to_kwargs(pd.Series)\n @frame_base.populate_defaults(pd.Series)\n def var(self, axis, skipna, level, ddof, **kwargs):\n \"\"\"Per-level aggregation is not yet supported (BEAM-11777). Only the\n default, ``level=None``, is allowed.\"\"\"\n if level is not None:\n raise NotImplementedError(\"per-level aggregation\")\n if skipna is None or skipna:\n self = self.dropna() # pylint: disable=self-cls-assignment\n\n # See the online, numerically stable formulae at\n # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm\n # and\n # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm\n def compute_moments(x):\n n = len(x)\n m = x.std(ddof=0)**2 * n\n s = x.sum()\n return pd.DataFrame(dict(m=[m], s=[s], n=[n]))\n\n def combine_moments(data):\n m = s = n = 0.0\n for datum in data.itertuples():\n if datum.n == 0:\n continue\n elif n == 0:\n m, s, n = datum.m, datum.s, datum.n\n else:\n delta = s / n - datum.s / datum.n\n m += datum.m + delta**2 * n * datum.n / (n + datum.n)\n s += datum.s\n n += datum.n\n if n <= ddof:\n return float('nan')\n else:\n return m / (n - ddof)\n\n moments = expressions.ComputedExpression(\n 'compute_moments',\n compute_moments, [self._expr],\n requires_partition_by=partitionings.Arbitrary())\n with expressions.allow_non_parallel_operations(True):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'combine_moments',\n combine_moments, [moments],\n requires_partition_by=partitionings.Singleton()))\n\n @frame_base.args_to_kwargs(pd.Series)\n @frame_base.populate_defaults(pd.Series)\n def corr(self, other, method, min_periods):\n if method == 'pearson': # Note that this is the default.\n x, y = self.dropna().align(other.dropna(), 'inner')\n return x._corr_aligned(y, min_periods)\n\n else:\n reason = (\n f\"Encountered corr(method={method!r}) which cannot be \"\n \"parallelized. 
Only corr(method='pearson') is currently \"\n \"parallelizable.\")\n # The rank-based correlations are not obviously parallelizable, though\n # perhaps an approximation could be done with a knowledge of quantiles\n # and custom partitioning.\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'corr',\n lambda df,\n other: df.corr(other, method=method, min_periods=min_periods),\n [self._expr, other._expr],\n requires_partition_by=partitionings.Singleton(reason=reason)))\n\n def _corr_aligned(self, other, min_periods):\n std_x = self.std()\n std_y = other.std()\n cov = self._cov_aligned(other, min_periods)\n return cov.apply(\n lambda cov, std_x, std_y: cov / (std_x * std_y), args=[std_x, std_y])\n\n @frame_base.args_to_kwargs(pd.Series)\n @frame_base.populate_defaults(pd.Series)\n def cov(self, other, min_periods, ddof):\n x, y = self.dropna().align(other.dropna(), 'inner')\n return x._cov_aligned(y, min_periods, ddof)\n\n def _cov_aligned(self, other, min_periods, ddof=1):\n # Use the formulae from\n # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Covariance\n def compute_co_moments(x, y):\n n = len(x)\n if n <= 1:\n c = 0\n else:\n c = x.cov(y) * (n - 1)\n sx = x.sum()\n sy = y.sum()\n return pd.DataFrame(dict(c=[c], sx=[sx], sy=[sy], n=[n]))\n\n def combine_co_moments(data):\n c = sx = sy = n = 0.0\n for datum in data.itertuples():\n if datum.n == 0:\n continue\n elif n == 0:\n c, sx, sy, n = datum.c, datum.sx, datum.sy, datum.n\n else:\n c += (\n datum.c + (sx / n - datum.sx / datum.n) *\n (sy / n - datum.sy / datum.n) * n * datum.n / (n + datum.n))\n sx += datum.sx\n sy += datum.sy\n n += datum.n\n if n < max(2, ddof, min_periods or 0):\n return float('nan')\n else:\n return c / (n - ddof)\n\n moments = expressions.ComputedExpression(\n 'compute_co_moments',\n compute_co_moments, [self._expr, other._expr],\n requires_partition_by=partitionings.Index())\n\n with expressions.allow_non_parallel_operations(True):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'combine_co_moments',\n combine_co_moments, [moments],\n requires_partition_by=partitionings.Singleton()))\n\n @frame_base.args_to_kwargs(pd.Series)\n @frame_base.populate_defaults(pd.Series)\n @frame_base.maybe_inplace\n def dropna(self, **kwargs):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'dropna',\n lambda df: df.dropna(**kwargs), [self._expr],\n preserves_partition_by=partitionings.Arbitrary(),\n requires_partition_by=partitionings.Arbitrary()))\n\n isnull = isna = frame_base._elementwise_method('isna', base=pd.Series)\n notnull = notna = frame_base._elementwise_method('notna', base=pd.Series)\n\n items = frame_base.wont_implement_method(\n pd.Series, 'items', reason=\"non-deferred-result\")\n iteritems = frame_base.wont_implement_method(\n pd.Series, 'iteritems', reason=\"non-deferred-result\")\n tolist = frame_base.wont_implement_method(\n pd.Series, 'tolist', reason=\"non-deferred-result\")\n to_numpy = frame_base.wont_implement_method(\n pd.Series, 'to_numpy', reason=\"non-deferred-result\")\n to_string = frame_base.wont_implement_method(\n pd.Series, 'to_string', reason=\"non-deferred-result\")\n\n @frame_base.args_to_kwargs(pd.Series)\n @frame_base.populate_defaults(pd.Series)\n def aggregate(self, func, axis, *args, **kwargs):\n if kwargs.get('skipna', False):\n # Eagerly generate a proxy to make sure skipna is a valid argument\n # for this aggregation method\n _ = self._expr.proxy().aggregate(func, axis, *args, 
**kwargs)\n kwargs.pop('skipna')\n return self.dropna().aggregate(func, axis, *args, **kwargs)\n\n if isinstance(func, list) and len(func) > 1:\n # level arg is ignored for multiple aggregations\n _ = kwargs.pop('level', None)\n\n # Aggregate with each method separately, then stick them all together.\n rows = [self.agg([f], *args, **kwargs) for f in func]\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'join_aggregate',\n lambda *rows: pd.concat(rows), [row._expr for row in rows]))\n else:\n # We're only handling a single column. It could be 'func' or ['func'],\n # which produce different results. 'func' produces a scalar, ['func']\n # produces a single element Series.\n base_func = func[0] if isinstance(func, list) else func\n\n if (_is_numeric(base_func) and\n not pd.core.dtypes.common.is_numeric_dtype(self.dtype)):\n warnings.warn(\n f\"Performing a numeric aggregation, {base_func!r}, on \"\n f\"Series {self._expr.proxy().name!r} with non-numeric type \"\n f\"{self.dtype!r}. This can result in runtime errors or surprising \"\n \"results.\")\n\n if 'level' in kwargs:\n # Defer to groupby.agg for level= mode\n return self.groupby(\n level=kwargs.pop('level'), axis=axis).agg(func, *args, **kwargs)\n\n singleton_reason = None\n if 'min_count' in kwargs:\n # Eagerly generate a proxy to make sure min_count is a valid argument\n # for this aggregation method\n _ = self._expr.proxy().agg(func, axis, *args, **kwargs)\n\n singleton_reason = (\n \"Aggregation with min_count= requires collecting all data on a \"\n \"single node.\")\n\n # We have specialized distributed implementations for std and var\n if base_func in ('std', 'var'):\n result = getattr(self, base_func)(*args, **kwargs)\n if isinstance(func, list):\n with expressions.allow_non_parallel_operations(True):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'wrap_aggregate',\n lambda x: pd.Series(x, index=[base_func]), [result._expr],\n requires_partition_by=partitionings.Singleton(),\n preserves_partition_by=partitionings.Singleton()))\n else:\n return result\n\n agg_kwargs = kwargs.copy()\n if ((_is_associative(base_func) or _is_liftable_with_sum(base_func)) and\n singleton_reason is None):\n intermediate = expressions.ComputedExpression(\n 'pre_aggregate',\n # Coerce to a Series, if the result is scalar we still want a Series\n # so we can combine and do the final aggregation next.\n lambda s: pd.Series(s.agg(func, *args, **kwargs)),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton())\n allow_nonparallel_final = True\n if _is_associative(base_func):\n agg_func = func\n else:\n agg_func = ['sum'] if isinstance(func, list) else 'sum'\n else:\n intermediate = self._expr\n allow_nonparallel_final = None # i.e. 
don't change the value
        agg_func = func
        singleton_reason = (
            f"Aggregation function {func!r} cannot currently be "
            "parallelized, it requires collecting all data for "
            "this Series on a single node.")
      with expressions.allow_non_parallel_operations(allow_nonparallel_final):
        return frame_base.DeferredFrame.wrap(
            expressions.ComputedExpression(
                'aggregate',
                lambda s: s.agg(agg_func, *args, **agg_kwargs), [intermediate],
                preserves_partition_by=partitionings.Singleton(),
                requires_partition_by=partitionings.Singleton(
                    reason=singleton_reason)))

  agg = aggregate

  @property
  def axes(self):
    return [self.index]

  clip = frame_base._elementwise_method('clip', base=pd.Series)

  all = frame_base._agg_method('all')
  any = frame_base._agg_method('any')
  # TODO(BEAM-12074): Document that Series.count(level=) will drop NaN's
  count = frame_base._agg_method('count')
  min = frame_base._agg_method('min')
  max = frame_base._agg_method('max')
  prod = product = frame_base._agg_method('prod')
  sum = frame_base._agg_method('sum')
  mean = frame_base._agg_method('mean')
  median = frame_base._agg_method('median')

  argmax = frame_base.wont_implement_method(
      pd.Series, 'argmax', reason='order-sensitive')
  argmin = frame_base.wont_implement_method(
      pd.Series, 'argmin', reason='order-sensitive')
  cummax = frame_base.wont_implement_method(
      pd.Series, 'cummax', reason='order-sensitive')
  cummin = frame_base.wont_implement_method(
      pd.Series, 'cummin', reason='order-sensitive')
  cumprod = frame_base.wont_implement_method(
      pd.Series, 'cumprod', reason='order-sensitive')
  cumsum = frame_base.wont_implement_method(
      pd.Series, 'cumsum', reason='order-sensitive')
  diff = frame_base.wont_implement_method(
      pd.Series, 'diff', reason='order-sensitive')
  first = frame_base.wont_implement_method(
      pd.Series, 'first', reason='order-sensitive')
  head = frame_base.wont_implement_method(
      pd.Series, 'head', reason='order-sensitive')
  interpolate = frame_base.wont_implement_method(
      pd.Series, 'interpolate', reason='order-sensitive')
  last = frame_base.wont_implement_method(
      pd.Series, 'last', reason='order-sensitive')
  searchsorted = frame_base.wont_implement_method(
      pd.Series, 'searchsorted', reason='order-sensitive')
  shift = frame_base.wont_implement_method(
      pd.Series, 'shift', reason='order-sensitive')
  tail = frame_base.wont_implement_method(
      pd.Series, 'tail', reason='order-sensitive')

  filter = frame_base._elementwise_method('filter', base=pd.Series)

  memory_usage = frame_base.wont_implement_method(
      pd.Series, 'memory_usage', reason="non-deferred-result")

  # In Series __contains__ checks the index
  __contains__ = frame_base.wont_implement_method(
      pd.Series, '__contains__', reason="non-deferred-result")
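  # The two-stage strategy used by ``aggregate`` above (a parallel
  # 'pre_aggregate' per partition, then a final combine on a single node)
  # can be illustrated with plain pandas. This is an editorial sketch, not
  # part of the module: the helper name `two_stage_agg` and the hand-built
  # partitions are hypothetical, and it assumes an associative aggregation
  # such as 'sum' or 'max' so that re-aggregating partial results is valid.
  #
  #   import pandas as pd
  #
  #   def two_stage_agg(partitions, func):
  #     # Stage 1 (parallelizable): aggregate each partition independently,
  #     # coercing each scalar result to a Series so results concatenate.
  #     partials = [pd.Series(part.agg(func)) for part in partitions]
  #     # Stage 2 (single node): combine the small per-partition results.
  #     return pd.concat(partials).agg(func)
  #
  #   s = pd.Series([3, 1, 4, 1, 5, 9, 2, 6])
  #   parts = [s.iloc[:4], s.iloc[4:]]
  #   assert two_stage_agg(parts, 'sum') == s.sum()
  #   assert two_stage_agg(parts, 'max') == s.max()
  #
  # A non-associative function like 'mean' needs the liftable-with-sum
  # treatment instead: aggregate partial sums and counts, then divide.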
  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  def nlargest(self, keep, **kwargs):
    # TODO(robertwb): Document 'any' option.
    # TODO(robertwb): Consider (conditionally) defaulting to 'any' if no
    # explicit keep parameter is requested.
    if keep == 'any':
      keep = 'first'
    elif keep != 'all':
      raise frame_base.WontImplementError(
          f"nlargest(keep={keep!r}) is not supported because it is "
          "order sensitive. Only keep=\"all\" is supported.",
          reason="order-sensitive")
    kwargs['keep'] = keep
    per_partition = expressions.ComputedExpression(
        'nlargest-per-partition',
        lambda df: df.nlargest(**kwargs), [self._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Arbitrary())
    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'nlargest',
              lambda df: df.nlargest(**kwargs), [per_partition],
              preserves_partition_by=partitionings.Arbitrary(),
              requires_partition_by=partitionings.Singleton()))

  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  def nsmallest(self, keep, **kwargs):
    if keep == 'any':
      keep = 'first'
    elif keep != 'all':
      raise frame_base.WontImplementError(
          f"nsmallest(keep={keep!r}) is not supported because it is "
          "order sensitive. Only keep=\"all\" is supported.",
          reason="order-sensitive")
    kwargs['keep'] = keep
    per_partition = expressions.ComputedExpression(
        'nsmallest-per-partition',
        lambda df: df.nsmallest(**kwargs), [self._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Arbitrary())
    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'nsmallest',
              lambda df: df.nsmallest(**kwargs), [per_partition],
              preserves_partition_by=partitionings.Arbitrary(),
              requires_partition_by=partitionings.Singleton()))

  @property
  def is_unique(self):
    def set_index(s):
      s = s[:]
      s.index = s
      return s

    self_index = expressions.ComputedExpression(
        'set_index',
        set_index, [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Singleton())

    is_unique_distributed = expressions.ComputedExpression(
        'is_unique_distributed',
        lambda s: pd.Series(s.is_unique), [self_index],
        requires_partition_by=partitionings.Index(),
        preserves_partition_by=partitionings.Singleton())

    with expressions.allow_non_parallel_operations():
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'combine',
              lambda s: s.all(), [is_unique_distributed],
              requires_partition_by=partitionings.Singleton(),
              preserves_partition_by=partitionings.Singleton()))

  plot = frame_base.wont_implement_method(
      pd.Series, 'plot', reason="plotting-tools")
  pop = frame_base.wont_implement_method(
      pd.Series, 'pop', reason="non-deferred-result")

  rename_axis = frame_base._elementwise_method('rename_axis', base=pd.Series)
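  # `is_unique` above re-keys the data by its own values so that duplicates
  # are guaranteed to land in the same partition, checks uniqueness locally,
  # then ANDs the per-partition answers. A minimal pandas-only sketch of the
  # same idea (the helper `partition_by_value_hash` is hypothetical,
  # standing in for Beam's Index partitioning):
  #
  #   import pandas as pd
  #
  #   def partition_by_value_hash(s, n):
  #     # Equal values hash equally, so any duplicate pair is co-located.
  #     return [group for _, group in s.groupby(s.map(hash) % n)]
  #
  #   s = pd.Series([1, 2, 3, 2])
  #   parts = partition_by_value_hash(s, 3)
  #   # Uniqueness is global iff it holds within every co-located bucket.
  #   assert all(p.is_unique for p in parts) == s.is_unique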
  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  @frame_base.maybe_inplace
  def replace(self, to_replace, value, limit, method, **kwargs):
    if method is not None and not isinstance(to_replace,
                                             dict) and value is None:
      # pandas only relies on method if to_replace is not a dictionary, and
      # value is None
      raise frame_base.WontImplementError(
          f"replace(method={method!r}) is not supported because it is "
          "order sensitive. Only replace(method=None) is supported.",
          reason="order-sensitive")

    if limit is None:
      requires_partition_by = partitionings.Arbitrary()
    else:
      requires_partition_by = partitionings.Singleton(
          reason=(
              f"replace(limit={limit!r}) cannot currently be parallelized, "
              "it requires collecting all data on a single node."))
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'replace',
            lambda df: df.replace(
                to_replace=to_replace,
                value=value,
                limit=limit,
                method=method,
                **kwargs), [self._expr],
            preserves_partition_by=partitionings.Arbitrary(),
            requires_partition_by=requires_partition_by))

  round = frame_base._elementwise_method('round', base=pd.Series)

  take = frame_base.wont_implement_method(
      pd.Series, 'take', reason='deprecated')

  to_dict = frame_base.wont_implement_method(
      pd.Series, 'to_dict', reason="non-deferred-result")

  to_frame = frame_base._elementwise_method('to_frame', base=pd.Series)

  def unique(self, as_series=False):
    if not as_series:
      raise frame_base.WontImplementError(
          "unique() is not supported by default because it produces a "
          "non-deferred result, a numpy array. You may call it with "
          "unique(as_series=True) to get the result as a DeferredSeries",
          reason="non-deferred-result")
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'unique',
            lambda df: pd.Series(df.unique()), [self._expr],
            preserves_partition_by=partitionings.Singleton(),
            requires_partition_by=partitionings.Singleton(
                reason="unique() cannot currently be parallelized.")))

  def update(self, other):
    self._expr = expressions.ComputedExpression(
        'update',
        lambda df, other: df.update(other) or df, [self._expr, other._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Index())

  unstack = frame_base.wont_implement_method(
      pd.Series, 'unstack', reason='non-deferred-columns')

  values = property(
      frame_base.wont_implement_method(
          pd.Series, 'values', reason="non-deferred-result"))

  view = frame_base.wont_implement_method(
      pd.Series,
      'view',
      explanation=(
          "because it relies on memory-sharing semantics that are "
          "not compatible with the Beam model"))

  @property
  def str(self):
    return _DeferredStringMethods(self._expr)

  apply = frame_base._elementwise_method('apply', base=pd.Series)
  map = frame_base._elementwise_method('map', base=pd.Series)
  # TODO(BEAM-11636): Implement transform using type inference to determine
  # the proxy
  #transform = frame_base._elementwise_method('transform', base=pd.Series)


@populate_not_implemented(pd.DataFrame)
@frame_base.DeferredFrame._register_for(pd.DataFrame)
class DeferredDataFrame(DeferredDataFrameOrSeries):
  @property
  def T(self):
    return self.transpose()

  @property
  def columns(self):
    return self._expr.proxy().columns

  @columns.setter
  def columns(self, columns):
    def set_columns(df):
      df = df.copy()
      df.columns = columns
      return df

    # Assign the new expression in place; a setter's return value is
    # discarded, so wrapping and returning it here would silently do
    # nothing (compare the Series name setter above).
    self._expr = expressions.ComputedExpression(
        'set_columns',
        set_columns, [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Arbitrary())

  def keys(self):
    return self.columns

  def __getattr__(self, name):
    # Column attribute access.
    if name in self._expr.proxy().columns:
      return self[name]
    else:
      return object.__getattribute__(self, name)

  def __getitem__(self, key):
    # TODO: Replicate 
pd.DataFrame.__getitem__ logic\n if isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool:\n return self.loc[key]\n\n elif isinstance(key, frame_base.DeferredBase):\n # Fail early if key is a DeferredBase as it interacts surprisingly with\n # key in self._expr.proxy().columns\n raise NotImplementedError(\n \"Indexing with a non-bool deferred frame is not yet supported. \"\n \"Consider using df.loc[...]\")\n\n elif isinstance(key, slice):\n if _is_null_slice(key):\n return self\n elif _is_integer_slice(key):\n # This depends on the contents of the index.\n raise frame_base.WontImplementError(\n \"Integer slices are not supported as they are ambiguous. Please \"\n \"use iloc or loc with integer slices.\")\n else:\n return self.loc[key]\n\n elif (\n (isinstance(key, list) and all(key_column in self._expr.proxy().columns\n for key_column in key)) or\n key in self._expr.proxy().columns):\n return self._elementwise(lambda df: df[key], 'get_column')\n\n else:\n raise NotImplementedError(key)\n\n def __contains__(self, key):\n # Checks if proxy has the given column\n return self._expr.proxy().__contains__(key)\n\n def __setitem__(self, key, value):\n if isinstance(\n key, str) or (isinstance(key, list) and\n all(isinstance(c, str)\n for c in key)) or (isinstance(key, DeferredSeries) and\n key._expr.proxy().dtype == bool):\n # yapf: disable\n return self._elementwise(\n lambda df, key, value: df.__setitem__(key, value),\n 'set_column',\n (key, value),\n inplace=True)\n else:\n raise NotImplementedError(key)\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def align(self, other, join, axis, copy, level, method, **kwargs):\n if not copy:\n raise frame_base.WontImplementError(\n \"align(copy=False) is not supported because it might be an inplace \"\n \"operation depending on the data. Please prefer the default \"\n \"align(copy=True).\")\n if method is not None:\n raise frame_base.WontImplementError(\n f\"align(method={method!r}) is not supported because it is \"\n \"order sensitive. Only align(method=None) is supported.\",\n reason=\"order-sensitive\")\n if kwargs:\n raise NotImplementedError('align(%s)' % ', '.join(kwargs.keys()))\n\n if level is not None:\n # Could probably get by partitioning on the used levels.\n requires_partition_by = partitionings.Singleton(reason=(\n f\"align(level={level}) is not currently parallelizable. 
Only \"\n \"align(level=None) can be parallelized.\"))\n elif axis in ('columns', 1):\n requires_partition_by = partitionings.Arbitrary()\n else:\n requires_partition_by = partitionings.Index()\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'align',\n lambda df, other: df.align(other, join=join, axis=axis),\n [self._expr, other._expr],\n requires_partition_by=requires_partition_by,\n preserves_partition_by=partitionings.Arbitrary()))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def append(self, other, ignore_index, verify_integrity, sort, **kwargs):\n if not isinstance(other, DeferredDataFrame):\n raise frame_base.WontImplementError(\n \"append() only accepts DeferredDataFrame instances, received \" +\n str(type(other)))\n if ignore_index:\n raise frame_base.WontImplementError(\n \"append(ignore_index=True) is order sensitive because it requires \"\n \"generating a new index based on the order of the data.\",\n reason=\"order-sensitive\")\n\n if verify_integrity:\n # We can verify the index is non-unique within index partitioned data.\n requires = partitionings.Index()\n else:\n requires = partitionings.Arbitrary()\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'append',\n lambda s, other: s.append(other, sort=sort,\n verify_integrity=verify_integrity,\n **kwargs),\n [self._expr, other._expr],\n requires_partition_by=requires,\n preserves_partition_by=partitionings.Arbitrary()\n )\n )\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n @frame_base.maybe_inplace\n def set_index(self, keys, **kwargs):\n if isinstance(keys, str):\n keys = [keys]\n\n if any(isinstance(k, (_DeferredIndex, frame_base.DeferredFrame))\n for k in keys):\n raise NotImplementedError(\"set_index with Index or Series instances is \"\n \"not yet supported (BEAM-11711)\")\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'set_index',\n lambda df: df.set_index(keys, **kwargs),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Singleton()))\n\n @property\n def loc(self):\n return _DeferredLoc(self)\n\n @property\n def iloc(self):\n return _DeferredILoc(self)\n\n @property\n def axes(self):\n return (self.index, self.columns)\n\n @property\n def dtypes(self):\n return self._expr.proxy().dtypes\n\n def assign(self, **kwargs):\n for name, value in kwargs.items():\n if not callable(value) and not isinstance(value, DeferredSeries):\n raise frame_base.WontImplementError(\n f\"Unsupported value for new column '{name}': '{value}'. Only \"\n \"callables and DeferredSeries instances are supported. 
Other types "
            "make this operation sensitive to the order of the data.",
            reason="order-sensitive")
    return self._elementwise(
        lambda df, *args, **kwargs: df.assign(*args, **kwargs),
        'assign',
        other_kwargs=kwargs)

  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def explode(self, column, ignore_index):
    # ignoring the index will not preserve it
    preserves = (partitionings.Singleton() if ignore_index
                 else partitionings.Index())
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'explode',
            lambda df: df.explode(column, ignore_index),
            [self._expr],
            preserves_partition_by=preserves,
            requires_partition_by=partitionings.Arbitrary()))

  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def insert(self, value, **kwargs):
    if isinstance(value, list):
      # Note: WontImplementError (not the nonexistent WontImplementMethod)
      # is the exception type used throughout this module.
      raise frame_base.WontImplementError(
          "insert(value=list) is not supported because it joins the input "
          "list to the deferred DataFrame based on the order of the data.",
          reason="order-sensitive")

    if isinstance(value, pd.core.generic.NDFrame):
      value = frame_base.DeferredFrame.wrap(
          expressions.ConstantExpression(value))

    if isinstance(value, frame_base.DeferredFrame):
      def func_zip(df, value):
        df = df.copy()
        df.insert(value=value, **kwargs)
        return df

      inserted = frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'insert',
              func_zip,
              [self._expr, value._expr],
              requires_partition_by=partitionings.Index(),
              preserves_partition_by=partitionings.Arbitrary()))
    else:
      def func_elementwise(df):
        df = df.copy()
        df.insert(value=value, **kwargs)
        return df

      inserted = frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'insert',
              func_elementwise,
              [self._expr],
              requires_partition_by=partitionings.Arbitrary(),
              preserves_partition_by=partitionings.Arbitrary()))

    self._expr = inserted._expr

  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def aggregate(self, func, axis=0, *args, **kwargs):
    if 'numeric_only' in kwargs and kwargs['numeric_only']:
      # Eagerly generate a proxy to make sure numeric_only is a valid
      # argument for this aggregation method
      _ = self._expr.proxy().agg(func, axis, *args, **kwargs)

      projected = self[[name for name, dtype in self.dtypes.items()
                        if pd.core.dtypes.common.is_numeric_dtype(dtype)]]
      kwargs.pop('numeric_only')
      return projected.agg(func, axis, *args, **kwargs)

    if 'bool_only' in kwargs and kwargs['bool_only']:
      # Eagerly generate a proxy to make sure bool_only is a valid argument
      # for this aggregation method
      _ = self._expr.proxy().agg(func, axis, *args, **kwargs)

      projected = self[[name for name, dtype in self.dtypes.items()
                        if pd.core.dtypes.common.is_bool_dtype(dtype)]]
      kwargs.pop('bool_only')
      return projected.agg(func, axis, *args, **kwargs)

    nonnumeric_columns = [
        name for (name, dtype) in self.dtypes.items()
        if not pd.core.dtypes.common.is_numeric_dtype(dtype)]
    if _is_numeric(func) and len(nonnumeric_columns):
      if 'numeric_only' in kwargs and kwargs['numeric_only'] is False:
        # User has opted in to execution with non-numeric columns, they
        # will accept runtime errors
        pass
      else:
        raise frame_base.WontImplementError(
            f"Numeric aggregation ({func!r}) on a DataFrame containing "
            f"non-numeric columns ({*nonnumeric_columns,!r}) is not "
            "supported, unless `numeric_only=` is specified.\n"
            "Use 
`numeric_only=True` to only aggregate over numeric columns.\\n\"\n \"Use `numeric_only=False` to aggregate over all columns. Note this \"\n \"is not recommended, as it could result in execution time errors.\")\n\n if axis is None:\n # Aggregate across all elements by first aggregating across columns,\n # then across rows.\n return self.agg(func, *args, **dict(kwargs, axis=1)).agg(\n func, *args, **dict(kwargs, axis=0))\n elif axis in (1, 'columns'):\n # This is an easy elementwise aggregation.\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'aggregate',\n lambda df: df.agg(func, axis=1, *args, **kwargs),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary()))\n elif len(self._expr.proxy().columns) == 0:\n # For this corner case, just colocate everything.\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'aggregate',\n lambda df: df.agg(func, *args, **kwargs),\n [self._expr],\n requires_partition_by=partitionings.Singleton()))\n else:\n # In the general case, compute the aggregation of each column separately,\n # then recombine.\n if not isinstance(func, dict):\n col_names = list(self._expr.proxy().columns)\n func = {col: func for col in col_names}\n else:\n col_names = list(func.keys())\n aggregated_cols = []\n has_lists = any(isinstance(f, list) for f in func.values())\n for col in col_names:\n funcs = func[col]\n if has_lists and not isinstance(funcs, list):\n # If any of the columns do multiple aggregations, they all must use\n # \"list\" style output\n funcs = [funcs]\n aggregated_cols.append(self[col].agg(funcs, *args, **kwargs))\n # The final shape is different depending on whether any of the columns\n # were aggregated by a list of aggregators.\n with expressions.allow_non_parallel_operations():\n if (any(isinstance(funcs, list) for funcs in func.values()) or\n 'level' in kwargs):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'join_aggregate',\n lambda *cols: pd.DataFrame(\n {col: value for col, value in zip(col_names, cols)}),\n [col._expr for col in aggregated_cols],\n requires_partition_by=partitionings.Singleton()))\n else:\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'join_aggregate',\n lambda *cols: pd.Series(\n {col: value for col, value in zip(col_names, cols)}),\n [col._expr for col in aggregated_cols],\n requires_partition_by=partitionings.Singleton(),\n proxy=self._expr.proxy().agg(func, *args, **kwargs)))\n\n agg = aggregate\n\n applymap = frame_base._elementwise_method('applymap', base=pd.DataFrame)\n add_prefix = frame_base._elementwise_method('add_prefix', base=pd.DataFrame)\n add_suffix = frame_base._elementwise_method('add_suffix', base=pd.DataFrame)\n\n memory_usage = frame_base.wont_implement_method(\n pd.DataFrame, 'memory_usage', reason=\"non-deferred-result\")\n info = frame_base.wont_implement_method(\n pd.DataFrame, 'info', reason=\"non-deferred-result\")\n\n clip = frame_base._elementwise_method(\n 'clip', restrictions={'axis': lambda axis: axis in (0, 'index')},\n base=pd.DataFrame)\n\n @frame_base.with_docs_from(pd.DataFrame)\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def corr(self, method, min_periods):\n \"\"\"Only ``method=\"pearson\"`` can be parallelized. 
Other methods require\n collecting all data on a single worker (see\n https://s.apache.org/dataframe-non-parallelizable-operations for details).\n \"\"\"\n if method == 'pearson':\n proxy = self._expr.proxy().corr()\n columns = list(proxy.columns)\n args = []\n arg_indices = []\n for col1, col2 in itertools.combinations(columns, 2):\n arg_indices.append((col1, col2))\n args.append(self[col1].corr(self[col2], method=method,\n min_periods=min_periods))\n def fill_matrix(*args):\n data = collections.defaultdict(dict)\n for col in columns:\n data[col][col] = 1.0\n for ix, (col1, col2) in enumerate(arg_indices):\n data[col1][col2] = data[col2][col1] = args[ix]\n return pd.DataFrame(data, columns=columns, index=columns)\n with expressions.allow_non_parallel_operations(True):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'fill_matrix',\n fill_matrix,\n [arg._expr for arg in args],\n requires_partition_by=partitionings.Singleton(),\n proxy=proxy))\n\n else:\n reason = (f\"Encountered corr(method={method!r}) which cannot be \"\n \"parallelized. Only corr(method='pearson') is currently \"\n \"parallelizable.\")\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'corr',\n lambda df: df.corr(method=method, min_periods=min_periods),\n [self._expr],\n requires_partition_by=partitionings.Singleton(reason=reason)))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def cov(self, min_periods, ddof):\n proxy = self._expr.proxy().corr()\n columns = list(proxy.columns)\n args = []\n arg_indices = []\n for col in columns:\n arg_indices.append((col, col))\n std = self[col].std(ddof)\n args.append(std.apply(lambda x: x*x, 'square'))\n for ix, col1 in enumerate(columns):\n for col2 in columns[ix+1:]:\n arg_indices.append((col1, col2))\n # Note that this set may be different for each pair.\n no_na = self.loc[self[col1].notna() & self[col2].notna()]\n args.append(no_na[col1]._cov_aligned(no_na[col2], min_periods, ddof))\n def fill_matrix(*args):\n data = collections.defaultdict(dict)\n for ix, (col1, col2) in enumerate(arg_indices):\n data[col1][col2] = data[col2][col1] = args[ix]\n return pd.DataFrame(data, columns=columns, index=columns)\n with expressions.allow_non_parallel_operations(True):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'fill_matrix',\n fill_matrix,\n [arg._expr for arg in args],\n requires_partition_by=partitionings.Singleton(),\n proxy=proxy))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def corrwith(self, other, axis, drop, method):\n if axis not in (0, 'index'):\n raise NotImplementedError('corrwith(axis=%r)' % axis)\n if not isinstance(other, frame_base.DeferredFrame):\n other = frame_base.DeferredFrame.wrap(\n expressions.ConstantExpression(other))\n\n if isinstance(other, DeferredSeries):\n proxy = self._expr.proxy().corrwith(other._expr.proxy(), method=method)\n self, other = self.align(other, axis=0, join='inner')\n col_names = proxy.index\n other_cols = [other] * len(col_names)\n elif isinstance(other, DeferredDataFrame):\n proxy = self._expr.proxy().corrwith(\n other._expr.proxy(), method=method, drop=drop)\n self, other = self.align(other, axis=0, join='inner')\n col_names = list(\n set(self.columns)\n .intersection(other.columns)\n .intersection(proxy.index))\n other_cols = [other[col_name] for col_name in col_names]\n else:\n # Raise the right error.\n self._expr.proxy().corrwith(other._expr.proxy())\n # Just 
in case something else becomes valid.
      raise NotImplementedError('corrwith(%s)' % type(other._expr.proxy()))

    # Generate expressions to compute the actual correlations.
    corrs = [
        self[col_name].corr(other_col, method)
        for col_name, other_col in zip(col_names, other_cols)]

    # Combine the results
    def fill_dataframe(*args):
      result = proxy.copy(deep=True)
      for col, value in zip(proxy.index, args):
        result[col] = value
      return result

    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'fill_dataframe',
              fill_dataframe,
              [corr._expr for corr in corrs],
              requires_partition_by=partitionings.Singleton(),
              proxy=proxy))

  cummax = frame_base.wont_implement_method(pd.DataFrame, 'cummax',
                                            reason='order-sensitive')
  cummin = frame_base.wont_implement_method(pd.DataFrame, 'cummin',
                                            reason='order-sensitive')
  cumprod = frame_base.wont_implement_method(pd.DataFrame, 'cumprod',
                                             reason='order-sensitive')
  cumsum = frame_base.wont_implement_method(pd.DataFrame, 'cumsum',
                                            reason='order-sensitive')
  # TODO(BEAM-12071): Consider adding an order-insensitive implementation
  # for diff that relies on the index
  diff = frame_base.wont_implement_method(pd.DataFrame, 'diff',
                                          reason='order-sensitive')
  first = frame_base.wont_implement_method(pd.DataFrame, 'first',
                                           reason='order-sensitive')
  head = frame_base.wont_implement_method(pd.DataFrame, 'head',
                                          reason='order-sensitive')
  interpolate = frame_base.wont_implement_method(pd.DataFrame, 'interpolate',
                                                 reason='order-sensitive')
  last = frame_base.wont_implement_method(pd.DataFrame, 'last',
                                          reason='order-sensitive')
  tail = frame_base.wont_implement_method(pd.DataFrame, 'tail',
                                          reason='order-sensitive')

  def dot(self, other):
    # We want to broadcast the right hand side to all partitions of the
    # left. This is OK, as its index must be the same size as the columns
    # set of self, so cannot be too large.
    class AsScalar(object):
      def __init__(self, value):
        self.value = value

    if isinstance(other, frame_base.DeferredFrame):
      proxy = other._expr.proxy()
      with expressions.allow_non_parallel_operations():
        side = expressions.ComputedExpression(
            'as_scalar',
            lambda df: AsScalar(df),
            [other._expr],
            requires_partition_by=partitionings.Singleton())
    else:
      proxy = pd.DataFrame(columns=range(len(other[0])))
      side = expressions.ConstantExpression(AsScalar(other))

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'dot',
            lambda left, right: left @ right.value,
            [self._expr, side],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Arbitrary(),
            proxy=proxy))

  __matmul__ = dot
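  # `dot` above broadcasts the (small) right-hand operand to every
  # partition of the left instead of co-partitioning the two inputs. A
  # pandas-only sketch of why that is safe (the names `broadcast_dot` and
  # `parts` are hypothetical; in Beam the broadcast value travels as a
  # Singleton side input):
  #
  #   import pandas as pd
  #
  #   def broadcast_dot(partitions, right):
  #     # Each row's dot product needs only that row and all of `right`,
  #     # so sending `right` everywhere lets partitions proceed
  #     # independently; concatenating the partial results gives the
  #     # full result.
  #     return pd.concat([part @ right for part in partitions])
  #
  #   df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
  #   right = pd.Series([10, 1], index=['a', 'b'])
  #   parts = [df.iloc[:2], df.iloc[2:]]
  #   assert broadcast_dot(parts, right).equals(df @ right)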
  def mode(self, axis=0, *args, **kwargs):
    if axis == 1 or axis == 'columns':
      # Number of columns is max(number mode values for each row), so we
      # can't determine how many there will be before looking at the data.
      raise frame_base.WontImplementError(
          "mode(axis=columns) is not supported because it produces a "
          "variable number of columns depending on the data.",
          reason="non-deferred-columns")
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'mode',
            lambda df: df.mode(*args, **kwargs),
            [self._expr],
            #TODO(BEAM-12181): Can we add an approximate implementation?
            requires_partition_by=partitionings.Singleton(reason=(
                "mode(axis='index') cannot currently be parallelized. See "
                "BEAM-12181 tracking the possible addition of an "
                "approximate, parallelizable implementation of mode."
            )),
            preserves_partition_by=partitionings.Singleton()))

  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  @frame_base.maybe_inplace
  def dropna(self, axis, **kwargs):
    # TODO(robertwb): This is a common pattern. Generalize?
    if axis in (1, 'columns'):
      requires_partition_by = partitionings.Singleton(reason=(
          "dropna(axis=1) cannot currently be parallelized. It requires "
          "checking all values in each column for NaN values, to determine "
          "if that column should be dropped."
      ))
    else:
      requires_partition_by = partitionings.Arbitrary()
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'dropna',
            lambda df: df.dropna(axis=axis, **kwargs),
            [self._expr],
            preserves_partition_by=partitionings.Arbitrary(),
            requires_partition_by=requires_partition_by))

  def _eval_or_query(self, name, expr, inplace, **kwargs):
    for key in ('local_dict', 'global_dict', 'level', 'target', 'resolvers'):
      if key in kwargs:
        raise NotImplementedError(f"Setting '{key}' is not yet supported")

    # look for '@<py identifier>'
    if re.search(r'\@[^\d\W]\w*', expr, re.UNICODE):
      raise NotImplementedError("Accessing locals with @ is not yet "
                                "supported (BEAM-11202)")

    result_expr = expressions.ComputedExpression(
        name,
        lambda df: getattr(df, name)(expr, **kwargs),
        [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Arbitrary())

    if inplace:
      self._expr = result_expr
    else:
      return frame_base.DeferredFrame.wrap(result_expr)

  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def eval(self, expr, inplace, **kwargs):
    return self._eval_or_query('eval', expr, inplace, **kwargs)

  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def query(self, expr, inplace, **kwargs):
    return self._eval_or_query('query', expr, inplace, **kwargs)

  isnull = isna = frame_base._elementwise_method('isna', base=pd.DataFrame)
  notnull = notna = frame_base._elementwise_method('notna', base=pd.DataFrame)

  items = frame_base.wont_implement_method(pd.DataFrame, 'items',
                                           reason="non-deferred-result")
  itertuples = frame_base.wont_implement_method(pd.DataFrame, 'itertuples',
                                                reason="non-deferred-result")
  iterrows = frame_base.wont_implement_method(pd.DataFrame, 'iterrows',
                                              reason="non-deferred-result")
  iteritems = frame_base.wont_implement_method(pd.DataFrame, 'iteritems',
                                               reason="non-deferred-result")

  def _cols_as_temporary_index(self, cols, suffix=''):
    original_index_names = list(self._expr.proxy().index.names)
    new_index_names = [
        '__apache_beam_temp_%d_%s' % (ix, suffix)
        for (ix, _) in enumerate(original_index_names)]

    def reindex(df):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'reindex',
              lambda df: df.rename_axis(index=new_index_names, copy=False)
                           .reset_index().set_index(cols),
              [df._expr],
              preserves_partition_by=partitionings.Singleton(),
              requires_partition_by=partitionings.Arbitrary()))

    def revert(df):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'join_restoreindex',
              lambda df: df.reset_index().set_index(new_index_names)
                           .rename_axis(index=original_index_names,
                                        copy=False),
              [df._expr],
              preserves_partition_by=partitionings.Singleton(),
              requires_partition_by=partitionings.Arbitrary()))

    return reindex, revert
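  # `_cols_as_temporary_index` is a pure pandas round trip: stash the
  # current index levels under reserved names, promote the join columns to
  # the index (so Beam can partition on them), then undo it. A standalone
  # sketch of the same round trip (the column names are illustrative):
  #
  #   import pandas as pd
  #
  #   df = pd.DataFrame({'k': ['a', 'b'], 'v': [1, 2]})
  #   tmp_names = ['__apache_beam_temp_0_']
  #
  #   # Forward: original index -> temp column(s), 'k' becomes the index.
  #   reindexed = (df.rename_axis(index=tmp_names)
  #                  .reset_index().set_index('k'))
  #
  #   # Reverse: restore the original index and its name(s).
  #   restored = (reindexed.reset_index().set_index(tmp_names)
  #                        .rename_axis(index=[None]))
  #
  #   assert restored.equals(df)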
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def join(self, other, on, **kwargs):
    if on is not None:
      reindex, revert = self._cols_as_temporary_index(on)
      return revert(reindex(self).join(other, **kwargs))
    if isinstance(other, list):
      other_is_list = True
    else:
      other = [other]
      other_is_list = False
    placeholder = object()
    other_exprs = [
        df._expr for df in other if isinstance(df, frame_base.DeferredFrame)]
    const_others = [
        placeholder if isinstance(df, frame_base.DeferredFrame) else df
        for df in other]

    def fill_placeholders(values):
      values = iter(values)
      filled = [
          next(values) if df is placeholder else df for df in const_others]
      if other_is_list:
        return filled
      else:
        return filled[0]

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'join',
            lambda df, *deferred_others: df.join(
                fill_placeholders(deferred_others), **kwargs),
            [self._expr] + other_exprs,
            preserves_partition_by=partitionings.Arbitrary(),
            requires_partition_by=partitionings.Index()))

  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def merge(
      self,
      right,
      on,
      left_on,
      right_on,
      left_index,
      right_index,
      suffixes,
      **kwargs):
    self_proxy = self._expr.proxy()
    right_proxy = right._expr.proxy()
    # Validate with a pandas call.
    _ = self_proxy.merge(
        right_proxy,
        on=on,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        **kwargs)
    if kwargs.get('how', None) == 'cross':
      raise NotImplementedError(
          "cross join is not yet implemented (BEAM-9547)")
    if not any([on, left_on, right_on, left_index, right_index]):
      on = [col for col in self_proxy.columns if col in right_proxy.columns]
    if not left_on:
      left_on = on
    if left_on and not isinstance(left_on, list):
      left_on = [left_on]
    if not right_on:
      right_on = on
    if right_on and not isinstance(right_on, list):
      right_on = [right_on]

    if left_index:
      indexed_left = self
    else:
      indexed_left = self.set_index(left_on, drop=False)

    if right_index:
      indexed_right = right
    else:
      indexed_right = right.set_index(right_on, drop=False)

    if left_on and right_on:
      common_cols = set(left_on).intersection(right_on)
      if len(common_cols):
        # When merging on the same column name from both dfs, we need to
        # make sure only one df has the column. Otherwise we end up with
        # two duplicate columns, one with lsuffix and one with rsuffix.
        # It's safe to drop from either because the data has already been
        # duped to the index.
        indexed_right = indexed_right.drop(columns=common_cols)

    merged = frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'merge',
            lambda left, right: left.merge(right,
                                           left_index=True,
                                           right_index=True,
                                           suffixes=suffixes,
                                           **kwargs),
            [indexed_left._expr, indexed_right._expr],
            preserves_partition_by=partitionings.Arbitrary(),
            requires_partition_by=partitionings.Index()))

    if left_index or right_index:
      return merged
    else:
      return merged.reset_index(drop=True)
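  # The merge above reduces a key-based join to an index-based join: both
  # inputs are re-indexed by their join keys, so Index partitioning
  # co-locates equal keys and each partition can merge locally. A
  # pandas-only sketch of that reduction (the frames here are
  # illustrative, not part of the module):
  #
  #   import pandas as pd
  #
  #   left = pd.DataFrame({'k': ['a', 'b'], 'x': [1, 2]})
  #   right = pd.DataFrame({'k': ['b', 'a'], 'y': [3, 4]})
  #
  #   # Equivalent to left.merge(right, on='k'), up to row order. Note the
  #   # key column is kept on one side only, mirroring the common-column
  #   # drop above.
  #   merged = (left.set_index('k', drop=False)
  #                 .merge(right.set_index('k'),
  #                        left_index=True, right_index=True)
  #                 .reset_index(drop=True))
  #
  #   expected = left.merge(right, on='k')
  #   assert (merged.sort_values('k', ignore_index=True)
  #           .equals(expected.sort_values('k', ignore_index=True)))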
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def nlargest(self, keep, **kwargs):
    if keep == 'any':
      keep = 'first'
    elif keep != 'all':
      raise frame_base.WontImplementError(
          f"nlargest(keep={keep!r}) is not supported because it is "
          "order sensitive. Only keep=\"all\" is supported.",
          reason="order-sensitive")
    kwargs['keep'] = keep
    per_partition = expressions.ComputedExpression(
        'nlargest-per-partition',
        lambda df: df.nlargest(**kwargs),
        [self._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Arbitrary())
    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'nlargest',
              lambda df: df.nlargest(**kwargs),
              [per_partition],
              preserves_partition_by=partitionings.Singleton(),
              requires_partition_by=partitionings.Singleton()))

  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def nsmallest(self, keep, **kwargs):
    if keep == 'any':
      keep = 'first'
    elif keep != 'all':
      raise frame_base.WontImplementError(
          f"nsmallest(keep={keep!r}) is not supported because it is "
          "order sensitive. Only keep=\"all\" is supported.",
          reason="order-sensitive")
    kwargs['keep'] = keep
    per_partition = expressions.ComputedExpression(
        'nsmallest-per-partition',
        lambda df: df.nsmallest(**kwargs),
        [self._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Arbitrary())
    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'nsmallest',
              lambda df: df.nsmallest(**kwargs),
              [per_partition],
              preserves_partition_by=partitionings.Singleton(),
              requires_partition_by=partitionings.Singleton()))

  @frame_base.args_to_kwargs(pd.DataFrame)
  def nunique(self, **kwargs):
    if kwargs.get('axis', None) in (1, 'columns'):
      requires_partition_by = partitionings.Arbitrary()
      preserves_partition_by = partitionings.Index()
    else:
      # TODO(BEAM-9547): This could be implemented in a distributed fashion,
      # perhaps by deferring to a distributed drop_duplicates
      requires_partition_by = partitionings.Singleton(reason=(
          "nunique(axis='index') is not currently parallelizable."
      ))
      preserves_partition_by = partitionings.Singleton()
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'nunique',
            lambda df: df.nunique(**kwargs),
            [self._expr],
            preserves_partition_by=preserves_partition_by,
            requires_partition_by=requires_partition_by))

  plot = frame_base.wont_implement_method(pd.DataFrame, 'plot',
                                          reason="plotting-tools")

  def pop(self, item):
    result = self[item]

    self._expr = expressions.ComputedExpression(
        'popped',
        lambda df: df.drop(columns=[item]),
        [self._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Arbitrary())
    return result

  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def quantile(self, q, axis, **kwargs):
    if axis in (1, 'columns'):
      if isinstance(q, list):
        raise frame_base.WontImplementError(
            "quantile(axis=columns) with multiple q values is not supported "
            "because it transposes the input DataFrame. Note computing "
            "an individual quantile across columns (e.g. "
            f"df.quantile(q={q[0]!r}, axis={axis!r})) is supported.",
            reason="non-deferred-columns")
      else:
        requires = partitionings.Arbitrary()
    else:  # axis='index'
      # TODO(BEAM-12167): Provide an option for approximate distributed
      # quantiles
      requires = partitionings.Singleton(reason=(
          "Computing quantiles across index cannot currently be parallelized. 
\"\n \"See BEAM-12167 tracking the possible addition of an approximate, \"\n \"parallelizable implementation of quantile.\"\n ))\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'quantile',\n lambda df: df.quantile(q=q, axis=axis, **kwargs),\n [self._expr],\n requires_partition_by=requires,\n preserves_partition_by=partitionings.Singleton()))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.maybe_inplace\n def rename(self, **kwargs):\n rename_index = (\n 'index' in kwargs\n or kwargs.get('axis', None) in (0, 'index')\n or ('columns' not in kwargs and 'axis' not in kwargs))\n rename_columns = (\n 'columns' in kwargs\n or kwargs.get('axis', None) in (1, 'columns'))\n\n if rename_index:\n # Technically, it's still partitioned by index, but it's no longer\n # partitioned by the hash of the index.\n preserves_partition_by = partitionings.Singleton()\n else:\n preserves_partition_by = partitionings.Index()\n\n if kwargs.get('errors', None) == 'raise' and rename_index:\n # TODO: We could do this in parallel by creating a ConstantExpression\n # with a series created from the mapper dict. Then Index() partitioning\n # would co-locate the necessary index values and we could raise\n # individually within each partition. Execution time errors are\n # discouraged anyway so probably not worth the effort.\n requires_partition_by = partitionings.Singleton(reason=(\n \"rename(errors='raise', axis='index') requires collecting all \"\n \"data on a single node in order to detect missing index values.\"\n ))\n else:\n requires_partition_by = partitionings.Arbitrary()\n\n proxy = None\n if rename_index:\n # The proxy can't be computed by executing rename, it will error\n # renaming the index.\n if rename_columns:\n # Note if both are being renamed, index and columns must be specified\n # (not axis)\n proxy = self._expr.proxy().rename(**{k: v for (k, v) in kwargs.items()\n if not k == 'index'})\n else:\n # No change in columns, reuse proxy\n proxy = self._expr.proxy()\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'rename',\n lambda df: df.rename(**kwargs),\n [self._expr],\n proxy=proxy,\n preserves_partition_by=preserves_partition_by,\n requires_partition_by=requires_partition_by))\n\n rename_axis = frame_base._elementwise_method('rename_axis', base=pd.DataFrame)\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n @frame_base.maybe_inplace\n def replace(self, limit, **kwargs):\n if limit is None:\n requires_partition_by = partitionings.Arbitrary()\n else:\n requires_partition_by = partitionings.Singleton(reason=(\n f\"replace(limit={limit!r}) cannot currently be parallelized, it \"\n \"requires collecting all data on a single node.\"))\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'replace',\n lambda df: df.replace(limit=limit, **kwargs),\n [self._expr],\n preserves_partition_by=partitionings.Singleton(),\n requires_partition_by=requires_partition_by))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n @frame_base.maybe_inplace\n def reset_index(self, level=None, **kwargs):\n # TODO: Docs should note that the index is not in the same order as it would\n # be with pandas. 
Technically an order sensitive operation\n if level is not None and not isinstance(level, (tuple, list)):\n level = [level]\n if level is None or len(level) == self._expr.proxy().index.nlevels:\n # TODO(BEAM-12182): Could do distributed re-index with offsets.\n requires_partition_by = partitionings.Singleton(reason=(\n \"reset_index(level={level!r}) drops the entire index and creates a \"\n \"new one, so it cannot currently be parallelized (BEAM-12182).\"\n ))\n else:\n requires_partition_by = partitionings.Arbitrary()\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'reset_index',\n lambda df: df.reset_index(level=level, **kwargs),\n [self._expr],\n preserves_partition_by=partitionings.Singleton(),\n requires_partition_by=requires_partition_by))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def round(self, decimals, *args, **kwargs):\n\n if isinstance(decimals, frame_base.DeferredFrame):\n # Disallow passing a deferred Series in, our current partitioning model\n # prevents us from using it correctly.\n raise NotImplementedError(\"Passing a deferred series to round() is not \"\n \"supported, please use a concrete pd.Series \"\n \"instance or a dictionary\")\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'round',\n lambda df: df.round(decimals, *args, **kwargs),\n [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Index()\n )\n )\n\n select_dtypes = frame_base._elementwise_method('select_dtypes',\n base=pd.DataFrame)\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def shift(self, axis, freq, **kwargs):\n if axis in (1, 'columns'):\n preserves = partitionings.Arbitrary()\n proxy = None\n else:\n if freq is None or 'fill_value' in kwargs:\n fill_value = kwargs.get('fill_value', 'NOT SET')\n raise frame_base.WontImplementError(\n f\"shift(axis={axis!r}) is only supported with freq defined, and \"\n f\"fill_value undefined (got freq={freq!r},\"\n f\"fill_value={fill_value!r}). 
Other configurations are sensitive \"\n \"to the order of the data because they require populating shifted \"\n \"rows with `fill_value`.\",\n reason=\"order-sensitive\")\n # proxy generation fails in pandas <1.2\n # Seems due to https://github.com/pandas-dev/pandas/issues/14811,\n # bug with shift on empty indexes.\n # Fortunately the proxy should be identical to the input.\n proxy = self._expr.proxy().copy()\n\n # index is modified, so no partitioning is preserved.\n preserves = partitionings.Singleton()\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'shift',\n lambda df: df.shift(axis=axis, freq=freq, **kwargs),\n [self._expr],\n proxy=proxy,\n preserves_partition_by=preserves,\n requires_partition_by=partitionings.Arbitrary()))\n\n shape = property(frame_base.wont_implement_method(\n pd.DataFrame, 'shape', reason=\"non-deferred-result\"))\n\n stack = frame_base._elementwise_method('stack', base=pd.DataFrame)\n\n all = frame_base._agg_method('all')\n any = frame_base._agg_method('any')\n count = frame_base._agg_method('count')\n max = frame_base._agg_method('max')\n min = frame_base._agg_method('min')\n prod = product = frame_base._agg_method('prod')\n sum = frame_base._agg_method('sum')\n mean = frame_base._agg_method('mean')\n median = frame_base._agg_method('median')\n std = frame_base._agg_method('std')\n var = frame_base._agg_method('var')\n\n take = frame_base.wont_implement_method(pd.DataFrame, 'take',\n reason='deprecated')\n\n to_records = frame_base.wont_implement_method(pd.DataFrame, 'to_records',\n reason=\"non-deferred-result\")\n to_dict = frame_base.wont_implement_method(pd.DataFrame, 'to_dict',\n reason=\"non-deferred-result\")\n to_numpy = frame_base.wont_implement_method(pd.DataFrame, 'to_numpy',\n reason=\"non-deferred-result\")\n to_string = frame_base.wont_implement_method(pd.DataFrame, 'to_string',\n reason=\"non-deferred-result\")\n\n to_sparse = frame_base.wont_implement_method(pd.DataFrame, 'to_sparse',\n reason=\"non-deferred-result\")\n\n transpose = frame_base.wont_implement_method(\n pd.DataFrame, 'transpose', reason='non-deferred-columns')\n\n def unstack(self, *args, **kwargs):\n if self._expr.proxy().index.nlevels == 1:\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'unstack',\n lambda df: df.unstack(*args, **kwargs),\n [self._expr],\n requires_partition_by=partitionings.Index()))\n else:\n raise frame_base.WontImplementError(\n \"unstack() is not supported on DataFrames with a multiple indexes, \"\n \"because the columns in the output depend on the input data.\",\n reason=\"non-deferred-columns\")\n\n update = frame_base._proxy_method(\n 'update',\n inplace=True,\n base=pd.DataFrame,\n requires_partition_by=partitionings.Index(),\n preserves_partition_by=partitionings.Arbitrary())\n\n values = property(frame_base.wont_implement_method(\n pd.DataFrame, 'values', reason=\"non-deferred-result\"))\n\n @frame_base.args_to_kwargs(pd.DataFrame)\n @frame_base.populate_defaults(pd.DataFrame)\n def melt(self, ignore_index, **kwargs):\n if ignore_index:\n raise frame_base.WontImplementError(\n \"melt(ignore_index=True) is order sensitive because it requires \"\n \"generating a new index based on the order of the data.\",\n reason=\"order-sensitive\")\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'melt',\n lambda df: df.melt(ignore_index=False, **kwargs), [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n 
preserves_partition_by=partitionings.Singleton()))\n\n\nfor io_func in dir(io):\n if io_func.startswith('to_'):\n setattr(DeferredDataFrame, io_func, getattr(io, io_func))\n setattr(DeferredSeries, io_func, getattr(io, io_func))\n\n\nfor meth in ('filter', ):\n setattr(DeferredDataFrame, meth,\n frame_base._elementwise_method(meth, base=pd.DataFrame))\n\n\n@populate_not_implemented(DataFrameGroupBy)\nclass DeferredGroupBy(frame_base.DeferredFrame):\n def __init__(self, expr, kwargs,\n ungrouped: expressions.Expression,\n ungrouped_with_index: expressions.Expression,\n grouping_columns,\n grouping_indexes,\n projection=None):\n \"\"\"This object represents the result of::\n\n ungrouped.groupby(level=[grouping_indexes + grouping_columns],\n **kwargs)[projection]\n\n :param expr: An expression to compute a pandas GroupBy object. Convenient\n for unliftable aggregations.\n :param ungrouped: An expression to compute the DataFrame pre-grouping, the\n (Multi)Index contains only the grouping columns/indexes.\n :param ungrouped_with_index: Same as ungrouped, except the index includes\n all of the original indexes as well as any grouping columns. This is\n important for operations that expose the original index, e.g. .apply(),\n but we only use it when necessary to avoid unnessary data transfer and\n GBKs.\n :param grouping_columns: list of column labels that were in the original\n groupby(..) ``by`` parameter. Only relevant for grouped DataFrames.\n :param grouping_indexes: list of index names (or index level numbers) to be\n grouped.\n :param kwargs: Keywords args passed to the original groupby(..) call.\"\"\"\n super(DeferredGroupBy, self).__init__(expr)\n self._ungrouped = ungrouped\n self._ungrouped_with_index = ungrouped_with_index\n self._projection = projection\n self._grouping_columns = grouping_columns\n self._grouping_indexes = grouping_indexes\n self._kwargs = kwargs\n\n def __getattr__(self, name):\n return DeferredGroupBy(\n expressions.ComputedExpression(\n 'groupby_project',\n lambda gb: getattr(gb, name), [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary()),\n self._kwargs,\n self._ungrouped,\n self._ungrouped_with_index,\n self._grouping_columns,\n self._grouping_indexes,\n projection=name)\n\n def __getitem__(self, name):\n return DeferredGroupBy(\n expressions.ComputedExpression(\n 'groupby_project',\n lambda gb: gb[name], [self._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary()),\n self._kwargs,\n self._ungrouped,\n self._ungrouped_with_index,\n self._grouping_columns,\n self._grouping_indexes,\n projection=name)\n\n def agg(self, fn, *args, **kwargs):\n if _is_associative(fn):\n return _liftable_agg(fn)(self, *args, **kwargs)\n elif _is_liftable_with_sum(fn):\n return _liftable_agg(fn, postagg_meth='sum')(self, *args, **kwargs)\n elif _is_unliftable(fn):\n return _unliftable_agg(fn)(self, *args, **kwargs)\n elif callable(fn):\n return DeferredDataFrame(\n expressions.ComputedExpression(\n 'agg',\n lambda gb: gb.agg(fn, *args, **kwargs), [self._expr],\n requires_partition_by=partitionings.Index(),\n preserves_partition_by=partitionings.Singleton()))\n else:\n raise NotImplementedError(f\"GroupBy.agg(func={fn!r})\")\n\n\n def apply(self, fn, *args, **kwargs):\n if self._grouping_columns and not self._projection:\n grouping_columns = self._grouping_columns\n def fn_wrapper(x, *args, **kwargs):\n # TODO(BEAM-11710): Moving a column to an index and back is 
lossy\n # since indexes dont support as many dtypes. We should keep the original\n # column in groupby() instead. We need it anyway in case the grouping\n # column is projected, which is allowed.\n\n # Move the columns back to columns\n x = x.assign(**{col: x.index.get_level_values(col)\n for col in grouping_columns})\n x = x.droplevel(grouping_columns)\n return fn(x, *args, **kwargs)\n else:\n fn_wrapper = fn\n\n project = _maybe_project_func(self._projection)\n\n # Unfortunately pandas does not execute fn to determine the right proxy.\n # We run user fn on a proxy here to detect the return type and generate the\n # proxy.\n result = fn_wrapper(project(self._ungrouped_with_index.proxy()))\n if isinstance(result, pd.core.generic.NDFrame):\n proxy = result[:0]\n\n def index_to_arrays(index):\n return [index.get_level_values(level)\n for level in range(index.nlevels)]\n\n # The final result will have the grouped indexes + the indexes from the\n # result\n proxy.index = pd.MultiIndex.from_arrays(\n index_to_arrays(self._ungrouped.proxy().index) +\n index_to_arrays(proxy.index),\n names=self._ungrouped.proxy().index.names + proxy.index.names)\n else:\n # The user fn returns some non-pandas type. The expected result is a\n # Series where each element is the result of one user fn call.\n dtype = pd.Series([result]).dtype\n proxy = pd.Series([], dtype=dtype, index=self._ungrouped.proxy().index)\n\n levels = self._grouping_indexes + self._grouping_columns\n\n return DeferredDataFrame(\n expressions.ComputedExpression(\n 'apply',\n lambda df: project(df.groupby(level=levels)).apply(\n fn_wrapper,\n *args,\n **kwargs),\n [self._ungrouped_with_index],\n proxy=proxy,\n requires_partition_by=partitionings.Index(levels),\n preserves_partition_by=partitionings.Index(levels)))\n\n aggregate = agg\n\n hist = frame_base.wont_implement_method(DataFrameGroupBy, 'hist',\n reason=\"plotting-tools\")\n plot = frame_base.wont_implement_method(DataFrameGroupBy, 'plot',\n reason=\"plotting-tools\")\n boxplot = frame_base.wont_implement_method(DataFrameGroupBy, 'boxplot',\n reason=\"plotting-tools\")\n\n first = frame_base.wont_implement_method(\n DataFrameGroupBy, 'first', reason='order-sensitive')\n last = frame_base.wont_implement_method(\n DataFrameGroupBy, 'last', reason='order-sensitive')\n head = frame_base.wont_implement_method(\n DataFrameGroupBy, 'head', reason='order-sensitive')\n tail = frame_base.wont_implement_method(\n DataFrameGroupBy, 'tail', reason='order-sensitive')\n nth = frame_base.wont_implement_method(\n DataFrameGroupBy, 'nth', reason='order-sensitive')\n cumcount = frame_base.wont_implement_method(\n DataFrameGroupBy, 'cumcount', reason='order-sensitive')\n cummax = frame_base.wont_implement_method(\n DataFrameGroupBy, 'cummax', reason='order-sensitive')\n cummin = frame_base.wont_implement_method(\n DataFrameGroupBy, 'cummin', reason='order-sensitive')\n cumsum = frame_base.wont_implement_method(\n DataFrameGroupBy, 'cumsum', reason='order-sensitive')\n cumprod = frame_base.wont_implement_method(\n DataFrameGroupBy, 'cumprod', reason='order-sensitive')\n diff = frame_base.wont_implement_method(DataFrameGroupBy, 'diff',\n reason='order-sensitive')\n shift = frame_base.wont_implement_method(DataFrameGroupBy, 'shift',\n reason='order-sensitive')\n\n # TODO(BEAM-12169): Consider allowing this for categorical keys.\n __len__ = frame_base.wont_implement_method(\n DataFrameGroupBy, '__len__', reason=\"non-deferred-result\")\n groups = property(frame_base.wont_implement_method(\n DataFrameGroupBy, 
'groups', reason=\"non-deferred-result\"))\n\ndef _maybe_project_func(projection: Optional[List[str]]):\n \"\"\" Returns identity func if projection is empty or None, else returns\n a function that projects the specified columns. \"\"\"\n if projection:\n return lambda df: df[projection]\n else:\n return lambda x: x\n\n\ndef _liftable_agg(meth, postagg_meth=None):\n agg_name, _ = frame_base.name_and_func(meth)\n\n if postagg_meth is None:\n post_agg_name = agg_name\n else:\n post_agg_name, _ = frame_base.name_and_func(postagg_meth)\n\n def wrapper(self, *args, **kwargs):\n assert isinstance(self, DeferredGroupBy)\n\n if 'min_count' in kwargs:\n return _unliftable_agg(meth)(self, *args, **kwargs)\n\n to_group = self._ungrouped.proxy().index\n is_categorical_grouping = any(to_group.get_level_values(i).is_categorical()\n for i in self._grouping_indexes)\n groupby_kwargs = self._kwargs\n\n # Don't include un-observed categorical values in the preagg\n preagg_groupby_kwargs = groupby_kwargs.copy()\n preagg_groupby_kwargs['observed'] = True\n\n project = _maybe_project_func(self._projection)\n pre_agg = expressions.ComputedExpression(\n 'pre_combine_' + agg_name,\n lambda df: getattr(\n project(\n df.groupby(level=list(range(df.index.nlevels)),\n **preagg_groupby_kwargs)\n ),\n agg_name)(**kwargs),\n [self._ungrouped],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary())\n\n\n post_agg = expressions.ComputedExpression(\n 'post_combine_' + post_agg_name,\n lambda df: getattr(\n df.groupby(level=list(range(df.index.nlevels)),\n **groupby_kwargs),\n post_agg_name)(**kwargs),\n [pre_agg],\n requires_partition_by=(partitionings.Singleton(reason=(\n \"Aggregations grouped by a categorical column are not currently \"\n \"parallelizable (BEAM-11190).\"\n ))\n if is_categorical_grouping\n else partitionings.Index()),\n preserves_partition_by=partitionings.Arbitrary())\n return frame_base.DeferredFrame.wrap(post_agg)\n\n return wrapper\n\n\ndef _unliftable_agg(meth):\n agg_name, _ = frame_base.name_and_func(meth)\n\n def wrapper(self, *args, **kwargs):\n assert isinstance(self, DeferredGroupBy)\n\n to_group = self._ungrouped.proxy().index\n is_categorical_grouping = any(to_group.get_level_values(i).is_categorical()\n for i in self._grouping_indexes)\n\n groupby_kwargs = self._kwargs\n project = _maybe_project_func(self._projection)\n post_agg = expressions.ComputedExpression(\n agg_name,\n lambda df: getattr(project(\n df.groupby(level=list(range(df.index.nlevels)),\n **groupby_kwargs),\n ), agg_name)(**kwargs),\n [self._ungrouped],\n requires_partition_by=(partitionings.Singleton(reason=(\n \"Aggregations grouped by a categorical column are not currently \"\n \"parallelizable (BEAM-11190).\"\n ))\n if is_categorical_grouping\n else partitionings.Index()),\n preserves_partition_by=partitionings.Arbitrary())\n return frame_base.DeferredFrame.wrap(post_agg)\n\n return wrapper\n\nLIFTABLE_AGGREGATIONS = ['all', 'any', 'max', 'min', 'prod', 'sum']\nLIFTABLE_WITH_SUM_AGGREGATIONS = ['size', 'count']\nUNLIFTABLE_AGGREGATIONS = ['mean', 'median', 'std', 'var']\n\nfor meth in LIFTABLE_AGGREGATIONS:\n setattr(DeferredGroupBy, meth, _liftable_agg(meth))\nfor meth in LIFTABLE_WITH_SUM_AGGREGATIONS:\n setattr(DeferredGroupBy, meth, _liftable_agg(meth, postagg_meth='sum'))\nfor meth in UNLIFTABLE_AGGREGATIONS:\n setattr(DeferredGroupBy, meth, _unliftable_agg(meth))\n\ndef _check_str_or_np_builtin(agg_func, func_list):\n return agg_func in func_list or (\n 
getattr(agg_func, '__name__', None) in func_list\n and agg_func.__module__ in ('numpy', 'builtins'))\n\n\ndef _is_associative(agg_func):\n return _check_str_or_np_builtin(agg_func, LIFTABLE_AGGREGATIONS)\n\ndef _is_liftable_with_sum(agg_func):\n return _check_str_or_np_builtin(agg_func, LIFTABLE_WITH_SUM_AGGREGATIONS)\n\ndef _is_unliftable(agg_func):\n return _check_str_or_np_builtin(agg_func, UNLIFTABLE_AGGREGATIONS)\n\nNUMERIC_AGGREGATIONS = ['max', 'min', 'prod', 'sum', 'mean', 'median', 'std',\n 'var']\n\ndef _is_numeric(agg_func):\n return _check_str_or_np_builtin(agg_func, NUMERIC_AGGREGATIONS)\n\n\n@populate_not_implemented(DataFrameGroupBy)\nclass _DeferredGroupByCols(frame_base.DeferredFrame):\n # It's not clear that all of these make sense in Pandas either...\n agg = aggregate = frame_base._elementwise_method('agg', base=DataFrameGroupBy)\n any = frame_base._elementwise_method('any', base=DataFrameGroupBy)\n all = frame_base._elementwise_method('all', base=DataFrameGroupBy)\n boxplot = frame_base.wont_implement_method(\n DataFrameGroupBy, 'boxplot', reason=\"plotting-tools\")\n describe = frame_base.not_implemented_method('describe')\n diff = frame_base._elementwise_method('diff', base=DataFrameGroupBy)\n fillna = frame_base._elementwise_method('fillna', base=DataFrameGroupBy)\n filter = frame_base._elementwise_method('filter', base=DataFrameGroupBy)\n first = frame_base.wont_implement_method(\n DataFrameGroupBy, 'first', reason=\"order-sensitive\")\n get_group = frame_base._elementwise_method('get_group', base=DataFrameGroupBy)\n head = frame_base.wont_implement_method(\n DataFrameGroupBy, 'head', reason=\"order-sensitive\")\n hist = frame_base.wont_implement_method(\n DataFrameGroupBy, 'hist', reason=\"plotting-tools\")\n idxmax = frame_base._elementwise_method('idxmax', base=DataFrameGroupBy)\n idxmin = frame_base._elementwise_method('idxmin', base=DataFrameGroupBy)\n last = frame_base.wont_implement_method(\n DataFrameGroupBy, 'last', reason=\"order-sensitive\")\n mad = frame_base._elementwise_method('mad', base=DataFrameGroupBy)\n max = frame_base._elementwise_method('max', base=DataFrameGroupBy)\n mean = frame_base._elementwise_method('mean', base=DataFrameGroupBy)\n median = frame_base._elementwise_method('median', base=DataFrameGroupBy)\n min = frame_base._elementwise_method('min', base=DataFrameGroupBy)\n nunique = frame_base._elementwise_method('nunique', base=DataFrameGroupBy)\n plot = frame_base.wont_implement_method(\n DataFrameGroupBy, 'plot', reason=\"plotting-tools\")\n prod = frame_base._elementwise_method('prod', base=DataFrameGroupBy)\n quantile = frame_base._elementwise_method('quantile', base=DataFrameGroupBy)\n shift = frame_base._elementwise_method('shift', base=DataFrameGroupBy)\n size = frame_base._elementwise_method('size', base=DataFrameGroupBy)\n skew = frame_base._elementwise_method('skew', base=DataFrameGroupBy)\n std = frame_base._elementwise_method('std', base=DataFrameGroupBy)\n sum = frame_base._elementwise_method('sum', base=DataFrameGroupBy)\n tail = frame_base.wont_implement_method(\n DataFrameGroupBy, 'tail', reason=\"order-sensitive\")\n take = frame_base.wont_implement_method(\n DataFrameGroupBy, 'take', reason='deprecated')\n tshift = frame_base._elementwise_method('tshift', base=DataFrameGroupBy)\n var = frame_base._elementwise_method('var', base=DataFrameGroupBy)\n\n @property\n def groups(self):\n return self._expr.proxy().groups\n\n @property\n def indices(self):\n return self._expr.proxy().indices\n\n @property\n def ndim(self):\n 
return self._expr.proxy().ndim\n\n @property\n def ngroups(self):\n return self._expr.proxy().ngroups\n\n\n@populate_not_implemented(pd.core.indexes.base.Index)\nclass _DeferredIndex(object):\n def __init__(self, frame):\n self._frame = frame\n\n @property\n def names(self):\n return self._frame._expr.proxy().index.names\n\n @names.setter\n def names(self, value):\n def set_index_names(df):\n df = df.copy()\n df.index.names = value\n return df\n\n self._frame._expr = expressions.ComputedExpression(\n 'set_index_names',\n set_index_names,\n [self._frame._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary())\n\n @property\n def ndim(self):\n return self._frame._expr.proxy().index.ndim\n\n @property\n def nlevels(self):\n return self._frame._expr.proxy().index.nlevels\n\n def __getattr__(self, name):\n raise NotImplementedError('index.%s' % name)\n\n\n@populate_not_implemented(pd.core.indexing._LocIndexer)\nclass _DeferredLoc(object):\n def __init__(self, frame):\n self._frame = frame\n\n def __getitem__(self, index):\n if isinstance(index, tuple):\n rows, cols = index\n return self[rows][cols]\n elif isinstance(index, list) and index and isinstance(index[0], bool):\n # Aligned by numerical index.\n raise NotImplementedError(type(index))\n elif isinstance(index, list):\n # Select rows, but behaves poorly on missing values.\n raise NotImplementedError(type(index))\n elif isinstance(index, slice):\n args = [self._frame._expr]\n func = lambda df: df.loc[index]\n elif isinstance(index, frame_base.DeferredFrame):\n args = [self._frame._expr, index._expr]\n func = lambda df, index: df.loc[index]\n elif callable(index):\n\n def checked_callable_index(df):\n computed_index = index(df)\n if isinstance(computed_index, tuple):\n row_index, _ = computed_index\n else:\n row_index = computed_index\n if isinstance(row_index, list) and row_index and isinstance(\n row_index[0], bool):\n raise NotImplementedError(type(row_index))\n elif not isinstance(row_index, (slice, pd.Series)):\n raise NotImplementedError(type(row_index))\n return computed_index\n\n args = [self._frame._expr]\n func = lambda df: df.loc[checked_callable_index]\n else:\n raise NotImplementedError(type(index))\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'loc',\n func,\n args,\n requires_partition_by=(\n partitionings.Index()\n if len(args) > 1\n else partitionings.Arbitrary()),\n preserves_partition_by=partitionings.Arbitrary()))\n\n __setitem__ = frame_base.not_implemented_method('loc.setitem')\n\n@populate_not_implemented(pd.core.indexing._iLocIndexer)\nclass _DeferredILoc(object):\n def __init__(self, frame):\n self._frame = frame\n\n def __getitem__(self, index):\n if isinstance(index, tuple):\n rows, _ = index\n if rows != slice(None, None, None):\n raise frame_base.WontImplementError(\n \"Using iloc to select rows is not supported because it's \"\n \"position-based indexing is sensitive to the order of the data.\",\n reason=\"order-sensitive\")\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'iloc',\n lambda df: df.iloc[index],\n [self._frame._expr],\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary()))\n else:\n raise frame_base.WontImplementError(\n \"Using iloc to select rows is not supported because it's \"\n \"position-based indexing is sensitive to the order of the data.\",\n reason=\"order-sensitive\")\n\n def __setitem__(self, index, value):\n raise 
frame_base.WontImplementError(\n \"Using iloc to mutate a frame is not supported because it's \"\n \"position-based indexing is sensitive to the order of the data.\",\n reason=\"order-sensitive\")\n\n\nclass _DeferredStringMethods(frame_base.DeferredBase):\n @frame_base.args_to_kwargs(pd.core.strings.StringMethods)\n @frame_base.populate_defaults(pd.core.strings.StringMethods)\n def cat(self, others, join, **kwargs):\n if others is None:\n # Concatenate series into a single String\n requires = partitionings.Singleton(reason=(\n \"cat(others=None) concatenates all data in a Series into a single \"\n \"string, so it requires collecting all data on a single node.\"\n ))\n func = lambda df: df.str.cat(join=join, **kwargs)\n args = [self._expr]\n\n elif (isinstance(others, frame_base.DeferredBase) or\n (isinstance(others, list) and\n all(isinstance(other, frame_base.DeferredBase) for other in others))):\n\n if isinstance(others, frame_base.DeferredBase):\n others = [others]\n\n requires = partitionings.Index()\n def func(*args):\n return args[0].str.cat(others=args[1:], join=join, **kwargs)\n args = [self._expr] + [other._expr for other in others]\n\n else:\n raise frame_base.WontImplementError(\n \"others must be None, DeferredSeries, or List[DeferredSeries] \"\n f\"(encountered {type(others)}). Other types are not supported \"\n \"because they make this operation sensitive to the order of the \"\n \"data.\", reason=\"order-sensitive\")\n\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'cat',\n func,\n args,\n requires_partition_by=requires,\n preserves_partition_by=partitionings.Arbitrary()))\n\n @frame_base.args_to_kwargs(pd.core.strings.StringMethods)\n def repeat(self, repeats):\n if isinstance(repeats, int):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'repeat',\n lambda series: series.str.repeat(repeats),\n [self._expr],\n # TODO(BEAM-11155): Defer to pandas to compute this proxy.\n # Currently it incorrectly infers dtype bool, may require upstream\n # fix.\n proxy=self._expr.proxy(),\n requires_partition_by=partitionings.Arbitrary(),\n preserves_partition_by=partitionings.Arbitrary()))\n elif isinstance(repeats, frame_base.DeferredBase):\n return frame_base.DeferredFrame.wrap(\n expressions.ComputedExpression(\n 'repeat',\n lambda series, repeats_series: series.str.repeat(repeats_series),\n [self._expr, repeats._expr],\n # TODO(BEAM-11155): Defer to pandas to compute this proxy.\n # Currently it incorrectly infers dtype bool, may require upstream\n # fix.\n proxy=self._expr.proxy(),\n requires_partition_by=partitionings.Index(),\n preserves_partition_by=partitionings.Arbitrary()))\n elif isinstance(repeats, list):\n raise frame_base.WontImplementError(\n \"str.repeat(repeats=) repeats must be an int or a DeferredSeries. 
\"\n \"Lists are not supported because they make this operation sensitive \"\n \"to the order of the data.\", reason=\"order-sensitive\")\n else:\n raise TypeError(\"str.repeat(repeats=) value must be an int or a \"\n f\"DeferredSeries (encountered {type(repeats)}).\")\n\n get_dummies = frame_base.wont_implement_method(\n pd.core.strings.StringMethods, 'get_dummies',\n reason='non-deferred-columns')\n\n\nELEMENTWISE_STRING_METHODS = [\n 'capitalize',\n 'casefold',\n 'contains',\n 'count',\n 'endswith',\n 'extract',\n 'extractall',\n 'findall',\n 'fullmatch',\n 'get',\n 'isalnum',\n 'isalpha',\n 'isdecimal',\n 'isdigit',\n 'islower',\n 'isnumeric',\n 'isspace',\n 'istitle',\n 'isupper',\n 'join',\n 'len',\n 'lower',\n 'lstrip',\n 'match',\n 'pad',\n 'partition',\n 'replace',\n 'rpartition',\n 'rsplit',\n 'rstrip',\n 'slice',\n 'slice_replace',\n 'split',\n 'startswith',\n 'strip',\n 'swapcase',\n 'title',\n 'upper',\n 'wrap',\n 'zfill',\n '__getitem__',\n]\n\ndef make_str_func(method):\n def func(df, *args, **kwargs):\n try:\n df_str = df.str\n except AttributeError:\n # If there's a non-string value in a Series passed to .str method, pandas\n # will generally just replace it with NaN in the result. However if\n # there are _only_ non-string values, pandas will raise:\n #\n # AttributeError: Can only use .str accessor with string values!\n #\n # This can happen to us at execution time if we split a partition that is\n # only non-strings. This branch just replaces all those values with NaN\n # in that case.\n return df.map(lambda _: np.nan)\n else:\n return getattr(df_str, method)(*args, **kwargs)\n\n return func\n\nfor method in ELEMENTWISE_STRING_METHODS:\n setattr(_DeferredStringMethods,\n method,\n frame_base._elementwise_method(make_str_func(method),\n name=method,\n base=pd.core.strings.StringMethods))\n\nfor base in ['add',\n 'sub',\n 'mul',\n 'div',\n 'truediv',\n 'floordiv',\n 'mod',\n 'divmod',\n 'pow',\n 'and',\n 'or']:\n for p in ['%s', 'r%s', '__%s__', '__r%s__']:\n # TODO: non-trivial level?\n name = p % base\n if hasattr(pd.Series, name):\n setattr(\n DeferredSeries,\n name,\n frame_base._elementwise_method(name, restrictions={'level': None},\n base=pd.Series))\n if hasattr(pd.DataFrame, name):\n setattr(\n DeferredDataFrame,\n name,\n frame_base._elementwise_method(name, restrictions={'level': None},\n base=pd.DataFrame))\n inplace_name = '__i%s__' % base\n if hasattr(pd.Series, inplace_name):\n setattr(\n DeferredSeries,\n inplace_name,\n frame_base._elementwise_method(inplace_name, inplace=True,\n base=pd.Series))\n if hasattr(pd.DataFrame, inplace_name):\n setattr(\n DeferredDataFrame,\n inplace_name,\n frame_base._elementwise_method(inplace_name, inplace=True,\n base=pd.DataFrame))\n\nfor name in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:\n for p in '%s', '__%s__':\n # Note that non-underscore name is used for both as the __xxx__ methods are\n # order-sensitive.\n setattr(DeferredSeries, p % name,\n frame_base._elementwise_method(name, base=pd.Series))\n setattr(DeferredDataFrame, p % name,\n frame_base._elementwise_method(name, base=pd.DataFrame))\n\nfor name in ['__neg__', '__pos__', '__invert__']:\n setattr(DeferredSeries, name,\n frame_base._elementwise_method(name, base=pd.Series))\n setattr(DeferredDataFrame, name,\n frame_base._elementwise_method(name, base=pd.DataFrame))\n\nDeferredSeries.multiply = DeferredSeries.mul # type: ignore\nDeferredDataFrame.multiply = DeferredDataFrame.mul # type: ignore\n\n\ndef _slice_parts(s):\n yield s.start\n yield s.stop\n yield 
s.step\n\ndef _is_null_slice(s):\n return isinstance(s, slice) and all(x is None for x in _slice_parts(s))\n\ndef _is_integer_slice(s):\n return isinstance(s, slice) and all(\n x is None or isinstance(x, int)\n for x in _slice_parts(s)) and not _is_null_slice(s)\n"
]
| [
[
"pandas.concat",
"pandas.core.series.is_iterator",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.Series",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.common.is_bool_indexer",
"pandas.DataFrame"
]
]
|
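Note on the Apache Beam deferred-DataFrame code in the entry above: it repeatedly uses a two-stage pattern for order-insensitive operations such as nlargest/nsmallest and the "_liftable_agg" aggregations — a partial result is computed per partition (requires_partition_by=Arbitrary) and the partials are then combined on a single node (requires_partition_by=Singleton). The following is a minimal pandas-only sketch of why nlargest is safe to lift this way; distributed_nlargest and parts are hypothetical names for illustration, not part of the Beam API.

import pandas as pd

def distributed_nlargest(parts, n, columns):
    # Stage 1: top-n within each partition (order-insensitive, parallelizable).
    per_partition = [df.nlargest(n, columns) for df in parts]
    # Stage 2: top-n of the combined partials on a single node.
    return pd.concat(per_partition).nlargest(n, columns)

df = pd.DataFrame({"x": range(10)})
parts = [df.iloc[:5], df.iloc[5:]]
# The two-stage result matches the single-node result exactly.
assert distributed_nlargest(parts, 3, "x").equals(df.nlargest(3, "x"))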
ziatdinovmax/im2spec | [
"60fa6f44db6c0d69e7c6f1d608553b574190e704"
]
| [
"im2spec/utils.py"
]
| [
"from typing import Tuple, Type, Union\n\nimport numpy as np\nimport torch\nfrom shapely.geometry import Polygon\nfrom sklearn.mixture import GaussianMixture\n\n\ndef predict(model, feature_arr: Union[np.ndarray, torch.Tensor]) -> np.ndarray:\n \"\"\"\n Makes a prediction with a trained NN\n \"\"\"\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if isinstance(feature_arr, np.ndarray):\n feature_arr = torch.from_numpy(feature_arr).float()\n feature_arr = feature_arr.to(device)\n model.eval()\n model.to(device)\n with torch.no_grad():\n prediction = model(feature_arr)\n prediction = prediction.cpu().numpy()\n return prediction\n\n\ndef init_dataloaders(X_train: np.ndarray,\n y_train: np.ndarray,\n X_test: np.ndarray,\n y_test: np.ndarray,\n batch_size: int\n ) -> Tuple[Type[torch.utils.data.DataLoader]]:\n \"\"\"\n Returns train and test dataloaders for training images\n in a native PyTorch format\n \"\"\"\n device_ = 'cuda' if torch.cuda.is_available() else 'cpu'\n X_train = torch.from_numpy(X_train).float().to(device_)\n y_train = torch.from_numpy(y_train).float().to(device_)\n X_test = torch.from_numpy(X_test).float().to(device_)\n y_test = torch.from_numpy(y_test).float().to(device_)\n\n data_train = torch.utils.data.TensorDataset(X_train, y_train)\n data_test = torch.utils.data.TensorDataset(X_test, y_test)\n train_iterator = torch.utils.data.DataLoader(\n data_train, batch_size=batch_size, shuffle=True)\n test_iterator = torch.utils.data.DataLoader(\n data_test, batch_size=batch_size)\n return train_iterator, test_iterator\n\n\ndef make_window(imgsrc: np.ndarray, window_size: int,\n xpos: int, ypos: int) -> np.ndarray:\n \"\"\"\n Returns the portion of the image within the window given the\n image (imgsrc), the x position and the y position\n \"\"\"\n imgsrc = imgsrc[int(xpos-window_size/2):int(xpos+window_size/2),\n int(ypos-window_size/2):int(ypos+window_size/2)]\n return imgsrc\n\n\ndef create_training_set(hdata: np.ndarray, window_size: int,\n slice: int = 0) -> Tuple[np.ndarray]:\n \"\"\"\n Creates arrays with features (local subimages)\n and targets (corresponding spectra) from hyperspectral data\n \"\"\"\n feature_arr, target_arr = [], []\n pos = []\n s1, s2 = hdata.shape[:-1]\n for i in range(s1):\n for j in range(s2):\n arr_loc = make_window(hdata[..., slice], window_size, i, j)\n if arr_loc.shape != (window_size, window_size):\n continue\n feature_arr.append(arr_loc)\n target_arr.append(hdata[i, j, :])\n pos.append([i, j])\n return (np.array(pos), np.array(feature_arr)[:, None],\n np.array(target_arr)[:, None])\n\n\ndef loop_area(pred_loop: np.ndarray,\n target_arr: np.ndarray,\n spec_val: np.ndarray) -> Tuple[np.ndarray]:\n \"\"\"\n Calculates loop area for predicted and ground truth data\n and computes absolute error\n \"\"\"\n polygons, polygons_pred = [], []\n for val1, val2 in zip(target_arr[:, 0], pred_loop[:, 0]):\n polygon, polygon_pred = [], []\n for i, v in enumerate(spec_val):\n polygon.append([v, val1[i]])\n polygon_pred.append([v, val2[i]])\n polygons.append(polygon)\n polygons_pred.append(polygon_pred)\n pred_area = np.array([Polygon(p).area for p in polygons_pred])\n target_area = np.array([Polygon(p).area for p in polygons])\n area_error = np.abs(pred_area-target_area)\n return pred_area, target_area, area_error\n\n\ndef encode(model: Type[torch.nn.Module],\n feature_arr: Union[torch.Tensor, np.ndarray],\n ) -> Tuple[np.ndarray]:\n \"\"\"\n Encodes features in a latent space\n \"\"\"\n device = \"cuda\" if torch.cuda.is_available() else 
\"cpu\"\n if isinstance(feature_arr, np.ndarray):\n feature_arr = torch.from_numpy(feature_arr).float()\n feature_arr = feature_arr.to(device)\n model.eval()\n model.to(device)\n with torch.no_grad():\n latent_features = model.encoder(feature_arr)\n return latent_features.cpu().numpy()\n\n\ndef encode_gmm(model: Type[torch.nn.Module],\n feature_arr: Union[torch.Tensor, np.ndarray],\n nc: int) -> Tuple[np.ndarray]:\n \"\"\"\n Encodes features in latent space and peforms Gaussian mixture model-based\n classification of the latent space\n \"\"\"\n latent_features = encode(model, feature_arr)\n gmmp = GaussianMixture(n_components=nc, random_state=42)\n gmm_zp = gmmp.fit_predict(latent_features)\n centroids = gmmp.fit(latent_features).means_\n return latent_features, gmm_zp, centroids\n\n\ndef decode(model: Type[torch.nn.Module],\n latent_coord: Union[np.ndarray, torch.Tensor],\n ) -> np.ndarray:\n \"\"\"\n Projects latent coordinates to a signal space\n \"\"\"\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if isinstance(latent_coord, np.ndarray):\n latent_coord = torch.from_numpy(latent_coord).float()\n latent_coord = latent_coord.to(device)\n model.eval()\n model.to(device)\n with torch.no_grad():\n decoded = model.decoder(latent_coord)\n return decoded.cpu().numpy()\n\n\ndef latent_gmm(model: Type[torch.nn.Module],\n feature_arr: Union[torch.Tensor, np.ndarray],\n nc: int) -> Tuple[np.ndarray]:\n \"\"\"\n Encodes a feature array in latent space, performs Gaussian mixture model\n based classification and projects centroids of each mixture component\n to a signal space\n \"\"\"\n latent_features, gmm_zp, centroids = encode_gmm(model, feature_arr, nc)\n decoded_centroids = decode(model, centroids)\n return latent_features, gmm_zp, centroids, decoded_centroids\n\n"
]
| [
[
"numpy.abs",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.no_grad",
"torch.cuda.is_available",
"sklearn.mixture.GaussianMixture",
"numpy.array"
]
]
|
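Note on the im2spec entry above: encode, decode, encode_gmm, and latent_gmm chain a trained autoencoder with scikit-learn's GaussianMixture, assuming only that the model exposes .encoder and .decoder attributes. A hedged end-to-end check with a toy linear autoencoder follows; ToyAE and all layer sizes are illustrative assumptions, not the convolutional models the repo actually trains, and the import assumes the package above is installed.

import numpy as np
import torch
import torch.nn as nn

from im2spec.utils import latent_gmm  # the module shown in the entry above

class ToyAE(nn.Module):
    # Hypothetical stand-in exposing the .encoder/.decoder attributes
    # the utility functions rely on.
    def __init__(self, in_dim=16, latent_dim=2):
        super().__init__()
        self.encoder = nn.Linear(in_dim, latent_dim)
        self.decoder = nn.Linear(latent_dim, in_dim)

    def forward(self, x):
        return self.decoder(self.encoder(x))

features = np.random.randn(100, 16).astype(np.float32)
latent, labels, centroids, decoded = latent_gmm(ToyAE(), features, nc=3)
print(latent.shape, labels.shape, centroids.shape, decoded.shape)
# expected: (100, 2) (100,) (3, 2) (3, 16)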
ellery92/deep-text-recognition-benchmark | [
"43304dc94589ceb9fd832e0d3c24f53554763e9a"
]
| [
"gencharset.py"
]
| [
"import pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--train_list', required=True, help='path to train_list file')\nparser.add_argument('--test_list', required=True, help='path to test_list file')\nopt = parser.parse_args()\n\nchar_set = set()\ntrain_list = pd.read_csv(opt.train_list, sep=\"\\t\", names=[\"width\", \"height\", \"img_name\", \"img_text\"])\ntest_list = pd.read_csv(opt.test_list, sep=\"\\t\", names=[\"width\", \"height\", \"img_name\", \"img_text\"])\n\ntrain_list = list(train_list.img_text)\ntest_list = list(test_list.img_text)\n\nfor s in train_list:\n for c in s:\n char_set.add(c)\n\nfor s in test_list:\n for c in s:\n char_set.add(c)\n\nprint (len(char_set))\n\nf = open(\"char_set.txt\", \"w\", encoding=\"utf-8\")\nfor i, c in enumerate(char_set):\n f.write(\"{}\\t{}\\n\".format(i, c))\nf.close()\n"
]
| [
[
"pandas.read_csv"
]
]
|
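Note on the gencharset entry above: the script iterates a plain Python set when writing char_set.txt, so the id-to-character mapping can differ between runs (set iteration order for strings depends on hash randomization). A hedged variant that sorts the characters first, making the file reproducible; write_charset is a hypothetical helper name.

def write_charset(char_set, path="char_set.txt"):
    # Sorting gives a stable id <-> character mapping across runs;
    # iteration order of a plain set is not guaranteed.
    with open(path, "w", encoding="utf-8") as f:
        for i, c in enumerate(sorted(char_set)):
            f.write("{}\t{}\n".format(i, c))

write_charset({"b", "a", "c"})  # writes 0<TAB>a, 1<TAB>b, 2<TAB>c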
ymtz03/freqerica | [
"d79e76181a037da5c11b47f8a4e1bf4387a0468f"
]
| [
"freqerica/op/orbital.py"
]
| [
"import numpy as np\nfrom pyscf import gto, scf, mcscf, ao2mo, symm\nfrom collections import namedtuple\n\nMoleInput = namedtuple('MoleInput', ['mol', 'norb', 'nelec'])\nOrbitalProperty = namedtuple('OrbitalProperty', ['hcore', 'hint', 'gint', 'irreps', 'mo_energy', 'mo_occ', 'mo_coeff', 'ncore', 'ncas', 'nelecas'])\n\ndef unfold_compact_h2mat(h2mat, norb):\n retval = np.empty([norb]*4, float)\n r=0\n for r1 in range(norb):\n for r2 in range(r1+1):\n c=0\n for c1 in range(norb):\n for c2 in range(c1+1):\n retval[r1,r2,c1,c2] = h2mat[r,c]\n retval[r2,r1,c1,c2] = h2mat[r,c]\n retval[r1,r2,c2,c1] = h2mat[r,c]\n retval[r2,r1,c2,c1] = h2mat[r,c]\n c+=1\n r+=1\n return retval\n\n\ndef calculate_moint_and_energy(moleInput):\n mol = moleInput.mol\n \n mf = scf.RHF(moleInput.mol)\n mf.max_cycle = 2000\n hf_energy = mf.kernel()\n print('hf_energy : ', hf_energy)\n \n print('mf.mo_coeff : \\n', mf.mo_coeff)\n print('mf.mo_energy : \\n', mf.mo_energy)\n print('mf.mo_occ : \\n', mf.mo_occ)\n irreps = symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mf.mo_coeff) if mol.symmetry else ['A']*moleInput.norb\n print('irreps :', irreps)\n \n norb = moleInput.norb\n nelec = moleInput.nelec\n mc = mcscf.CASCI(mf, norb, nelec)\n h1, hcore = mc.get_h1eff()\n eri = mc.get_h2eff()\n result_casci = mc.kernel()\n energy_casci = result_casci[0]\n print('energy_casci :', energy_casci)\n print(mc.ncore)\n print('hcore', hcore)\n\n #mo_coeff = mean_field.mo_coeff\n #hint_spacial = mo_coeff.T.dot(mean_field.get_hcore()).dot(mo_coeff)\n #n_orbs = hint_spacial.shape[0]\n #gint_spacial = ao2mo.full(mol, mo_coeff, compact=False).reshape([n_orbs]*4)\n n_site=norb*2\n\n hint_spatial = h1\n gint_spatial = unfold_compact_h2mat(eri, norb)\n\n hint = np.zeros([n_site]*2, float)\n hint[0::2, 0::2] = hint_spatial\n hint[1::2, 1::2] = hint_spatial\n\n gint = np.zeros([n_site]*4, float)\n gint[0::2, 0::2, 0::2, 0::2] = gint_spatial\n gint[1::2, 1::2, 0::2, 0::2] = gint_spatial\n gint[0::2, 0::2, 1::2, 1::2] = gint_spatial\n gint[1::2, 1::2, 1::2, 1::2] = gint_spatial\n\n return OrbitalProperty(hcore, hint, gint, irreps, mf.mo_energy, mf.mo_occ, mf.mo_coeff, mc.ncore, mc.ncas, mc.nelecas), energy_casci\n"
]
| [
[
"numpy.zeros",
"numpy.empty"
]
]
|
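Note on the freqerica entry above: unfold_compact_h2mat expands two-electron integrals stored with a packed pair index back to a full four-index array, writing each packed element into all four pair-exchanged slots (PySCF's own ao2mo.restore plays a similar role for its packed ERI formats). A hedged numpy-only sanity check of those symmetries follows; the packed input is random test data, and the import assumes pyscf is installed so the module above loads.

import numpy as np

from freqerica.op.orbital import unfold_compact_h2mat  # module shown above

norb = 3
npair = norb * (norb + 1) // 2     # length of a compact pair index
h2 = np.random.rand(npair, npair)  # hypothetical packed integrals

g = unfold_compact_h2mat(h2, norb)
assert g.shape == (norb,) * 4
# The loop writes each packed element to all four pair-exchanged slots,
# so the result must be symmetric under swapping indices within a pair.
assert np.allclose(g, g.transpose(1, 0, 2, 3))
assert np.allclose(g, g.transpose(0, 1, 3, 2))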
Sui-Siann-Dataset/HunLian | [
"fc3cebcb34bcc342005527a46ab9a2d048b58e7d"
]
| [
"Tacotron2-Ray/hparams.py"
]
| [
"import numpy as np\nimport tensorflow as tf\n\n# Default hyperparameters\nhparams = tf.contrib.training.HParams(\n\t# Comma-separated list of cleaners to run on text prior to training and eval. For non-English\n\t# text, you may want to use \"basic_cleaners\" or \"transliteration_cleaners\".\n\tcleaners='english_cleaners',\n\n\n\t#If you only have 1 GPU or want to use only one GPU, please set num_gpus=0 and specify the GPU idx on run. example:\n\t\t#expample 1 GPU of index 2 (train on \"/gpu2\" only): CUDA_VISIBLE_DEVICES=2 python train.py --model='Tacotron' --hparams='tacotron_gpu_start_idx=2'\n\t#If you want to train on multiple GPUs, simply specify the number of GPUs available, and the idx of the first GPU to use. example:\n\t\t#example 4 GPUs starting from index 0 (train on \"/gpu0\"->\"/gpu3\"): python train.py --model='Tacotron' --hparams='tacotron_num_gpus=4, tacotron_gpu_start_idx=0'\n\t#The hparams arguments can be directly modified on this hparams.py file instead of being specified on run if preferred!\n\n\t#If one wants to train both Tacotron and WaveNet in parallel (provided WaveNet will be trained on True mel spectrograms), one needs to specify different GPU idxes.\n\t#example Tacotron+WaveNet on a machine with 4 or plus GPUs. Two GPUs for each model: \n\t\t# CUDA_VISIBLE_DEVICES=0,1 python train.py --model='Tacotron' --hparams='tacotron_gpu_start_idx=0, tacotron_num_gpus=2'\n\t\t# Cuda_VISIBLE_DEVICES=2,3 python train.py --model='WaveNet' --hparams='wavenet_gpu_start_idx=2; wavenet_num_gpus=2'\n\n\t#IMPORTANT NOTE: If using N GPUs, please multiply the tacotron_batch_size by N below in the hparams! (tacotron_batch_size = 32 * N)\n\t#Never use lower batch size than 32 on a single GPU!\n\t#Same applies for Wavenet: wavenet_batch_size = 8 * N (wavenet_batch_size can be smaller than 8 if GPU is having OOM, minimum 2)\n\t#Please also apply the synthesis batch size modification likewise. (if N GPUs are used for synthesis, minimal batch size must be N, minimum of 1 sample per GPU)\n\t#We did not add an automatic multi-GPU batch size computation to avoid confusion in the user's mind and to provide more control to the user for\n\t#resources related decisions.\n\n\t#Acknowledgement:\n\t#\tMany thanks to @MlWoo for his awesome work on multi-GPU Tacotron which showed to work a little faster than the original\n\t#\tpipeline for a single GPU as well. Great work!\n\n\t#Hardware setup: Default supposes user has only one GPU: \"/gpu:0\" (Tacotron only for now! WaveNet does not support multi GPU yet, WIP)\n\t#Synthesis also uses the following hardware parameters for multi-GPU parallel synthesis.\n tacotron_gpu_start_idx = 0, #idx of the first GPU to be used for Tacotron training.\n tacotron_num_gpus = 1, #Determines the number of gpus in use for Tacotron training.\n wavenet_gpu_start_idx = 0, #idx of the first GPU to be used for WaveNet training. (WIP)\n wavenet_num_gpus = 1, #Determines the number of gpus in use for WaveNet training. (WIP)\n split_on_cpu = True, #Determines whether to split data on CPU or on first GPU. This is automatically True when more than 1 GPU is used.\n\t###########################################################################################################################################\n\n\t#Audio\n\t#Audio parameters are the most important parameters to tune when using this work on your personal data. 
Below are the beginner steps to adapt\n\t#this work to your personal data:\n\t#\t1- Determine my data sample rate: First you need to determine your audio sample_rate (how many samples are in a second of audio). This can be done using sox: \"sox --i <filename>\"\n\t#\t\t(For this small tuto, I will consider 24kHz (24000 Hz), and defaults are 22050Hz, so there are plenty of examples to refer to)\n\t#\t2- set sample_rate parameter to your data correct sample rate\n\t#\t3- Fix win_size and and hop_size accordingly: (Supposing you will follow our advice: 50ms window_size, and 12.5ms frame_shift(hop_size))\n\t#\t\ta- win_size = 0.05 * sample_rate. In the tuto example, 0.05 * 24000 = 1200\n\t#\t\tb- hop_size = 0.25 * win_size. Also equal to 0.0125 * sample_rate. In the tuto example, 0.25 * 1200 = 0.0125 * 24000 = 300 (Can set frame_shift_ms=12.5 instead)\n\t#\t4- Fix n_fft, num_freq and upsample_scales parameters accordingly.\n\t#\t\ta- n_fft can be either equal to win_size or the first power of 2 that comes after win_size. I usually recommend using the latter\n\t#\t\t\tto be more consistent with signal processing friends. No big difference to be seen however. For the tuto example: n_fft = 2048 = 2**11\n\t#\t\tb- num_freq = (n_fft / 2) + 1. For the tuto example: num_freq = 2048 / 2 + 1 = 1024 + 1 = 1025.\n\t#\t\tc- For WaveNet, upsample_scales products must be equal to hop_size. For the tuto example: upsample_scales=[15, 20] where 15 * 20 = 300\n\t#\t\t\tit is also possible to use upsample_scales=[3, 4, 5, 5] instead. One must only keep in mind that upsample_kernel_size[0] = 2*upsample_scales[0]\n\t#\t\t\tso the training segments should be long enough (2.8~3x upsample_scales[0] * hop_size or longer) so that the first kernel size can see the middle \n\t#\t\t\tof the samples efficiently. The length of WaveNet training segments is under the parameter \"max_time_steps\".\n\t#\t5- Finally comes the silence trimming. This very much data dependent, so I suggest trying preprocessing (or part of it, ctrl-C to stop), then use the\n\t#\t\t.ipynb provided in the repo to listen to some inverted mel/linear spectrograms. That will first give you some idea about your above parameters, and\n\t#\t\tit will also give you an idea about trimming. If silences persist, try reducing trim_top_db slowly. 
If samples are trimmed mid words, try increasing it.\n\t#\t6- If audio quality is too metallic or fragmented (or if linear spectrogram plots are showing black silent regions on top), then restart from step 2.\n\tnum_mels = 80, #Number of mel-spectrogram channels and local conditioning dimensionality\n\tnum_freq = 1025, # (= n_fft / 2 + 1) only used when adding linear spectrograms post processing network\n\trescale = True, #Whether to rescale audio prior to preprocessing\n\trescaling_max = 0.999, #Rescaling value\n\ttrim_silence = True, #Whether to clip silence in Audio (at beginning and end of audio only, not the middle)\n\t#train samples of lengths between 3sec and 14sec are more than enough to make a model capable of good parallelization.\n\tclip_mels_length = True, #For cases of OOM (Not really recommended, only use if facing unsolvable OOM errors, also consider clipping your samples to smaller chunks)\n\tmax_mel_frames = 1000, #Only relevant when clip_mels_length = True, please only use after trying output_per_steps=3 and still getting OOM errors.\n\n\t# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction\n\t# It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder\n\t# Does not work if n_ffit is not multiple of hop_size!!\n\tuse_lws=False, #Only used to set as True if using WaveNet, no difference in performance is observed in either cases.\n\tsilence_threshold=2, #silence threshold used for sound trimming for wavenet preprocessing\n\n\t#Mel spectrogram\n\tn_fft = 2048, #Extra window size is filled with 0 paddings to match this parameter\n\thop_size = 200, #For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate)\n\twin_size = 800, #For 22050Hz, 1100 ~= 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)\n\tsample_rate = 16000, #22050 Hz (corresponding to ljspeech dataset) (sox --i <filename>)\n\tframe_shift_ms = None, #Can replace hop_size parameter. (Recommended: 12.5)\n\n\t#M-AILABS (and other datasets) trim params (there parameters are usually correct for any data, but definitely must be tuned for specific speakers)\n\ttrim_fft_size = 512, \n\ttrim_hop_size = 128,\n\ttrim_top_db = 23,\n\n\t#Mel and Linear spectrograms normalization/scaling and clipping\n\tsignal_normalization = True, #Whether to normalize mel spectrograms to some predefined range (following below parameters)\n\tallow_clipping_in_normalization = True, #Only relevant if mel_normalization = True\n\tsymmetric_mels = True, #Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, faster and cleaner convergence)\n\tmax_abs_value = 4., #max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not be too big to avoid gradient explosion, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #not too small for fast convergence)\n\tnormalize_for_wavenet = True, #whether to rescale to [0, 1] for wavenet. (better audio quality)\n\tclip_for_wavenet = True, #whether to clip [-max, max] before training/synthesizing with wavenet (better audio quality)\n\n\t#Contribution by @begeekmyfriend\n\t#Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude levels. Also allows for better G&L phase reconstruction)\n\tpreemphasize = True, #whether to apply filter\n\tpreemphasis = 0.97, #filter coefficient.\n\n\t#Limits\n\tmin_level_db = -100,\n\tref_level_db = 20,\n\tfmin = 55, #Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. 
Pitch info: male~[65, 260], female~[100, 525])\n\tfmax = 7600, #To be increased/reduced depending on data.\n\n\t#Griffin Lim\n\tpower = 1.5, #Only used in G&L inversion, usually values between 1.2 and 1.5 are a good choice.\n\tgriffin_lim_iters = 60, #Number of G&L iterations, typically 30 is enough but we use 60 to ensure convergence.\n\t###########################################################################################################################################\n\n\t#Tacotron\n\toutputs_per_step = 3, #number of frames to generate at each decoding step (increase to speed up computation and allows for higher batch size, decreases G&L audio quality)\n\tstop_at_any = True, #Determines whether the decoder should stop when predicting <stop> to any frame or to all of them (True works pretty well)\n\n\tembedding_dim = 512, #dimension of embedding space\n\n\t#Encoder parameters\n\tenc_conv_num_layers = 3, #number of encoder convolutional layers\n\tenc_conv_kernel_size = (5, ), #size of encoder convolution filters for each layer\n\tenc_conv_channels = 512, #number of encoder convolutions filters for each layer\n\tencoder_lstm_units = 256, #number of lstm units for each direction (forward and backward)\n\n\t#Attention mechanism\n\tsmoothing = False, #Whether to smooth the attention normalization function\n\tattention_dim = 128, #dimension of attention space\n\tattention_filters = 32, #number of attention convolution filters\n\tattention_kernel = (31, ), #kernel size of attention convolution\n\tcumulative_weights = True, #Whether to cumulate (sum) all previous attention weights or simply feed previous weights (Recommended: True)\n\n\t#Decoder\n\tprenet_layers = [256, 256], #number of layers and number of units of prenet\n\tdecoder_layers = 2, #number of decoder lstm layers\n\tdecoder_lstm_units = 1024, #number of decoder lstm units on each layer\n\tmax_iters = 2000, #Max decoder steps during inference (Just for safety from infinite loop cases)\n\n\t#Residual postnet\n\tpostnet_num_layers = 5, #number of postnet convolutional layers\n\tpostnet_kernel_size = (5, ), #size of postnet convolution filters for each layer\n\tpostnet_channels = 512, #number of postnet convolution filters for each layer\n\n\t#CBHG mel->linear postnet\n\tcbhg_kernels = 8, #All kernel sizes from 1 to cbhg_kernels will be used in the convolution bank of CBHG to act as \"K-grams\"\n\tcbhg_conv_channels = 128, #Channels of the convolution bank\n\tcbhg_pool_size = 2, #pooling size of the CBHG\n\tcbhg_projection = 256, #projection channels of the CBHG (1st projection, 2nd is automatically set to num_mels)\n\tcbhg_projection_kernel_size = 3, #kernel_size of the CBHG projections\n\tcbhg_highwaynet_layers = 4, #Number of HighwayNet layers\n\tcbhg_highway_units = 128, #Number of units used in HighwayNet fully connected layers\n\tcbhg_rnn_units = 128, #Number of GRU units used in bidirectional RNN of CBHG block. CBHG output is 2x rnn_units in shape\n\n\t#Loss params\n\tmask_encoder = True, #whether to mask encoder padding while computing attention. 
Set to True for better prosody but slower convergence.\n\tmask_decoder = False, #Whether to use loss mask for padded sequences (if False, <stop_token> loss function will not be weighted, else recommended pos_weight = 20)\n\tcross_entropy_pos_weight = 20, #Use class weights to reduce the stop token classes imbalance (by adding more penalty on False Negatives (FN)) (1 = disabled)\n\tpredict_linear = True, #Whether to add a post-processing network to the Tacotron to predict linear spectrograms (True mode Not tested!!)\n\t###########################################################################################################################################\n\n\n\t#Wavenet\n\t# Input type:\n\t# 1. raw [-1, 1]\n\t# 2. mulaw [-1, 1]\n\t# 3. mulaw-quantize [0, mu]\n\t# If input_type is raw or mulaw, network assumes scalar input and\n\t# discretized mixture of logistic distributions output, otherwise one-hot\n\t# input and softmax output are assumed.\n\t#Model generatl type\n\tinput_type=\"raw\",\n\tquantize_channels=2 ** 16, # 65536 (16-bit) (raw) or 256 (8-bit) (mulaw or mulaw-quantize) // number of classes = 256 <=> mu = 255\n\n\t#Minimal scales ranges for MoL and Gaussian modeling\n\tlog_scale_min=float(np.log(1e-14)), #Mixture of logistic distributions minimal log scale\n\tlog_scale_min_gauss = float(np.log(1e-7)), #Gaussian distribution minimal allowed log scale\n\n\t#model parameters\n\t#To use Gaussian distribution as output distribution instead of mixture of logistics, set \"out_channels = 2\" instead of \"out_channels = 10 * 3\". (UNDER TEST)\n\tout_channels = 2, #This should be equal to quantize channels when input type is 'mulaw-quantize' else: num_distributions * 3 (prob, mean, log_scale).\n\tlayers = 20, #Number of dilated convolutions (Default: Simplified Wavenet of Tacotron-2 paper)\n\tstacks = 2, #Number of dilated convolution stacks (Default: Simplified Wavenet of Tacotron-2 paper)\n\tresidual_channels = 128, #Number of residual block input/output channels.\n\tgate_channels = 256, #split in 2 in gated convolutions\n\tskip_out_channels = 128, #Number of residual block skip convolution channels.\n\tkernel_size = 3, #The number of inputs to consider in dilated convolutions.\n\n\t#Upsampling parameters (local conditioning)\n\tcin_channels = 80, #Set this to -1 to disable local conditioning, else it must be equal to num_mels!!\n\tupsample_conditional_features = True, #Whether to repeat conditional features or upsample them (The latter is recommended)\n\tupsample_type = '1D', #Type of the upsampling deconvolution. Can be ('1D' or '2D'). 1D spans all frequency bands for each frame while 2D spans \"freq_axis_kernel_size\" bands at a time\n\tupsample_activation = 'LeakyRelu', #Activation function used during upsampling. Can be ('LeakyRelu', 'Relu' or None)\n\tupsample_scales = [5, 5, 11], #prod(upsample_scales) should be equal to hop_size\n\tfreq_axis_kernel_size = 3, #Only used for 2D upsampling. This is the number of requency bands that are spanned at a time for each frame.\n\tleaky_alpha = 0.4, #slope of the negative portion of LeakyRelu (LeakyRelu: y=x if x>0 else y=alpha * x)\n\n\t#global conditioning\n\tgin_channels = -1, #Set this to -1 to disable global conditioning, Only used for multi speaker dataset. It defines the depth of the embeddings (Recommended: 16)\n\tuse_speaker_embedding = True, #whether to make a speaker embedding\n\tn_speakers = 5, #number of speakers (rows of the embedding)\n\n\t#the bias debate! 
:)\n\tuse_bias = True, #Whether to use bias in convolutional layers of the Wavenet\n\n\t#training samples length\n\tmax_time_sec = None, #Max time of audio for training. If None, we use max_time_steps.\n\tmax_time_steps = 11000, #Max time steps in audio used to train wavenet (decrease to save memory) (Recommend: 8000 on modest GPUs, 13000 on stronger ones)\n\t###########################################################################################################################################\n\n\t#Tacotron Training\n\t#Reproduction seeds\n\ttacotron_random_seed = 5339, #Determines initial graph and operations (i.e: model) random state for reproducibility\n\ttacotron_data_random_state = 1234, #random state for train test split repeatability\n\n\t#performance parameters\n\ttacotron_swap_with_cpu = False, #Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause major slowdowns! Only use when critical!)\n\n\t#train/test split ratios, mini-batches sizes\n\ttacotron_batch_size = 16, #32, #number of training samples on each training steps\n\t#Tacotron Batch synthesis supports ~16x the training batch size (no gradients during testing). \n\t#Training Tacotron with unmasked paddings makes it aware of them, which makes synthesis times different from training. We thus recommend masking the encoder.\n\ttacotron_synthesis_batch_size = 1, #DO NOT MAKE THIS BIGGER THAN 1 IF YOU DIDN'T TRAIN TACOTRON WITH \"mask_encoder=True\"!!\n\ttacotron_test_size = 0.05, #% of data to keep as test data, if None, tacotron_test_batches must be not None. (5% is enough to have a good idea about overfit)\n\ttacotron_test_batches = None, #number of test batches.\n\n\t#Learning rate schedule\n\ttacotron_decay_learning_rate = True, #boolean, determines if the learning rate will follow an exponential decay\n\ttacotron_start_decay = 50000, #Step at which learning decay starts\n\ttacotron_decay_steps = 50000, #Determines the learning rate decay slope (UNDER TEST)\n\ttacotron_decay_rate = 0.5, #learning rate decay rate (UNDER TEST)\n\ttacotron_initial_learning_rate = 1e-3, #starting learning rate\n\ttacotron_final_learning_rate = 1e-5, #minimal learning rate\n\n\t#Optimization parameters\n\ttacotron_adam_beta1 = 0.9, #AdamOptimizer beta1 parameter\n\ttacotron_adam_beta2 = 0.999, #AdamOptimizer beta2 parameter\n\ttacotron_adam_epsilon = 1e-6, #AdamOptimizer Epsilon parameter\n\n\t#Regularization parameters\n\ttacotron_reg_weight = 1e-7, #regularization weight (for L2 regularization)\n\ttacotron_scale_regularization = False, #Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is high and biasing the model)\n\ttacotron_zoneout_rate = 0.1, #zoneout rate for all LSTM cells in the network\n\ttacotron_dropout_rate = 0.5, #dropout rate for all convolutional layers + prenet\n\ttacotron_clip_gradients = True, #whether to clip gradients\n\n\t#Evaluation parameters\n\tnatural_eval = False, #Whether to use 100% natural eval (to evaluate Curriculum Learning performance) or with same teacher-forcing ratio as in training (just for overfit)\n\n\t#Decoder RNN learning can take be done in one of two ways:\n\t#\tTeacher Forcing: vanilla teacher forcing (usually with ratio = 1). mode='constant'\n\t#\tCurriculum Learning Scheme: From Teacher-Forcing to sampling from previous outputs is function of global step. (teacher forcing ratio decay) mode='scheduled'\n\t#The second approach is inspired by:\n\t#Bengio et al. 
2015: Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks.\n\t#Can be found under: https://arxiv.org/pdf/1506.03099.pdf\n\ttacotron_teacher_forcing_mode = 'constant', #Can be ('constant' or 'scheduled'). 'scheduled' mode applies a cosine teacher forcing ratio decay. (Preference: scheduled)\n\ttacotron_teacher_forcing_ratio = 1., #Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder inputs, Only relevant if mode='constant'\n\ttacotron_teacher_forcing_init_ratio = 1., #initial teacher forcing ratio. Relevant if mode='scheduled'\n\ttacotron_teacher_forcing_final_ratio = 0., #final teacher forcing ratio. Relevant if mode='scheduled'\n\ttacotron_teacher_forcing_start_decay = 10000, #starting point of teacher forcing ratio decay. Relevant if mode='scheduled'\n\ttacotron_teacher_forcing_decay_steps = 280000, #Determines the teacher forcing ratio decay slope. Relevant if mode='scheduled'\n\ttacotron_teacher_forcing_decay_alpha = 0., #teacher forcing ratio decay rate. Relevant if mode='scheduled'\n\t###########################################################################################################################################\n\n\t#Wavenet Training\n\twavenet_random_seed = 5339, # S=5, E=3, D=9 :)\n\twavenet_data_random_state = 1234, #random state for train test split repeatability\n\n\t#performance parameters\n\twavenet_swap_with_cpu = False, #Whether to use cpu as support to gpu for synthesis computation (while loop).(Not recommended: may cause major slowdowns! Only use when critical!)\n\n\t#train/test split ratios, mini-batches sizes\n\twavenet_batch_size = 8, #batch size used to train wavenet.\n\t#During synthesis, there is no max_time_steps limitation so the model can sample much longer audio than 8k(or 13k) steps. (Audio can go up to 500k steps, equivalent to ~21sec on 24kHz)\n\t#Usually your GPU can handle ~2x wavenet_batch_size during synthesis for the same memory amount during training (because no gradients to keep and ops to register for backprop)\n\twavenet_synthesis_batch_size = 10 * 2, #This ensure that wavenet synthesis goes up to 4x~8x faster when synthesizing multiple sentences. Watch out for OOM with long audios.\n\twavenet_test_size = 0.0441, #% of data to keep as test data, if None, wavenet_test_batches must be not None\n\twavenet_test_batches = None, #number of test batches.\n\n\t#Learning rate schedule\n\twavenet_lr_schedule = 'exponential', #learning rate schedule. Can be ('exponential', 'noam')\n\twavenet_learning_rate = 1e-4, #wavenet initial learning rate\n\twavenet_warmup = float(4000), #Only used with 'noam' scheme. Defines the number of ascending learning rate steps.\n\twavenet_decay_rate = 0.5, #Only used with 'exponential' scheme. Defines the decay rate.\n\twavenet_decay_steps = 300000, #Only used with 'exponential' scheme. Defines the decay steps.\n\n\t#Optimization parameters\n\twavenet_adam_beta1 = 0.9, #Adam beta1\n\twavenet_adam_beta2 = 0.999, #Adam beta2\n\twavenet_adam_epsilon = 1e-8, #Adam Epsilon\n\n\t#Regularization parameters\n\twavenet_clip_gradients = False, #Whether the clip the gradients during wavenet training.\n\twavenet_ema_decay = 0.9999, #decay rate of exponential moving average\n\twavenet_weight_normalization = False, #Whether to Apply Saliman & Kingma Weight Normalization (reparametrization) technique. (NEEDS VERIFICATION)\n\twavenet_init_scale = 1., #Only relevent if weight_normalization=True. 
Defines the initial scale in data dependent initialization of parameters.\n\twavenet_dropout = 0.05, #drop rate of wavenet layers\n\n\t#Tacotron-2 integration parameters\n\ttrain_with_GTA = False, #Whether to use GTA mels to train WaveNet instead of ground truth mels.\n\t###########################################################################################################################################\n\n\t#Eval sentences (if no eval text file was specified during synthesis, these sentences are used for eval)\n\tsentences = [\n\t# From July 8, 2017 New York Times:\n\t'Scientists at the CERN laboratory say they have discovered a new particle.',\n\t'There\\'s a way to measure the acute emotional intelligence that has never gone out of style.',\n\t'President Trump met with other leaders at the Group of 20 conference.',\n\t'The Senate\\'s bill to repeal and replace the Affordable Care Act is now imperiled.',\n\t# From Google's Tacotron example page:\n\t'Generative adversarial network or variational auto-encoder.',\n\t'Basilar membrane and otolaryngology are not auto-correlations.',\n\t'He has read the whole thing.',\n\t'He reads books.',\n\t'He thought it was time to present the present.',\n\t'Thisss isrealy awhsome.',\n\t'Punctuation sensitivity, is working.',\n\t'Punctuation sensitivity is working.',\n\t\"Peter Piper picked a peck of pickled peppers. How many pickled peppers did Peter Piper pick?\",\n\t\"She sells sea-shells on the sea-shore. The shells she sells are sea-shells I'm sure.\",\n\t\"Tajima Airport serves Toyooka.\",\n\t#From The web (random long utterance)\n\t'Sequence to sequence models have enjoyed great success in a variety of tasks such as machine translation, speech recognition, and text summarization.\\\n\tThis project covers a sequence to sequence model trained to predict a speech representation from an input sequence of characters. We show that\\\n\tthe adopted architecture is able to perform this task with wild success.',\n\t'Thank you so much for your support!',\n\t]\n\n\t)\n\ndef hparams_debug_string():\n\tvalues = hparams.values()\n\thp = [' %s: %s' % (name, values[name]) for name in sorted(values) if name != 'sentences']\n\treturn 'Hyperparameters:\\n' + '\\n'.join(hp)\n"
]
| [
[
"numpy.log"
]
]
|
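The only API extracted from this hparams file is numpy.log, used to store numerical floors for the output-distribution scales (log_scale_min, log_scale_min_gauss). A minimal sketch of why such floors are kept in the log domain, assuming NumPy-array network outputs (clamp_log_scale is a hypothetical helper, not part of the repo):

import numpy as np

def clamp_log_scale(log_scale, log_scale_min=float(np.log(1e-14))):
    # Clamping from below keeps exp(log_scale) away from 0, so the
    # mixture-of-logistics / Gaussian likelihood never degenerates.
    return np.maximum(log_scale, log_scale_min)

raw = np.array([-40.0, -10.0, 0.5])  # hypothetical predicted log-scales
print(clamp_log_scale(raw))          # [-32.2366..., -10.0, 0.5]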
NEUNLP-RE/Erich_Relation_Classification | [
"31c3e9dc7461dcd1fcf72969f150a603b3edaf28"
]
| [
"src/utils.py"
]
| [
"from __future__ import absolute_import, division, print_function\n\nimport os\nimport torch\nimport random\nimport numpy as np\n\n\ndef simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\n\ndef semeval_score():\n output = os.popen('bash test.sh')\n text = [i for i in output.read().split(\"\\n\") if i != \"\"]\n score = text[-2]\n words = score.split()\n prec, recall, f1 = float(words[2][:-1]), float(words[5][:-1]), float(words[8][:-1])\n return {\"precision\": prec, \"recall\": recall, \"f1 score\": f1}\n\n\ndef compute_metrics(args, preds, labels):\n assert len(preds) == len(labels)\n if args.task_name == \"semeval\":\n return semeval_score()\n else:\n raise KeyError(args.task_name)\n\n\ndef set_seed(args, add_param=0):\n random.seed(args.seed + add_param)\n np.random.seed(args.seed + add_param)\n torch.manual_seed(args.seed + add_param)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed + add_param)\n"
]
| [
[
"torch.manual_seed",
"numpy.random.seed",
"torch.cuda.manual_seed_all"
]
]
|
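The three APIs extracted above all come from set_seed, which seeds every RNG the training run touches. A self-contained sketch of the same pattern, assuming PyTorch is installed (offset plays the role of add_param; seed_everything is a hypothetical name):

import random

import numpy as np
import torch

def seed_everything(seed, offset=0):
    # Seed Python, NumPy, PyTorch, and all CUDA devices from one base value.
    random.seed(seed + offset)
    np.random.seed(seed + offset)
    torch.manual_seed(seed + offset)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed + offset)

seed_everything(42)            # once per training run
seed_everything(42, offset=1)  # a distinct but still reproducible variant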
cthtuf/v-diffusion-pytorch | [
"3525561043113bb855a49f91b4bccd2afdb94f8d"
]
| [
"clip_sample.py"
]
| [
"#!/usr/bin/env python3\n\n\"\"\"CLIP guided sampling from a diffusion model.\"\"\"\n\nimport argparse\nfrom functools import partial\nfrom pathlib import Path\n\nfrom PIL import Image\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchvision import transforms\nfrom torchvision.transforms import functional as TF\nfrom tqdm import trange\n\nfrom CLIP import clip\nfrom diffusion import get_model, get_models, sampling, utils\n\nMODULE_DIR = Path(__file__).resolve().parent\n\n\nclass MakeCutouts(nn.Module):\n def __init__(self, cut_size, cutn, cut_pow=1.):\n super().__init__()\n self.cut_size = cut_size\n self.cutn = cutn\n self.cut_pow = cut_pow\n\n def forward(self, input):\n sideY, sideX = input.shape[2:4]\n max_size = min(sideX, sideY)\n min_size = min(sideX, sideY, self.cut_size)\n cutouts = []\n for _ in range(self.cutn):\n size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)\n offsetx = torch.randint(0, sideX - size + 1, ())\n offsety = torch.randint(0, sideY - size + 1, ())\n cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]\n cutout = F.adaptive_avg_pool2d(cutout, self.cut_size)\n cutouts.append(cutout)\n return torch.cat(cutouts)\n\n\ndef spherical_dist_loss(x, y):\n x = F.normalize(x, dim=-1)\n y = F.normalize(y, dim=-1)\n return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)\n\n\ndef parse_prompt(prompt):\n if prompt.startswith('http://') or prompt.startswith('https://'):\n vals = prompt.rsplit(':', 2)\n vals = [vals[0] + ':' + vals[1], *vals[2:]]\n else:\n vals = prompt.rsplit(':', 1)\n vals = vals + ['', '1'][len(vals):]\n return vals[0], float(vals[1])\n\n\ndef resize_and_center_crop(image, size):\n fac = max(size[0] / image.size[0], size[1] / image.size[1])\n image = image.resize((int(fac * image.size[0]), int(fac * image.size[1])), Image.LANCZOS)\n return TF.center_crop(image, size[::-1])\n\n\ndef main():\n p = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('prompts', type=str, default=[], nargs='*',\n help='the text prompts to use')\n p.add_argument('--images', type=str, default=[], nargs='*', metavar='IMAGE',\n help='the image prompts')\n p.add_argument('--batch-size', '-bs', type=int, default=1,\n help='the number of images per batch')\n p.add_argument('--checkpoint', type=str,\n help='the checkpoint to use')\n p.add_argument('--clip-guidance-scale', '-cs', type=float, default=500.,\n help='the CLIP guidance scale')\n p.add_argument('--cutn', type=int, default=16,\n help='the number of random crops to use')\n p.add_argument('--cut-pow', type=float, default=1.,\n help='the random crop size power')\n p.add_argument('--device', type=str,\n help='the device to use')\n p.add_argument('--eta', type=float, default=1.,\n help='the amount of noise to add during sampling (0-1)')\n p.add_argument('--init', type=str,\n help='the init image')\n p.add_argument('--model', type=str, default='cc12m_1', choices=get_models(),\n help='the model to use')\n p.add_argument('-n', type=int, default=1,\n help='the number of images to sample')\n p.add_argument('--seed', type=int, default=0,\n help='the random seed')\n p.add_argument('--size', type=int, nargs=2,\n help='the output image size')\n p.add_argument('--starting-timestep', '-st', type=float, default=0.9,\n help='the timestep to start at (used with init images)')\n p.add_argument('--steps', type=int, default=1000,\n help='the number of timesteps')\n args = p.parse_args()\n\n if args.device:\n 
device = torch.device(args.device)\n else:\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print('Using device:', device)\n\n model = get_model(args.model)()\n _, side_y, side_x = model.shape\n if args.size:\n side_x, side_y = args.size\n checkpoint = args.checkpoint\n if not checkpoint:\n checkpoint = MODULE_DIR / f'checkpoints/{args.model}.pth'\n model.load_state_dict(torch.load(checkpoint, map_location='cpu'))\n if device.type == 'cuda':\n model = model.half()\n model = model.to(device).eval().requires_grad_(False)\n clip_model_name = model.clip_model if hasattr(model, 'clip_model') else 'ViT-B/16'\n clip_model = clip.load(clip_model_name, jit=False, device=device)[0]\n clip_model.eval().requires_grad_(False)\n normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],\n std=[0.26862954, 0.26130258, 0.27577711])\n make_cutouts = MakeCutouts(clip_model.visual.input_resolution, args.cutn, args.cut_pow)\n\n if args.init:\n init = Image.open(utils.fetch(args.init)).convert('RGB')\n init = resize_and_center_crop(init, (side_x, side_y))\n init = utils.from_pil_image(init).cuda()[None].repeat([args.n, 1, 1, 1])\n\n target_embeds, weights = [], []\n\n for prompt in args.prompts:\n txt, weight = parse_prompt(prompt)\n target_embeds.append(clip_model.encode_text(clip.tokenize(txt).to(device)).float())\n weights.append(weight)\n\n for prompt in args.images:\n path, weight = parse_prompt(prompt)\n img = Image.open(utils.fetch(path)).convert('RGB')\n img = TF.resize(img, min(side_x, side_y, *img.size),\n transforms.InterpolationMode.LANCZOS)\n batch = make_cutouts(TF.to_tensor(img)[None].to(device))\n embeds = F.normalize(clip_model.encode_image(normalize(batch)).float(), dim=-1)\n target_embeds.append(embeds)\n weights.extend([weight / args.cutn] * args.cutn)\n\n if not target_embeds:\n raise RuntimeError('At least one text or image prompt must be specified.')\n target_embeds = torch.cat(target_embeds)\n weights = torch.tensor(weights, device=device)\n if weights.sum().abs() < 1e-3:\n raise RuntimeError('The weights must not sum to 0.')\n weights /= weights.sum().abs()\n\n clip_embed = F.normalize(target_embeds.mul(weights[:, None]).sum(0, keepdim=True), dim=-1)\n clip_embed = clip_embed.repeat([args.n, 1])\n\n torch.manual_seed(args.seed)\n\n def cond_fn(x, t, pred, clip_embed):\n clip_in = normalize(make_cutouts((pred + 1) / 2))\n image_embeds = clip_model.encode_image(clip_in).view([args.cutn, x.shape[0], -1])\n losses = spherical_dist_loss(image_embeds, clip_embed[None])\n loss = losses.mean(0).sum() * args.clip_guidance_scale\n grad = -torch.autograd.grad(loss, x)[0]\n return grad\n\n def run(x, steps, clip_embed):\n if hasattr(model, 'clip_model'):\n extra_args = {'clip_embed': clip_embed}\n cond_fn_ = cond_fn\n else:\n extra_args = {}\n cond_fn_ = partial(cond_fn, clip_embed=clip_embed)\n if not args.clip_guidance_scale:\n return sampling.sample(model, x, steps, args.eta, extra_args)\n return sampling.cond_sample(model, x, steps, args.eta, extra_args, cond_fn_)\n\n def run_all(n, batch_size):\n x = torch.randn([args.n, 3, side_y, side_x], device=device)\n t = torch.linspace(1, 0, args.steps + 1, device=device)[:-1]\n steps = utils.get_spliced_ddpm_cosine_schedule(t)\n if args.init:\n steps = steps[steps < args.starting_timestep]\n alpha, sigma = utils.t_to_alpha_sigma(steps[0])\n x = init * alpha + x * sigma\n for i in trange(0, n, batch_size):\n cur_batch_size = min(n - i, batch_size)\n outs = run(x[i:i+cur_batch_size], steps, 
clip_embed[i:i+cur_batch_size])\n for j, out in enumerate(outs):\n utils.to_pil_image(out).save(f'out_{i + j:05}.png')\n\n try:\n run_all(args.n, args.batch_size)\n except KeyboardInterrupt:\n pass\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"torch.nn.functional.normalize",
"torch.linspace",
"torch.randint",
"torch.cat",
"torch.load",
"torch.manual_seed",
"torch.randn",
"torch.tensor",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.rand",
"torch.cuda.is_available",
"torch.device",
"torch.autograd.grad"
]
]
|
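The CLIP guidance objective in clip_sample.py reduces to one small, self-contained function; the definition below is taken verbatim from the file, with hypothetical demo tensors added only to show the shapes:

import torch
import torch.nn.functional as F

def spherical_dist_loss(x, y):
    # Project both embeddings onto the unit sphere, then turn the chord
    # length |x - y| into a squared geodesic-style distance.
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)

img = torch.randn(4, 512)  # hypothetical cutout embeddings
txt = torch.randn(1, 512)  # hypothetical prompt embedding
print(spherical_dist_loss(img, txt).shape)  # torch.Size([4])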
vladfi1/ray | [
"3b141b26cd4af491b3c1fb8ce4dbb00265246b1e"
]
| [
"python/ray/rllib/agents/agent.py"
]
| [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport copy\nimport logging\nimport os\nimport pickle\nimport six\nimport tempfile\nimport tensorflow as tf\nfrom types import FunctionType\n\nimport ray\nfrom ray.exceptions import RayError\nfrom ray.rllib.offline import NoopOutput, JsonReader, MixedInput, JsonWriter, \\\n ShuffledInput\nfrom ray.rllib.models import MODEL_DEFAULTS\nfrom ray.rllib.evaluation.policy_evaluator import PolicyEvaluator\nfrom ray.rllib.evaluation.sample_batch import DEFAULT_POLICY_ID\nfrom ray.rllib.optimizers.policy_optimizer import PolicyOptimizer\nfrom ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI\nfrom ray.rllib.utils import FilterManager, deep_update, merge_dicts\nfrom ray.tune.registry import ENV_CREATOR, register_env, _global_registry\nfrom ray.tune.trainable import Trainable\nfrom ray.tune.trial import Resources, ExportFormat\nfrom ray.tune.logger import UnifiedLogger\nfrom ray.tune.result import DEFAULT_RESULTS_DIR\n\nlogger = logging.getLogger(__name__)\n\n# Max number of times to retry a worker failure. We shouldn't try too many\n# times in a row since that would indicate a persistent cluster issue.\nMAX_WORKER_FAILURE_RETRIES = 3\n\n# yapf: disable\n# __sphinx_doc_begin__\nCOMMON_CONFIG = {\n # === Debugging ===\n # Whether to write episode stats and videos to the agent log dir\n \"monitor\": False,\n # Set the ray.rllib.* log level for the agent process and its evaluators\n \"log_level\": \"INFO\",\n # Callbacks that will be run during various phases of training. These all\n # take a single \"info\" dict as an argument. For episode callbacks, custom\n # metrics can be attached to the episode by updating the episode object's\n # custom metrics dict (see examples/custom_metrics_and_callbacks.py).\n \"callbacks\": {\n \"on_episode_start\": None, # arg: {\"env\": .., \"episode\": ...}\n \"on_episode_step\": None, # arg: {\"env\": .., \"episode\": ...}\n \"on_episode_end\": None, # arg: {\"env\": .., \"episode\": ...}\n \"on_sample_end\": None, # arg: {\"samples\": .., \"evaluator\": ...}\n \"on_train_result\": None, # arg: {\"agent\": ..., \"result\": ...}\n },\n # Whether to attempt to continue training if a worker crashes.\n \"ignore_worker_failures\": False,\n\n # === Policy ===\n # Arguments to pass to model. See models/catalog.py for a full list of the\n # available model options.\n \"model\": MODEL_DEFAULTS,\n # Arguments to pass to the policy optimizer. These vary by optimizer.\n \"optimizer\": {},\n\n # === Environment ===\n # Discount factor of the MDP\n \"gamma\": 0.99,\n # Number of steps after which the episode is forced to terminate\n \"horizon\": None,\n # Arguments to pass to the env creator\n \"env_config\": {},\n # Environment name can also be passed via config\n \"env\": None,\n # Whether to clip rewards prior to experience postprocessing. Setting to\n # None means clip for Atari only.\n \"clip_rewards\": None,\n # Whether to np.clip() actions to the action space low/high range spec.\n \"clip_actions\": True,\n # Whether to use rllib or deepmind preprocessors by default\n \"preprocessor_pref\": \"deepmind\",\n\n # === Resources ===\n # Number of actors used for parallelism\n \"num_workers\": 2,\n # Number of GPUs to allocate to the driver. Note that not all algorithms\n # can take advantage of driver GPUs. 
This can be fraction (e.g., 0.3 GPUs).\n \"num_gpus\": 0,\n # Number of CPUs to allocate per worker.\n \"num_cpus_per_worker\": 1,\n # Number of GPUs to allocate per worker. This can be fractional.\n \"num_gpus_per_worker\": 0,\n # Any custom resources to allocate per worker.\n \"custom_resources_per_worker\": {},\n # Number of CPUs to allocate for the driver. Note: this only takes effect\n # when running in Tune.\n \"num_cpus_for_driver\": 1,\n\n # === Execution ===\n # Number of environments to evaluate vectorwise per worker.\n \"num_envs_per_worker\": 1,\n # Default sample batch size\n \"sample_batch_size\": 200,\n # Training batch size, if applicable. Should be >= sample_batch_size.\n # Samples batches will be concatenated together to this size for training.\n \"train_batch_size\": 200,\n # Whether to rollout \"complete_episodes\" or \"truncate_episodes\"\n \"batch_mode\": \"truncate_episodes\",\n # (Deprecated) Use a background thread for sampling (slightly off-policy)\n \"sample_async\": False,\n # Element-wise observation filter, either \"NoFilter\" or \"MeanStdFilter\"\n \"observation_filter\": \"NoFilter\",\n # Whether to synchronize the statistics of remote filters.\n \"synchronize_filters\": True,\n # Configure TF for single-process operation by default\n \"tf_session_args\": {\n # note: overriden by `local_evaluator_tf_session_args`\n \"intra_op_parallelism_threads\": 2,\n \"inter_op_parallelism_threads\": 2,\n \"gpu_options\": {\n \"allow_growth\": True,\n },\n \"log_device_placement\": False,\n \"device_count\": {\n \"CPU\": 1\n },\n \"allow_soft_placement\": True, # required by PPO multi-gpu\n },\n # Override the following tf session args on the local evaluator\n \"local_evaluator_tf_session_args\": {\n # Allow a higher level of parallelism by default, but not unlimited\n # since that can cause crashes with many concurrent drivers.\n \"intra_op_parallelism_threads\": 8,\n \"inter_op_parallelism_threads\": 8,\n },\n # Whether to LZ4 compress individual observations\n \"compress_observations\": False,\n # Drop metric batches from unresponsive workers after this many seconds\n \"collect_metrics_timeout\": 180,\n # If using num_envs_per_worker > 1, whether to create those new envs in\n # remote processes instead of in the same worker. This adds overheads, but\n # can make sense if your envs are very CPU intensive (e.g., for StarCraft).\n \"remote_worker_envs\": False,\n # Similar to remote_worker_envs, but runs the envs asynchronously in the\n # background for greater efficiency. Conflicts with remote_worker_envs.\n \"async_remote_worker_envs\": False,\n\n # === Offline Datasets ===\n # __sphinx_doc_input_begin__\n # Specify how to generate experiences:\n # - \"sampler\": generate experiences via online simulation (default)\n # - a local directory or file glob expression (e.g., \"/tmp/*.json\")\n # - a list of individual file paths/URIs (e.g., [\"/tmp/1.json\",\n # \"s3://bucket/2.json\"])\n # - a dict with string keys and sampling probabilities as values (e.g.,\n # {\"sampler\": 0.4, \"/tmp/*.json\": 0.4, \"s3://bucket/expert.json\": 0.2}).\n # - a function that returns a rllib.offline.InputReader\n \"input\": \"sampler\",\n # Specify how to evaluate the current policy. This only has an effect when\n # reading offline experiences. 
Available options:\n # - \"wis\": the weighted step-wise importance sampling estimator.\n # - \"is\": the step-wise importance sampling estimator.\n # - \"simulation\": run the environment in the background, but use\n # this data for evaluation only and not for learning.\n \"input_evaluation\": [\"is\", \"wis\"],\n # Whether to run postprocess_trajectory() on the trajectory fragments from\n # offline inputs. Note that postprocessing will be done using the *current*\n # policy, not the *behaviour* policy, which is typically undesirable for\n # on-policy algorithms.\n \"postprocess_inputs\": False,\n # If positive, input batches will be shuffled via a sliding window buffer\n # of this number of batches. Use this if the input data is not in random\n # enough order. Input is delayed until the shuffle buffer is filled.\n \"shuffle_buffer_size\": 0,\n # __sphinx_doc_input_end__\n # __sphinx_doc_output_begin__\n # Specify where experiences should be saved:\n # - None: don't save any experiences\n # - \"logdir\" to save to the agent log dir\n # - a path/URI to save to a custom output directory (e.g., \"s3://bucket/\")\n # - a function that returns a rllib.offline.OutputWriter\n \"output\": None,\n # What sample batch columns to LZ4 compress in the output data.\n \"output_compress_columns\": [\"obs\", \"new_obs\"],\n # Max output file size before rolling over to a new file.\n \"output_max_file_size\": 64 * 1024 * 1024,\n # __sphinx_doc_output_end__\n\n # === Multiagent ===\n \"multiagent\": {\n # Map from policy ids to tuples of (policy_graph_cls, obs_space,\n # act_space, config). See policy_evaluator.py for more info.\n \"policy_graphs\": {},\n # Function mapping agent ids to policy ids.\n \"policy_mapping_fn\": None,\n # Optional whitelist of policies to train, or None for all policies.\n \"policies_to_train\": None,\n },\n}\n# __sphinx_doc_end__\n# yapf: enable\n\n\n@DeveloperAPI\ndef with_common_config(extra_config):\n \"\"\"Returns the given config dict merged with common agent confs.\"\"\"\n\n return with_base_config(COMMON_CONFIG, extra_config)\n\n\ndef with_base_config(base_config, extra_config):\n \"\"\"Returns the given config dict merged with a base agent conf.\"\"\"\n\n config = copy.deepcopy(base_config)\n config.update(extra_config)\n return config\n\n\n@PublicAPI\nclass Agent(Trainable):\n \"\"\"All RLlib agents extend this base class.\n\n Agent objects retain internal model state between calls to train(), so\n you should create a new agent instance for each training session.\n\n Attributes:\n env_creator (func): Function that creates a new training env.\n config (obj): Algorithm-specific configuration data.\n logdir (str): Directory in which training outputs should be placed.\n \"\"\"\n\n _allow_unknown_configs = False\n _allow_unknown_subkeys = [\n \"tf_session_args\", \"env_config\", \"model\", \"optimizer\", \"multiagent\",\n \"custom_resources_per_worker\"\n ]\n\n @PublicAPI\n def __init__(self, config=None, env=None, logger_creator=None):\n \"\"\"Initialize an RLLib agent.\n\n Args:\n config (dict): Algorithm-specific configuration data.\n env (str): Name of the environment to use. Note that this can also\n be specified as the `env` key in config.\n logger_creator (func): Function that creates a ray.tune.Logger\n object. 
If unspecified, a default logger is created.\n \"\"\"\n\n config = config or {}\n\n # Vars to synchronize to evaluators on each train call\n self.global_vars = {\"timestep\": 0}\n\n # Agents allow env ids to be passed directly to the constructor.\n self._env_id = self._register_if_needed(env or config.get(\"env\"))\n\n # Create a default logger creator if no logger_creator is specified\n if logger_creator is None:\n timestr = datetime.today().strftime(\"%Y-%m-%d_%H-%M-%S\")\n logdir_prefix = \"{}_{}_{}\".format(self._agent_name, self._env_id,\n timestr)\n\n def default_logger_creator(config):\n \"\"\"Creates a Unified logger with a default logdir prefix\n containing the agent name and the env id\n \"\"\"\n if not os.path.exists(DEFAULT_RESULTS_DIR):\n os.makedirs(DEFAULT_RESULTS_DIR)\n logdir = tempfile.mkdtemp(\n prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)\n return UnifiedLogger(config, logdir, None)\n\n logger_creator = default_logger_creator\n\n Trainable.__init__(self, config, logger_creator)\n\n @classmethod\n @override(Trainable)\n def default_resource_request(cls, config):\n cf = dict(cls._default_config, **config)\n Agent._validate_config(cf)\n # TODO(ekl): add custom resources here once tune supports them\n return Resources(\n cpu=cf[\"num_cpus_for_driver\"],\n gpu=cf[\"num_gpus\"],\n extra_cpu=cf[\"num_cpus_per_worker\"] * cf[\"num_workers\"],\n extra_gpu=cf[\"num_gpus_per_worker\"] * cf[\"num_workers\"])\n\n @override(Trainable)\n @PublicAPI\n def train(self):\n \"\"\"Overrides super.train to synchronize global vars.\"\"\"\n\n if self._has_policy_optimizer():\n self.global_vars[\"timestep\"] = self.optimizer.num_steps_sampled\n self.optimizer.local_evaluator.set_global_vars(self.global_vars)\n for ev in self.optimizer.remote_evaluators:\n ev.set_global_vars.remote(self.global_vars)\n logger.debug(\"updated global vars: {}\".format(self.global_vars))\n\n result = None\n for _ in range(1 + MAX_WORKER_FAILURE_RETRIES):\n try:\n result = Trainable.train(self)\n except RayError as e:\n if self.config[\"ignore_worker_failures\"]:\n logger.exception(\n \"Error in train call, attempting to recover\")\n self._try_recover()\n else:\n logger.info(\n \"Worker crashed during call to train(). 
To attempt to \"\n \"continue training without the failed worker, set \"\n \"`'ignore_worker_failures': True`.\")\n raise e\n else:\n break\n if result is None:\n raise RuntimeError(\"Failed to recover from worker crash\")\n\n if (self.config.get(\"observation_filter\", \"NoFilter\") != \"NoFilter\"\n and hasattr(self, \"local_evaluator\")):\n FilterManager.synchronize(\n self.local_evaluator.filters,\n self.remote_evaluators,\n update_remote=self.config[\"synchronize_filters\"])\n logger.debug(\"synchronized filters: {}\".format(\n self.local_evaluator.filters))\n\n if self._has_policy_optimizer():\n result[\"num_healthy_workers\"] = len(\n self.optimizer.remote_evaluators)\n return result\n\n @override(Trainable)\n def _log_result(self, result):\n if self.config[\"callbacks\"].get(\"on_train_result\"):\n self.config[\"callbacks\"][\"on_train_result\"]({\n \"agent\": self,\n \"result\": result,\n })\n # log after the callback is invoked, so that the user has a chance\n # to mutate the result\n Trainable._log_result(self, result)\n\n @override(Trainable)\n def _setup(self, config):\n env = self._env_id\n if env:\n config[\"env\"] = env\n if _global_registry.contains(ENV_CREATOR, env):\n self.env_creator = _global_registry.get(ENV_CREATOR, env)\n else:\n import gym # soft dependency\n self.env_creator = lambda env_config: gym.make(env)\n else:\n self.env_creator = lambda env_config: None\n\n # Merge the supplied config with the class default\n merged_config = copy.deepcopy(self._default_config)\n merged_config = deep_update(merged_config, config,\n self._allow_unknown_configs,\n self._allow_unknown_subkeys)\n self.raw_user_config = config\n self.config = merged_config\n Agent._validate_config(self.config)\n if self.config.get(\"log_level\"):\n logging.getLogger(\"ray.rllib\").setLevel(self.config[\"log_level\"])\n\n # TODO(ekl) setting the graph is unnecessary for PyTorch agents\n with tf.Graph().as_default():\n self._init()\n\n @override(Trainable)\n def _stop(self):\n # workaround for https://github.com/ray-project/ray/issues/1516\n if hasattr(self, \"remote_evaluators\"):\n for ev in self.remote_evaluators:\n ev.__ray_terminate__.remote()\n if hasattr(self, \"optimizer\"):\n self.optimizer.stop()\n\n @override(Trainable)\n def _save(self, checkpoint_dir):\n checkpoint_path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(self.iteration))\n pickle.dump(self.__getstate__(), open(checkpoint_path, \"wb\"))\n return checkpoint_path\n\n @override(Trainable)\n def _restore(self, checkpoint_path):\n extra_data = pickle.load(open(checkpoint_path, \"rb\"))\n self.__setstate__(extra_data)\n\n @DeveloperAPI\n def _init(self):\n \"\"\"Subclasses should override this for custom initialization.\"\"\"\n\n raise NotImplementedError\n\n @PublicAPI\n def compute_action(self,\n observation,\n state=None,\n prev_action=None,\n prev_reward=None,\n info=None,\n policy_id=\"default\"):\n \"\"\"Computes an action for the specified policy.\n\n Note that you can also access the policy object through\n self.get_policy(policy_id) and call compute_actions() on it directly.\n\n Arguments:\n observation (obj): observation from the environment.\n state (list): RNN hidden state, if any. If state is not None,\n then all of compute_single_action(...) 
is returned\n (computed action, rnn state, logits dictionary).\n Otherwise compute_single_action(...)[0] is\n returned (computed action).\n prev_action (obj): previous action value, if any\n prev_reward (int): previous reward, if any\n info (dict): info object, if any\n policy_id (str): policy to query (only applies to multi-agent).\n \"\"\"\n\n if state is None:\n state = []\n preprocessed = self.local_evaluator.preprocessors[policy_id].transform(\n observation)\n filtered_obs = self.local_evaluator.filters[policy_id](\n preprocessed, update=False)\n if state:\n return self.get_policy(policy_id).compute_single_action(\n filtered_obs, state, prev_action, prev_reward, info)\n return self.get_policy(policy_id).compute_single_action(\n filtered_obs, state, prev_action, prev_reward, info)[0]\n\n @property\n def iteration(self):\n \"\"\"Current training iter, auto-incremented with each train() call.\"\"\"\n\n return self._iteration\n\n @property\n def _agent_name(self):\n \"\"\"Subclasses should override this to declare their name.\"\"\"\n\n raise NotImplementedError\n\n @property\n def _default_config(self):\n \"\"\"Subclasses should override this to declare their default config.\"\"\"\n\n raise NotImplementedError\n\n @PublicAPI\n def get_policy(self, policy_id=DEFAULT_POLICY_ID):\n \"\"\"Return policy graph for the specified id, or None.\n\n Arguments:\n policy_id (str): id of policy graph to return.\n \"\"\"\n\n return self.local_evaluator.get_policy(policy_id)\n\n @PublicAPI\n def get_weights(self, policies=None):\n \"\"\"Return a dictionary of policy ids to weights.\n\n Arguments:\n policies (list): Optional list of policies to return weights for,\n or None for all policies.\n \"\"\"\n return self.local_evaluator.get_weights(policies)\n\n @PublicAPI\n def set_weights(self, weights):\n \"\"\"Set policy weights by policy id.\n\n Arguments:\n weights (dict): Map of policy ids to weights to set.\n \"\"\"\n self.local_evaluator.set_weights(weights)\n\n @DeveloperAPI\n def make_local_evaluator(self,\n env_creator,\n policy_graph,\n extra_config=None):\n \"\"\"Convenience method to return configured local evaluator.\"\"\"\n\n return self._make_evaluator(\n PolicyEvaluator,\n env_creator,\n policy_graph,\n 0,\n merge_dicts(\n # important: allow local tf to use more CPUs for optimization\n merge_dicts(\n self.config, {\n \"tf_session_args\": self.\n config[\"local_evaluator_tf_session_args\"]\n }),\n extra_config or {}))\n\n @DeveloperAPI\n def make_remote_evaluators(self, env_creator, policy_graph, count):\n \"\"\"Convenience method to return a number of remote evaluators.\"\"\"\n\n remote_args = {\n \"num_cpus\": self.config[\"num_cpus_per_worker\"],\n \"num_gpus\": self.config[\"num_gpus_per_worker\"],\n \"resources\": self.config[\"custom_resources_per_worker\"],\n }\n\n cls = PolicyEvaluator.as_remote(**remote_args).remote\n\n return [\n self._make_evaluator(cls, env_creator, policy_graph, i + 1,\n self.config) for i in range(count)\n ]\n\n @DeveloperAPI\n def export_policy_model(self, export_dir, policy_id=DEFAULT_POLICY_ID):\n \"\"\"Export policy model with given policy_id to local directory.\n\n Arguments:\n export_dir (string): Writable local directory.\n policy_id (string): Optional policy id to export.\n\n Example:\n >>> agent = MyAgent()\n >>> for _ in range(10):\n >>> agent.train()\n >>> agent.export_policy_model(\"/tmp/export_dir\")\n \"\"\"\n self.local_evaluator.export_policy_model(export_dir, policy_id)\n\n @DeveloperAPI\n def export_policy_checkpoint(self,\n export_dir,\n 
filename_prefix=\"model\",\n policy_id=DEFAULT_POLICY_ID):\n \"\"\"Export tensorflow policy model checkpoint to local directory.\n\n Arguments:\n export_dir (string): Writable local directory.\n filename_prefix (string): file name prefix of checkpoint files.\n policy_id (string): Optional policy id to export.\n\n Example:\n >>> agent = MyAgent()\n >>> for _ in range(10):\n >>> agent.train()\n >>> agent.export_policy_checkpoint(\"/tmp/export_dir\")\n \"\"\"\n self.local_evaluator.export_policy_checkpoint(\n export_dir, filename_prefix, policy_id)\n\n @classmethod\n def resource_help(cls, config):\n return (\"\\n\\nYou can adjust the resource requests of RLlib agents by \"\n \"setting `num_workers`, `num_gpus`, and other configs. See \"\n \"the DEFAULT_CONFIG defined by each agent for more info.\\n\\n\"\n \"The config of this agent is: {}\".format(config))\n\n @staticmethod\n def _validate_config(config):\n if \"gpu\" in config:\n raise ValueError(\n \"The `gpu` config is deprecated, please use `num_gpus=0|1` \"\n \"instead.\")\n if \"gpu_fraction\" in config:\n raise ValueError(\n \"The `gpu_fraction` config is deprecated, please use \"\n \"`num_gpus=<fraction>` instead.\")\n if \"use_gpu_for_workers\" in config:\n raise ValueError(\n \"The `use_gpu_for_workers` config is deprecated, please use \"\n \"`num_gpus_per_worker=1` instead.\")\n if type(config[\"input_evaluation\"]) != list:\n raise ValueError(\n \"`input_evaluation` must be a list of strings, got {}\".format(\n config[\"input_evaluation\"]))\n\n def _try_recover(self):\n \"\"\"Try to identify and blacklist any unhealthy workers.\n\n This method is called after an unexpected remote error is encountered\n from a worker. It issues check requests to all current workers and\n blacklists any that respond with error. 
If no healthy workers remain,\n an error is raised.\n \"\"\"\n\n if not self._has_policy_optimizer():\n raise NotImplementedError(\n \"Recovery is not supported for this algorithm\")\n\n logger.info(\"Health checking all workers...\")\n checks = []\n for ev in self.optimizer.remote_evaluators:\n _, obj_id = ev.sample_with_count.remote()\n checks.append(obj_id)\n\n healthy_evaluators = []\n for i, obj_id in enumerate(checks):\n ev = self.optimizer.remote_evaluators[i]\n try:\n ray.get(obj_id)\n healthy_evaluators.append(ev)\n logger.info(\"Worker {} looks healthy\".format(i + 1))\n except RayError:\n logger.exception(\"Blacklisting worker {}\".format(i + 1))\n try:\n ev.__ray_terminate__.remote()\n except Exception:\n logger.exception(\"Error terminating unhealthy worker\")\n\n if len(healthy_evaluators) < 1:\n raise RuntimeError(\n \"Not enough healthy workers remain to continue.\")\n\n self.optimizer.reset(healthy_evaluators)\n\n def _has_policy_optimizer(self):\n return hasattr(self, \"optimizer\") and isinstance(\n self.optimizer, PolicyOptimizer)\n\n def _make_evaluator(self, cls, env_creator, policy_graph, worker_index,\n config):\n def session_creator():\n logger.debug(\"Creating TF session {}\".format(\n config[\"tf_session_args\"]))\n return tf.Session(\n config=tf.ConfigProto(**config[\"tf_session_args\"]))\n\n if isinstance(config[\"input\"], FunctionType):\n input_creator = config[\"input\"]\n elif config[\"input\"] == \"sampler\":\n input_creator = (lambda ioctx: ioctx.default_sampler_input())\n elif isinstance(config[\"input\"], dict):\n input_creator = (lambda ioctx: ShuffledInput(\n MixedInput(config[\"input\"], ioctx),\n config[\"shuffle_buffer_size\"]))\n else:\n input_creator = (lambda ioctx: ShuffledInput(\n JsonReader(config[\"input\"], ioctx),\n config[\"shuffle_buffer_size\"]))\n\n if isinstance(config[\"output\"], FunctionType):\n output_creator = config[\"output\"]\n elif config[\"output\"] is None:\n output_creator = (lambda ioctx: NoopOutput())\n elif config[\"output\"] == \"logdir\":\n output_creator = (lambda ioctx: JsonWriter(\n ioctx.log_dir,\n ioctx,\n max_file_size=config[\"output_max_file_size\"],\n compress_columns=config[\"output_compress_columns\"]))\n else:\n output_creator = (lambda ioctx: JsonWriter(\n config[\"output\"],\n ioctx,\n max_file_size=config[\"output_max_file_size\"],\n compress_columns=config[\"output_compress_columns\"]))\n\n if config[\"input\"] == \"sampler\":\n input_evaluation = []\n else:\n input_evaluation = config[\"input_evaluation\"]\n\n return cls(\n env_creator,\n self.config[\"multiagent\"][\"policy_graphs\"] or policy_graph,\n policy_mapping_fn=self.config[\"multiagent\"][\"policy_mapping_fn\"],\n policies_to_train=self.config[\"multiagent\"][\"policies_to_train\"],\n tf_session_creator=(session_creator\n if config[\"tf_session_args\"] else None),\n batch_steps=config[\"sample_batch_size\"],\n batch_mode=config[\"batch_mode\"],\n episode_horizon=config[\"horizon\"],\n preprocessor_pref=config[\"preprocessor_pref\"],\n sample_async=config[\"sample_async\"],\n compress_observations=config[\"compress_observations\"],\n num_envs=config[\"num_envs_per_worker\"],\n observation_filter=config[\"observation_filter\"],\n clip_rewards=config[\"clip_rewards\"],\n clip_actions=config[\"clip_actions\"],\n env_config=config[\"env_config\"],\n model_config=config[\"model\"],\n policy_config=config,\n worker_index=worker_index,\n monitor_path=self.logdir if config[\"monitor\"] else None,\n log_dir=self.logdir,\n 
log_level=config[\"log_level\"],\n callbacks=config[\"callbacks\"],\n input_creator=input_creator,\n input_evaluation=input_evaluation,\n output_creator=output_creator,\n remote_worker_envs=config[\"remote_worker_envs\"],\n async_remote_worker_envs=config[\"async_remote_worker_envs\"])\n\n @override(Trainable)\n def _export_model(self, export_formats, export_dir):\n ExportFormat.validate(export_formats)\n exported = {}\n if ExportFormat.CHECKPOINT in export_formats:\n path = os.path.join(export_dir, ExportFormat.CHECKPOINT)\n self.export_policy_checkpoint(path)\n exported[ExportFormat.CHECKPOINT] = path\n if ExportFormat.MODEL in export_formats:\n path = os.path.join(export_dir, ExportFormat.MODEL)\n self.export_policy_model(path)\n exported[ExportFormat.MODEL] = path\n return exported\n\n def __getstate__(self):\n state = {}\n if hasattr(self, \"local_evaluator\"):\n state[\"evaluator\"] = self.local_evaluator.save()\n if hasattr(self, \"optimizer\") and hasattr(self.optimizer, \"save\"):\n state[\"optimizer\"] = self.optimizer.save()\n return state\n\n def __setstate__(self, state):\n if \"evaluator\" in state:\n self.local_evaluator.restore(state[\"evaluator\"])\n remote_state = ray.put(state[\"evaluator\"])\n for r in self.remote_evaluators:\n r.restore.remote(remote_state)\n if \"optimizer\" in state:\n self.optimizer.restore(state[\"optimizer\"])\n\n def _register_if_needed(self, env_object):\n if isinstance(env_object, six.string_types):\n return env_object\n elif isinstance(env_object, type):\n name = env_object.__name__\n register_env(name, lambda config: env_object(config))\n return name\n raise ValueError(\n \"{} is an invalid env specification. \".format(env_object) +\n \"You can specify a custom env as either a class \"\n \"(e.g., YourEnvCls) or a registered env id (e.g., \\\"your_env\\\").\")\n"
]
| [
[
"tensorflow.ConfigProto",
"tensorflow.Graph"
]
]
|
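with_base_config above is the core of RLlib's config merging: deep-copy the defaults, then overlay the user dict. A minimal sketch of the idiom (the example keys are illustrative, not RLlib's full defaults):

import copy

def with_base_config(base_config, extra_config):
    # Deep-copy first so per-agent overrides never mutate shared defaults.
    config = copy.deepcopy(base_config)
    config.update(extra_config)
    return config

DEFAULTS = {"num_workers": 2, "model": {"fcnet_hiddens": [256, 256]}}
cfg = with_base_config(DEFAULTS, {"num_workers": 8})
assert DEFAULTS["num_workers"] == 2  # defaults untouched

Note that dict.update is shallow on nested keys, which is why Agent._setup instead merges user configs with deep_update and an _allow_unknown_subkeys whitelist.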
wx1356021541/flow-statistics | [
"7a2ba366c2448cea8afa5226897f02010d491f29"
]
| [
"utils/backbone.py"
]
| [
"import glob, os\nimport tensorflow as tf\nfrom utils import label_map_util\n\n\ndef set_model(model_name):\n\tmodel_found = 0\n\n\tfor file in glob.glob(\"*\"):\n\t\tif (file == model_name):\n\t\t\tmodel_found = 1\n\n\t# What model to download.\n\tmodel_name = model_name\n\tmodel_file = model_name + '.tar.gz'\n\tdownload_base = 'http://download.tensorflow.org/models/object_detection/'\n\n\t# Path to frozen detection graph. This is the actual model that is used for the object detection.\n\tpath_to_ckpt = model_name + '/frozen_inference_graph.pb'\n\n\t# List of the strings that is used to add correct label for each box.\n\tpath_to_labels = os.path.join('data', 'mscoco_label_map.pbtxt')\n\n\tnum_classes = 90\n\n\t# Download Model if it has not been downloaded yet\n\tif (model_found == 0):\t\t\n\t\topener = urllib.request.URLopener()\n\t\topener.retrieve(download_base + model_file, model_file)\n\t\ttar_file = tarfile.open(model_file)\n\t\tfor file in tar_file.getmembers():\n\t\t file_name = os.path.basename(file.name)\n\t\t if 'frozen_inference_graph.pb' in file_name:\n\t\t tar_file.extract(file, os.getcwd())\n\n\t# Load a (frozen) Tensorflow model into memory.\n\tdetection_graph = tf.Graph()\n\twith detection_graph.as_default():\n\t od_graph_def = tf.GraphDef()\n\t with tf.gfile.GFile(path_to_ckpt, 'rb') as fid:\n\t serialized_graph = fid.read()\n\t od_graph_def.ParseFromString(serialized_graph)\n\t tf.import_graph_def(od_graph_def, name='')\n\n# Loading label map\n# Label maps map indices to category names, so that when our convolution network predicts 5, we know that this corresponds to airplane. Here I \t\tuse internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine\n\tlabel_map = label_map_util.load_labelmap(path_to_labels)\n\tcategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=num_classes, use_display_name=True)\n\tcategory_index = label_map_util.create_category_index(categories)\n\n\treturn detection_graph, category_index\n"
]
| [
[
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.GraphDef"
]
]
|
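set_model above calls urllib.request.URLopener and tarfile.open but never imports urllib or tarfile, so the download branch raises NameError on a clean run. A hedged, self-contained sketch of that step with the imports in place, swapping the deprecated URLopener for urllib.request.urlretrieve (fetch_frozen_graph is a hypothetical name):

import os
import tarfile
import urllib.request

DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

def fetch_frozen_graph(model_name):
    model_file = model_name + '.tar.gz'
    if not os.path.exists(model_name):
        urllib.request.urlretrieve(DOWNLOAD_BASE + model_file, model_file)
        with tarfile.open(model_file) as tar:
            for member in tar.getmembers():
                # Keep only the frozen graph, as the original loop does.
                if 'frozen_inference_graph.pb' in os.path.basename(member.name):
                    tar.extract(member, os.getcwd())
    return os.path.join(model_name, 'frozen_inference_graph.pb')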
Qbuer/SMP_2018 | [
"6a7f167e6791edd8a518b030bda2d3adb84ee51d"
]
| [
"Classfiy.py"
]
| [
"\n# coding: utf-8\n\n\n\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nfrom tensorflow.contrib.seq2seq import BahdanauAttention\nimport logging\nimport util\nimport time\nimport sys\nimport argparse\nimport numpy as np\nimport json\nimport pickle\nfrom datetime import datetime\nimport os \n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogging.basicConfig(format=\"%(levelname)s:%(message)s\", level=logging.DEBUG)\n\n\nclass Config(object):\n n_word_features = 1\n\n n_word_embed_size = -1\n n_classes = -1\n max_length = -1\n \n dropout = 0.5\n hidden_size = 300\n batch_size = 32\n n_epochs = 200\n n_layer = 3\n lr = 0.001\n\n n_context_vector = 320\n\n \n pass\n\n def __init__(self, args):\n print (args)\n if \"model_path\" in args:\n self.model_path = args.model_path\n else:\n self.model_path = \"results/{:%Y%m%d_%H%M%S}/\".format(datetime.now())\n \n print(self.model_path)\n self.output_model = self.model_path + args.model_name\n \n\n\ndef preprocess_data(data, token2id):\n ret = []\n for sentence, label in data:\n token_sentence = np.array([token2id[x] if token2id.get(x) else 0 for x in sentence])\n ret.append((token_sentence, label))\n return ret\n \nclass Classifier(object):\n \"\"\"分类器\n \n Attributes:\n \n \"\"\"\n\n \n def add_placeholders(self):\n \"\"\"Some docs...\"\"\"\n max_length = self.config.max_length\n self.input_placeholder = tf.placeholder(\n tf.int32, shape=[None, max_length])\n self.labels_placeholder = tf.placeholder(\n tf.int32, shape=[None, ])\n self.mask_placehoder = tf.placeholder(\n tf.bool, shape=[None, max_length])\n self.dropout_placehoder = tf.placeholder(tf.float32, shape=[])\n \n def create_feed_dict(self, inputs_batch, labels_batch=None, dropout=1):\n \"\"\"Some docs...\"\"\"\n \n feed_dict = {\n self.input_placeholder: inputs_batch,\n self.dropout_placehoder: dropout\n }\n if labels_batch is not None:\n feed_dict[self.labels_placeholder] = labels_batch\n return feed_dict\n\n def add_embedding(self):\n embeddings = tf.nn.embedding_lookup(self.pretrained_embeddings, self.input_placeholder)\n embeddings = tf.reshape(\n embeddings, \n (-1, self.config.max_length, self.config.n_word_embed_size * self.config.n_word_features))\n return embeddings\n \n def add_prediction_op(self):\n x = self.add_embedding()\n #x.shape: [batch_size, max_length, vector_dim]\n dropout_rate = self.dropout_placehoder\n\n self.config.cell = 'lstm'\n\n if self.config.cell == 'lstm':\n lstm_cell = tf.nn.rnn_cell.LSTMCell(self.config.hidden_size)\n initial_state = lstm_cell.zero_state(self.config.batch_size, dtype=tf.float32)\n outputs, state = tf.nn.dynamic_rnn(lstm_cell, x, initial_state=initial_state)\n # outputs: [batch_size, max_length, cell_state_size]\n\n # outputs: [batch_size * max_length, self.config.hidden_size]\n outputs = tf.reshape(outputs, (self.config.batch_size * self.config.max_length, self.config.hidden_size))\n\n\n\n # word attention\n C = tf.get_variable( # C as context embedding\n \"C\",\n shape=[self.config.n_context_vector, 1],\n initializer=tf.contrib.layers.xavier_initializer())\n\n W = tf.get_variable(\n \"W\",\n shape=[self.config.hidden_size, self.config.n_context_vector],\n initializer=tf.contrib.layers.xavier_initializer())\n \n b = tf.get_variable(\n \"b\",\n shape=[self.config.n_context_vector],\n initializer=tf.constant_initializer(0.)\n )\n \n sentence_representation = []\n \n #shape: batch_size * max_length, context_size\n U1 = tf.tanh(tf.matmul(outputs, W)) + b\n 
\n exp = tf.matmul(U1, C)\n\n \n\n\n #shape (batch_size, max_length)\n exp = tf.reshape(exp, (self.config.batch_size, self.config.max_length, 1))\n print(exp.shape)\n\n #shape (batch_size, 1)\n tmp = tf.reduce_sum(exp, axis=1)\n print(tmp.shape)\n \n # TODO 并行\n\n outputs = tf.reshape(outputs, (self.config.batch_size, self.config.max_length, self.config.hidden_size))\n for index in range(self.config.batch_size):\n \n outputs_ = outputs[index]\n \n sent = tf.reduce_sum(outputs_ * tmp[index], axis=0)\n\n sentence_representation.append(sent)\n \n S = tf.convert_to_tensor(sentence_representation)\n\n \n \n\n \n\n\n \n \n\n\n \n\n\n\n\n \n\n\n \n\n # def attention():\n \n \n\n\n # lstm_layers = [tf.nn.rnn_cell.LSTMCell(self.config.hidden_size) for _ in range(self.config.n_layer)]\n # mutil_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(lstm_layers)\n\n # forward_cell = tf.nn.rnn_cell.LSTMCell(self.config.hidden_size)\n # backward_cell = tf.nn.rnn_cell.LSTMCell(self.config.hidden_size)\n \n # fw_initial_state = \n\n # outputs, output_states = tf.nn.bidirectional_dynamic_rnn(forward_cell, backward_cell, x, dtype=tf.float32)\n # initial_state = mutil_rnn_cell.zero_state(self.config.batch_size, dtype=tf.float32)\n # outputs, state = tf.nn.dynamic_rnn(mutil_rnn_cell, x, initial_state=initial_state)\n \n\n\n # attention\n \n U = tf.get_variable(\n \"U\",\n shape=[self.config.hidden_size, self.config.n_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n b2 = tf.get_variable(\n \"b2\",\n shape=[self.config.n_classes],\n initializer=tf.constant_initializer(0.))\n \n \n # fw_bw_concat = tf.concat(outputs, 2)\n # logger.info(\"output.shape: %s\", str(outputs[0].shape))\n # logger.info(\"fw_bw_concat.shape: %s\", str(fw_bw_concat.shape))\n # h_dropout = tf.nn.dropout(fw_bw_concat[:,-1,:], dropout_rate)\n \n\n\n preds = tf.matmul(S, U) + b2\n return preds\n \n def add_loss_op(self, preds):\n \n loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder,\n logits=preds,\n name=\"loss\"))\n return loss\n\n def add_training_op(self, loss):\n train_op = tf.train.AdamOptimizer(self.config.lr).minimize(loss)\n return train_op\n \n def train_on_batch(self, sess, inputs_batch, labels_batch):\n feed = self.create_feed_dict(\n inputs_batch, labels_batch=labels_batch, dropout=self.config.dropout)\n _, loss = sess.run([self.train_op, self.loss], feed_dict=feed)\n return loss\n\n def predict_on_batch(self, sess, inputs_batch):\n feed = self.create_feed_dict(\n inputs_batch)\n predictions = np.argmax(sess.run(self.pred, feed_dict=feed), axis=1)\n return predictions\n\n def evaluate(self, sess, data, data_obj):\n correct_preds, total_correct, total_preds = 0., 0., 0.\n\n labels , preds = self.output(sess, data_obj, data)\n\n for pred, label in zip(preds, labels):\n if pred == label:\n correct_preds += 1\n total_preds = len(preds)\n \n \n return correct_preds / total_preds\n\n def output(self, sess, inputs_obj, inputs=None):\n if inputs is None:\n inputs = preprocess_data(inputs_obj.padding_data, self.token2id)\n\n batch = util.minibatches(inputs, self.config.batch_size, shuffle=False)\n predictions = []\n labels = []\n for x in batch:\n if len(x[0]) < self.config.batch_size:\n continue\n predictions += list(self.predict_on_batch(sess, x[0]))\n labels += list(x[1])\n \n return labels, predictions\n\n def fit(self, sess, saver, train_data_obj, dev_data_obj):\n best_score = 0.\n train_data = preprocess_data(train_data_obj.padding_data, self.token2id)\n dev_data = 
preprocess_data(dev_data_obj.padding_data, self.token2id)\n for epoch in range(self.config.n_epochs):\n logger.info(\"Epoch %d out of %d\", epoch + 1, self.config.n_epochs)\n batch = util.minibatches(train_data, self.config.batch_size, shuffle=True)\n loss_list = []\n for x in batch: \n # print(len(x[0]))\n if len(x[0]) < self.config.batch_size:\n logger.info('insufficient batch with length of %d' % (len(x[0])))\n continue\n loss_list.append(self.train_on_batch(sess, *x))\n \n logger.info(\"average loss: %f\" % (np.average(loss_list)))\n logger.info(\"training finished\")\n \n logger.info(\"Evaluating on development data\")\n \n \n score = self.evaluate(sess, dev_data, train_data_obj)\n logger.info(\"P: %f\" % (score))\n \n \n if score > best_score:\n best_score = score\n logger.info(\"New best score! Saving model in %s\", self.config.output_model)\n if best_score > 0.85:\n saver.save(sess, self.config.output_model)\n\n return best_score\n\n \n \n\n def build(self):\n \"\"\"Some docs\"\"\"\n self.add_placeholders()\n self.pred = self.add_prediction_op()\n self.loss = self.add_loss_op(self.pred)\n self.train_op = self.add_training_op(self.loss)\n\n def __init__(self, pretrained_embeddings, token2id, config):\n \"\"\"Some docs\"\"\"\n self.pretrained_embeddings = pretrained_embeddings\n self.token2id = token2id\n self.input_placeholder = None\n self.labels_placeholder = None\n self.mask_placehoder = None\n self.dropout_placehoder = None\n self.config = config\n \n self.build()\n\n\ndef do_train(args):\n\n pretrained_embeddings, token2id = util.load_word_embedding(input_file=args.vectors, cache='cache')\n stopwords = util.load_stopwords()\n stopwords = None\n train_data = util.Data(args.data_train, args.ltp_data, stopwords=stopwords)\n dev_data = util.Data(args.data_dev, args.ltp_data, max_length=train_data.max_length, stopwords=stopwords)\n config = Config(args)\n print(train_data.max_length)\n # 配置参数. 测试集如何设置?\n _, config.max_length = train_data.get_metadata()\n config.n_classes = len(train_data.LABELS)\n config.n_word_embed_size = len(pretrained_embeddings[0])\n\n\n with tf.Graph().as_default():\n logger.info(\"Building model...\",)\n start = time.time()\n model = Classifier(pretrained_embeddings, token2id, config)\n logger.info(\"took %.2f seconds\", time.time() - start)\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n gpu_options = tf.GPUOptions(allow_growth=True)\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:\n \n session.run(init)\n score = model.fit(session, saver, train_data, dev_data) \n print(\"\\n\")\n logger.info(\"training finished, took %.2f seconds with P: %.2f\", time.time() - start, score)\n\ndef do_predict(args):\n\n pretrained_embeddings, token2id = util.load_word_embedding(input_file=args.vectors, cache='cache')\n stopwords = util.load_stopwords()\n stopwords = None\n train_data = util.Data(args.data_train, args.ltp_data, stopwords=stopwords)\n test_data = util.Data(args.data_test, args.ltp_data, max_length=train_data.max_length, stopwords=stopwords)\n config = Config(args)\n # 配置参数. 
测试集如何设置?\n _, config.max_length = train_data.get_metadata()\n config.n_classes = len(train_data.LABELS)\n config.n_word_embed_size = len(pretrained_embeddings[0])\n config.batch_size = len(test_data.data)\n \n\n with tf.Graph().as_default():\n logger.info(\"Building model...\",)\n start = time.time()\n model = Classifier(pretrained_embeddings, token2id, config)\n logger.info(\"took %.2f seconds\", time.time() - start)\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n gpu_options = tf.GPUOptions(allow_growth=True)\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:\n \n session.run(init)\n saver.restore(session, model.config.output_model)\n labels, prediction = model.output(session, test_data, None)\n print(labels)\n print(prediction)\n \n test_data.update_labels(prediction).save_result()\n # print(model.evaluate(session, None, test_data))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='SMP2018 -- Text Classification')\n \n parser.add_argument('-mp', '--model-path', default=\"result/\", help=\"Path to model\")\n parser.add_argument('-mn', '--model-name', help=\"Name of model\", required=True)\n parser.add_argument('-vv', '--vectors', default=\"/users4/zhqiao/sgns.target.word-word.dynwin5.thr10.neg5.dim300.iter5\", help=\"Path to word vectors file\")\n parser.add_argument('-ltp', '--ltp-data', default=\"/users4/zhqiao/workspace/ltp/\", help=\"Path to ltp_data\")\n\n subparsers = parser.add_subparsers()\n command_parser = subparsers.add_parser('train', help='')\n command_parser.add_argument('-dt', '--data-train', default=\"data/train.json\", help=\"Training data\")\n command_parser.add_argument('-dd', '--data-dev', default=\"data/dev.json\", help=\"Dev data\")\n command_parser.set_defaults(func=do_train)\n\n command_parser = subparsers.add_parser('predict', help='')\n command_parser.add_argument('-dt', '--data-train', default=\"data/train.json\", help=\"Training data\")\n command_parser.add_argument('-dd', '--data-test', default=\"data/test.json\", help=\"Training data\")\n command_parser.add_argument('-out', '--out-put', default=\"out.json\", help=\"predict file\")\n \n command_parser.set_defaults(func=do_predict)\n\n command_parser = subparsers.add_parser('env_test', help='')\n command_parser.set_defaults(func=util.env_testing)\n\n\n ARGS = parser.parse_args()\n \n \n\n if ARGS.func is None:\n parser.print_help()\n sys.exit(1)\n else:\n ARGS.func(ARGS)\n \n\n"
]
| [
[
"tensorflow.convert_to_tensor",
"tensorflow.nn.dynamic_rnn",
"tensorflow.matmul",
"tensorflow.Graph",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.constant_initializer",
"tensorflow.global_variables_initializer",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Saver",
"numpy.average",
"tensorflow.nn.embedding_lookup"
]
]
|
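The word-attention block in add_prediction_op above loops over the batch one sample at a time and flags the loop with "# TODO 并行" ("TODO: parallelize"). A hedged TF1-style sketch of the standard vectorized form of context-vector attention (the Yang et al. hierarchical-attention formulation, which also normalizes the scores with a softmax over time steps, unlike the unnormalized sum in the loop above):

import tensorflow as tf

def attention_pool(outputs, hidden_size, context_size):
    # outputs: [batch, max_length, hidden_size] from the LSTM.
    W = tf.get_variable("W_att", [hidden_size, context_size],
                        initializer=tf.contrib.layers.xavier_initializer())
    b = tf.get_variable("b_att", [context_size],
                        initializer=tf.constant_initializer(0.))
    C = tf.get_variable("C_att", [context_size, 1],
                        initializer=tf.contrib.layers.xavier_initializer())
    u = tf.tanh(tf.tensordot(outputs, W, axes=1) + b)           # [B, T, ctx]
    scores = tf.nn.softmax(tf.tensordot(u, C, axes=1), axis=1)  # [B, T, 1]
    return tf.reduce_sum(outputs * scores, axis=1)              # [B, hidden]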
MiningMyBusiness/HMMExperiments | [
"555b9f5833b1422bd3c6b7fc64894266fb6ddcc8"
]
| [
"CreateFakeData.py"
]
| [
"import numpy as np\r\nimport scipy.sparse as sp\r\nimport matplotlib.pyplot as plt\r\n\r\n# set initial probability distribution of semi-Markov Process \r\ndef SetInitProb(num_states):\r\n init_prob = np.random.rand(num_states)\r\n init_prob = init_prob/np.sum(init_prob)\r\n return init_prob\r\n\r\ndef SetTransMat(num_states):\r\n trans_mat = sp.rand(num_states, num_states,\r\n density=0.35).toarray()\r\n for i in range(num_states):\r\n trans_mat[i,i] = 0\r\n for i,row in enumerate(trans_mat):\r\n trans_mat[i,:] = row/np.sum(row)\r\n return trans_mat\r\n\r\ndef SetStateDur(num_states, avg_case_dur):\r\n avg_state_dur = avg_case_dur/num_states\r\n state_dur_mean = 8*np.sqrt(avg_state_dur)*np.random.randn(num_states) + avg_state_dur\r\n state_dur_mean_sqt = 8*np.sqrt(state_dur_mean)\r\n state_dur_std = 3*np.sqrt(np.mean(state_dur_mean_sqt))*np.random.randn(num_states) + state_dur_mean_sqt\r\n return state_dur_mean, state_dur_std\r\n\r\ndef SampDist(prob_array):\r\n array_cdf = np.zeros(len(prob_array))\r\n array_cdf[0] = prob_array[0]\r\n for i in range(1,len(prob_array)):\r\n array_cdf[i] = prob_array[i] + array_cdf[i-1]\r\n state = np.min(np.where(array_cdf >= np.random.rand(1)))\r\n return state\r\n\r\n## set parameters\r\nnum_states = 12\r\navg_case_dur = 12000\r\ninit_prob = SetInitProb(num_states)\r\ntrans_mat = SetTransMat(num_states)\r\nstate_dur_mean, state_dur_std = SetStateDur(num_states, avg_case_dur)\r\n\r\n## generate samples\r\nall_gt = []\r\nnum_cases = 100\r\n\r\nfor i in range(0,num_cases):\r\n this_case_dur = int(8000*np.random.rand(1) + 8000)\r\n this_case = []\r\n iter = 0\r\n while len(this_case) < this_case_dur:\r\n if iter == 0:\r\n state = int(SampDist(init_prob))\r\n else:\r\n state = int(SampDist(trans_mat[state,:]))\r\n iter += 1\r\n this_state_dur = int(state_dur_std[state]*np.random.randn(1) + state_dur_mean[state])\r\n sub_list = [state]*this_state_dur\r\n this_case.extend(sub_list)\r\n this_case_arr = np.array(this_case)\r\n all_gt.append(this_case_arr)\r\n\r\nnp.save('all_gt.npy', all_gt)\r\n"
]
| [
[
"numpy.sqrt",
"scipy.sparse.rand",
"numpy.save",
"numpy.random.randn",
"numpy.random.rand",
"numpy.mean",
"numpy.array",
"numpy.sum"
]
]
|
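SampDist above draws one state by scanning a hand-built CDF with np.where; a compact sketch of the same inverse-CDF idea using np.cumsum and np.searchsorted (the function name is mine, not from the record):

    import numpy as np

    def sample_states(prob_array, size=1, rng=None):
        # inverse-CDF sampling: locate each uniform draw inside the cumulative distribution
        rng = np.random.default_rng() if rng is None else rng
        cdf = np.cumsum(prob_array)
        return np.searchsorted(cdf, rng.random(size))

    print(sample_states(np.array([0.2, 0.5, 0.3]), size=5))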
psrc/populationsim | [
"8307c5a53a4d84994a224058a201b8c4f42543b8"
]
| [
"populationsim/steps/setup_data_structures.py"
]
| [
"\n# PopulationSim\n# See full license in LICENSE.txt.\n\nimport logging\nimport os\n\nimport pandas as pd\nimport numpy as np\n\nfrom activitysim.core import inject\nfrom activitysim.core import pipeline\n\nfrom ..assign import assign_variable\nfrom helper import control_table_name\nfrom helper import get_control_table\nfrom helper import get_control_data_table\n\nfrom populationsim.util import setting\n\nlogger = logging.getLogger(__name__)\n\n\ndef read_control_spec(data_filename, configs_dir):\n\n # read the csv file\n data_file_path = os.path.join(configs_dir, data_filename)\n if not os.path.exists(data_file_path):\n raise RuntimeError(\n \"initial_seed_balancing - control file not found: %s\" % (data_file_path,))\n\n logger.info(\"Reading control file %s\" % data_file_path)\n control_spec = pd.read_csv(data_file_path, comment='#')\n\n geographies = setting('geographies')\n\n if 'geography' not in control_spec.columns:\n raise RuntimeError(\"missing geography column in controls file\")\n\n for g in control_spec.geography.unique():\n if g not in geographies:\n raise RuntimeError(\"unknown geography column '%s' in control file\" % g)\n\n return control_spec\n\n\ndef build_incidence_table(control_spec, households_df, persons_df, crosswalk_df):\n\n hh_col = setting('household_id_col')\n\n incidence_table = pd.DataFrame(index=households_df.index)\n\n seed_tables = {\n 'households': households_df,\n 'persons': persons_df,\n }\n\n for control_row in control_spec.itertuples():\n\n logger.info(\"control target %s\" % control_row.target)\n logger.debug(\"control_row.seed_table %s\" % control_row.seed_table)\n logger.debug(\"control_row.expression %s\" % control_row.expression)\n\n incidence, trace_results = assign_variable(\n target=control_row.target,\n expression=control_row.expression,\n df=seed_tables[control_row.seed_table],\n locals_dict={'np': np},\n df_alias=control_row.seed_table,\n trace_rows=None\n )\n\n # convert boolean True/False values to 1/0\n incidence = incidence * 1\n\n # aggregate person incidence counts to household\n if control_row.seed_table == 'persons':\n df = pd.DataFrame({\n hh_col: persons_df[hh_col],\n 'incidence': incidence\n })\n incidence = df.groupby([hh_col], as_index=True).sum()\n\n incidence_table[control_row.target] = incidence\n\n return incidence_table\n\n\ndef add_geography_columns(incidence_table, households_df, crosswalk_df):\n \"\"\"\n Add seed and meta geography columns to incidence_table\n\n Parameters\n ----------\n incidence_table\n households_df\n crosswalk_df\n\n Returns\n -------\n\n \"\"\"\n\n geographies = setting('geographies')\n meta_geography = geographies[0]\n seed_geography = setting('seed_geography')\n\n # add seed_geography col to incidence table\n incidence_table[seed_geography] = households_df[seed_geography]\n\n # add meta column to incidence table\n seed_to_meta = \\\n crosswalk_df[[seed_geography, meta_geography]] \\\n .groupby(seed_geography, as_index=True).min()[meta_geography]\n incidence_table[meta_geography] = incidence_table[seed_geography].map(seed_to_meta)\n\n return incidence_table\n\n\ndef build_control_table(geo, control_spec, crosswalk_df):\n\n # control_geographies is list with target geography and the geographies beneath it\n control_geographies = setting('geographies')\n assert geo in control_geographies\n control_geographies = control_geographies[control_geographies.index(geo):]\n\n # only want controls for control_geographies\n control_spec = control_spec[control_spec['geography'].isin(control_geographies)]\n 
controls_list = []\n\n # for each geography at or beneath target geography\n for g in control_geographies:\n\n # control spec rows for this geography\n spec = control_spec[control_spec['geography'] == g]\n\n # are there any controls specified for this geography? (e.g. seed has none)\n if len(spec.index) == 0:\n continue\n\n # control_data for this geography\n control_data_df = get_control_data_table(g)\n\n control_data_columns = [geo] + spec.control_field.tolist()\n\n if g == geo:\n # for top level, we expect geo_col, and need to group and sum\n assert geo in control_data_df.columns\n controls = control_data_df[control_data_columns]\n controls.set_index(geo, inplace=True)\n else:\n # aggregate sub geography control totals to the target geo level\n\n # add geo_col to control_data table\n if geo not in control_data_df.columns:\n # create series mapping sub_geo id to geo id\n sub_to_geog = crosswalk_df[[g, geo]].groupby(g, as_index=True).min()[geo]\n\n control_data_df[geo] = control_data_df[g].map(sub_to_geog)\n\n # aggregate (sum) controls to geo level\n controls = control_data_df[control_data_columns].groupby(geo, as_index=True).sum()\n\n controls_list.append(controls)\n\n # concat geography columns\n controls = pd.concat(controls_list, axis=1)\n\n # rename columns from seed_col to target\n columns = {c: t for c, t in zip(control_spec.control_field, control_spec.target)}\n controls.rename(columns=columns, inplace=True)\n\n # reorder columns to match order of control_spec rows\n controls = controls[control_spec.target]\n\n # drop controls for zero-household geographies\n total_hh_control_col = setting('total_hh_control')\n empty = (controls[total_hh_control_col] == 0)\n if empty.any():\n controls = controls[~empty]\n logger.info(\"dropping %s %s control rows with empty total_hh_control\" % (empty.sum(), geo))\n\n return controls\n\n\ndef build_crosswalk_table():\n \"\"\"\n build crosswalk table filtered to include only zones in lowest geography\n \"\"\"\n\n geographies = setting('geographies')\n\n crosswalk_data_table = inject.get_table('geo_cross_walk').to_frame()\n\n # dont need any other geographies\n crosswalk = crosswalk_data_table[geographies]\n\n # filter geo_cross_walk_df to only include geo_ids with lowest_geography controls\n # (just in case geo_cross_walk_df table contains rows for unused low zones)\n low_geography = geographies[-1]\n low_control_data_df = get_control_data_table(low_geography)\n rows_in_low_controls = crosswalk[low_geography].isin(low_control_data_df[low_geography])\n crosswalk = crosswalk[rows_in_low_controls]\n\n return crosswalk\n\n\ndef build_grouped_incidence_table(incidence_table, control_spec, seed_geography):\n\n hh_incidence_table = incidence_table\n household_id_col = setting('household_id_col')\n\n hh_groupby_cols = list(control_spec.target) + [seed_geography]\n hh_grouper = hh_incidence_table.groupby(hh_groupby_cols)\n group_incidence_table = hh_grouper.max()\n group_incidence_table['sample_weight'] = hh_grouper.sum()['sample_weight']\n group_incidence_table['group_size'] = hh_grouper.count()['sample_weight']\n group_incidence_table = group_incidence_table.reset_index()\n\n logger.info(\"grouped incidence table has %s entries, ungrouped has %s\"\n % (len(group_incidence_table.index), len(hh_incidence_table.index)))\n\n # add group_id of each hh to hh_incidence_table\n group_incidence_table['group_id'] = group_incidence_table.index\n hh_incidence_table['group_id'] = hh_incidence_table[hh_groupby_cols].merge(\n group_incidence_table[hh_groupby_cols + 
['group_id']],\n on=hh_groupby_cols,\n how='left').group_id.astype(int).values\n\n # it doesn't really matter what the incidence_table index is until we create population\n # when we need to expand each group to constituent households\n # but incidence_table should have the same name whether grouped or ungrouped\n # so that the rest of the steps can handle them interchangeably\n group_incidence_table.index.name = hh_incidence_table.index.name\n\n # create table mapping household_groups to households and their sample_weights\n # explicitly provide hh_id as a column to make it easier for use when expanding population\n household_groups = hh_incidence_table[['group_id', 'sample_weight']].copy()\n household_groups[household_id_col] = household_groups.index.astype(int)\n\n return group_incidence_table, household_groups\n\n\ndef filter_households(households_df, persons_df, crosswalk_df):\n \"\"\"\n Filter households and persons tables, removing zero weight households\n and any households not in seed zones.\n\n Returns filtered households_df and persons_df\n \"\"\"\n\n # drop any zero weight households (there are some in calm data)\n hh_weight_col = setting('household_weight_col')\n households_df = households_df[households_df[hh_weight_col] > 0]\n\n # remove any households not in seed zones\n seed_geography = setting('seed_geography')\n seed_ids = crosswalk_df[seed_geography].unique()\n\n rows_in_seed_zones = households_df[seed_geography].isin(seed_ids)\n if rows_in_seed_zones.any():\n households_df = households_df[rows_in_seed_zones]\n logger.info(\"dropped %s households not in seed zones\" % (~rows_in_seed_zones).sum())\n logger.info(\"kept %s households in seed zones\" % len(households_df))\n\n return households_df, persons_df\n\n\[email protected]()\ndef setup_data_structures(settings, configs_dir, households, persons):\n \"\"\"\n Setup geographic correspondence (crosswalk), control sets, and incidence tables.\n\n A control tables for target geographies should already have been read in by running\n input_pre_processor. 
The zone control tables contains one row for each zone, with columns\n specifying control field totals for that control\n\n This step reads in the global control file, which specifies which control control fields\n in the control table should be used for balancing, along with their importance and the\n recipe (seed table and expression) for determining household incidence for that control.\n\n If GROUP_BY_INCIDENCE_SIGNATURE setting is enabled, then incidence table rows are\n household group ids and and additional household_groups table is created mapping hh group ids\n to actual hh_ids.\n\n Parameters\n ----------\n settings: dict\n contents of settings.yaml as dict\n configs_dir: str\n households: pipeline table\n persons: pipeline table\n\n creates pipeline tables:\n crosswalk\n controls\n geography-specific controls\n incidence_table\n household_groups (if GROUP_BY_INCIDENCE_SIGNATURE setting is enabled)\n\n modifies tables:\n households\n persons\n\n \"\"\"\n\n seed_geography = setting('seed_geography')\n\n households_df = households.to_frame()\n persons_df = persons.to_frame()\n\n crosswalk_df = build_crosswalk_table()\n inject.add_table('crosswalk', crosswalk_df)\n\n control_spec = read_control_spec(setting('control_file_name', 'controls.csv'), configs_dir)\n inject.add_table('control_spec', control_spec)\n\n geographies = settings['geographies']\n for g in geographies:\n controls = build_control_table(g, control_spec, crosswalk_df)\n inject.add_table(control_table_name(g), controls)\n\n households_df, persons_df = filter_households(households_df, persons_df, crosswalk_df)\n pipeline.replace_table('households', households_df)\n pipeline.replace_table('persons', persons_df)\n\n incidence_table = \\\n build_incidence_table(control_spec, households_df, persons_df, crosswalk_df)\n\n incidence_table = add_geography_columns(incidence_table, households_df, crosswalk_df)\n\n # add sample_weight col to incidence table\n hh_weight_col = setting('household_weight_col')\n incidence_table['sample_weight'] = households_df[hh_weight_col]\n\n if setting('GROUP_BY_INCIDENCE_SIGNATURE'):\n group_incidence_table, household_groups \\\n = build_grouped_incidence_table(incidence_table, control_spec, seed_geography)\n\n inject.add_table('household_groups', household_groups)\n inject.add_table('incidence_table', group_incidence_table)\n else:\n inject.add_table('incidence_table', incidence_table)\n\n\[email protected]()\ndef repop_setup_data_structures(configs_dir, households, persons):\n \"\"\"\n Setup geographic correspondence (crosswalk), control sets, and incidence tables for repop run.\n\n A new lowest-level geography control tables should already have been read in by rerunning\n input_pre_processor with a table_list override. 
The control table contains one row for\n each zone, with columns specifying control field totals for that control\n\n This step reads in the repop control file, which specifies which control control fields\n in the control table should be used for balancing, along with their importance and the\n recipe (seed table and expression) for determining household incidence for that control.\n\n Parameters\n ----------\n configs_dir : str\n households: pipeline table\n persons: pipeline table\n\n Returns\n -------\n\n \"\"\"\n\n seed_geography = setting('seed_geography')\n geographies = setting('geographies')\n low_geography = geographies[-1]\n\n # replace crosswalk table\n crosswalk_df = build_crosswalk_table()\n pipeline.replace_table('crosswalk', crosswalk_df)\n\n # replace control_spec\n control_file_name = setting('repop_control_file_name', 'repop_controls.csv')\n control_spec = read_control_spec(control_file_name, configs_dir)\n\n # repop control spec should only specify controls for lowest level geography\n assert control_spec.geography.unique() == [low_geography]\n\n pipeline.replace_table('control_spec', control_spec)\n\n # build incidence_table with repop controls and households in repop zones\n # filter households (dropping any not in crosswalk) in order to build incidence_table\n # We DO NOT REPLACE households and persons as we need full tables to synthesize population\n # (There is no problem, however, with overwriting the incidence_table and household_groups\n # because the expand_households step has ALREADY created the expanded_household_ids table\n # for the original simulated population. )\n\n households_df = households.to_frame()\n persons_df = persons.to_frame()\n households_df, persons_df = filter_households(households_df, persons_df, crosswalk_df)\n incidence_table = build_incidence_table(control_spec, households_df, persons_df, crosswalk_df)\n incidence_table = add_geography_columns(incidence_table, households_df, crosswalk_df)\n # add sample_weight col to incidence table\n hh_weight_col = setting('household_weight_col')\n incidence_table['sample_weight'] = households_df[hh_weight_col]\n\n # rebuild control tables with only the low level controls (aggregated at higher levels)\n for g in geographies:\n controls = build_control_table(g, control_spec, crosswalk_df)\n pipeline.replace_table(control_table_name(g), controls)\n\n if setting('GROUP_BY_INCIDENCE_SIGNATURE'):\n group_incidence_table, household_groups \\\n = build_grouped_incidence_table(incidence_table, control_spec, seed_geography)\n\n pipeline.replace_table('household_groups', household_groups)\n pipeline.replace_table('incidence_table', group_incidence_table)\n else:\n pipeline.replace_table('incidence_table', incidence_table)\n"
]
| [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
]
|
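build_control_table above rolls sub-geography control totals up to a target geography through a crosswalk; the core map-then-groupby step in isolation, with made-up column names and values:

    import pandas as pd

    crosswalk = pd.DataFrame({"tract": [1, 2, 3, 4], "county": [10, 10, 20, 20]})
    controls = pd.DataFrame({"tract": [1, 2, 3, 4], "num_hh": [100, 150, 80, 120]})

    # map each tract to its county, then sum the tract-level controls per county
    tract_to_county = crosswalk.groupby("tract", as_index=True).min()["county"]
    controls["county"] = controls["tract"].map(tract_to_county)
    county_controls = controls[["county", "num_hh"]].groupby("county", as_index=True).sum()
    print(county_controls)  # num_hh: county 10 -> 250, county 20 -> 200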
LemonPepperSeasoning/Path-Finder-With-Social-Zone | [
"2fe3e977520feff5637ab6a5e54dc5a2777b5097"
]
| [
"CoordinateConverter/main.py"
]
| [
"\nimport math\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\ndef robot_to_camera(rx, ry):\n R = [rx, ry]\n A = math.sqrt(R[0]**2 + R[1]**2) / ratio\n alpha = math.atan(R[1] / R[0])\n\n add_x = math.cos(diff_degree + alpha) * A\n add_y = math.sin(diff_degree + alpha) * A\n\n c_x = p0[0] - add_x\n c_y = p0[1] + add_y\n print(\"ROBOT : {}, {}\".format(c_x, c_y))\n return c_x, c_y\n\n\ndef camera_to_robot(cx, cy):\n C = [cx, cy]\n add_x = p0[0] - C[0]\n add_y = C[1] - p0[1]\n\n r_distance = math.sqrt(add_x ** 2 + add_y ** 2) * ratio\n\n alpha = math.atan(add_y / add_x)\n\n theata = alpha - diff_degree\n\n r_x = math.cos(theata) * r_distance\n r_y = math.sin(theata) * r_distance\n print(\"CAMERA : {}, {}\".format(r_x, r_y))\n return r_x, r_y\n\n\ndef plot():\n x_values = []\n y_values = []\n for i in range(0, 10):\n x_values.append(0)\n y_values.append(i)\n x_values.append(i)\n y_values.append(0)\n for i in range(1, 5):\n x, y = robot_to_camera(i, i)\n x_values.append(x)\n y_values.append(y)\n plt.scatter(x_values, y_values, s=5)\n plt.show()\n\nif __name__ == \"__main__\":\n p0 = [0.23, 2.94]\n p1 = [-0.27, 3.52]\n q1 = [1, 1]\n # p2 = [-0.96,4.4]\n # q2 = [2,2]\n p3 = [-1.16, 6.64]\n q3 = [ 2.5 , 4]\n x = abs(p1[1]-p0[1])\n y = abs(p1[0]-p0[0])\n print(\"{}, {}\".format(x, y))\n total_degree = math.atan(x / y)\n\n in_degree = math.atan(q1[1]/q1[0])\n print(in_degree*360/(2*math.pi))\n\n diff_degree = total_degree - in_degree\n print(diff_degree*360/(2*math.pi))\n\n ratio = math.sqrt(q1[0]**2 + q1[1]**2) / math.sqrt(x**2 + y ** 2)\n print(ratio)\n \n x, y = robot_to_camera( 2, 2)\n \n "
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter"
]
]
|
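robot_to_camera/camera_to_robot above spell out the frame change with atan and explicit trig; the same rotate-scale-translate mapping is often written as a single similarity transform. A sketch under invented angle, scale, and origin values (it also omits the axis flip the original applies):

    import numpy as np

    theta = 0.3                   # rotation between the two frames (rad), assumed
    ratio = 0.05                  # scale factor between frames, assumed
    p0 = np.array([0.23, 2.94])   # robot-frame origin expressed in camera coordinates

    R = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta),  np.cos(theta)]])

    def robot_to_camera(r):
        return p0 + (R @ r) / ratio          # rotate, scale, shift

    def camera_to_robot(c):
        return R.T @ ((c - p0) * ratio)      # exact inverse: shift back, unrotate

    print(camera_to_robot(robot_to_camera(np.array([2.0, 2.0]))))  # -> [2. 2.]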
eduardofv/lang_model_eval | [
"d89dbe9fe291f0befb9701e8dc4cea4154cf9d45"
]
| [
"analysis/analysis_utils.py"
]
| [
"\"\"\"Utility functions to be used by analysis tasks.\"\"\"\nimport os\nimport re\nimport collections\nimport json\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\n\n#File Utils\ndef load_metadata(directory):\n \"\"\"Load the metadata file 'experiment_metadata.json' from experiment dir\"\"\"\n val = None\n try:\n fname = f\"{directory}/experiment_metadata.json\"\n with open(fname) as fin:\n val = json.load(fin)\n except IOError as ex:\n print(ex)\n val = None\n return val\n\ndef validate_required_fields(metadata):\n \"\"\"Check if the metadata can be used on analysis. This is evolving\"\"\"\n required = ['RESULTS-BALANCED_ACCURACY']\n val = True\n for field in required:\n val = val & (field in metadata)\n return val\n\ndef load_result_directories(directories):\n \"\"\"\n Walks a directory list in which every directory contains experiment\n results, loading a metadata file for each directory.\n Validates and returns the metadata dict\n \"\"\"\n dirs = []\n for search_dir in directories:\n _, directory, _ = list(os.walk(search_dir, followlinks=True))[0]\n dirs += [f\"{search_dir}{subdir}\" for subdir in directory]\n\n results = {}\n for directory in dirs:\n metadata = load_metadata(directory)\n if metadata is None or not validate_required_fields(metadata):\n print(f\"WARNING: Invalid metadata: {directory}\")\n else:\n results[directory] = metadata\n return results\n\n\n#Data Utils\ndef load_full_dataset(results):\n \"\"\"Converts a results dict to a DataFrame\"\"\"\n experiments = results.keys()\n return pd.DataFrame({\n #Experiment and Environment\n 'runid': [results[k]['EXPERIMENT-RUNID'] for k in experiments],\n 'version': [results[k]['EXPERIMENT-VERSION'] for k in experiments],\n 'GPU': [gpu(results[k]) for k in experiments],\n #Data\n 'dataset': [results[k]['DATA-DATASET_FN'] for k in experiments],\n 'rows_to_load': [rows_to_load(results[k]) for k in experiments],\n 'training_set_size': [training_set_size(results[k]) for k in experiments],\n 'test_set_size': [test_set_size(results[k]) for k in experiments],\n 'max_seq_len': [max_seq_len(results[k]) for k in experiments],\n 'output_dim': [results[k]['DATA-OUTPUT_DIM'] for k in experiments],\n #Model and training\n 'lm': [lm(results[k]) for k in experiments],\n 'batch_size': [results[k]['TRAIN-BATCH_SIZE'] for k in experiments],\n 'learning_rate': [learning_rate(results[k]) for k in experiments],\n #Results\n 'training_time': [training_time(results[k]) for k in experiments],\n 'bac': [results[k]['RESULTS-BALANCED_ACCURACY'] for k in experiments],\n 'min_loss': [min_loss(results[k]) for k in experiments],\n 'last_loss': [last_loss(results[k]) for k in experiments],\n 'total_epochs': [epochs(results[k]) for k in experiments],\n 'best_epoch': [best_epoch(results[k]) for k in experiments],\n 'val_loss': [val_loss(results[k]) for k in experiments],\n 'test_bac': [test_bac(results[k]) for k in experiments]\n })\n\ndef warn_if_experiments_differ(data, must_be_unique):\n \"\"\"\n Sends a warning if a field which is expected to have one unique value\n has more. 
Use to inspect the dataframe\n \"\"\"\n for field in must_be_unique:\n if len(data[field].unique()) != 1:\n print(f\"WARNING: There are experiments with different {field}:\")\n print(data[field].unique())\n\ndef extract_type_from_nnlm(data):\n \"\"\"\n Extract the version for the NNLM collection of Language Models\n https://tfhub.dev/google/collections/nnlm/1\n \"\"\"\n return data['lm'].str.extract(r'nnlm-(e.-.*)-w.*')\n\n\ndef average_list_metric(\n data,\n metric_field,\n dimension_field,\n ignore_trailing=True):\n \"\"\"\n Averages \"dimension_field\" values of list field (metric_field).\n This is used on experiments results that are a list, for instance,\n val_loss or accuracy. This can be used to plot learning curves.\n\n The argument 'ignore_trailing' will stop averaging values on the list\n with the least arguments. For example,\n\n if ignore_trailing is True: [1, 1, 3] [1, 2] will produce [1, 1.5]\n if ignore_trailing is False: [1, 1, 3] [1, 2] will produce [1, 1.5, 3]\n \"\"\"\n\n def val_or_none(list_, index):\n if index < len(list_):\n return list_[index]\n return None\n\n if not ignore_trailing:\n print(\"WARNING: ignore_trailing set to False. \"\n \"Last epochs for some dimensions may be misleading.\")\n\n values = collections.defaultdict(list)\n for _, row in data.iterrows():\n values[row[dimension_field]].append(row[metric_field])\n aggregated = collections.defaultdict(list)\n for dim, metrics in values.items():\n aggregated[dimension_field].append(dim)\n vals = []\n if ignore_trailing:\n epochs = min([len(x) for x in metrics])\n else:\n epochs = max([len(x) for x in metrics])\n for index in range(epochs):\n if not ignore_trailing:\n m = [val_or_none(x, index) for x in metrics]\n m = list(filter(lambda x: x is not None, m))\n val = sum(m) / len(m)\n else:\n val = sum([x[index] for x in metrics]) / len(metrics)\n vals.append(val)\n aggregated[metric_field].append(vals)\n return pd.DataFrame(aggregated)\n\n\n#Graph utils\n#from https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color\ndef contrast_color(color, blackish='black', whiteish='whitesmoke'):\n \"\"\"Selects white(ish) or black(ish) for text to contrast over some RGB\"\"\"\n luminance = (0.299 * color[0] + 0.587 * color[1] + 0.114 * color[2])\n if luminance > 0.6:\n return blackish\n return whiteish\n\ndef colors_by_value(values, color_space='muted', return_legend_handles=False):\n \"\"\"\n Creates a list of colors based on the unique categorical values\n \"\"\"\n categories = sorted(values.unique())\n pal = sns.color_palette(color_space, len(categories))\n col_dict = dict(zip(categories, pal))\n #colors = list(values.map(col_dict))\n colors = [col_dict[val] for val in values]\n if return_legend_handles:\n handles = []\n for k, v in col_dict.items():\n handles.append(mpl.patches.Patch(color=v, label=k))\n return (colors, handles)\n return colors\n\n#TODO: Review\n#https://matplotlib.org/3.1.3/gallery/statistics/barchart_demo.html\n#for better handling of the text position.\n# Write the blog post afterwards with the solution\n##http://eduardofv.com/wp-admin/post.php?post=517&action=edit\n#https://colab.research.google.com/drive/1kwKuOwim7ngYmFSRjkVYMi5_K6WA9vmD\ndef annotate_barh(ax, fontsize='x-small'):\n \"\"\"Adds value labels inside the bars of a barh plot\"\"\"\n plt.draw()\n for patch in ax.patches:\n label = f\"{patch.get_width():1.4f}\"\n p_x = patch.get_width()\n p_y = patch.get_y()\n #Put an invisible text to measure its dimensions\n txt = plt.text(p_x, p_y, label, 
fontsize=fontsize, alpha=0.0)\n bbox = txt.get_window_extent().transformed(ax.transData.inverted())\n t_w = bbox.width * 1.1\n t_h = bbox.height\n p_y += (patch.get_height() - t_h)/1.5\n if t_w > 0.9 * patch.get_width():\n plt.text(p_x, p_y, label, fontsize=fontsize)\n else:\n p_x -= t_w\n col = contrast_color(patch.get_facecolor())\n plt.text(p_x, p_y, label, fontsize=fontsize, color=col)\n\ndef plot_learning_curve(data, curve_field, dimension_field):\n \"\"\"\n Plots learning curves contained as lists in in the *curve_field* of the\n DataFrame. Dimension field will be used for the labels of each sample.\n \"\"\"\n #fig = plt.figure()\n ax = plt.axes()\n df1 = data.sort_values(dimension_field)\n df1['color'] = colors_by_value(df1[dimension_field])\n for _, row in df1.iterrows():\n plt.plot(row[curve_field],\n label=row[dimension_field],\n color=row['color'])\n ax.legend()\n\n#Config object field parsing\n# These methods convert the values from the metadata to standard values that\n# can be used in the analysis. Fills non-existent values, select correct fields\n# set default, etc\n#pylint: disable=C0116\ndef learning_rate(metadata):\n val = None\n try:\n val = metadata['MODEL-OPTIMIZER_FULL_CONFIG']['learning_rate']\n except KeyError:\n print(\"WARNING: Actual learning_rate not found\")\n return val\n\ndef training_time(metadata):\n val = None\n if 'EXPERIMENT-TRAINING_TOOK' in metadata:\n val = metadata['EXPERIMENT-TRAINING_TOOK']\n return val\n\ndef lm(metadata):\n lm_val = None\n if 'TFHUB-EMB_MODEL' in metadata:\n lm_val = metadata['TFHUB-EMB_MODEL']\n match = re.search(\"https://tfhub.dev/google/([^/]+)/.$\", lm_val)\n if match is not None:\n lm_val = match.group(1)\n else:\n print(f\"WARNING: LM could not be parsed from {lm_val}\")\n lm_val = \"LM Not Found\"\n elif 'HUG-EMB_MODEL' in metadata:\n lm_val = metadata['HUG-EMB_MODEL']\n return lm_val\n\ndef gpu(metadata):\n if metadata['EXPERIMENT-ENVIRONMENT'][4] == 'GPU: available':\n return metadata['EXPERIMENT-ENVIRONMENT'][5].split('(')[0].split(\":\")[1].strip()\n return \"GPU: Not available\"\n\ndef max_seq_len(metadata):\n max_seq_len_val = 'Full'\n if 'MODEL-HUG_MAX_SEQ_LEN' in metadata:\n max_seq_len_val = metadata['MODEL-HUG_MAX_SEQ_LEN']\n elif 'MODEL-BERT_MAX_SEQ_LEN' in metadata:\n max_seq_len_val = metadata['MODEL-BERT_MAX_SEQ_LEN']\n return max_seq_len_val\n\ndef rows_to_load(metadata):\n rows_to_load_val = \"All\"\n if 'DATA-ROWS_TO_LOAD' in metadata:\n rows_to_load_val = metadata['DATA-ROWS_TO_LOAD']\n return rows_to_load_val\n\ndef min_loss(metadata):\n val = None\n if 'RESULTS-HISTORIES' in metadata and metadata['RESULTS-HISTORIES']:\n val = min(metadata['RESULTS-HISTORIES'][0]['val_loss'])\n return val\n\ndef last_loss(metadata):\n val = None\n if 'RESULTS-HISTORIES' in metadata and metadata['RESULTS-HISTORIES']:\n val = metadata['RESULTS-HISTORIES'][0]['val_loss'][-1]\n return val\n\ndef epochs(metadata):\n val = 'NA'\n if 'RESULTS-HISTORIES' in metadata and metadata['RESULTS-HISTORIES']:\n val = len(metadata['RESULTS-HISTORIES'][0]['loss'])\n return val\n\ndef best_epoch(metadata):\n val = 'NA'\n if 'RESULTS-HISTORIES' in metadata and metadata['RESULTS-HISTORIES']:\n if 'val_loss' in metadata['RESULTS-HISTORIES'][0]:\n v_loss = metadata['RESULTS-HISTORIES'][0]['val_loss']\n val = np.argmin(v_loss) + 1\n return val\n\ndef training_set_size(metadata):\n return metadata['DATA-TRAINING_SET_SIZE']\n\ndef test_set_size(metadata):\n return metadata['DATA-TEST_SET_SIZE']\n\ndef test_bac(metadata):\n val = None\n try:\n val 
= metadata['RESULTS-HISTORIES'][0]['test_bac']\n except KeyError:\n print(f\"WARNING: test_bac not found in Histories\")\n return val\n\ndef val_loss(metadata):\n val = None\n try:\n val = metadata['RESULTS-HISTORIES'][0]['val_loss']\n except KeyError:\n print(f\"WARNING: val_loss not found in Histories\")\n return val\n"
]
| [
[
"matplotlib.patches.Patch",
"pandas.DataFrame",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.plot",
"numpy.argmin",
"matplotlib.pyplot.text"
]
]
|
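average_list_metric above truncates every run's curve to the shortest one before averaging (the ignore_trailing=True branch); a numpy sketch of just that step (function name is mine):

    import numpy as np

    def mean_curve(curves):
        # clip all runs to the shortest length, then average epoch-wise
        n = min(len(c) for c in curves)
        return np.mean([c[:n] for c in curves], axis=0)

    print(mean_curve([[1, 1, 3], [1, 2]]))  # -> [1.  1.5], matching the docstring example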
adswa/pymento | [
"17182124a9b917365b181bf7936f290f16a9105c"
]
| [
"pymento_meg/viz/plots.py"
]
| [
"import mne\nimport matplotlib\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom pymento_meg.utils import _construct_path\nfrom pathlib import Path\nfrom matplotlib import interactive\n\ninteractive(True)\n\n\ndef plot_psd(raw, subject, figdir, filtering):\n \"\"\"\n Helper to plot spectral densities\n \"\"\"\n print(\n f\"Plotting spectral density plots for subject sub-{subject}\"\n f\"after Maxwell filtering.\"\n )\n if filtering:\n # append a 'filtered' suffix to the file name\n fname = _construct_path(\n [\n Path(figdir),\n f\"sub-{subject}\",\n \"meg\",\n f\"sub-{subject}_task-memento_spectral-density_filtered.png\",\n ]\n )\n else:\n fname = _construct_path(\n [\n Path(figdir),\n f\"sub-{subject}\",\n \"meg\",\n f\"sub-{subject}_task-memento_spectral-density.png\",\n ]\n )\n fig = raw.plot_psd()\n fig.savefig(fname)\n\n\ndef plot_noisy_channel_detection(\n auto_scores, subject=\"test\", ch_type=\"grad\", outpath=\"/tmp/\"\n):\n\n # Select the data for specified channel type\n ch_subset = auto_scores[\"ch_types\"] == ch_type\n ch_names = auto_scores[\"ch_names\"][ch_subset]\n scores = auto_scores[\"scores_noisy\"][ch_subset]\n limits = auto_scores[\"limits_noisy\"][ch_subset]\n bins = auto_scores[\"bins\"] # The the windows that were evaluated.\n # We will label each segment by its start and stop time, with up to 3\n # digits before and 3 digits after the decimal place (1 ms precision).\n bin_labels = [f\"{start:3.3f} – {stop:3.3f}\" for start, stop in bins]\n\n # We store the data in a Pandas DataFrame. The seaborn heatmap function\n # we will call below will then be able to automatically assign the correct\n # labels to all axes.\n data_to_plot = pd.DataFrame(\n data=scores,\n columns=pd.Index(bin_labels, name=\"Time (s)\"),\n index=pd.Index(ch_names, name=\"Channel\"),\n )\n\n # First, plot the \"raw\" scores.\n fig, ax = plt.subplots(1, 2, figsize=(12, 8))\n fig.suptitle(\n f\"Automated noisy channel detection: {ch_type}, subject sub-{subject}\",\n fontsize=16,\n fontweight=\"bold\",\n )\n sns.heatmap(data=data_to_plot, cmap=\"Reds\", cbar_kws=dict(label=\"Score\"), ax=ax[0])\n [\n ax[0].axvline(x, ls=\"dashed\", lw=0.25, dashes=(25, 15), color=\"gray\")\n for x in range(1, len(bins))\n ]\n ax[0].set_title(\"All Scores\", fontweight=\"bold\")\n\n # Now, adjust the color range to highlight segments that exceeded the limit.\n sns.heatmap(\n data=data_to_plot,\n vmin=np.nanmin(limits), # bads in input data have NaN limits\n cmap=\"Reds\",\n cbar_kws=dict(label=\"Score\"),\n ax=ax[1],\n )\n [\n ax[1].axvline(x, ls=\"dashed\", lw=0.25, dashes=(25, 15), color=\"gray\")\n for x in range(1, len(bins))\n ]\n ax[1].set_title(\"Scores > Limit\", fontweight=\"bold\")\n\n # The figure title should not overlap with the subplots.\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n fname = _construct_path(\n [\n Path(outpath),\n f\"sub-{subject}\",\n \"meg\",\n f\"noise_detection_sub-{subject}_{ch_type}.png\",\n ]\n )\n fig.savefig(fname)\n\n\n# TODO: Do a psd plot for each trial\n"
]
| [
[
"matplotlib.interactive",
"pandas.Index",
"numpy.nanmin",
"matplotlib.pyplot.subplots"
]
]
|
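plot_noisy_channel_detection above gets its axis labels for free by handing seaborn a DataFrame whose rows and columns are named pd.Index objects; that trick in isolation, with random scores and hypothetical channel names:

    import numpy as np
    import pandas as pd
    import seaborn as sns
    import matplotlib.pyplot as plt

    scores = np.random.rand(4, 6)
    data = pd.DataFrame(
        scores,
        index=pd.Index([f"MEG{i}" for i in range(4)], name="Channel"),
        columns=pd.Index([f"{t:.1f}" for t in np.arange(6)], name="Time (s)"),
    )
    sns.heatmap(data, cmap="Reds", cbar_kws=dict(label="Score"))  # axes labelled from the Index names
    plt.savefig("/tmp/heatmap_demo.png")  # hypothetical output path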
alegonz/hmmlearn | [
"ad6df92f85b88a7aecce70953a3873f6f3c95ccb"
]
| [
"lib/hmmlearn/tests/test_gmm_hmm.py"
]
| [
"from __future__ import absolute_import\n\nimport numpy as np\nimport pytest\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.utils import check_random_state\n\nfrom hmmlearn import hmm\n\nfrom . import log_likelihood_increasing, make_covar_matrix, normalized\n\npytestmark = pytest.mark.xfail()\n\ndef create_random_gmm(n_mix, n_features, covariance_type, prng=0):\n prng = check_random_state(prng)\n g = GaussianMixture(n_mix, covariance_type=covariance_type)\n g.means_ = prng.randint(-20, 20, (n_mix, n_features))\n g.covars_ = make_covar_matrix(covariance_type, n_mix, n_features)\n g.weights_ = normalized(prng.rand(n_mix))\n return g\n\n\nclass GMMHMMTestMixin(object):\n def setup_method(self, method):\n self.prng = np.random.RandomState(9)\n self.n_components = 3\n self.n_mix = 2\n self.n_features = 2\n self.startprob = self.prng.rand(self.n_components)\n self.startprob = self.startprob / self.startprob.sum()\n self.transmat = normalized(\n self.prng.rand(self.n_components, self.n_components),\n axis=1)\n\n self.gmms = []\n for state in range(self.n_components):\n self.gmms.append(create_random_gmm(\n self.n_mix, self.n_features, self.covariance_type,\n prng=self.prng))\n\n def test_score_samples_and_decode(self):\n h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)\n h.startprob_ = self.startprob\n h.transmat_ = self.transmat\n h.gmms_ = self.gmms\n\n # Make sure the means are far apart so posteriors.argmax()\n # picks the actual component used to generate the observations.\n for g in h.gmms_:\n g.means_ *= 20\n\n refstateseq = np.repeat(np.arange(self.n_components), 5)\n n_samples = len(refstateseq)\n X = [h.gmms_[x].sample(1).flatten() for x in refstateseq]\n\n _ll, posteriors = h.score_samples(X)\n\n assert posteriors.shape == (n_samples, self.n_components)\n assert np.allclose(posteriors.sum(axis=1), np.ones(n_samples))\n\n _logprob, stateseq = h.decode(X)\n assert np.allclose(stateseq, refstateseq)\n\n def test_sample(self, n_samples=1000):\n h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)\n h.startprob_ = self.startprob\n h.transmat_ = self.transmat\n h.gmms_ = self.gmms\n X, state_sequence = h.sample(n_samples)\n assert X.shape == (n_samples, self.n_features)\n assert len(state_sequence) == n_samples\n\n @pytest.mark.parametrize(\"params\", [\"stmwc\", \"wt\", \"m\"])\n def test_fit(self, params, n_iter=5):\n h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type,\n covars_prior=1.0)\n h.startprob_ = self.startprob\n h.transmat_ = normalized(\n self.transmat + np.diag(self.prng.rand(self.n_components)), 1)\n h.gmms_ = self.gmms\n\n lengths = [10] * 10\n X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)\n\n # Mess up the parameters and see if we can re-learn them.\n h.n_iter = 0\n h.fit(X, lengths=lengths)\n h.transmat_ = normalized(self.prng.rand(self.n_components,\n self.n_components), axis=1)\n h.startprob_ = normalized(self.prng.rand(self.n_components))\n\n assert log_likelihood_increasing(h, X, lengths, n_iter)\n\n def test_fit_works_on_sequences_of_different_length(self):\n lengths = [3, 4, 5]\n X = self.prng.rand(sum(lengths), self.n_features)\n\n h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)\n # This shouldn't raise\n # ValueError: setting an array element with a sequence.\n h.fit(X, lengths=lengths)\n\n\nclass TestGMMHMMWithDiagCovars(GMMHMMTestMixin):\n covariance_type = 'diag'\n\n\[email protected]\nclass TestGMMHMMWithTiedCovars(GMMHMMTestMixin):\n 
covariance_type = 'tied'\n\n\n@pytest.mark.skip\nclass TestGMMHMMWithFullCovars(GMMHMMTestMixin):\n covariance_type = 'full'\n"
]
| [
[
"numpy.allclose",
"numpy.arange",
"numpy.ones",
"sklearn.mixture.GaussianMixture",
"numpy.random.RandomState",
"sklearn.utils.check_random_state"
]
]
|
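The GMMHMMTestMixin fixture above builds a random start distribution and a row-stochastic transition matrix through a normalized() helper; in plain numpy that setup reduces to:

    import numpy as np

    rng = np.random.RandomState(9)   # same seed as the fixture
    n_components = 3

    startprob = rng.rand(n_components)
    startprob /= startprob.sum()                     # probabilities sum to 1

    transmat = rng.rand(n_components, n_components)
    transmat /= transmat.sum(axis=1, keepdims=True)  # every row sums to 1

    assert np.allclose(transmat.sum(axis=1), 1.0)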
browatbn2/MAD | [
"2b3f1a9b36ef096db648ede9854c733fd444a2a6"
]
| [
"datasets/voxceleb.py"
]
| [
"import os\nimport time\nimport cv2\nimport numpy as np\nfrom skimage import io\n\nimport torch.utils.data as td\nimport pandas as pd\n\nimport config as cfg\nfrom datasets import ds_utils\nimport utils.io\nfrom utils import log\n\nimport cv2 as cv\n\n# Ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef draw_flow(img, flow, step=16):\n h, w = img.shape[:2]\n y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)\n fx, fy = flow[y,x].T\n lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)\n lines = np.int32(lines + 0.5)\n vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n cv.polylines(vis, lines, 0, (0, 255, 0))\n for (x1, y1), (_x2, _y2) in lines:\n cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1)\n return vis\n\ndef draw_hsv(flow):\n h, w = flow.shape[:2]\n fx, fy = flow[:,:,0], flow[:,:,1]\n ang = np.arctan2(fy, fx) + np.pi\n v = np.sqrt(fx*fx+fy*fy)\n hsv = np.zeros((h, w, 3), np.uint8)\n ang = 1\n hsv[...,0] = ang*(180/np.pi/2)\n hsv[...,1] = 255\n hsv[...,2] = np.minimum(v*30, 255)\n bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n return bgr\n\n\ndef encode_landmarks(landmarks):\n return ';'.join(landmarks.ravel().astype(str))\n\nclass VoxCeleb(td.Dataset):\n\n def __init__(self, root_dir=cfg.VOXCELEB_ROOT, train=True, start=None,\n max_samples=None, deterministic=True, with_bumps=False,\n min_of_conf=0.3, min_face_height=100, use_cache=True, **kwargs):\n\n from utils.face_extractor import FaceExtractor\n self.face_extractor = FaceExtractor()\n\n self.use_cache = use_cache\n self.root_dir = root_dir\n self.cropped_img_dir = os.path.join(cfg.VOXCELEB_ROOT_LOCAL, 'crops')\n self.fullsize_img_dir = os.path.join(root_dir, 'frames/unzippedIntervalFaces/data')\n self.feature_dir = os.path.join(root_dir, 'features/unzippedIntervalFaces/data')\n self.npfeature_dir = os.path.join(cfg.VOXCELEB_ROOT_LOCAL, 'features/unzippedIntervalFaces/data')\n self.train = train\n self.with_bumps = with_bumps\n\n annotation_filename = 'dev' if train else 'test'\n path_annotations_mod = os.path.join(root_dir, annotation_filename + '.mod.pkl')\n if os.path.isfile(path_annotations_mod) and False:\n self.annotations = pd.read_pickle(path_annotations_mod)\n else:\n print('Reading CSV file...')\n self.annotations = pd.read_csv(os.path.join(root_dir, annotation_filename+'.csv'))\n print('done.')\n\n # self.annotations['of_conf'] = -1\n # self.annotations['landmarks'] = ''\n # self.annotations['pose'] = ''\n # of_confs, poses, landmarks = [], [], []\n #\n #\n #\n # # for cnt, filename in enumerate(self.annotations.fname):\n # for cnt, idx in enumerate(self.annotations.index):\n # filename = self.annotations.iloc[idx].fname\n # filename_noext = os.path.splitext(filename)[0]\n # of_conf, lms, pose = ds_utils.read_openface_detection(os.path.join(self.feature_dir, filename_noext))\n # str_landmarks = encode_landmarks(lms)\n # of_confs.append(of_conf)\n # # poses.append(pose)\n # landmarks.append(lms)\n # self.annotations.loc[idx, 'of_conf'] = of_conf\n # self.annotations.loc[idx, 'landmarks'] = str_landmarks\n # self.annotations.loc[idx, 'pose'] = encode_landmarks(pose)\n # if (cnt+1) % 100 == 0:\n # print(cnt+1)\n # if (cnt+1) % 1000 == 0:\n # print('saving annotations...')\n # self.annotations.to_pickle(path_annotations_mod)\n # # self.annotations.to_csv(path_annotations_mod, index=False)\n # self.annotations.to_pickle(path_annotations_mod)\n\n path_annotations_mod = os.path.join(root_dir, annotation_filename + '.lms.pkl')\n lm_annots = pd.read_pickle(os.path.join(root_dir, 
path_annotations_mod))\n\n t = time.time()\n self.annotations = pd.merge(self.annotations, lm_annots, on='fname', how='inner')\n print(\"Time merge: {:.2f}\".format(time.time()-t))\n\n t = time.time()\n self.annotations['vid'] = self.annotations.fname.map(lambda x: x.split('/')[2])\n self.annotations['id'] = self.annotations.uid.map(lambda x: int(x[2:]))\n print(\"Time vid/id labels: {:.2f}\".format(time.time()-t))\n\n print(\"Num. faces: {}\".format(len(self.annotations)))\n print(\"Num. ids : {}\".format(self.annotations.id.nunique()))\n\n # drop bad face detections\n print(\"Removing faces with conf < {}\".format(min_of_conf))\n self.annotations = self.annotations[self.annotations.of_conf >= min_of_conf]\n print(\"Num. faces: {}\".format(len(self.annotations)))\n\n # drop small faces\n print(\"Removing faces with height < {}px\".format(min_face_height))\n self.annotations = self.annotations[self.annotations.face_height >= min_face_height]\n print(\"Num. faces: {}\".format(len(self.annotations)))\n\n\n fr = 0\n prev_vid = -1\n frame_nums = []\n for n, id in enumerate(self.annotations.vid.values):\n fr += 1\n if id != prev_vid:\n prev_vid = id\n fr = 0\n frame_nums.append(fr)\n self.annotations['FRAME'] = frame_nums\n\n self.max_frames_per_video = 200\n self.frame_interval = 3\n print('Limiting videos in VoxCeleb to {} frames...'.format(self.max_frames_per_video))\n self.annotations = self.annotations[self.annotations.FRAME % self.frame_interval == 0]\n self.annotations = self.annotations[self.annotations.FRAME < self.max_frames_per_video * self.frame_interval]\n print(\"Num. faces: {}\".format(len(self.annotations)))\n\n # limit number of samples\n st,nd = 0, None\n if start is not None:\n st = start\n if max_samples is not None:\n nd = st+max_samples\n self.annotations = self.annotations[st:nd]\n\n self.transform = ds_utils.build_transform(deterministic=True, color=True)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Train: {}\\n'.format(self.train)\n fmt_str += ' Root Location: {}\\n'.format(self.root_dir)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n fmt_str += self._stats_repr()\n return fmt_str\n\n def _stats_repr(self):\n fmt_str = \" Number of identities: {}\\n\".format(self.annotations.id.nunique())\n fmt_str += \" Number of videos: {}\\n\".format(self.annotations.vid.nunique())\n fmt_str += \" Frame inverval: {}\\n\".format(self.frame_interval)\n fmt_str += \" Max frames per vid: {}\\n\".format(self.max_frames_per_video)\n return fmt_str\n\n @property\n def labels(self):\n return self.annotations.id.values\n\n def get_landmarks(self, sample):\n landmarks = np.array([sample.landmarks_x, sample.landmarks_y], dtype=np.float32).T\n # return face_processing.scale_landmarks_to_crop(landmarks, output_size=(cfg.CROP_SIZE, cfg.CROP_SIZE))\n return landmarks\n\n @property\n def vids(self):\n return self.annotations.vid.values\n\n def show_landmarks(self, img, landmarks, title='landmarks'):\n for lm in landmarks:\n lm_x, lm_y = lm[0], lm[1]\n cv2.circle(img, (int(lm_x), int(lm_y)), 2, (0, 0, 255), -1)\n cv2.imshow(title, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n cv2.waitKey(10)\n\n def __len__(self):\n return len(self.annotations)\n\n def __getitem__(self, idx):\n sample = self.annotations.iloc[idx]\n filename, id, vid = sample.fname, sample.id, sample.vid\n\n pose = 
np.array((sample.pose_pitch, sample.pose_yaw, sample.pose_roll), dtype=np.float32)\n landmarks = self.get_landmarks(sample)\n\n t = time.time()\n crop, landmarks, pose, cropper = self.face_extractor.get_face(filename + '.jpg', self.fullsize_img_dir,\n self.cropped_img_dir, landmarks=landmarks,\n pose=pose, use_cache=self.use_cache,\n detect_face=False, crop_type='tight',\n aligned=True)\n # self.show_landmarks(crop, landmarks, 'imgs')\n # crop, pose, landmarks,of_conf_seq = self.get_face(fname, landmarks=landmarks, use_cache=True, from_sequence=True)\n # landmarks = face_processing.scale_landmarks_to_crop(landmarks, output_size=(cfg.CROP_SIZE, cfg.CROP_SIZE))\n # self.show_landmarks(crop, landmarks, 'sequence')\n # print(of_conf, of_conf_seq)\n # cv2.waitKey()\n\n cropped_sample = {'image': crop, 'landmarks': landmarks, 'pose': pose}\n item = self.transform(cropped_sample)\n\n # face_mask = face_processing.get_face_mask(landmarks, crop.shape)\n # transformed_face_mask = face_processing.CenterCrop(cfg.INPUT_SIZE)(face_mask)\n\n item.update({\n 'id': id,\n 'fnames': filename,\n # 'face_mask': transformed_face_mask,\n 'expression': np.array([[-1,0,0]], np.float32),\n 'vid': vid\n })\n\n if self.with_bumps:\n H = np.eye(3,3)\n step = 1\n next_id = max(0, min(len(self)-1, idx+step))\n if self.annotations.iloc[next_id].vid != vid:\n next_id = max(0, min(len(self)-1, idx-step))\n if self.annotations.iloc[max(0, idx-step)].vid != vid:\n # fallback to single image\n crop_next = crop\n landmarks_next = landmarks\n else:\n sample_next = self.annotations.iloc[next_id]\n pose_next = np.array((sample_next.pose_pitch, sample_next.pose_yaw, sample_next.pose_roll), dtype=np.float32)\n of_conf = sample_next.of_conf\n crop_next, landmarks_next = self.get_face(sample_next.fname,\n landmarks=self.get_landmarks(sample_next),\n use_cache=True)\n\n pose_diff = np.abs(pose - pose_next)\n if np.any(pose_diff > np.deg2rad(7)) or of_conf < 0.9:\n # print(np.rad2deg(pose_diff))\n crop_next = crop\n landmarks_next = landmarks\n else:\n # calculate homograpy to project next images onto current image\n H = cv2.findHomography(landmarks_next, landmarks)[0]\n\n bumps = face_processing.calc_face_bumps(crop, crop_next, landmarks, landmarks_next, H)\n transformed_bumps = face_processing.CenterCrop(cfg.INPUT_SIZE)(bumps)\n # transformed_bumps = self.transform(bumps)\n\n # bumps = face_processing.calc_face_bumps(crop, crop_next, landmarks, landmarks_next)\n # transformed_crop_next = self.transform(crop_next)\n # return transformed_crop, id, pose, landmarks, emotion, vid, fname, transformed_crop_next, landmarks_next, H, transformed_bumps\n item.update({'bumps': transformed_bumps})\n\n return item\n\n def get_face(self, filename, landmarks=None, size=(cfg.CROP_SIZE, cfg.CROP_SIZE), use_cache=True, from_sequence=False):\n # landmarks = np.zeros((68, 2), dtype=np.float32)\n # pose = np.zeros(3, dtype=np.float32)\n crop_filepath = os.path.join(self.cropped_img_dir, filename + '.jpg')\n\n if use_cache and os.path.isfile(crop_filepath):\n try:\n crop = io.imread(crop_filepath)\n except OSError:\n os.remove(crop_filepath)\n return self.get_face(filename, landmarks, size, use_cache, from_sequence)\n if crop.shape[:2] != size:\n crop = cv2.resize(crop, size, interpolation=cv2.INTER_CUBIC)\n if landmarks is None:\n of_conf, landmarks, _ = ds_utils.read_openface_detection(\n os.path.join(self.feature_dir, filename),\n numpy_lmFilepath=os.path.join(self.npfeature_dir, filename)\n )\n landmarks = 
face_processing.scale_landmarks_to_crop(landmarks, output_size=size)\n else:\n # Load image from dataset\n img_path = os.path.join(self.fullsize_img_dir, filename + '.jpg')\n img = io.imread(img_path)\n if img is None:\n raise IOError(\"\\tError: Could not load image {}!\".format(img_path))\n\n # load landmarks extracted with OpenFace2\n if landmarks is None:\n of_conf, landmarks, _ = ds_utils.read_openface_detection(\n os.path.join(self.feature_dir, filename),\n numpy_lmFilepath=os.path.join(self.npfeature_dir, filename),\n from_sequence=from_sequence\n )\n if of_conf <= 0.0:\n log.warning(\"No landmarks for image {}\".format(filename))\n\n # crop, landmarks = face_processing.crop_bump(img, landmarks, output_size=size)\n crop, landmarks = face_processing.crop_celebHQ(img, landmarks, output_size=size)\n\n if use_cache:\n utils.io.makedirs(crop_filepath)\n io.imsave(crop_filepath, crop)\n\n return crop, landmarks\n\n\n\ndef get_name_uid_map(split):\n import glob\n def create_map(in_dir):\n ytid2pid = {}\n person_dirs = sorted(glob.glob(os.path.join(cfg.VOXCELEB_ROOT, in_dir, '*')))\n for cnt, p_dir in enumerate(person_dirs):\n pid = p_dir.split('/')[-1]\n if 'frames' in in_dir:\n vid_dirs = sorted(glob.glob(os.path.join(p_dir, '1.6', '*')))\n else:\n vid_dirs = sorted(glob.glob(os.path.join(p_dir, '*')))\n for cnt_vids, vid_dir in enumerate(vid_dirs[:1]):\n yt_id = vid_dir.split('/')[-1]\n ytid2pid[yt_id] = pid\n return ytid2pid\n\n ytid2uid = create_map(split+'_txt')\n ytid2name = create_map('frames/unzippedIntervalFaces/data')\n\n # name2uid = {}\n map = {}\n for ytid, uid in ytid2uid.items():\n name = ytid2name[ytid]\n # print(uid, name, ytid)\n map[name] = uid\n map[uid] = name\n # name2uid[name] = uid\n # uid2name[uid] = name\n\n return map\n\n\ndef create_annotations(split, num_ids):\n import glob\n WRITE_CROPS = False\n annotations = {}\n id_map = get_name_uid_map(split)\n # person_dirs = sorted(glob.glob(os.path.join(cfg.VOXCELEB_ROOT, 'frames/unzippedIntervalFaces/data', '*')))\n person_dirs = sorted(glob.glob(os.path.join(cfg.VOXCELEB_ROOT, split+'_txt', '*')))\n for cnt, p_dir in enumerate(person_dirs[:num_ids]):\n uid = p_dir.split('/')[-1]\n name_dir = p_dir.replace(split+'_txt', 'frames/unzippedIntervalFaces/data')\n print(cnt, name_dir, uid)\n try:\n name_dir = name_dir.replace(uid, id_map[uid])\n except:\n pass\n vid_dirs = sorted(glob.glob(os.path.join(name_dir, '1.6', '*')))\n for cnt_vids, vid_dir in enumerate(vid_dirs):\n track_dirs = sorted(glob.glob(os.path.join(vid_dir, '*')))\n for cnt_vids, img_dir in enumerate(track_dirs):\n imagefiles = sorted(glob.glob(os.path.join(img_dir, '*.jpg')))\n for cnt_img, img_path in enumerate(imagefiles):\n # lm_path = img_path.replace('frames', 'features').replace('jpg', 'csv')\n # of_conf, landmarks, pose = ds_utils.read_openface_detection(lm_path)\n # of_conf = 0.1\n\n # face_height = landmarks[:, 1].max() - landmarks[:, 1].min()\n\n # if of_conf > 0.0:\n # if WRITE_CROPS:\n # img_orig = io.imread(img_path)\n #\n # crop, landmarks_crop = face_processing.crop_bump(img_orig, landmarks,\n # output_size=(cfg.INPUT_SIZE + cfg.CROP_BORDER * 2,\n # cfg.INPUT_SIZE + cfg.CROP_BORDER * 2))\n #\n # crop_filepath = img_path.replace('OriginalImg', 'crops')\n # utils.io.makedirs(crop_filepath)\n # io.imsave(crop_filepath, crop)\n # else:\n # print(img_path)\n\n fname = os.path.splitext('/'.join(img_path.split('/')[-5:]))[0]\n annotations[fname] = {'uid': uid}#, 'conf': of_conf, 'face_height': face_height}\n\n print(\"Saving DataFrame...\")\n df 
= pd.DataFrame.from_dict(annotations, orient='index')\n df.index.name = 'fname'\n df.to_csv(os.path.join(cfg.VOXCELEB_ROOT, split+'.csv'))\n\n\n\n\ndef read_openface_csvs():\n class VoxCelebLandmarks(td.Dataset):\n\n def __init__(self, root_dir=cfg.VOXCELEB_ROOT, train=True, start=None,\n max_samples=None):\n\n self.root_dir = root_dir\n self.cropped_img_dir = os.path.join(cfg.VOXCELEB_ROOT_LOCAL, 'crops/unzippedIntervalFaces/data')\n self.fullsize_img_dir = os.path.join(root_dir, 'frames/unzippedIntervalFaces/data')\n self.feature_dir = os.path.join(root_dir, 'features/unzippedIntervalFaces/data')\n self.npfeature_dir = os.path.join(cfg.VOXCELEB_ROOT_LOCAL, 'features/unzippedIntervalFaces/data')\n\n annotation_filename = 'dev' if train else 'test'\n self.annotations = pd.read_csv(os.path.join(root_dir, annotation_filename + '.csv'))\n\n # limit number of samples\n st, nd = 0, None\n if start is not None:\n st = start\n if max_samples is not None:\n nd = st + max_samples\n self.annotations = self.annotations[st:nd]\n\n def __len__(self):\n return len(self.annotations)\n\n def __getitem__(self, idx):\n fname = self.annotations.iloc[idx].fname\n of_conf, landmarks, pose = ds_utils.read_openface_detection(\n os.path.join(self.feature_dir, fname),\n numpy_lmFilepath=os.path.join(self.npfeature_dir, fname),\n from_sequence=False\n )\n return {'fn': fname,\n 'cnf': of_conf,\n 'lmx': landmarks[:,0],\n 'lmy': landmarks[:,1],\n 'h': int(landmarks[:,1].max() - landmarks[:,1].min()),\n 'w': int(landmarks[:,0].max() - landmarks[:,0].min()),\n 'p': pose[0],\n 'y': pose[1],\n 'r': pose[2]\n }\n\n train = True\n data = []\n ds = VoxCelebLandmarks(train=train, max_samples=8000000)\n dl = td.DataLoader(ds, batch_size=200, shuffle=False, num_workers=12, collate_fn=lambda b: b)\n for batch in dl:\n data.extend(batch)\n log.info(len(data))\n\n print(\"Saving DataFrame...\")\n df = pd.DataFrame.from_dict(data)\n df = df.set_index('fn')\n annotation_filename = 'dev' if train else 'test'\n df.to_pickle(os.path.join(cfg.VOXCELEB_ROOT, annotation_filename + '.lms.pkl'))\n\n\ndef extract_features(split='dev', st=None, nd=None, is_sequence=True):\n \"\"\" Extract facial features (landmarks, pose,...) 
from images \"\"\"\n import glob\n from utils import visionLogging as log\n # person_dirs = sorted(glob.glob(os.path.join(cfg.VOXCELEB_ROOT, 'frames/unzippedIntervalFaces/data', '*')))\n id_map = get_name_uid_map(split)\n person_dirs = sorted(glob.glob(os.path.join(cfg.VOXCELEB_ROOT, split+'_txt', '*')))\n for cnt, p_dir in enumerate(person_dirs[st:nd]):\n uid = p_dir.split('/')[-1]\n name_dir = p_dir.replace(split + '_txt', 'frames/unzippedIntervalFaces/data')\n try:\n person_name = id_map[uid]\n name_dir = name_dir.replace(uid, person_name)\n log.info(\"{}[{}-{}] {}\".format(st + cnt, st, nd, person_name))\n except:\n print(name_dir, uid)\n vid_dirs = sorted(glob.glob(os.path.join(name_dir, '1.6', '*')))\n for cnt_vids, vid_dir in enumerate(vid_dirs):\n track_dirs = sorted(glob.glob(os.path.join(vid_dir, '*')))\n for cnt_vids, img_dir in enumerate(track_dirs):\n if is_sequence:\n out_dir = img_dir.replace('frames', 'features_sequence')\n else:\n out_dir = img_dir.replace('frames', 'features')\n face_processing.run_open_face(img_dir, out_dir, is_sequence=is_sequence)\n\n\ndef extract_main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--st', default=None, type=int)\n parser.add_argument('--nd', default=None, type=int)\n args = parser.parse_args()\n\n extract_features(st=args.st, nd=args.nd)\n\nif __name__ == '__main__':\n # extract_main()\n # create_annotations(split='dev', num_ids=500)\n # extract_crops()\n\n # read_openface_csvs()\n # exit()\n\n from utils import vis, face_processing\n\n ds = VoxCeleb(train=True, max_samples=50000, use_cache=True)\n print(ds)\n dl = td.DataLoader(ds, batch_size=40, shuffle=False, num_workers=0)\n from utils.nn import Batch\n\n for data in dl:\n batch = Batch(data)\n print(batch.ids)\n ds_utils.denormalize(batch.images)\n # vis.show_images_in_batch(batch.images.detach().cpu())\n vis.vis_square(batch.images.detach().cpu(), fx=0.7, fy=0.7, normalize=False)\n # print(item)\n"
]
| [
[
"pandas.merge",
"pandas.read_pickle",
"numpy.minimum",
"numpy.sqrt",
"numpy.abs",
"numpy.eye",
"numpy.int32",
"torch.utils.data.DataLoader",
"numpy.arctan2",
"numpy.deg2rad",
"pandas.DataFrame.from_dict",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
]
|
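VoxCeleb above is a torch Dataset whose __getitem__ indexes into a pandas annotations table; stripped of the cropping and caching logic, the skeleton it follows is roughly (class and column names here are illustrative):

    import pandas as pd
    import torch.utils.data as td

    class FrameDataset(td.Dataset):
        def __init__(self, annotations):
            self.annotations = annotations        # one row per frame
        def __len__(self):
            return len(self.annotations)
        def __getitem__(self, idx):
            row = self.annotations.iloc[idx]
            return {"fname": row.fname, "id": row.id}

    ds = FrameDataset(pd.DataFrame({"fname": ["a.jpg", "b.jpg"], "id": [0, 1]}))
    for batch in td.DataLoader(ds, batch_size=2):
        print(batch["fname"], batch["id"])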
BILAB-NTU/Deep-learning-based-multi-transducer-photoacoustic-tomography-imaging-without-radius-calibration | [
"d02e7b05f9ddc6c740f691158844fd3c74704e91"
]
| [
"Fully_Dense_UNet_convLSTM_bridge.py"
]
| [
"from tensorflow.keras import Model\r\nfrom tensorflow.keras.layers import Conv2D, Add, Input, ConvLSTM2D,BatchNormalization, Concatenate, Conv2DTranspose,Reshape\r\n\r\n\r\n\r\n########################################################################################################################\r\n'''MODEL FUNCTIONS:'''\r\n########################################################################################################################\r\ndef reshape_conc(input,shortcut, filters):\r\n\r\n dim = input.shape[1]\r\n out = Reshape(target_shape=(1, dim, dim, filters))(input)\r\n shortcut = Reshape(target_shape=(1, dim, dim, filters ))(shortcut)\r\n out = Concatenate()([out, shortcut])\r\n return out\r\n\r\ndef convlstm(input, filters, kernel_size, padding, activation, kernel_initializer):\r\n out = ConvLSTM2D( filters=filters, kernel_size=kernel_size, padding=padding,activation=activation,\r\n kernel_initializer=kernel_initializer,return_sequences=False)(input)\r\n return out\r\n\r\n\r\ndef DownBlock(input, filters, kernel_size, padding, activation, kernel_initializer):\r\n ############################################\r\n out = FD_Block(input, f_in=filters // 2, f_out=filters, k=filters // 8, kernel_size=3, padding='same',\r\n activation=activation, kernel_initializer='glorot_normal')\r\n shortcut = out\r\n out = DownSample(out, filters, kernel_size, strides=2, padding=padding,\r\n activation=activation, kernel_initializer=kernel_initializer)\r\n ############################################\r\n return [out, shortcut]\r\n########################################################################################################################\r\n\r\ndef UpBlock(input, filters, kernel_size, padding, activation, kernel_initializer):\r\n ############################################\r\n out = Conv2D_BatchNorm(input, filters=filters // 2, kernel_size=1, strides=1, padding=padding,\r\n activation=activation, kernel_initializer=kernel_initializer)\r\n out = FD_Block(input, f_in=filters // 2, f_out=filters, k=filters // 8, kernel_size=3, padding='same',\r\n activation=activation, kernel_initializer='glorot_normal')\r\n out = UpSample(out, filters , kernel_size, strides=2, padding=padding,\r\n activation=activation, kernel_initializer=kernel_initializer)\r\n ############################################\r\n return out\r\n\r\n\r\n\r\n########################################################################################################################\r\n'''SUBFUNCTIONS FOR FUNCTIONS:'''\r\n########################################################################################################################\r\n\r\n\r\ndef Conv2D_BatchNorm(input, filters, kernel_size=3, strides=1, padding='same',\r\n activation='linear', kernel_initializer='glorot_normal'):\r\n ############################################\r\n out = Conv2D(filters=filters, kernel_size=kernel_size,\r\n strides=strides, padding=padding,\r\n activation=activation,\r\n kernel_initializer=kernel_initializer)(input)\r\n out = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\r\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\r\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\r\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\r\n gamma_constraint=None)(out)\r\n ############################################\r\n return out\r\n\r\n\r\ndef Conv2D_Transpose_BatchNorm(input, filters, kernel_size=3, strides=2, padding='same',\r\n activation='relu', 
kernel_initializer='glorot_normal'):\r\n ############################################\r\n out = Conv2DTranspose(filters, kernel_size, strides=strides, padding=padding,\r\n activation=activation, kernel_initializer=kernel_initializer)(input)\r\n out = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\r\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\r\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\r\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\r\n gamma_constraint=None)(out)\r\n ############################################\r\n return out\r\n\r\n\r\ndef DownSample(input, filters, kernel_size=3, strides=2, padding='same',\r\n activation='linear', kernel_initializer='glorot_normal'):\r\n ############################################\r\n out = Conv2D_BatchNorm(input, filters, kernel_size=1, strides=1, padding=padding, activation=activation, kernel_initializer=kernel_initializer)\r\n\r\n out = Conv2D_BatchNorm(out, filters, kernel_size, strides=strides, padding=padding,activation=activation, kernel_initializer=kernel_initializer)\r\n ############################################\r\n return out\r\n\r\n\r\ndef UpSample(input, filters, kernel_size=3, strides=2, padding='same',\r\n activation='linear', kernel_initializer='glorot_normal'):\r\n ############################################\r\n out = Conv2D_BatchNorm(input, filters, kernel_size=1, strides=1, padding=padding, activation=activation, kernel_initializer=kernel_initializer)\r\n\r\n out = Conv2D_Transpose_BatchNorm(out, filters // 2, kernel_size, strides=strides, padding=padding,activation=activation, kernel_initializer=kernel_initializer)\r\n ############################################\r\n return out\r\n\r\n\r\n########################################################################################################################\r\n'''FULLY DENSE BLOCK:'''\r\n########################################################################################################################\r\ndef FD_Block(input, f_in, f_out, k, kernel_size=3, padding='same',\r\n activation='linear', kernel_initializer='glorot_normal'):\r\n out = input\r\n for i in range(f_in, f_out, k):\r\n shortcut = out\r\n out = Conv2D_BatchNorm(out, filters=f_in, kernel_size=1, strides=1, padding=padding,\r\n activation=activation, kernel_initializer=kernel_initializer)\r\n out = Conv2D_BatchNorm(out, filters=k, kernel_size=kernel_size, strides=1, padding=padding,\r\n activation=activation, kernel_initializer=kernel_initializer)\r\n out = Concatenate()([out, shortcut])\r\n return out\r\n\r\n########################################################################################################################\r\n'''Modified fully dense UNet with Conv LSTM block'''\r\n########################################################################################################################\r\n\r\ndef Modified_D_UNet(input, filters=32, kernel_size=3, padding='same',activation='relu', kernel_initializer='glorot_normal'):\r\n shortcut1_1 = input\r\n out = Conv2D_BatchNorm(input, filters, kernel_size=3, strides=1, padding=padding,\r\n activation=activation, kernel_initializer=kernel_initializer)\r\n [out, shortcut1_2] = DownBlock(out, filters * 2, kernel_size, padding, activation, kernel_initializer)\r\n [out, shortcut2_1] = DownBlock(out, filters * 2 * 2, kernel_size, padding, activation, kernel_initializer)\r\n [out, shortcut3_1] = DownBlock(out, filters * 2 * 2 * 2, kernel_size, padding, activation, 
kernel_initializer)\r\n [out, shortcut4_1] = DownBlock(out, filters * 2 * 2 * 2 * 2, kernel_size, padding, activation, kernel_initializer)\r\n dim = out.shape[1]\r\n out = Reshape(target_shape=(1, dim, dim, filters* 2 * 2 * 2 * 2))(out)\r\n out = convlstm(out, filters * 2 * 2 * 2 * 2, kernel_size, padding, activation, kernel_initializer)\r\n out = UpBlock(out, filters * 2 * 2 * 2 * 2 * 2, kernel_size, padding, activation, kernel_initializer)\r\n out = Concatenate()([out, shortcut4_1])\r\n out = UpBlock(out, filters * 2 * 2 * 2 * 2, kernel_size, padding, activation, kernel_initializer)\r\n out = Concatenate()([out, shortcut3_1])\r\n out = UpBlock(out, filters * 2 * 2 * 2, kernel_size, padding, activation, kernel_initializer)\r\n out = Concatenate()([out, shortcut2_1])\r\n out = UpBlock(out, filters * 2 * 2, kernel_size, padding, activation, kernel_initializer)\r\n out = Concatenate()([out, shortcut1_2])\r\n out = FD_Block(out, f_in=filters, f_out=filters * 2, k=filters // 4, kernel_size=3, padding='same',activation='linear', kernel_initializer='glorot_normal')\r\n out = Conv2D(filters=1, kernel_size=1,strides=1, padding=padding,activation='linear', kernel_initializer=kernel_initializer)(out)\r\n out = Add()([out, shortcut1_1])\r\n return out\r\n\r\n\r\ndef getModel(input_shape, filters, kernel_size, padding='same',activation='relu', kernel_initializer='glorot_normal'):\r\n model_inputs = Input(shape=input_shape, name='img')\r\n model_outputs = Modified_D_UNet(model_inputs, filters=filters, kernel_size=kernel_size, padding=padding,activation=activation, kernel_initializer=kernel_initializer)\r\n model = Model(model_inputs, model_outputs, name='FD-UNet_Model')\r\n return model\r\n"
]
| [
[
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.ConvLSTM2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Input"
]
]
|
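A minimal smoke test for the FD-UNet builder above; this is a sketch, not code from the repository. It assumes the file is importable under its listed name and feeds a hypothetical 64x64 single-channel image (the spatial size must survive four halvings, and the final Add layer requires a one-channel input to match the one-channel prediction).

import numpy as np
from Fully_Dense_UNet_convLSTM_bridge import getModel  # module name assumed from the file path

# Four DownBlocks halve the spatial size, so 64 -> 4 at the ConvLSTM bridge.
model = getModel(input_shape=(64, 64, 1), filters=32, kernel_size=3)
model.summary()

# The residual output has the same shape as the input.
dummy = np.random.rand(1, 64, 64, 1).astype("float32")
print(model.predict(dummy).shape)  # (1, 64, 64, 1)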
maffei2443/trabalhoBD | [
"df8d69ce01aa31f98137c3a2df291ab38aa5bada"
]
| [
"user_interface.py"
]
| [
"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport os\nimport base64\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom crud import dao\n\ndef get_img(img):\n with open(img, \"rb\") as file:\n ret = base64.b64encode(file.read())\n return ret\n\ndef show_img(str_img):\n str_img = base64.b64decode(str_img)\n img = ''\n prf = '87894u08uio89he'\n gambs = prf + '.png'\n gambs2 = prf + '.jpg'\n with open(gambs, 'wb') as f:\n f.write(str_img)\n with open(gambs2, 'wb') as g:\n g.write(str_img)\n\n try: \n img = mpimg.imread(gambs)\n except Exception as e:\n try:\n img = mpimg.imread(gambs2)\n except Exception as e:\n print(e)\n \n imgplot = plt.imshow(img)\n a = plt.show()\n try:\n os.remove(gambs2)\n os.remove(gambs)\n except Exception as e:\n try:\n os.remove(gambs)\n except Exception as e:\n print(e)\n\n\ndef clear():\n try:\n os.system(\"clear\")\n except:\n try:\n os.system(\"cls\")\n except:\n print(\"Não foi possível limpar a tela.\")\n\ndef show_atb(data_obj, table_name, key):\n data = data_obj.get_all_tab(table_name, key)\n columns = data_obj.get_columns(table_name)\n\n print(\"\\nNome do atributo -- Valor\")\n print(\"-------------------------\")\n for i in range(len(data[0])):\n print((str(columns[i][0]) + \" -- \" + str(data[0][i]))[:200])\n\ndef show_columns(data_obj, table_name):\n columns = data_obj.get_columns(table_name)\n print(\"Atributos de \" + table_name + \":\")\n for column in columns:\n print(column[0])\n\ndef show_ids(data_obj, table_name):\n keys = data_obj.get_ids(table_name)\n\n print(\"id -- Nome\")\n print(\"-----------\")\n for item in keys:\n print(str(item[0]) + \" -- \" + item[1])\n\ndef check_ids(data_obj, table_name, key):\n keys = data_obj.get_ids(table_name)\n\n exist = False\n for item in keys:\n if key == str(item[0]):\n exist = True\n return exist\n\ndef check_column(data_obj, table_name, name):\n columns = data_obj.get_columns(table_name)\n\n exist = False\n for column in columns:\n if column[0] == name:\n exist = True\n return exist\n\ndef user_delete(data_obj):\n try:\n print(\"user_delete\")\n tables = data_obj.get_tables()\n for table in tables:\n print(\"-- \" + table)\n\n table_name = input(\"\\nDigite o nome do tipo de dado que deseja deletar: \")\n\n if table_name not in tables:\n input(\"Não é possível deletar um dado do tipo desejado, aperte ENTER para voltar ao menu\")\n return\n\n show_ids(data_obj, table_name)\n\n key = input(\"Digite o nome da chave do objeto que deseja deletar(chave de candidatura é o candidato, id da tabela para as demais tabelas): \")\n\n if not check_ids(data_obj, table_name, key):\n input(\"O id que se deseja deletar nao existe, aperte ENTER para voltar ao menu\")\n return\n\n data_obj.delete(table_name, key)\n\n input(table_name + \" deletado com sucesso, aperte ENTER para retornar ao menu\")\n\n except Exception as exception:\n print(exception)\n input(\"Digite algo para voltar ao menu\")\n\ndef user_create(data_obj):\n try:\n print(\"########## user_create ##########\")\n tables = data_obj.get_tables()\n for table in tables:\n print(\"-- \" + table)\n\n table_name = input(\"\\nDigite o nome do tipo de dado que deseja inserir: \")\n\n if table_name not in tables:\n input(\"Não é possível inserir um dado do tipo desejado, aperte ENTER para voltar ao menu\")\n return\n\n columns = data_obj.get_columns(table_name)\n\n values_names = \"\"\n values = \"\"\n for column in columns:\n if column[5] == \"auto_increment\":\n continue\n if column[2] == \"YES\":\n option = input(\"Deseja inserir \" + 
column[0] + \"?(Y/N)\")\n if option != 'Y' and option != 'y':\n continue\n\n value = input(\"Digite um valor do tipo \" + column[1] + \" para \" + column[0] + \": \")\n\n if column[1] == \"longblob\":\n value = \"\\\"\" + get_img(value).decode('ascii') + \"\\\"\"\n\n if values_names:\n values_names += \", \"\n if values:\n values += \", \"\n\n values_names += column[0]\n values += value\n\n data_obj.insert(table_name, values_names, values)\n input(table_name + \" inserido com sucesso, aperte ENTER para retornar ao menu\")\n\n except Exception as exception:\n print(exception)\n input(\"Digite algo para voltar ao menu\")\n\ndef user_update(data_obj):\n try:\n print(\"########## user_update ##########\")\n tables = data_obj.get_tables()\n for table in tables:\n print(\"-- \" + table)\n\n table_name = input(\"\\nDigite o nome do tipo de dado que deseja atualizar: \")\n\n if table_name not in tables:\n input(\"Não é possível atualizar um dado do tipo desejado, aperte ENTER para voltar ao menu\")\n return\n\n show_ids(data_obj, table_name)\n\n key = input(\"\\nDigite o id do item que deseja atualizar: \")\n\n if not check_ids(data_obj, table_name, key):\n input(\"O id que se deseja atualizar nao existe, aperte ENTER para voltar ao menu\")\n return\n\n show_atb(data_obj, table_name, key)\n\n name, value = input(\"Digite o nome do valor e o novo valor que deseja atribuir separados por espaco: \").split(\" \")\n\n if not check_column(data_obj, table_name, name):\n input(\"O atributo que se deseja atualizar nao existe, aperte ENTER para voltar ao menu\")\n return\n\n\n if name == \"foto\":\n value = \"\\\"\" + get_img(value).decode('ascii') + \"\\\"\"\n\n data_obj.update(table_name, name, value, key)\n\n input(table_name + \" atualizado com sucesso, aperte ENTER para retornar ao menu\")\n\n except Exception as exception:\n print(exception)\n input(\"Digite algo para voltar ao menu\")\n\ndef user_read(data_obj):\n try:\n print(\"########## user_read ##########\")\n\n tables = data_obj.get_tables()\n for table in tables:\n print(\"-- \" + table)\n\n table_name = input(\"\\nDigite o nome do tipo de dado que deseja ler: \")\n\n if table_name not in tables:\n input(\"Não é possível ler um dado do tipo desejado, aperte ENTER para voltar ao menu\")\n return\n\n show_columns(data_obj, table_name)\n\n atbs = input(\"Digite os nomes dos atributos que deseja ver separados por virgula (sem espaços): \")\n columns = atbs.split(\",\")\n\n for column in columns:\n if not check_column(data_obj, table_name, column):\n input(\"Não é possível ler o dado \" + column + \" aperte ENTER para voltar ao menu\")\n return\n\n data = data_obj.read(atbs, table_name)\n\n for item in data:\n print(\"\\nAtributo -- Valor\")\n print(\"------------------\")\n for i, _ in enumerate(item):\n if columns[i] != \"foto\" or item[i] == None:\n print(str(columns[i]) + \": \" + str(item[i]))\n else:\n show_img(item[i])\n\n input(\"\\nDigite ENTER para retornar ao menu\")\n except Exception as exception:\n print(exception)\n input(\"Digite algo para voltar ao menu\")\n\ndef user_special(data_obj):\n try:\n print(\"########## user_special ##########\")\n print(\"#1 - Candidatos de um local #\")\n print(\"#2 - Candidatos de um partido #\")\n print(\"#3 - Partidos de uma coligacao #\")\n print(\"#4 - Mostrar Presidentes #\")\n\n option = input(\"# Opção: \")\n\n if option == \"1\" or option == \"Candidatos de um local\":\n clear()\n local = input(\"Digite o nome do local: \")\n data = data_obj.Get_Proc(\"Local\", local)\n for item in data:\n 
print(\"Id -- Nome\")\n print(\"----------------------\")\n print(str(item[0]) + \"--\" + str(item[1]) + \"\\n\")\n input(\"Digite ENTER para voltar ao menu\")\n\n elif option == \"2\" or option == \"Candidatos de um partido\":\n clear()\n partido = input(\"Digite o nome do partido: \")\n data = data_obj.candidato_get_partido(\"Partido\", partido)\n for item in data:\n print(\"Id -- Nome\")\n print(\"----------------------\")\n print(str(item[0]) + \"--\" + str(item[1]) + \"\\n\")\n input(\"Digite ENTER para voltar ao menu\")\n\n elif option == \"3\" or option == \"Partidos de uma coligacao\":\n clear()\n colig = input(\"Digite o nome da coligacao: \")\n data = data_obj.partido_get_colig(\"Coligacao\", colig)\n for item in data:\n print(\"Id -- Nome\")\n print(\"----------------------\")\n print(str(item[0]) + \"--\" + str(item[1]) + \"\\n\")\n input(\"Digite ENTER para voltar ao menu\")\n \n elif option == \"4\" or option == \"Presidentes\":\n data = data_obj.read(\"*\", \"presidente\")\n print(\"Presidentes de Coligacao\")\n print(\"-------------------------\")\n for presidente in data:\n print(presidente[0])\n print()\n input(\"Digite ENTER para voltar ao menu\")\n\n except Exception as exception:\n print(exception)\n input(\"Digite algo para voltar ao menu\")\n\ndef main():\n user = input(\"Digite o nome do usuario mysql: \")\n passwd = input(\"Digite a senha do usuario mysql: \")\n\n # Cria conexao com o banco. No caso, caso voce possua uma instancia do MySQL rodando\n # localmente, pode-se atribuir ao parametro host o valor \"localhost\"\n \n # data_obj = dao(host=\"172.17.0.2\", port=3306, user=user, passwd=passwd)\n data_obj = dao(host=\"localhost\", port=3306, user=user, passwd=passwd)\n option = input(\"Deseja criar/resetar o banco? (Y/N) \")\n\n if option == \"Y\" or option == \"y\":\n data_obj.create_db()\n # data_obj.conn_db(host=\"172.17.0.2\", db=\"mydb\", port=3306, user=user, passwd=passwd)\n data_obj.conn_db(host=\"localhost\", db=\"mydb\", port=3306, user=user, passwd=passwd)\n\n stay = True\n\n while stay:\n clear()\n print(\"###################\")\n print(\"# Inserir - 1 #\")\n print(\"# Atualizar - 2 #\")\n print(\"# Ler - 3 #\")\n print(\"# Remover - 4 #\")\n print(\"# Consulta especial - 5 #\")\n print(\"# #\")\n print(\"# Sair - 6 #\")\n print(\"###################\")\n option = input(\"# Opção: \")\n\n if option == \"1\" or option == \"Inserir\":\n clear()\n user_create(data_obj)\n elif option == \"2\" or option == \"Atualizar\":\n clear()\n user_update(data_obj)\n elif option == \"3\" or option == \"Ler\":\n clear()\n user_read(data_obj)\n elif option == \"4\" or option == \"Remover\":\n clear()\n user_delete(data_obj)\n elif option == \"5\" or option == \"Consulta especial\":\n clear()\n user_special(data_obj)\n elif option == \"6\" or option == \"Sair\":\n clear()\n stay = False\n\n data_obj.close()\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"matplotlib.image.imread",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
]
|
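The get_img/show_img pair above stores an image as base64 text (for a MySQL longblob column) and decodes it back to a temporary file for display. A self-contained sketch of that round trip; "photo.png" is a placeholder path, not a file from the repository.

import base64
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

with open("photo.png", "rb") as f:
    encoded = base64.b64encode(f.read())   # raw bytes -> base64 bytes, as stored in the database

decoded = base64.b64decode(encoded)        # base64 bytes -> original raw bytes
with open("roundtrip.png", "wb") as f:
    f.write(decoded)

plt.imshow(mpimg.imread("roundtrip.png"))  # same display path as show_img
plt.show()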
Msameim181/RSNA-Bone-Age | [
"099d68c74aaf3311a6742d62c21146700e2c6c2c"
]
| [
"models/VGGNet/vggnet_model.py"
]
| [
"\"\"\"\nA from scratch implementation of the VGG architecture.\n\"\"\"\n\n# Imports\nimport torch\nimport torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions\n\nVGG_types = {\n \"VGG11\": [64, \"M\", 128, \"M\", 256, 256, \"M\", 512, 512, \"M\", 512, 512, \"M\"],\n \"VGG13\": [64, 64, \"M\", 128, 128, \"M\", 256, 256, \"M\", 512, 512, \"M\", 512, 512, \"M\"],\n \"VGG16\": [64, 64, \"M\", 128, 128, \"M\", 256, 256, 256, \"M\", 512, 512, 512, \"M\", 512, 512, 512, \"M\"],\n \"VGG19\": [64, 64, \"M\", 128, 128, \"M\", 256, 256, 256, 256, \"M\", 512, 512, 512, 512, \"M\", 512, 512, 512, 512, \"M\"],\n}\n\n\nclass VGGNet(nn.Module):\n def __init__(self, in_channels=3, num_classes=1000, type=\"VGG11\"):\n super(VGGNet, self).__init__()\n self.name = type\n self.n_channels = in_channels\n self.num_classes = num_classes\n self.in_channels = in_channels\n self.conv_layers = self.create_conv_layers(VGG_types[type])\n\n self.fcs = nn.Sequential(\n nn.Linear(512 * 285, 4096),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n )\n self.fc = nn.Linear(4096 + 1, num_classes)\n\n def forward(self, x):\n y = x[1]\n x = x[0]\n x = self.conv_layers(x)\n x = x.reshape(x.shape[0], -1)\n x = self.fcs(x)\n\n z = x\n y = torch.unsqueeze(y, 1).to(device='cuda', dtype=torch.float32)\n z = torch.cat((z, y), dim=1)\n \n x = self.fc(z)\n return x\n\n def create_conv_layers(self, architecture):\n layers = []\n in_channels = self.in_channels\n\n for x in architecture:\n if type(x) == int:\n out_channels = x\n\n layers += [\n nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=(1, 1),\n ),\n nn.BatchNorm2d(x),\n nn.ReLU(),\n ]\n in_channels = x\n elif x == \"M\":\n layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))]\n\n return nn.Sequential(*layers)\n\n\nif __name__ == \"__main__\":\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n net = VGGNet(in_channels=1, num_classes=229, type=\"VGG11\").to(device)\n # print(net)\n inp = torch.randn(1, 1, 500, 625).cuda()\n sx = torch.randn(1).cuda()\n\n out = net([inp, sx])\n print(out.shape)\n # N = 3 (Mini batch size)\n # x = torch.randn(1, 3, 224, 224).to(device)\n # print(net(x).shape)"
]
| [
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.cat",
"torch.randn",
"torch.nn.Conv2d",
"torch.unsqueeze",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
]
|
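A usage sketch for the VGGNet above, not taken from the repository. The forward pass expects an [image, scalar] pair and hardcodes device='cuda' for the scalar, so a CUDA device is assumed here; 500x625 is the input size implied by the hardcoded 512 * 285 flatten (a 15x19 feature map after five poolings).

import torch
from models.VGGNet.vggnet_model import VGGNet  # import path assumed from the listed file

net = VGGNet(in_channels=1, num_classes=229, type="VGG11").cuda()
image = torch.randn(2, 1, 500, 625).cuda()   # batch of two single-channel scans
scalar = torch.randn(2).cuda()               # one auxiliary value per sample (e.g. a sex flag)
print(net([image, scalar]).shape)            # torch.Size([2, 229])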
abdullah-zaiter/Camera-Calibration | [
"ac113cc36cc3c0ae4794f2e5a85b1ee9b6d244dd"
]
| [
"src/common.py"
]
| [
"import os\nimport numpy as np\nimport cv2 as cv\nfrom collections import deque\nfrom calibrationUtils import NewtonRaphsonUndistort\n\nCEND = '\\33[0m'\nCBOLD = '\\33[1m'\nCRED = '\\33[31m'\nCGREEN = '\\33[32m'\nCBLUE = '\\33[34m'\npointr1 = []\npointr2 = []\npointd1 = []\npointd2 = []\n\nmode__ = str()\n\ndef init():\n print('''Hi, Usage:\n - after collecting at least 5 images, you can click the character c to initialize the calibration process for that dataset.\n - To get the undistort windows, the process of calibration needs to be run at least 5 times, clicking c each time.\n - make sure the c character was clicked, if the program recognized it it'll print a flag of starting the calibration.\n ''')\n cam_number = int(\n input(\"Enter the webcam camera number as your system identifies it: \"), 10)\n cap = cv.VideoCapture(cam_number)\n seconds = float(input(\n \"Enter the amount of time between the frames choosed for calibration in seconds (accepts float values): \"))\n aux = input(\"To use solvePNP for extrinsics press (1) for normal method click anykey: \")\n if aux == \"1\":\n mode__ = \"solvePNP\"\n print(\"Setting mode to solvePNP\")\n pass\n fps = cap.get(cv.CAP_PROP_FPS) # Gets the frames per second\n print(str(fps)+\" FPS\")\n multiplier = fps * seconds\n images = deque(maxlen=5)\n os.system(\"mkdir ./output\")\n os.system(\"mkdir ./output/xmls\")\n print(\"deleting old xml files\")\n os.system(\"rm ./output/xmls/*.xml\")\n frameId = 0\n return cap, multiplier, images, frameId\n\ndef splitfn(fn):\n path, fn = os.path.split(fn)\n name, ext = os.path.splitext(fn)\n return path, name, ext\n\ndef static_vars(**kwargs):\n def decorate(func):\n for k in kwargs:\n setattr(func, k, kwargs[k])\n return func\n return decorate\n\ndef matricesPreparation():\n distortion_matrix = averageMatrixCaluclator(\"distortion\")\n camera_matrix = averageMatrixCaluclator(\"intrinsics\")\n extrinsics_matrix = averageMatrixCaluclator(\"extrinsics\")\n writeXmlsAvgs(distortion_matrix, camera_matrix, extrinsics_matrix)\n writeXmlStds()\n return camera_matrix, extrinsics_matrix, distortion_matrix\n\ndef averageMatrixCaluclator(mat):\n #mat = distortion or intrinsics\n import glob\n path = \"./output/xmls/\"+mat+\"_*.xml\"\n i = 0\n for filename in glob.glob(path):\n i += 1\n xmlfile = XmlFile(mat+\"_1\"\".xml\")\n avg_mat = xmlfile.readFromXml('matrix')\n for j in range(2,i+1):\n filename = mat+\"_\"+str(j)+\".xml\"\n xmlfile = XmlFile(filename)\n avg_mat += xmlfile.readFromXml('matrix')\n avg_mat /= i\n del i,j\n return avg_mat\n\ndef writeXmlStds():\n xmlf=XmlFile(\"stddistortion.xml\")\n xmlf.writeToXml('matrix', stdMatrixCaluclator(\"distortion\"))\n\n xmlf=XmlFile(\"stdintrinsics.xml\")\n xmlf.writeToXml('matrix', stdMatrixCaluclator(\"intrinsics\"))\n\n xmlf = XmlFile(\"stdextrinsics.xml\")\n xmlf.writeToXml('matrix', stdMatrixCaluclator(\"extrinsics\"))\n del xmlf\n\n\ndef writeXmlsAvgs(distortion_matrix, camera_matrix, extrinsics_matrix):\n xmlf = XmlFile(\"avgdistortion.xml\")\n xmlf.writeToXml('matrix', distortion_matrix)\n\n xmlf = XmlFile(\"avgintrinsics.xml\")\n xmlf.writeToXml('matrix', camera_matrix)\n\n xmlf = XmlFile(\"avgextrinsics.xml\")\n xmlf.writeToXml('matrix', extrinsics_matrix)\n del xmlf\n\ndef stdMatrixCaluclator(mat):\n #mat = distortion or intrinsics\n import glob\n path = \"./output/xmls/\"+mat+\"_*.xml\"\n i = 0\n for filename in glob.glob(path):\n i += 1\n xmlfile = XmlFile(mat+\"_1\"\".xml\")\n all_matrix = xmlfile.readFromXml('matrix')\n for j in range(2, i+1):\n filename = 
mat+\"_\"+str(j)+\".xml\"\n xmlfile = XmlFile(filename)\n all_matrix = np.dstack((all_matrix, xmlfile.readFromXml('matrix')))\n del i, j\n return np.std(all_matrix, 2, ddof=1)\n\n\nclass XmlFile:\n Name = \"\"\n def __init__(self, name):\n if not(name.endswith('.xml')):\n print(\"invalid extension for xml file, check the name.\")\n else:\n self.Name = name\n\n def writeToXml(self, label, value):\n outfile = './output/xmls/'\n outfile = os.path.join(outfile,self.Name)\n f = cv.FileStorage(outfile, flags=1)\n f.write(name=label, val=value)\n f.release()\n\n def readFromXml(self, label):\n outfile = './output/xmls/'\n outfile = os.path.join(outfile,self.Name)\n f = cv.FileStorage(outfile, flags=0)\n value = f.getNode(label).mat()\n f.release()\n return value\n"
]
| [
[
"numpy.std"
]
]
|
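stdMatrixCaluclator above turns repeated calibration runs into a per-entry sample standard deviation by stacking the matrices along a third axis. A self-contained sketch of the same reduction with synthetic matrices:

import numpy as np

runs = [np.eye(3) + 0.01 * np.random.randn(3, 3) for _ in range(5)]
stack = runs[0]
for m in runs[1:]:
    stack = np.dstack((stack, m))   # shape grows to (3, 3, 5)
print(np.std(stack, 2, ddof=1))     # element-wise sample standard deviation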
secretBiology/SecretPlots | [
"eca1d0e0932e605df49d1f958f98a1f41200d589"
]
| [
"SecretPlots/network/pathfinder.py"
]
| [
"# SecretPlots\n# Copyright (c) 2019. SecretBiology\n#\n# Author: Rohit Suratekar\n# Organisation: SecretBiology\n# Website: https://github.com/secretBiology/SecretPlots\n# Licence: MIT License\n# Creation: 11/15/19, 11:09 AM\n#\n#\n# Path finder functions related to the our network\n\nfrom typing import List, Dict, Union\n\nimport numpy as np\nfrom SecretPlots.utils import Log\n\n\n# np.set_printoptions(linewidth=300)\n\n\nclass Point:\n def __init__(self, index: int):\n self.index = index\n self.row = None\n self.column = None\n self.mark = \".\"\n self.value = np.inf\n self.original_value = None\n self.previous_node = None\n\n @property\n def pos(self):\n return self.row, self.column\n\n def __repr__(self):\n return f\"{self.mark}\"\n # return f\"({self.value}, {self.previous_node})\"\n\n def update(self, value, node):\n if value < self.value:\n self.value = value\n self.previous_node = node\n\n\nclass PathFinder:\n def __init__(self, graph):\n self._raw_data = np.asanyarray(graph)\n self._graph = None\n self.path_mark = \"o\"\n self.empty_mark = \".\"\n self.block_mark = \"|\"\n self.end_mark = \"X\"\n self.start_mark = \"o\"\n self.block_item = None\n self.block_penalty = np.inf\n self.show_blocks = False\n\n @property\n def graph(self) -> np.ndarray:\n if self._graph is None:\n self._generate_nodes()\n return self._graph\n\n @property\n def columns(self):\n return self.graph.shape[1]\n\n @property\n def rows(self):\n return self.graph.shape[0]\n\n def _generate_nodes(self):\n data = self._raw_data.flatten()\n new_data = []\n for i in range(len(data)):\n node = Point(i)\n node.row = int(i / self._raw_data.shape[1])\n node.column = i % self._raw_data.shape[1]\n node.original_value = data[i]\n node.mark = self.empty_mark\n if self.show_blocks and node.original_value == self.block_item:\n node.mark = self.block_mark\n new_data.append(node)\n\n data = new_data\n data = np.array(data, dtype=Point).reshape(self._raw_data.shape)\n self._graph = data\n\n def get_surrounding(self, row, col) -> List[Point]:\n def _get_item(r, c):\n if r < 0 or c < 0 or r >= self.rows or c >= self.columns:\n return None\n else:\n return self.graph[r, c]\n\n data = [\n _get_item(row, col + 1), # Right\n _get_item(row + 1, col), # Bottom\n _get_item(row, col - 1), # Left\n _get_item(row - 1, col) # Top\n\n ]\n return [x for x in data if x is not None]\n\n def dijkstra_distance(self, start_node: Point, end_node: Point):\n if end_node.original_value == self.block_item:\n return self.block_penalty\n return start_node.value + 1\n\n @staticmethod\n def cartesian_distance(node1: Point, node2: Point):\n y1, x1 = node1.pos\n y2, x2 = node2.pos\n return np.sqrt(np.square(x1 - x2) + np.square(y1 - y2))\n\n def a_star_distance(self,\n start_node: Point,\n end_node: Point,\n final_node: Point):\n\n if end_node.original_value == self.block_item:\n return self.block_penalty\n return start_node.value + self.cartesian_distance(end_node, final_node)\n\n def _generate_distance_matrix(self,\n start_index: int,\n end_index: int, *,\n use_dijkstra: bool):\n\n start_row, start_col = self.split_index(start_index, self.columns)\n end_row, end_col = self.split_index(end_index, self.columns)\n self.graph[start_row, start_col].value = 0\n unvisited = [x for x in self.graph.flatten()]\n while len(unvisited) >= 1:\n unvisited = sorted(unvisited, key=lambda x: x.value)\n current = unvisited[0]\n row, col = current.pos\n for a in self.get_surrounding(row, col):\n if use_dijkstra:\n a.update(self.dijkstra_distance(current, a), current.index)\n 
else:\n a.update(\n self.a_star_distance(current,\n a,\n self.graph[end_row, end_col]),\n current.index)\n unvisited.remove(current)\n\n @staticmethod\n def split_index(index, width):\n row = int(index / width)\n col = index % width\n return row, col\n\n def _trace_path(self, start, end) -> list:\n path = []\n row, col = self.split_index(end, self.columns)\n item = self.graph[row, col] # type:Point\n path.append(item.index)\n while item != start:\n\n item.mark = self.path_mark\n if item.index == end:\n item.mark = self.end_mark\n elif item.index == start:\n item.mark = self.start_mark\n if item.previous_node is None:\n break\n nr, nc = self.split_index(item.previous_node, self.columns)\n item = self.graph[nr, nc]\n path.append(item.index)\n\n path = list(reversed(path))\n return path\n\n def find_path(self, start, end, *, use_dijkstra=False):\n self._generate_distance_matrix(start, end, use_dijkstra=use_dijkstra)\n self._trace_path(start, end)\n print(self.graph)\n # Reset Data\n self._generate_nodes()\n\n def boolean_path(self, start, end, *,\n use_dijkstra=False,\n exclude_start=False,\n exclude_end=False) -> np.ndarray:\n\n self._generate_distance_matrix(start, end, use_dijkstra=use_dijkstra)\n self._trace_path(start, end)\n data = self.graph.flatten()\n for i in range(len(data)):\n if data[i].mark in [self.path_mark, self.end_mark]:\n data[i] = 1\n else:\n data[i] = 0\n if exclude_start:\n data[start] = 0\n if exclude_end:\n data[end] = 0\n data = np.asanyarray(data).reshape(self.graph.shape)\n return data\n\n def path_index(self, start, end, *,\n use_dijkstra=False,\n exclude_start=False,\n exclude_end=False) -> list:\n\n self._generate_distance_matrix(start, end, use_dijkstra=use_dijkstra)\n return self._trace_path(start, end)\n\n\nclass MatItem:\n\n def __init__(self, height, width):\n self.x = None\n self.y = None\n self.height = height\n self.width = width\n self.name = \"N/A\"\n self.column = None\n self.row = None\n\n def assign(self, index: int, width: int):\n row = int(index / width)\n col = index % width\n self.x = col * self.width\n self.y = row * self.height\n self.column = col\n self.row = row\n\n @property\n def is_gap(self):\n raise NotImplementedError()\n\n\nclass Edge:\n def __init__(self, start, end, link_type):\n self.start = start\n self.end = end\n\n\nclass Node(MatItem):\n @property\n def is_gap(self):\n return False\n\n def __init__(self, name, height, width):\n super().__init__(height, width)\n self.name = name\n self.links = 0\n self.paths = {}\n self.color = None\n\n def __repr__(self):\n return f\"({self.name} {self.links})\"\n\n def add_paths(self, node_name, indexes):\n self.paths[node_name] = indexes\n\n\nclass Gap(MatItem):\n @property\n def is_gap(self):\n return True\n\n def __init__(self, height, width):\n super().__init__(height, width)\n self.lines = []\n self.v_slots = 1\n self.h_slots = 1\n\n def add_line(self, node_name: str):\n if node_name not in self.lines:\n self.lines.append(node_name)\n\n def __repr__(self):\n if len(self.lines) == 0:\n return f\"( )\"\n else:\n return f\"( {''.join(self.lines)} )\"\n\n\nclass Space:\n def __init__(self, data, height, width, log: Log):\n self._raw_data = data\n self._log = log\n self._nodes = None\n self._max_cols = None\n self._matrix = None\n self.node_gap = 1\n self.use_dijkstra = True\n self.node_height = height\n self.node_width = width\n self.node_placement = None\n self._assignment_done = False\n\n @property\n def nodes(self) -> Dict[str, Node]:\n if self._nodes is None:\n nodes = {}\n # Create empty 
nodes\n for d in self._raw_data:\n nodes[d[0]] = Node(d[0], height=self.node_height,\n width=self.node_width)\n nodes[d[1]] = Node(d[1], height=self.node_height,\n width=self.node_width)\n\n # Add details\n for d in self._raw_data:\n nodes[d[0]].links += 1\n nodes[d[1]].links += 1\n\n self._nodes = nodes\n return self._nodes\n\n @property\n def max_cols(self) -> int:\n if self._max_cols is None:\n self._max_cols = int(np.sqrt(len(self.nodes)))\n return self._max_cols\n\n @max_cols.setter\n def max_cols(self, value: int):\n if not isinstance(value, int):\n raise ValueError(f\"Column size can not be {type(value)}\")\n self._max_cols = value\n\n @property\n def matrix(self) -> np.ndarray:\n if self._matrix is None:\n self._place_nodes()\n return self._matrix\n\n def _place_with_arrangement(self):\n self._log.info(\"User has defined the node placement\")\n mat = [x for x in self.nodes.values()]\n new_mat = []\n for d in self.node_placement:\n if d not in self.nodes.keys():\n self._log.warn(f\"{d} not found in the current space\")\n continue\n new_mat.append(self.nodes[d])\n mat.remove(self.nodes[d])\n new_mat.extend(mat)\n\n while len(new_mat) % self.max_cols != 0:\n new_mat.insert(-1, None)\n\n new_mat = np.array(new_mat).reshape((-1, self.max_cols))\n self._matrix = new_mat\n\n def _place_nodes(self):\n if self.node_placement is not None:\n self._place_with_arrangement()\n return\n\n mat = [x for x in self.nodes.values()]\n forward_array = []\n reverse_array = []\n swap_array = True\n while len(mat) > 0:\n mat = sorted(mat, key=lambda x: x.links, reverse=True)\n if swap_array:\n forward_array.append(mat[0])\n else:\n reverse_array.append(mat[0])\n mat.pop(0)\n swap_array = not swap_array\n mat = forward_array\n mat.extend(reversed(reverse_array))\n\n while len(mat) % self.max_cols != 0:\n mat.insert(-1, None)\n\n mat = np.array(mat).reshape((-1, self.max_cols))\n\n self._matrix = mat\n self._log.info(\"Automatic node placement is done\")\n\n def _add_space(self):\n ng = self.node_gap + 1\n rows = self.matrix.shape[0] * ng + (ng - 1)\n cols = self.matrix.shape[1] * ng + (ng - 1)\n a = [None] * rows * cols\n a = np.array(a).reshape((rows, cols))\n a[(ng - 1)::ng, (ng - 1)::ng] = self.matrix\n k = [Gap(height=self.node_height,\n width=self.node_width) if x is None else x\n for x in a.flatten()]\n k = np.array(k).reshape(a.shape)\n self._matrix = k\n\n @property\n def boolean_matrix(self) -> np.ndarray:\n x = [0 if m.is_gap else None for m in self.matrix.flatten()]\n x = np.array(x).reshape(self.matrix.shape)\n return x\n\n def _get_node_coord(self, node: Node) -> tuple:\n k = list(self.matrix.flatten())\n k = k.index(node)\n return self._convert_idx(k)\n\n def _get_node_index(self, node: Node, is_output: bool):\n row, col = self._get_node_coord(node)\n if is_output:\n row -= 1 # TODO: Check properly\n else:\n row += 1\n return row * self.matrix.shape[1] + col\n\n def _get_item_at(self, index: int) -> Union[Gap, Node]:\n row, col = self._convert_idx(index)\n return self.matrix[row, col]\n\n def _convert_idx(self, idx) -> tuple:\n row = int(idx / self.matrix.shape[1])\n col = idx % self.matrix.shape[1]\n return row, col\n\n def _assign_edges(self):\n for d in self._raw_data:\n p = PathFinder(self.boolean_matrix)\n start_node = self._get_node_index(self.nodes[d[0]], True)\n end_node = self._get_node_index(self.nodes[d[1]], False)\n idx = []\n for i in p.path_index(start_node, end_node,\n use_dijkstra=self.use_dijkstra):\n r, c = self._convert_idx(i)\n idx.append((i, r, c))\n\n 
self.nodes[d[0]].add_paths(d[1], idx)\n\n def get(self) -> np.ndarray:\n if not self._assignment_done:\n self._add_space()\n self._assign_edges()\n self._assignment_done = True\n return self.matrix\n\n\ndef run():\n data = [\n [\"a\", \"b\", 1],\n [\"a\", \"c\", 1],\n [\"a\", \"d\", 1],\n [\"c\", \"d\", 1],\n [\"b\", \"d\", 1],\n ]\n"
]
| [
[
"numpy.square",
"numpy.array",
"numpy.asanyarray"
]
]
|
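A hypothetical standalone run of the PathFinder above, assuming it is imported from SecretPlots.network.pathfinder. Cells equal to block_item (None by default) receive an infinite penalty, so the route avoids them; start and end are row-major flat indices.

from SecretPlots.network.pathfinder import PathFinder

grid = [
    [0, 0,    0,    0],
    [0, None, None, 0],
    [0, 0,    0,    0],
]
p = PathFinder(grid)
# Shortest route from the top-left cell (index 0) to index 7 on the right edge.
print(p.path_index(0, 7, use_dijkstra=True))   # expected: [0, 1, 2, 3, 7]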
alanmatzumiya/Paper | [
"d65ff68475eb72324594701d06754d0d005f6a86"
]
| [
"examples/burgers_example/test_burgers.py"
]
| [
"from numpy import sin, pi, linspace\nfrom pySpectralFPK import FPK_solver\nfrom timeit import timeit\n\n\ndef u0(x):\n \"\"\"\n Initial Condition\n Parameters\n ----------\n x : array or float;\n Real space\n Returns\n -------\n array or float : Initial condition evaluated in the real space\n \"\"\"\n return sin(pi * x)\n\n\nparams = dict(nu=0.01, x=linspace(0, 1, 256), t=linspace(0, 10, 128), N=5)\n\nburgers_solved = FPK_solver(u0, params=params, equation=\"burgers\")\nburgers_approx = FPK_solver(burgers_solved.u0_approx, params=params, equation=\"burgers\")\n\ndata = burgers_solved.get_data\ndata_approx = burgers_approx.get_data\ndistance = burgers_solved.stability(data, data_approx)\n\nburgers_solved.plot.graph_3d()\nburgers_approx.plot.graph_3d()\nburgers_solved.plot.graph_time([i for i in range(0, 10)], data_approx)\nburgers_solved.plot.graph_distance(distance)\n\n\n# Number of runs\nN = 10\n\n# Average time\ntime = timeit(FPK_solver(u0, params=params, equation=\"burgers\"), number=N) / N\n\n# Save file\nfile = open('execution_time', 'w')\nfile.write('execution_time' + ' = ' + str(time) + ' seconds')\nfile.close()\n\n"
]
| [
[
"numpy.linspace",
"numpy.sin"
]
]
|
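The timing block in the script above wraps the solver call in a lambda because timeit only accepts a string or a zero-argument callable; passing the result of FPK_solver(...) directly would raise a ValueError. A tiny self-contained illustration of the same averaging pattern:

from timeit import timeit

N = 10
avg = timeit(lambda: sum(range(100_000)), number=N) / N   # stand-in workload
print(f"{avg:.6f} seconds per run")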
orduek/nilearn | [
"1e244d8bddbf755611b854c37d28f121d68a59d7"
]
| [
"nilearn/image/tests/test_resampling.py"
]
| [
"\"\"\"\nTest the resampling code.\n\"\"\"\nimport os\nimport copy\nimport math\n\nfrom numpy.testing import (assert_almost_equal,\n assert_array_equal,\n assert_array_almost_equal)\nimport numpy as np\nimport pytest\n\nfrom nibabel import Nifti1Image\n\nfrom nilearn.image.resampling import resample_img, resample_to_img, reorder_img\nfrom nilearn.image.resampling import from_matrix_vector, coord_transform\nfrom nilearn.image.resampling import get_bounds\nfrom nilearn.image.resampling import BoundingBoxError\nfrom nilearn.image.image import _pad_array, crop_img\nfrom nilearn._utils import testing\nfrom nilearn.image import get_data\n\n\n###############################################################################\n# Helper function\ndef rotation(theta, phi):\n \"\"\" Returns a rotation 3x3 matrix.\n \"\"\"\n cos = np.cos\n sin = np.sin\n a1 = np.array([[cos(theta), -sin(theta), 0],\n [sin(theta), cos(theta), 0],\n [0, 0, 1]])\n a2 = np.array([[1, 0, 0],\n [0, cos(phi), -sin(phi)],\n [0, sin(phi), cos(phi)]])\n return np.dot(a1, a2)\n\n\n###############################################################################\n# Tests\ndef test_identity_resample():\n \"\"\" Test resampling with an identity affine.\n \"\"\"\n shape = (3, 2, 5, 2)\n data = np.random.randint(0, 10, shape)\n affine = np.eye(4)\n affine[:3, -1] = 0.5 * np.array(shape[:3])\n rot_img = resample_img(Nifti1Image(data, affine),\n target_affine=affine, interpolation='nearest')\n np.testing.assert_almost_equal(data, get_data(rot_img))\n # Smoke-test with a list affine\n rot_img = resample_img(Nifti1Image(data, affine),\n target_affine=affine.tolist(),\n interpolation='nearest')\n # Test with a 3x3 affine\n rot_img = resample_img(Nifti1Image(data, affine),\n target_affine=affine[:3, :3],\n interpolation='nearest')\n np.testing.assert_almost_equal(data, get_data(rot_img))\n\n # Test with non native endian data\n\n # Test with big endian data ('>f8')\n for interpolation in ['nearest', 'linear', 'continuous']:\n rot_img = resample_img(Nifti1Image(data.astype('>f8'), affine),\n target_affine=affine.tolist(),\n interpolation=interpolation)\n np.testing.assert_almost_equal(data, get_data(rot_img))\n\n # Test with little endian data ('<f8')\n for interpolation in ['nearest', 'linear', 'continuous']:\n rot_img = resample_img(Nifti1Image(data.astype('<f8'), affine),\n target_affine=affine.tolist(),\n interpolation=interpolation)\n np.testing.assert_almost_equal(data, get_data(rot_img))\n\n\ndef test_downsample():\n \"\"\" Test resampling with a 1/2 down-sampling affine.\n \"\"\"\n rand_gen = np.random.RandomState(0)\n shape = (6, 3, 6, 2)\n data = rand_gen.random_sample(shape)\n affine = np.eye(4)\n rot_img = resample_img(Nifti1Image(data, affine),\n target_affine=2 * affine, interpolation='nearest')\n downsampled = data[::2, ::2, ::2, ...]\n x, y, z = downsampled.shape[:3]\n np.testing.assert_almost_equal(downsampled,\n get_data(rot_img)[:x, :y, :z, ...])\n\n rot_img_2 = resample_img(Nifti1Image(data, affine),\n target_affine=2 * affine, interpolation='nearest',\n force_resample=True)\n np.testing.assert_almost_equal(get_data(rot_img_2),\n get_data(rot_img))\n # Test with non native endian data\n\n # Test to check that if giving non native endian data as input should\n # work as normal and expected to return the same output as above tests.\n\n # Big endian data ('>f8')\n for copy in [True, False]:\n rot_img = resample_img(Nifti1Image(data.astype('>f8'), affine),\n target_affine=2 * affine,\n interpolation='nearest',\n copy=copy)\n 
np.testing.assert_almost_equal(downsampled,\n get_data(rot_img)[:x, :y, :z, ...])\n\n # Little endian data\n for copy in [True, False]:\n rot_img = resample_img(Nifti1Image(data.astype('<f8'), affine),\n target_affine=2 * affine,\n interpolation='nearest',\n copy=copy)\n np.testing.assert_almost_equal(downsampled,\n get_data(rot_img)[:x, :y, :z, ...])\n\n\ndef test_resampling_fill_value():\n \"\"\" Test resampling with a non-zero fill value\n \"\"\"\n prng = np.random.RandomState(10)\n\n data_3d = prng.rand(1, 4, 4)\n data_4d = prng.rand(1, 4, 4, 3)\n\n angle = np.pi/4\n rot = rotation(0, angle)\n\n # Try a few different fill values\n for data in [data_3d, data_4d]:\n for val in (-3.75, 0):\n if val:\n rot_img = resample_img(Nifti1Image(data, np.eye(4)),\n target_affine=rot,\n interpolation='nearest',\n fill_value=val,\n clip=False)\n else:\n rot_img = resample_img(Nifti1Image(data, np.eye(4)),\n target_affine=rot,\n interpolation='nearest',\n clip=False)\n assert (get_data(rot_img).flatten()[0] ==\n val)\n\n rot_img2 = resample_to_img(Nifti1Image(data, np.eye(4)),\n rot_img,\n interpolation='nearest',\n fill_value=val)\n assert (get_data(rot_img2).flatten()[0] ==\n val)\n\n\ndef test_resampling_with_affine():\n \"\"\" Test resampling with a given rotation part of the affine.\n \"\"\"\n prng = np.random.RandomState(10)\n\n data_3d = prng.randint(4, size=(1, 4, 4))\n data_4d = prng.randint(4, size=(1, 4, 4, 3))\n\n for data in [data_3d, data_4d]:\n for angle in (0, np.pi, np.pi / 2., np.pi / 4., np.pi / 3.):\n rot = rotation(0, angle)\n rot_img = resample_img(Nifti1Image(data, np.eye(4)),\n target_affine=rot,\n interpolation='nearest')\n assert (np.max(data) ==\n np.max(get_data(rot_img)))\n assert get_data(rot_img).dtype == data.dtype\n\n # We take the same rotation logic as above and test with nonnative endian\n # data as input\n for data in [data_3d, data_4d]:\n img = Nifti1Image(data.astype('>f8'), np.eye(4))\n for angle in (0, np.pi, np.pi / 2., np.pi / 4., np.pi / 3.):\n rot = rotation(0, angle)\n rot_img = resample_img(img, target_affine=rot,\n interpolation='nearest')\n assert (np.max(data) ==\n np.max(get_data(rot_img)))\n\n\ndef test_resampling_continuous_with_affine():\n prng = np.random.RandomState(10)\n\n data_3d = prng.randint(1, 4, size=(1, 10, 10))\n data_4d = prng.randint(1, 4, size=(1, 10, 10, 3))\n\n for data in [data_3d, data_4d]:\n for angle in (0, np.pi / 2., np.pi, 3 * np.pi / 2.):\n rot = rotation(0, angle)\n\n img = Nifti1Image(data, np.eye(4))\n rot_img = resample_img(\n img,\n target_affine=rot,\n interpolation='continuous')\n rot_img_back = resample_img(\n rot_img,\n target_affine=np.eye(4),\n interpolation='continuous')\n\n center = slice(1, 9)\n # values on the edges are wrong for some reason\n mask = (0, center, center)\n np.testing.assert_allclose(\n get_data(img)[mask],\n get_data(rot_img_back)[mask])\n assert (get_data(rot_img).dtype ==\n np.dtype(data.dtype.name.replace('int', 'float')))\n\n\ndef test_resampling_error_checks():\n shape = (3, 2, 5, 2)\n target_shape = (5, 3, 2)\n affine = np.eye(4)\n data = np.random.randint(0, 10, shape)\n img = Nifti1Image(data, affine)\n\n # Correct parameters: no exception\n resample_img(img, target_shape=target_shape, target_affine=affine)\n resample_img(img, target_affine=affine)\n\n with testing.write_tmp_imgs(img) as filename:\n resample_img(filename, target_shape=target_shape, target_affine=affine)\n\n # Missing parameter\n pytest.raises(ValueError, resample_img, img, target_shape=target_shape)\n\n # Invalid 
shape\n pytest.raises(ValueError, resample_img, img, target_shape=(2, 3),\n target_affine=affine)\n\n # Invalid interpolation\n interpolation = 'an_invalid_interpolation'\n pattern = \"interpolation must be either.+{0}\".format(interpolation)\n with pytest.raises(ValueError, match=pattern):\n resample_img(img,\n target_shape=target_shape,\n target_affine=affine,\n interpolation=\"an_invalid_interpolation\"\n )\n\n # Noop\n target_shape = shape[:3]\n\n img_r = resample_img(img, copy=False)\n assert img_r == img\n\n img_r = resample_img(img, copy=True)\n assert not np.may_share_memory(get_data(img_r), get_data(img))\n\n np.testing.assert_almost_equal(get_data(img_r), get_data(img))\n np.testing.assert_almost_equal(img_r.affine, img.affine)\n\n img_r = resample_img(img, target_affine=affine, target_shape=target_shape,\n copy=False)\n assert img_r == img\n\n img_r = resample_img(img, target_affine=affine, target_shape=target_shape,\n copy=True)\n assert not np.may_share_memory(get_data(img_r), get_data(img))\n np.testing.assert_almost_equal(get_data(img_r), get_data(img))\n np.testing.assert_almost_equal(img_r.affine, img.affine)\n\n\ndef test_4d_affine_bounding_box_error():\n\n small_data = np.ones([4, 4, 4])\n small_data_4D_affine = np.eye(4)\n small_data_4D_affine[:3, -1] = np.array([5, 4, 5])\n\n small_img = Nifti1Image(small_data,\n small_data_4D_affine)\n\n bigger_data_4D_affine = np.eye(4)\n bigger_data = np.zeros([10, 10, 10])\n bigger_img = Nifti1Image(bigger_data,\n bigger_data_4D_affine)\n\n # We would like to check whether all/most of the data\n # will be contained in the resampled image\n # The measure will be the l2 norm, since some resampling\n # schemes approximately conserve it\n\n def l2_norm(arr):\n return (arr ** 2).sum()\n\n # resample using 4D affine and specified target shape\n small_to_big_with_shape = resample_img(\n small_img,\n target_affine=bigger_img.affine,\n target_shape=bigger_img.shape)\n # resample using 3D affine and no target shape\n small_to_big_without_shape_3D_affine = resample_img(\n small_img,\n target_affine=bigger_img.affine[:3, :3])\n # resample using 4D affine and no target shape\n small_to_big_without_shape = resample_img(\n small_img,\n target_affine=bigger_img.affine)\n\n # The first 2 should pass\n assert_almost_equal(l2_norm(small_data),\n l2_norm(get_data(small_to_big_with_shape)))\n assert_almost_equal(\n l2_norm(small_data),\n l2_norm(get_data(small_to_big_without_shape_3D_affine)))\n\n # After correcting decision tree for 4x4 affine given + no target shape\n # from \"use initial shape\" to \"calculate minimal bounding box respecting\n # the affine anchor and the data\"\n assert_almost_equal(l2_norm(small_data),\n l2_norm(get_data(small_to_big_without_shape)))\n\n assert_array_equal(\n small_to_big_without_shape.shape,\n small_data_4D_affine[:3, -1] + np.array(small_img.shape))\n\n\ndef test_raises_upon_3x3_affine_and_no_shape():\n img = Nifti1Image(np.zeros([8, 9, 10]),\n affine=np.eye(4))\n exception = ValueError\n message = (\"Given target shape without anchor \"\n \"vector: Affine shape should be \\\\(4, 4\\\\) and \"\n \"not \\\\(3, 3\\\\)\")\n with pytest.raises(exception, match=message):\n resample_img(img, target_affine=np.eye(3) * 2,\n target_shape=(10, 10, 10)\n )\n\n\ndef test_3x3_affine_bbox():\n # Test that the bounding-box is properly computed when\n # transforming with a negative affine component\n # This is specifically to test for a change in behavior between\n # scipy < 0.18 and scipy >= 0.18, which is an interaction 
between\n # offset and a diagonal affine\n image = np.ones((20, 30))\n source_affine = np.eye(4)\n # Give the affine an offset\n source_affine[:2, 3] = np.array([96, 64])\n\n # We need to turn this data into a nibabel image\n img = Nifti1Image(image[:, :, np.newaxis], affine=source_affine)\n\n target_affine_3x3 = np.eye(3) * 2\n # One negative axes\n target_affine_3x3[1] *= -1\n\n img_3d_affine = resample_img(img, target_affine=target_affine_3x3)\n\n # If the bounding box is computed wrong, the image will be only\n # zeros\n np.testing.assert_allclose(get_data(img_3d_affine).max(), image.max())\n\n\ndef test_raises_bbox_error_if_data_outside_box():\n # Make some cases which should raise exceptions\n\n # original image\n data = np.zeros([8, 9, 10])\n affine = np.eye(4)\n affine_offset = np.array([1, 1, 1])\n affine[:3, 3] = affine_offset\n\n img = Nifti1Image(data, affine)\n\n # some axis flipping affines\n axis_flips = np.array(list(map(np.diag,\n [[-1, 1, 1, 1],\n [1, -1, 1, 1],\n [1, 1, -1, 1],\n [-1, -1, 1, 1],\n [-1, 1, -1, 1],\n [1, -1, -1, 1]])))\n\n # some in plane 90 degree rotations base on these\n # (by permuting two lines)\n af = axis_flips\n rotations = np.array([af[0][[1, 0, 2, 3]],\n af[0][[2, 1, 0, 3]],\n af[1][[1, 0, 2, 3]],\n af[1][[0, 2, 1, 3]],\n af[2][[2, 1, 0, 3]],\n af[2][[0, 2, 1, 3]]])\n\n new_affines = np.concatenate([axis_flips,\n rotations])\n new_offset = np.array([0., 0., 0.])\n new_affines[:, :3, 3] = new_offset[np.newaxis, :]\n\n for new_affine in new_affines:\n exception = BoundingBoxError\n message = (\"The field of view given \"\n \"by the target affine does \"\n \"not contain any of the data\")\n\n with pytest.raises(exception, match=message):\n resample_img(img, target_affine=new_affine)\n\n\ndef test_resampling_result_axis_permutation():\n # Transform real data using easily checkable transformations\n # For now: axis permutations\n # create a cuboid full of deterministic data, padded with one\n # voxel thickness of zeros\n core_shape = (3, 5, 4)\n core_data = np.arange(np.prod(core_shape)).reshape(core_shape)\n full_data_shape = np.array(core_shape) + 2\n full_data = np.zeros(full_data_shape)\n full_data[[slice(1, 1 + s) for s in core_shape]] = core_data\n\n source_img = Nifti1Image(full_data, np.eye(4))\n\n axis_permutations = [[0, 1, 2],\n [1, 0, 2],\n [2, 1, 0],\n [0, 2, 1]]\n\n # check 3x3 transformation matrix\n for ap in axis_permutations:\n target_affine = np.eye(3)[ap]\n resampled_img = resample_img(source_img,\n target_affine=target_affine)\n\n resampled_data = get_data(resampled_img)\n what_resampled_data_should_be = full_data.transpose(ap)\n assert_array_almost_equal(resampled_data,\n what_resampled_data_should_be)\n\n # check 4x4 transformation matrix\n offset = np.array([-2, 1, -3])\n for ap in axis_permutations:\n target_affine = np.eye(4)\n target_affine[:3, :3] = np.eye(3)[ap]\n target_affine[:3, 3] = offset\n\n resampled_img = resample_img(source_img,\n target_affine=target_affine)\n resampled_data = get_data(resampled_img)\n offset_cropping = np.vstack([-offset[ap][np.newaxis, :],\n np.zeros([1, 3])]\n ).T.ravel().astype(int)\n what_resampled_data_should_be = _pad_array(full_data.transpose(ap),\n list(offset_cropping))\n\n assert_array_almost_equal(resampled_data,\n what_resampled_data_should_be)\n\n\ndef test_resampling_nan():\n # Test that when the data has NaNs they do not propagate to the\n # whole image\n\n for core_shape in [(3, 5, 4), (3, 5, 4, 2)]:\n # create deterministic data, padded with one\n # voxel thickness of zeros\n 
core_data = np.arange(np.prod(core_shape)\n ).reshape(core_shape).astype(np.float)\n # Introduce a nan\n core_data[2, 2:4, 1] = np.nan\n full_data_shape = np.array(core_shape) + 2\n full_data = np.zeros(full_data_shape)\n full_data[[slice(1, 1 + s) for s in core_shape]] = core_data\n\n source_img = Nifti1Image(full_data, np.eye(4))\n\n # Transform real data using easily checkable transformations\n # For now: axis permutations\n axis_permutation = [0, 1, 2]\n\n # check 3x3 transformation matrix\n target_affine = np.eye(3)[axis_permutation]\n with pytest.warns(RuntimeWarning):\n resampled_img = resample_img(source_img,\n target_affine=target_affine)\n\n resampled_data = get_data(resampled_img)\n if full_data.ndim == 4:\n axis_permutation.append(3)\n what_resampled_data_should_be = full_data.transpose(axis_permutation)\n non_nan = np.isfinite(what_resampled_data_should_be)\n\n # Check that the input data hasn't been modified:\n assert not np.all(non_nan)\n\n # Check that for finite value resampling works without problems\n assert_array_almost_equal(resampled_data[non_nan],\n what_resampled_data_should_be[non_nan])\n\n # Check that what was not finite is still not finite\n assert not np.any(np.isfinite(\n resampled_data[np.logical_not(non_nan)]))\n\n # Test with an actual resampling, in the case of a bigish hole\n # This checks the extrapolation mechanism: if we don't do any\n # extrapolation before resampling, the hole creates big\n # artefacts\n data = 10 * np.ones((10, 10, 10))\n data[4:6, 4:6, 4:6] = np.nan\n source_img = Nifti1Image(data, 2 * np.eye(4))\n with pytest.warns(RuntimeWarning):\n resampled_img = resample_img(source_img,\n target_affine=np.eye(4))\n\n resampled_data = get_data(resampled_img)\n np.testing.assert_allclose(10, resampled_data[np.isfinite(resampled_data)])\n\n\ndef test_resample_to_img():\n # Testing resample to img function\n rand_gen = np.random.RandomState(0)\n shape = (6, 3, 6, 3)\n data = rand_gen.random_sample(shape)\n\n source_affine = np.eye(4)\n source_img = Nifti1Image(data, source_affine)\n\n target_affine = 2 * source_affine\n target_img = Nifti1Image(data, target_affine)\n\n\n result_img = resample_to_img(source_img, target_img,\n interpolation='nearest')\n\n downsampled = data[::2, ::2, ::2, ...]\n x, y, z = downsampled.shape[:3]\n np.testing.assert_almost_equal(downsampled,\n get_data(result_img)[:x, :y, :z, ...])\n\ndef test_crop():\n # Testing that padding of arrays and cropping of images work symmetrically\n shape = (4, 6, 2)\n data = np.ones(shape)\n padded = _pad_array(data, [3, 2, 4, 4, 5, 7])\n padd_nii = Nifti1Image(padded, np.eye(4))\n\n cropped = crop_img(padd_nii, pad=False)\n np.testing.assert_equal(get_data(cropped), data)\n\n\ndef test_resample_identify_affine_int_translation():\n # Testing resample to img function\n rand_gen = np.random.RandomState(0)\n\n source_shape = (6, 4, 6)\n source_affine = np.eye(4)\n source_affine[:, 3] = np.append(np.random.randint(0, 4, 3), 1)\n source_data = rand_gen.random_sample(source_shape)\n source_img = Nifti1Image(source_data, source_affine)\n\n target_shape = (11, 10, 9)\n target_data = np.zeros(target_shape)\n target_affine = source_affine\n target_affine[:3, 3] -= 3 # add an offset of 3 in x, y, z\n target_data[3:9, 3:7, 3:9] = source_data # put the data at the offset location\n target_img = Nifti1Image(target_data, target_affine)\n\n result_img = resample_to_img(source_img, target_img,\n interpolation='nearest')\n np.testing.assert_almost_equal(get_data(target_img),\n get_data(result_img))\n\n 
result_img_2 = resample_to_img(result_img, source_img,\n interpolation='nearest')\n np.testing.assert_almost_equal(get_data(source_img),\n get_data(result_img_2))\n\n result_img_3 = resample_to_img(result_img, source_img,\n interpolation='nearest',\n force_resample=True)\n np.testing.assert_almost_equal(get_data(result_img_2),\n get_data(result_img_3))\n\n result_img_4 = resample_to_img(source_img, target_img,\n interpolation='nearest',\n force_resample=True)\n np.testing.assert_almost_equal(get_data(target_img),\n get_data(result_img_4))\n\ndef test_resample_clip():\n # Resample and image and get larger and smaller\n # value than in the original. Use clip to get rid of these images\n\n shape = (6, 3, 6)\n data = np.zeros(shape=shape)\n data[1:-2, 1:-1, 1:-2] = 1\n\n source_affine = np.diag((2, 2, 2, 1))\n source_img = Nifti1Image(data, source_affine)\n\n target_affine = np.eye(4)\n no_clip_data = get_data(resample_img(source_img, target_affine,\n clip=False))\n clip_data = get_data(resample_img(source_img,\n target_affine, clip=True))\n\n not_clip = np.where((no_clip_data > data.min()) & (no_clip_data < data.max()))\n\n assert np.any(no_clip_data > data.max())\n assert np.any(no_clip_data < data.min())\n assert np.all(clip_data <= data.max())\n assert np.all(clip_data >= data.min())\n assert_array_equal(no_clip_data[not_clip], clip_data[not_clip])\n\n\ndef test_reorder_img():\n # We need to test on a square array, as rotation does not change\n # shape, whereas reordering does.\n shape = (5, 5, 5, 2, 2)\n rng = np.random.RandomState(42)\n data = rng.rand(*shape)\n affine = np.eye(4)\n affine[:3, -1] = 0.5 * np.array(shape[:3])\n ref_img = Nifti1Image(data, affine)\n # Test with purely positive matrices and compare to a rotation\n for theta, phi in np.random.randint(4, size=(5, 2)):\n rot = rotation(theta * np.pi / 2, phi * np.pi / 2)\n rot[np.abs(rot) < 0.001] = 0\n rot[rot > 0.9] = 1\n rot[rot < -0.9] = 1\n b = 0.5 * np.array(shape[:3])\n new_affine = from_matrix_vector(rot, b)\n rot_img = resample_img(ref_img, target_affine=new_affine)\n np.testing.assert_array_equal(rot_img.affine, new_affine)\n np.testing.assert_array_equal(get_data(rot_img).shape, shape)\n reordered_img = reorder_img(rot_img)\n np.testing.assert_array_equal(reordered_img.affine[:3, :3],\n np.eye(3))\n np.testing.assert_almost_equal(get_data(reordered_img),\n data)\n\n # Create a non-diagonal affine, and check that we raise a sensible\n # exception\n affine[1, 0] = 0.1\n ref_img = Nifti1Image(data, affine)\n with pytest.raises(ValueError, match='Cannot reorder the axes'):\n reorder_img(ref_img)\n\n # Test that no exception is raised when resample='continuous'\n reorder_img(ref_img, resample='continuous')\n\n # Test that resample args gets passed to resample_img\n interpolation = 'nearest'\n reordered_img = reorder_img(ref_img, resample=interpolation)\n resampled_img = resample_img(ref_img,\n target_affine=reordered_img.affine,\n interpolation=interpolation)\n np.testing.assert_array_equal(get_data(reordered_img),\n get_data(resampled_img))\n\n # Make sure invalid resample argument is included in the error message\n interpolation = 'an_invalid_interpolation'\n pattern = \"interpolation must be either.+{0}\".format(interpolation)\n with pytest.raises(ValueError, match=pattern):\n reorder_img(ref_img, resample=interpolation)\n\n # Test flipping an axis\n data = rng.rand(*shape)\n for i in (0, 1, 2):\n # Make a diagonal affine with a negative axis, and check that\n # can be reordered, also vary the shape\n shape = (i + 1, 
i + 2, 3 - i)\n affine = np.eye(4)\n affine[i, i] *= -1\n img = Nifti1Image(data, affine)\n orig_img = copy.copy(img)\n #x, y, z = img.get_world_coords()\n #sample = img.values_in_world(x, y, z)\n img2 = reorder_img(img)\n # Check that img has not been changed\n np.testing.assert_array_equal(img.affine, orig_img.affine)\n np.testing.assert_array_equal(get_data(img),\n get_data(orig_img))\n # Test that the affine is indeed diagonal:\n np.testing.assert_array_equal(img2.affine[:3, :3],\n np.diag(np.diag(img2.affine[:3, :3])))\n assert np.all(np.diag(img2.affine) >= 0)\n\n\ndef test_reorder_img_non_native_endianness():\n def _get_resampled_img(dtype):\n data = np.ones((10, 10, 10), dtype=dtype)\n data[3:7, 3:7, 3:7] = 2\n\n affine = np.eye(4)\n\n theta = math.pi / 6.\n c = math.cos(theta)\n s = math.sin(theta)\n\n affine = np.array([[1, 0, 0, 0],\n [0, c, -s, 0],\n [0, s, c, 0],\n [0, 0, 0, 1]])\n\n img = Nifti1Image(data, affine)\n return resample_img(img, target_affine=np.eye(4))\n\n img_1 = _get_resampled_img('<f8')\n img_2 = _get_resampled_img('>f8')\n\n np.testing.assert_equal(get_data(img_1), get_data(img_2))\n\n\ndef test_reorder_img_mirror():\n affine = np.array([\n [-1.1, -0., 0., 0.],\n [-0., -1.2, 0., 0.],\n [-0., -0., 1.3, 0.],\n [0., 0., 0., 1.]\n ])\n img = Nifti1Image(np.zeros((4, 6, 8)), affine=affine)\n reordered = reorder_img(img)\n np.testing.assert_allclose(\n get_bounds(reordered.shape, reordered.affine),\n get_bounds(img.shape, img.affine),\n )\n\n\ndef test_coord_transform_trivial():\n sform = np.eye(4)\n x = np.random.random((10,))\n y = np.random.random((10,))\n z = np.random.random((10,))\n\n x_, y_, z_ = coord_transform(x, y, z, sform)\n np.testing.assert_array_equal(x, x_)\n np.testing.assert_array_equal(y, y_)\n np.testing.assert_array_equal(z, z_)\n\n sform[:, -1] = 1\n x_, y_, z_ = coord_transform(x, y, z, sform)\n np.testing.assert_array_equal(x + 1, x_)\n np.testing.assert_array_equal(y + 1, y_)\n np.testing.assert_array_equal(z + 1, z_)\n\n # Test the output in case of one item array\n x, y, z = x[:1], y[:1], z[:1]\n x_, y_, z_ = coord_transform(x, y, z, sform)\n np.testing.assert_array_equal(x + 1, x_)\n np.testing.assert_array_equal(y + 1, y_)\n np.testing.assert_array_equal(z + 1, z_)\n\n # Test the output in case of simple items\n x, y, z = x[0], y[0], z[0]\n x_, y_, z_ = coord_transform(x, y, z, sform)\n np.testing.assert_array_equal(x + 1, x_)\n np.testing.assert_array_equal(y + 1, y_)\n np.testing.assert_array_equal(z + 1, z_)\n\n # Test the outputs have the same shape as the inputs\n x = np.ones((3, 2, 4))\n y = np.ones((3, 2, 4))\n z = np.ones((3, 2, 4))\n x_, y_, z_ = coord_transform(x, y, z, sform)\n assert x.shape == x_.shape\n\n\[email protected](os.environ.get('APPVEYOR') == 'True',\n reason='This test too slow (7-8 minutes) on AppVeyor')\ndef test_resample_img_segmentation_fault():\n # see https://github.com/nilearn/nilearn/issues/346\n shape_in = (64, 64, 64)\n aff_in = np.diag([2., 2., 2., 1.])\n aff_out = np.diag([3., 3., 3., 1.])\n # fourth_dim = 1024 works fine but for 1025 creates a segmentation\n # fault with scipy < 0.14.1\n fourth_dim = 1025\n\n try:\n data = np.ones(shape_in + (fourth_dim, ), dtype=np.float64)\n except MemoryError:\n # This can happen on AppVeyor and for 32-bit Python on Windows\n pytest.skip('Not enough RAM to run this test')\n else:\n img_in = Nifti1Image(data, aff_in)\n\n resample_img(img_in,\n target_affine=aff_out,\n interpolation='nearest')\n\n\ndef test_resampling_with_int_types_no_crash():\n affine = 
np.eye(4)\n    data = np.zeros((2, 2, 2))\n\n    # np.int and np.float were removed in numpy 1.24; the builtins are equivalent here\n    for dtype in [int, np.int8, np.int16, np.int32, np.int64,\n                  np.uint, np.uint8, np.uint16, np.uint32, np.uint64,\n                  np.float32, np.float64, float, '>i8', '<i8']:\n        img = Nifti1Image(data.astype(dtype), affine)\n        resample_img(img, target_affine=2. * affine)\n"
]
| [
[
"numpy.diag",
"numpy.dot",
"numpy.concatenate",
"numpy.all",
"numpy.max",
"numpy.random.randint",
"numpy.eye",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"numpy.logical_not",
"numpy.array",
"numpy.random.RandomState",
"numpy.random.random",
"numpy.abs",
"numpy.isfinite",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.prod"
]
]
|
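The `test_resample_clip` record above exercises nilearn's `clip` option, which exists because spline interpolation can overshoot the value range of the source image. A minimal sketch of the same effect using only numpy and scipy (the toy volume mirrors the record's fixture; the exact overshoot magnitude is an assumption):

```python
import numpy as np
from scipy import ndimage

data = np.zeros((6, 3, 6))
data[1:-2, 1:-1, 1:-2] = 1          # sharp 0/1 block, like the record's fixture

# Cubic-spline resampling typically overshoots a sharp edge (Gibbs-like ringing)...
resampled = ndimage.zoom(data, 2.0, order=3)
print(resampled.min(), resampled.max())   # expected slightly outside [0, 1]

# ...and clipping to the source range removes the overshoot without touching
# in-range voxels, which is what resample_img(..., clip=True) checks above.
clipped = np.clip(resampled, data.min(), data.max())
print(clipped.min(), clipped.max())       # back inside [0, 1]
```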
ravic1995/FundamentalAnalysis | [
"0ca0d75a81e0bcf8df75843a451d174a40150915"
]
| [
"FundamentalAnalysis/ratios.py"
]
| [
"from urllib.request import urlopen\nfrom urllib.error import HTTPError\nimport json\nimport pandas as pd\n\n\ndef key_metrics(ticker, api_key, period=\"annual\", TTM=False, limit=0):\n \"\"\"\n Description\n ----\n Gives information about key metrics of a company overtime which includes\n i.a. PE ratio, Debt to Equity, Dividend Yield and Average Inventory.\n\n Input\n ----\n ticker (string)\n The company ticker (for example: \"NFLX\")\n api_key (string)\n The API Key obtained from https://financialmodelingprep.com/developer/docs/\n period (string)\n Data period, this can be \"annual\" or \"quarter\".\n TTM (boolean)\n Obtain the trailing twelve months (TTM) key metrics.\n limit (integer)\n The limit for the years of data\n\n Output\n ----\n data (dataframe)\n Data with variables in rows and the period in columns.\n \"\"\"\n if TTM:\n URL = f\"https://financialmodelingprep.com/api/v3/key-metrics-ttm/{ticker}?limit={limit}&apikey={api_key}\"\n else:\n URL = f\"https://financialmodelingprep.com/api/v3/key-metrics/{ticker}?period={period}&limit={limit}&apikey={api_key}\"\n\n try:\n response = urlopen(URL)\n data = json.loads(response.read().decode(\"utf-8\"))\n except HTTPError:\n raise ValueError(\"This endpoint is only for premium members. Please visit the subscription page to upgrade the \"\n \"plan (Starter or higher) at https://financialmodelingprep.com/developer/docs/pricing\")\n\n if 'Error Message' in data:\n raise ValueError(data['Error Message'])\n\n if TTM:\n data_formatted = pd.Series(data[0])\n else:\n data_formatted = {}\n for value in data:\n if period == \"quarter\":\n date = value['date'][:7]\n else:\n date = value['date'][:4]\n del value['date']\n del value['symbol']\n\n data_formatted[date] = value\n data_formatted = pd.DataFrame(data_formatted)\n\n return data_formatted\n\ndef financial_ratios(ticker, api_key, period=\"annual\", TTM=False):\n \"\"\"\n Description\n ----\n Gives information about the financial ratios of a company overtime\n which includes i.a. investment, liquidity, profitability and debt ratios.\n\n Input\n ----\n ticker (string)\n The company ticker (for example: \"LYFT\")\n api_key (string)\n The API Key obtained from https://financialmodelingprep.com/developer/docs/\n period (string)\n Data period, this can be \"annual\" or \"quarter\".\n TTM (boolean)\n Obtain the trailing twelve months (TTM) ratios.\n\n Output\n ----\n data (dataframe or series)\n Data with variables in rows and the period in columns.\n \"\"\"\n if TTM:\n URL = f\"https://financialmodelingprep.com/api/v3/ratios-ttm/{ticker}?apikey={api_key}\"\n else:\n URL = f\"https://financialmodelingprep.com/api/v3/ratios/{ticker}?period={period}&apikey={api_key}\"\n\n try:\n response = urlopen(URL)\n data = json.loads(response.read().decode(\"utf-8\"))\n except HTTPError:\n raise ValueError(\"This endpoint is only for premium members. 
Please visit the subscription page to upgrade the \"\n \"plan (Starter or higher) at https://financialmodelingprep.com/developer/docs/pricing\")\n\n if 'Error Message' in data:\n raise ValueError(data['Error Message'])\n\n if TTM:\n data_formatted = pd.Series(data[0])\n else:\n data_formatted = {}\n for value in data:\n if period == \"quarter\":\n date = value['date'][:7]\n else:\n date = value['date'][:4]\n del value['date']\n del value['symbol']\n\n data_formatted[date] = value\n data_formatted = pd.DataFrame(data_formatted)\n\n return data_formatted\n\n\ndef financial_statement_growth(ticker, api_key, period=\"annual\"):\n \"\"\"\n Description\n ----\n Gives information about the financial statement growth of a company overtime\n which includes i.a. EBIT growth (%) and shareholder equity growth (% per 3, 5\n and 10 years)\n\n Input\n ----\n ticker (string)\n The company ticker (for example: \"WMT\")\n api_key (string)\n The API Key obtained from https://financialmodelingprep.com/developer/docs/\n period (string)\n Data period, this can be \"annual\" or \"quarter\".\n\n Output\n ----\n data (dataframe)\n Data with variables in rows and the period in columns.\n \"\"\"\n try:\n response = urlopen(f\"https://financialmodelingprep.com/api/v3/financial-growth/{ticker}\"\n f\"?period={period}&apikey={api_key}\")\n data = json.loads(response.read().decode(\"utf-8\"))\n except HTTPError:\n raise ValueError(\"This endpoint is only for premium members. Please visit the subscription page to upgrade the \"\n \"plan (Starter or higher) at https://financialmodelingprep.com/developer/docs/pricing\")\n\n if 'Error Message' in data:\n raise ValueError(data['Error Message'])\n\n data_formatted = {}\n for value in data:\n if period == \"quarter\":\n date = value['date'][:7]\n else:\n date = value['date'][:4]\n del value['date']\n del value['symbol']\n\n data_formatted[date] = value\n\n return pd.DataFrame(data_formatted)\n"
]
| [
[
"pandas.Series",
"pandas.DataFrame"
]
]
|
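All three functions in `ratios.py` share the same reshaping step: drop the `date`/`symbol` metadata from each JSON record, key the rest by period, and let `pd.DataFrame` put variables in rows and periods in columns. A self-contained sketch with made-up records (the field names are illustrative, not the exact FinancialModelingPrep schema):

```python
import pandas as pd

records = [
    {"date": "2021-12-31", "symbol": "NFLX", "peRatio": 53.2, "debtToEquity": 0.97},
    {"date": "2020-12-31", "symbol": "NFLX", "peRatio": 87.1, "debtToEquity": 1.47},
]

data_formatted = {}
for value in records:
    date = value["date"][:4]          # annual period -> year key ("2021", ...)
    del value["date"]                 # metadata, not a metric
    del value["symbol"]
    data_formatted[date] = value

print(pd.DataFrame(data_formatted))   # rows: peRatio/debtToEquity, columns: years
```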
NISP-official/CARTL | [
"8fba278783c9088b876f09eb0e659a9f9356f129"
]
| [
"src/utils/spectral_norm.py"
]
| [
"\"\"\"\nSpectral Normalization from https://arxiv.org/abs/1802.05957\nCode from PyTorch 1.5.1\n\"\"\"\nimport torch\nfrom torch.nn.functional import normalize\n\n\nclass SpectralNorm(object):\n # Invariant before and after each forward call:\n # u = normalize(W @ v)\n # NB: At initialization, this invariant is not enforced\n\n _version = 1\n # At version 1:\n # made `W` not a buffer,\n # added `v` as a buffer, and\n # made eval mode use `W = u @ W_orig @ v` rather than the stored `W`.\n\n def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12, norm_beta=1.0):\n self.name = name\n self.dim = dim\n if n_power_iterations <= 0:\n raise ValueError('Expected n_power_iterations to be positive, but '\n 'got n_power_iterations={}'.format(n_power_iterations))\n self.n_power_iterations = n_power_iterations\n self.eps = eps\n self._norm_beta = norm_beta\n\n def reshape_weight_to_matrix(self, weight):\n weight_mat = weight\n if self.dim != 0:\n # permute dim to front\n weight_mat = weight_mat.permute(self.dim,\n *[d for d in range(weight_mat.dim()) if d != self.dim])\n height = weight_mat.size(0)\n return weight_mat.reshape(height, -1)\n\n def compute_weight(self, module, do_power_iteration):\n # NB: If `do_power_iteration` is set, the `u` and `v` vectors are\n # updated in power iteration **in-place**. This is very important\n # because in `DataParallel` forward, the vectors (being buffers) are\n # broadcast from the parallelized module to each module replica,\n # which is a new module object created on the fly. And each replica\n # runs its own spectral norm power iteration. So simply assigning\n # the updated vectors to the module this function runs on will cause\n # the update to be lost forever. And the next time the parallelized\n # module is replicated, the same randomly initialized vectors are\n # broadcast and used!\n #\n # Therefore, to make the change propagate back, we rely on two\n # important behaviors (also enforced via tests):\n # 1. `DataParallel` doesn't clone storage if the broadcast tensor\n # is already on correct device; and it makes sure that the\n # parallelized module is already on `device[0]`.\n # 2. If the out tensor in `out=` kwarg has correct shape, it will\n # just fill in the values.\n # Therefore, since the same power iteration is performed on all\n # devices, simply updating the tensors in-place will make sure that\n # the module replica on `device[0]` will update the _u vector on the\n # parallized module (by shared storage).\n #\n # However, after we update `u` and `v` in-place, we need to **clone**\n # them before using them to normalize the weight. This is to support\n # backproping through two forward passes, e.g., the common pattern in\n # GAN training: loss = D(real) - D(fake). 
Otherwise, engine will\n # complain that variables needed to do backward for the first forward\n # (i.e., the `u` and `v` vectors) are changed in the second forward.\n weight = getattr(module, self.name + '_orig')\n u = getattr(module, self.name + '_u')\n v = getattr(module, self.name + '_v')\n weight_mat = self.reshape_weight_to_matrix(weight)\n\n if do_power_iteration:\n with torch.no_grad():\n for _ in range(self.n_power_iterations):\n # Spectral norm of weight equals to `u^T W v`, where `u` and `v`\n # are the first left and right singular vectors.\n # This power iteration produces approximations of `u` and `v`.\n v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v)\n u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)\n if self.n_power_iterations > 0:\n # See above on why we need to clone\n u = u.clone(memory_format=torch.contiguous_format)\n v = v.clone(memory_format=torch.contiguous_format)\n\n sigma = torch.dot(u, torch.mv(weight_mat, v))\n weight = weight / sigma * self._norm_beta\n return weight\n\n def remove(self, module):\n with torch.no_grad():\n weight = self.compute_weight(module, do_power_iteration=False)\n delattr(module, self.name)\n delattr(module, self.name + '_u')\n delattr(module, self.name + '_v')\n delattr(module, self.name + '_orig')\n module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))\n\n def __call__(self, module, inputs):\n setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training))\n\n def _solve_v_and_rescale(self, weight_mat, u, target_sigma):\n # Tries to returns a vector `v` s.t. `u = normalize(W @ v)`\n # (the invariant at top of this class) and `u @ W @ v = sigma`.\n # This uses pinverse in case W^T W is not invertible.\n v = torch.chain_matmul(weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)).squeeze(1)\n return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v)))\n\n @staticmethod\n def apply(module, name, n_power_iterations, dim, eps, norm_beta=1.0):\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, SpectralNorm) and hook.name == name:\n raise RuntimeError(\"Cannot register two spectral_norm hooks on \"\n \"the same parameter {}\".format(name))\n\n fn = SpectralNorm(name, n_power_iterations, dim, eps, norm_beta=norm_beta)\n weight = module._parameters[name]\n\n with torch.no_grad():\n weight_mat = fn.reshape_weight_to_matrix(weight)\n\n h, w = weight_mat.size()\n # randomly initialize `u` and `v`\n u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps)\n v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps)\n\n delattr(module, fn.name)\n module.register_parameter(fn.name + \"_orig\", weight)\n # We still need to assign weight back as fn.name because all sorts of\n # things may assume that it exists, e.g., when initializing weights.\n # However, we can't directly assign as it could be an nn.Parameter and\n # gets added as a parameter. 
Instead, we register weight.data as a plain\n # attribute.\n setattr(module, fn.name, weight.data)\n module.register_buffer(fn.name + \"_u\", u)\n module.register_buffer(fn.name + \"_v\", v)\n\n module.register_forward_pre_hook(fn)\n module._register_state_dict_hook(SpectralNormStateDictHook(fn))\n module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn))\n return fn\n\n\n# This is a top level class because Py2 pickle doesn't like inner class nor an\n# instancemethod.\nclass SpectralNormLoadStateDictPreHook(object):\n # See docstring of SpectralNorm._version on the changes to spectral_norm.\n def __init__(self, fn):\n self.fn = fn\n\n # For state_dict with version None, (assuming that it has gone through at\n # least one training forward), we have\n #\n # u = normalize(W_orig @ v)\n # W = W_orig / sigma, where sigma = u @ W_orig @ v\n #\n # To compute `v`, we solve `W_orig @ x = u`, and let\n # v = x / (u @ W_orig @ x) * (W / W_orig).\n def __call__(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n fn = self.fn\n version = local_metadata.get('spectral_norm', {}).get(fn.name + '.version', None)\n if version is None or version < 1:\n weight_key = prefix + fn.name\n if version is None and all(weight_key + s in state_dict for s in ('_orig', '_u', '_v')) and \\\n weight_key not in state_dict:\n # Detect if it is the updated state dict and just missing metadata.\n # This could happen if the users are crafting a state dict themselves,\n # so we just pretend that this is the newest.\n return\n has_missing_keys = False\n for suffix in ('_orig', '', '_u'):\n key = weight_key + suffix\n if key not in state_dict:\n has_missing_keys = True\n if strict:\n missing_keys.append(key)\n if has_missing_keys:\n return\n with torch.no_grad():\n weight_orig = state_dict[weight_key + '_orig']\n weight = state_dict.pop(weight_key)\n sigma = (weight_orig / weight).mean()\n weight_mat = fn.reshape_weight_to_matrix(weight_orig)\n u = state_dict[weight_key + '_u']\n v = fn._solve_v_and_rescale(weight_mat, u, sigma)\n state_dict[weight_key + '_v'] = v\n\n\n# This is a top level class because Py2 pickle doesn't like inner class nor an\n# instancemethod.\nclass SpectralNormStateDictHook(object):\n # See docstring of SpectralNorm._version on the changes to spectral_norm.\n def __init__(self, fn):\n self.fn = fn\n\n def __call__(self, module, state_dict, prefix, local_metadata):\n if 'spectral_norm' not in local_metadata:\n local_metadata['spectral_norm'] = {}\n key = self.fn.name + '.version'\n if key in local_metadata['spectral_norm']:\n raise RuntimeError(\"Unexpected key in metadata['spectral_norm']: {}\".format(key))\n local_metadata['spectral_norm'][key] = self.fn._version\n\n\ndef spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None, norm_beta=1.0):\n r\"\"\"Applies spectral normalization to a parameter in the given module.\n\n .. math::\n \\mathbf{W}_{SN} = \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})},\n \\sigma(\\mathbf{W}) = \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2}\n\n Spectral normalization stabilizes the training of discriminators (critics)\n in Generative Adversarial Networks (GANs) by rescaling the weight tensor\n with spectral norm :math:`\\sigma` of the weight matrix calculated using\n power iteration method. If the dimension of the weight tensor is greater\n than 2, it is reshaped to 2D in power iteration method to get spectral\n norm. 
This is implemented via a hook that calculates spectral norm and\n rescales weight before every :meth:`~Module.forward` call.\n\n See `Spectral Normalization for Generative Adversarial Networks`_ .\n\n .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957\n\n Args:\n module (nn.Module): containing module\n name (str, optional): name of weight parameter\n n_power_iterations (int, optional): number of power iterations to\n calculate spectral norm\n eps (float, optional): epsilon for numerical stability in\n calculating norms\n dim (int, optional): dimension corresponding to number of outputs,\n the default is ``0``, except for modules that are instances of\n ConvTranspose{1,2,3}d, when it is ``1``\n\n Returns:\n The original module with the spectral norm hook\n\n Example::\n\n >>> m = spectral_norm(nn.Linear(20, 40))\n >>> m\n Linear(in_features=20, out_features=40, bias=True)\n >>> m.weight_u.size()\n torch.Size([40])\n\n \"\"\"\n if dim is None:\n if isinstance(module, (torch.nn.ConvTranspose1d,\n torch.nn.ConvTranspose2d,\n torch.nn.ConvTranspose3d)):\n dim = 1\n else:\n dim = 0\n SpectralNorm.apply(module, name, n_power_iterations, dim, eps, norm_beta=norm_beta)\n return module\n\n\ndef remove_spectral_norm(module, name='weight'):\n r\"\"\"Removes the spectral normalization reparameterization from a module.\n\n Args:\n module (Module): containing module\n name (str, optional): name of weight parameter\n\n Example:\n >>> m = spectral_norm(nn.Linear(40, 10))\n >>> remove_spectral_norm(m)\n \"\"\"\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, SpectralNorm) and hook.name == name:\n hook.remove(module)\n del module._forward_pre_hooks[k]\n break\n else:\n raise ValueError(\"spectral_norm of '{}' not found in {}\".format(\n name, module))\n\n for k, hook in module._state_dict_hooks.items():\n if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name:\n del module._state_dict_hooks[k]\n break\n\n for k, hook in module._load_state_dict_pre_hooks.items():\n if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name:\n del module._load_state_dict_pre_hooks[k]\n break\n\n return module\n"
]
| [
[
"torch.mv",
"torch.no_grad"
]
]
|
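The heart of `SpectralNorm.compute_weight` is the power iteration `v = normalize(W^T u)`, `u = normalize(W v)`, after which `sigma = u @ W @ v` approximates the largest singular value used to rescale the weight. A standalone numpy sketch of that estimate, checked against a full SVD (the matrix size and iteration count are arbitrary):

```python
import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(40, 20))

u = rng.normal(size=40)
for _ in range(50):                    # a handful of power-iteration steps
    v = W.T @ u
    v /= np.linalg.norm(v) + 1e-12     # eps guards against zero vectors, as in the hook
    u = W @ v
    u /= np.linalg.norm(u) + 1e-12

sigma = u @ W @ v                      # estimated spectral norm
print(sigma, np.linalg.svd(W, compute_uv=False)[0])   # should agree to several digits
```

Dividing `W` by this `sigma` (times `norm_beta`) is exactly the rescaling the forward-pre-hook applies on every call.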
Naroloal/dlaCluster | [
"0a0990e332068670069ed5a2621cf25c9065c0c6"
]
| [
"fractalDimensionality.py"
]
| [
"\"\"\"\nThis function finds the fractal dimensionality of the cluster \n\"\"\"\n\nfrom DLAcluster import DLAcluster \nimport numpy\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n\n\nradiusArray=numpy.arange(30,60,5)\nmass=[]\n\nfor i in radiusArray:\n massValue,matrix=DLAcluster(i,False) #import radius and True/False for GIF\n mass.append(massValue)\n\n#------- Find fit for mass and radius of the cluster:\n# Find log radius and log mass\n# Should be a linear function a+bx, with the slope b equal to the power of t and 'a'=scaling\n\n#Find Log of all the arrays\nlogRadius=numpy.log(radiusArray)\nlogMass=numpy.log(mass)\n\n#Fit a log function using numpy polyfit\nfitLog=numpy.polyfit(logRadius, logMass,1)\nfitLogFunc=numpy.poly1d(fitLog)\n\n#print out the results\nprint(\"Parameters for the log fit: slope = \",fitLog[0],\"shift: \",fitLog[1])\nprint(\"Parameters from the log fit: form is e^\",fitLog[1],\"*r^\",fitLog[0])\nnum=str(numpy.round(fitLog[0],3))\n\n# ------------------------------------------------------------------------------\n\n################################################################################\n### Create Plots\n################################################################################\n\n# ------------------------------------------------------------------------------\n\n#--------------- Plot log\nfig=plt.subplot()\nplt.scatter(logRadius,logMass, color='tomato', edgecolors='tomato', s=30)\nplt.plot(logRadius, fitLogFunc(logRadius),color='dodgerblue', lw=3)\nplt.title(\"Log-log plot, mass vs radius\",fontsize=20)\nplt.xlabel(\"Log radius\",fontsize=15)\nplt.ylabel(\"Log mass\",fontsize=15)\nplt.grid(True)\nfig.text(2.6,4.3,'fractal dimensionality:'+num)\nfig.spines[\"top\"].set_visible(False) \nfig.spines[\"right\"].set_visible(False) \nplt.savefig('logRadiusMass.png')\nplt.show()\n\n\n"
]
| [
[
"numpy.log",
"numpy.poly1d",
"numpy.polyfit",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.savefig",
"numpy.round",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
]
|
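`fractalDimensionality.py` estimates the cluster's fractal dimension as the slope of log(mass) against log(radius). The fit itself is easy to sanity-check on synthetic data with a known exponent; a short sketch (the exponent 1.71 and prefactor 2.0 are invented):

```python
import numpy as np

radius = np.arange(30, 60, 5)
mass = 2.0 * radius ** 1.71            # synthetic power law: mass = a * r^d

slope, intercept = np.polyfit(np.log(radius), np.log(mass), 1)
print(slope)                           # ~1.71, the "fractal dimensionality"
print(np.exp(intercept))               # ~2.0, the scaling prefactor
```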
sjpfenninger/calliope | [
"a4e49c3b7d37f908bafc84543510eec0b4cf5d9f"
]
| [
"calliope/time_masks.py"
]
| [
"\"\"\"\nCopyright (C) 2013-2016 Stefan Pfenninger.\nLicensed under the Apache 2.0 License (see LICENSE file).\n\ntime_masks.py\n~~~~~~~~~~~~~\n\nFunctions to pick timesteps from data given certain criteria.\n\n\"\"\"\n\nimport pandas as pd\n\nfrom . import time_funcs\n\n\ndef _get_array(data, var, tech, locations):\n arr = data[var]\n arr = arr.loc[{'y': tech}]\n if locations is not None:\n arr = arr.loc[{'x': locations}]\n return arr\n\n\ndef zero(data, tech, var='r', locations=None):\n \"\"\"\n Returns timesteps where ``var`` for the technology ``tech``\n across the given list of ``locations`` is zero.\n\n If ``locations`` not given, uses all available locations.\n\n \"\"\"\n arr = _get_array(data, var, tech, locations)\n s = arr.mean(dim='x').to_pandas() # Get a t-indexed Series\n\n return s[s == 0].index\n\n\ndef _concat_indices(indices):\n return pd.concat([i.to_series() for i in indices]).sort_index().index\n\n\ndef _get_minmax_timestaps(series, length, n, how='max', padding=None):\n # Get the max/min timestamps\n group = series.groupby(pd.TimeGrouper(length)).mean()\n timesteps = []\n for _ in range(n):\n if how == 'max':\n ts = group.idxmax()\n elif how == 'min':\n ts = group.idxmin()\n timesteps.append(ts)\n group = group.drop(ts)\n\n # Get range of timestamps including padding\n full_timesteps = []\n for ts in timesteps:\n ts_end = ts + pd.Timedelta(length)\n if padding is not None:\n ts -= pd.Timedelta(padding)\n ts_end += pd.Timedelta(padding)\n ts_range = pd.date_range(ts, ts_end, freq='1H')[:-1]\n full_timesteps.append(ts_range)\n\n ts_index = _concat_indices(full_timesteps)\n\n return ts_index\n\n\ndef extreme(data, tech, var='r', how='max',\n length='1D', n=1, groupby_length=None,\n locations=None, padding=None):\n \"\"\"\n Returns timesteps for period of ``length`` where ``var`` for the technology\n ``tech`` across the given list of ``locations`` is either minmal\n or maximal.\n\n Parameters\n ----------\n data : xarray.Dataset\n tech : str\n Technology whose `var` to find extreme for.\n var : str, optional\n default 'r'\n how : str, optional\n 'max' (default) or 'min'.\n length : str, optional\n Defaults to '1D'.\n n : int, optional\n Number of periods of `length` to look for, default is 1.\n groupby_length : str, optional\n Group time series and return `n` periods of `length`\n for each group.\n locations : list, optional\n List of locations to use, if None, uses all available locations.\n padding : int, optional\n Pad beginning and end of the unmasked area by the number of\n timesteps given.\n\n \"\"\"\n arr = _get_array(data, var, tech, locations)\n\n return _extreme(arr, how, length, n, groupby_length, padding)\n\n\ndef extreme_diff(data, tech0, tech1, var='r', how='max',\n length='1D', n=1, groupby_length=None,\n locations=None, padding=None):\n data_n = time_funcs.normalize(data)\n arr0 = _get_array(data_n, var, tech0, locations)\n arr1 = _get_array(data_n, var, tech1, locations)\n arr = arr0 - arr1\n\n return _extreme(arr, how, length, n, groupby_length, padding)\n\n\ndef _extreme(arr, how='max',\n length='1D', n=1, groupby_length=None,\n padding=None):\n\n full_series = arr.mean(dim='x').to_pandas() # Get a t-indexed Series\n\n if groupby_length:\n groupby = pd.TimeGrouper(groupby_length)\n group_indices = []\n grouping = full_series.groupby(groupby)\n for k in grouping.groups.keys():\n s = grouping.get_group(k)\n group_indices.append(_get_minmax_timestaps(s, length, n, how, padding))\n ts_index = _concat_indices(group_indices)\n else:\n ts_index = 
_get_minmax_timestaps(full_series, length, n, how, padding)\n\n return ts_index\n\n\n_WEEK_DAY_FUNCS = {\n 'extreme': extreme,\n 'extreme_diff': extreme_diff\n}\n\n\ndef week(data, day_func, **day_func_kwargs):\n # Get extreme day time index\n func = _WEEK_DAY_FUNCS[day_func]\n day = func(data, **day_func_kwargs)\n\n # Using day of week, figure out how many days before and after to get\n # a complete week\n days_before = 6 - day[0].dayofweek\n days_after = 6 - days_before\n\n # Turn it into a week\n # FIXME: assumes 1H timestep length\n start_hour = day[0] - pd.Timedelta('{}D'.format(days_before))\n end_hour = day[-1] + pd.Timedelta('{}D'.format(days_after))\n before = pd.date_range(start_hour, day[0], freq='1H')[:-1]\n after = pd.date_range(day[-1], end_hour, freq='1H')[1:]\n result_week = before.append(day).append(after)\n\n return result_week\n"
]
| [
[
"pandas.TimeGrouper",
"pandas.Timedelta",
"pandas.date_range"
]
]
|
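`time_masks.py` leans on `pd.TimeGrouper`, which was deprecated in pandas 0.21 and removed in pandas 1.0; `pd.Grouper(freq=...)` is the drop-in replacement. A sketch of the `_get_minmax_timestaps` idea (find the most extreme period, then pad it) written against the modern API, on an invented hourly series:

```python
import numpy as np
import pandas as pd

idx = pd.date_range("2016-01-01", periods=24 * 28, freq="1H")
series = pd.Series(np.sin(np.arange(len(idx)) / 40.0), index=idx)

# pd.Grouper(freq='1D') replaces the removed pd.TimeGrouper('1D')
daily_mean = series.groupby(pd.Grouper(freq="1D")).mean()
ts = daily_mean.idxmax()               # start of the most extreme day

padding = pd.Timedelta("6H")           # pad both ends, as the padding argument does
window = pd.date_range(ts - padding, ts + pd.Timedelta("1D") + padding, freq="1H")[:-1]
print(window[0], window[-1], len(window))
```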
TheMemoryDealer/CV-leaf-segm | [
"291d46c0732b3be2f4cdbf4440e8d0f0584c1db4"
]
| [
"main.py"
]
| [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport csv, os, sys, glob, statistics, cv2, re\nfrom termcolor import colored\nimport pandas as pd\nfrom scipy import ndimage as ndi\nfrom skimage.measure import regionprops\nfrom skimage.segmentation import watershed\nfrom skimage.feature import peak_local_max\n\nIN_DIR = './data'\n\nplt.rcParams[\"figure.figsize\"] = (10,6) # fixed figure size\n\n# GLOBAL SOTRAGE\nimg_raw_store = [] # store raw img here\nimg_label_store = [] # store labels here\nname_store = [] # store img names\nimage_sizes = [] # store img sizes\nsegmented_store = [] # store the answers here\nresults_store = [] # store segmentation applied\ncomplete_store = [] # store leaf counts\nkmeans_store = [] # store to show kmeans clustering segmentation\nalmost_final_store = [] # store almost final result\nleaf_detected_store1 = [] # store num leaf detected (watershed)\nleaf_detected_store2 = [] # store num leaf detected (k-means)\ndistance_store = [] # store distances before watershed\nwatershed_store = [] # store watershed results\n\n\ndef mother_of_plots(img_raw_store, img_label_store, segmented_store,\n\t\t\t\t\tcomplete_store, kmeans_store, watershed_store):\n\tindx = 1\n\tfor j in range(16): # through imgs\n\t\tax1 = plt.subplot(16, 6, indx)\n\t\tplt.imshow(img_raw_store[j]) # raw\n\t\tplt.axis('off')\n\t\tindx += 1\n\t\tax2 = plt.subplot(16, 6, indx)\n\t\tplt.imshow(img_label_store[j]) # label\n\t\tplt.axis('off')\n\t\tindx += 1\n\t\tax3 = plt.subplot(16 ,6, indx)\n\t\tplt.imshow(segmented_store[j], cmap=plt.cm.gray) # thresh\n\t\tplt.axis('off')\n\t\tindx += 1\n\t\tax4 = plt.subplot(16 ,6, indx)\n\t\tplt.imshow(kmeans_store[j]) # segm\n\t\tplt.axis('off')\n\t\tindx += 1\n\t\tax5 = plt.subplot(16 ,6, indx)\n\t\tplt.imshow(watershed_store[j], cmap=plt.cm.nipy_spectral) # watershed\n\t\tplt.axis('off')\n\t\tindx += 1\n\t\tax6 = plt.subplot(16, 6, indx)\n\t\tplt.imshow(complete_store[j]) # detected\n\t\tplt.axis('off')\n\t\tindx += 1\n\n\t\tif j == 0: # draw titles only on first row\n\t\t\tax1.set_title(\"Input\", fontweight='bold')\n\t\t\tax2.set_title(\"Label\", fontweight='bold')\n\t\t\tax3.set_title(\"Thresh\", fontweight='bold')\n\t\t\tax4.set_title(\"K-means\", fontweight='bold')\n\t\t\tax5.set_title(\"Watershed\", fontweight='bold')\n\t\t\tax6.set_title(\"Detected\", fontweight='bold')\n\n\tplt.show() # <--------------------\n\n\ndef step_plot(img_raw_store, img_label_store, segmented_store,\n\t\t\t complete_store, kmeans_store, almost_final_store,\n\t\t\t leafs, leaf_detected_store2, name_store, results_store,\n\t\t\t leaf_detected_store1, distance_store, watershed_store):\n\tds_store = [] # store ds score to later get mean\n\tleaf_error1 = [] # store detection error (watershed)\n\tleaf_error2 = [] # store detection error (k-means)\n\tname_store = list(set(name_store)) # remove duplicates\n\tname_store.sort() # sort names\n\tfor i in range(len(img_raw_store)):\n\t\t# print(dice(segmented_store[i], img_label_store[i]))\n\t\tax1 = plt.subplot(3, 3, 1)\n\t\tplt.imshow(img_raw_store[i])\n\t\tplt.axis('off')\n\t\tax1.set_title(\"Input\", fontweight='bold')\n\n\t\tax2 = plt.subplot(3, 3, 2)\n\t\tplt.imshow(img_label_store[i])\n\t\tplt.axis('off')\n\t\tax2.set_title('Label', fontweight='bold')\n\n\t\tax3 = plt.subplot(3, 3, 3)\n\t\tplt.imshow(segmented_store[i], cmap=plt.cm.gray)\n\t\tplt.axis('off')\n\t\tax3.set_title('Threshold', fontweight='bold')\n\n\t\tax4 = plt.subplot(3, 3, 4)\n\t\tplt.imshow(results_store[i], 
cmap=plt.cm.gray)\n\t\tplt.axis('off')\n\t\tax4.set_title('Bitwise', fontweight='bold', color=\"red\")\n\n\t\tax5 = plt.subplot(3, 3, 5)\n\t\tplt.imshow(distance_store[i], cmap=plt.cm.gray)\n\t\tplt.axis('off')\n\t\tax5.set_title('Distances', fontweight='bold')\n\n\t\tax6 = plt.subplot(3, 3, 6)\n\t\tplt.imshow(watershed_store[i], cmap=plt.cm.nipy_spectral)\n\t\tplt.axis('off')\n\t\tax6.set_title('Watershed', fontweight='bold', color=\"blue\")\n\n\t\tax7 = plt.subplot(3, 3, 7)\n\t\tplt.imshow(kmeans_store[i])\n\t\tplt.axis('off')\n\t\tax7.set_title('K-means', fontweight='bold')\n\n\t\tax8 = plt.subplot(3, 3, 8)\n\t\tplt.imshow(almost_final_store[i], cmap=plt.cm.gray)\n\t\tplt.axis('off')\n\t\tax8.set_title('Post-proc', fontweight='bold')\n\n\t\tax9 = plt.subplot(3, 3, 9)\n\t\tplt.imshow(complete_store[i])\n\t\tplt.axis('off')\n\t\tax9.set_title('Detected', color=\"purple\", fontweight='bold')\n\t\tplt.figtext(0.15, 0.02, \"DS - {}\".format(dice(segmented_store[i], img_label_store[i])),\n\t\t\t\t\tha=\"center\",\n\t\t\t\t\tfontsize=16,\n\t\t\t\t\tbbox={\"facecolor\":\"red\", \"alpha\":0.7, \"pad\":3})\n\t\tplt.figtext(0.85, 0.02, \"Detected (K-Means) \\n {}/{}\".format(leaf_detected_store2[i],\n\t\t\t\t\tleafs[i]),\n\t\t\t\t\tha=\"center\",\n\t\t\t\t\tfontsize=16,\n\t\t\t\t\tbbox={\"facecolor\":\"purple\", \"alpha\":0.7, \"pad\":3})\n\t\tplt.figtext(0.5, 0.95, \"{}.png\".format(name_store[i]),\n\t\t\t\t\tha=\"center\",\n\t\t\t\t\tfontsize=16,\n\t\t\t\t\tbbox={\"facecolor\":\"yellow\", \"alpha\":0.7, \"pad\":3})\n\t\tplt.figtext(0.5, 0.02, \"Detected (Watershed) \\n {}/{}\".format(leaf_detected_store1[i],\n\t\t\t\t\tleafs[i]),\n\t\t\t\t\tha=\"center\",\n\t\t\t\t\tfontsize=16,\n\t\t\t\t\tbbox={\"facecolor\":\"blue\", \"alpha\":0.7, \"pad\":3})\n\t\tplt.show() # <--------------------\n\t\tds_store.append(dice(segmented_store[i], img_label_store[i])) # add to later get mean\n\t\tleaf_error2.append(leafs[i] - leaf_detected_store2[i]) # add to later get mean\n\t\tleaf_error1.append(leafs[i] - leaf_detected_store1[i]) # same here\n\tprint('DS mean - ', colored(statistics.mean(ds_store), 'red'))\n\tprint('Leaf Detection difference mean - ',\n\t\tcolored(statistics.mean(leaf_error1), 'cyan')) # watershed err\n\tprint('Leaf Detection difference mean - ',\n\t\tcolored(statistics.mean(leaf_error2), 'magenta')) # kmeans err\n\ndef dice(mask, gt):\n\tmask = np.asarray(mask).astype(np.bool)\n\tgt = np.asarray(gt).astype(np.bool)\n\tgt = gt[:,:,1] # gt comes in all colour spaces. Pick one\n\t# print(mask.shape) # <------ Debug\n\t# print(gt.shape)\n\tif mask.shape != gt.shape:\n\t\traise ValueError(\"Shape mismatch: mask and gt must have the same shape.\")\n\t# Compute Dice coefficient\n\tintersection = np.logical_and(mask, gt) # where both intersect\n\treturn round(2. 
* intersection.sum() / (mask.sum() + gt.sum()),2)\n\ndef get_size_stats():\n\tdf = pd.DataFrame(image_sizes)\n\tprint(colored(\"\\tHEIGHT\", 'green'))\n\tprint(df[0].describe().apply(\"{0:.0f}\".format))\n\tprint(colored(\"\\tWIDTH\", 'green'))\n\tprint(df[1].describe().apply(\"{0:.0f}\".format))\n\ndef kmeans(img, kmeans_store):\n\t# reshape the image to a 2D array of pixels and 3 color values (RGB)\n\tpixel_values = img.reshape((-1, 3))\n\t# convert to float\n\tpixel_values = np.float32(pixel_values)\n\t# define stopping criteria\n\tcriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)\n\t# number of clusters (K)\n\tk = 3\n\t_, labels, (centers) = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_PP_CENTERS)\n\t# convert back to 8 bit values\n\tcenters = np.uint8(centers)\n\t# flatten the labels array\n\tlabels = labels.flatten()\n\t# convert all pixels to the color of the centroids\n\tsegmented_image = centers[labels.flatten()]\n\t# reshape back to the original image dimension\n\tsegmented_image = segmented_image.reshape(img.shape)\n\tkmeans_store.append(segmented_image)\n\t# segmented_image = cv2.GaussianBlur(segmented_image,(3,3),0) # play with this\n\t# plt.imshow(segmented_image) # <------ DEBUG\n\t# plt.show() # <------ DEBUG\n\t# disable only the cluster number 2 (turn the pixel into black)\n\tmasked_image = np.copy(segmented_image)\n\t# convert to the shape of a vector of pixel values\n\tmasked_image = masked_image.reshape((-1, 3))\n\t# color (i.e cluster) to disable\n\tcluster = 2\n\tmasked_image[labels == cluster] = [0, 0, 0]\n\t# convert back to original shape\n\tmasked_image = masked_image.reshape(img.shape)\n\treturn masked_image\n\ndef wshed(m):\n\tdistance = ndi.distance_transform_edt(m) # calc distances\n\tdistance_store.append(-distance)\n\t# get coordinates from distances. 
play around with footprint\n\tcoords = peak_local_max(distance, footprint=np.ones((12, 12)), labels=m)\n\t# prep mask array\n\tmaskk = np.zeros(distance.shape, dtype=bool)\n\t# check if mask aligns with coordinates\n\tmaskk[tuple(coords.T)] = True\n\t# label the mask\n\tmarkers, _ = ndi.label(maskk)\n\t# do segmentation\n\tlabels = watershed(-distance, markers, mask=m)\n\treturn labels\n\ndef main():\n\tprint('Input directory: {}'.format(IN_DIR))\n\timg_paths = glob.glob(os.path.join(IN_DIR, '*.png'))\n\timg_paths.sort()\n\tprint(colored('{} image paths loaded'.format(len(img_paths)), 'red'))\n\t# sort images\n\timg_raw_dir = [ x for x in img_paths if \"rgb\" in x ]\n\timg_label_dir = [ x for x in img_paths if \"label\" in x ]\n\tprint(colored(\"\\n\".join(img_raw_dir), 'green'))\n\tprint(colored(\"\\n\".join(img_label_dir), 'yellow'))\n\tdf = pd.read_csv('./Leaf_counts.csv', names=['name','leafs']) # read real leafs\n\tleafs = df[\"leafs\"].tolist() # convert to list\n\tprint('Actual leaf count - ', leafs)\n\t# read in raw images and store stuff to mem\n\tleafi = 0\n\tfor img in img_paths:\n\t\t# get img names here\n\t\timg_name = img\n\t\tm = re.search('a/(.+?)_', img_name)\n\t\tif m: # grab names\n\t\t\tname = m.group(1)\n\t\t\tname_store.append(name) # add them to list\n\t\tif 'rgb' in img: # this img is to be processed\n\t\t\timgg = cv2.imread(img) # all imgs on OpenCV are BGR by default.\n\t\t\theight, width = imgg.shape[:2] # get h,w\n\t\t\timage_sizes.append((height,width)) # for stats\n\t\t\timg = cv2.cvtColor(imgg, cv2.COLOR_BGR2RGB)\n\t\t\timg_raw_store.append(img)\n\t\t\t# now do the thresholding\n\t\t\timgHSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n\t\t\tlow = (33,78,67)\n\t\t\thigh = (151,255,255)\n\t\t\tmask = cv2.inRange(imgHSV, low, high)\n\t\t\t# play around with trans\n\t\t\tmask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((1, 1)))\n\t\t\t# mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, np.ones((2, 2)))\n\t\t\tmask = cv2.erode(mask,np.ones((1, 1)),iterations = 2)\n\t\t\tsegmented_store.append(mask)\n\t\t\tlabels = wshed(mask)\n\t\t\twatershed_store.append(labels)\n\t\t\t# measure properties of labeled image regions\n\t\t\tregions = regionprops(labels)\n\t\t\tregions = [r for r in regions if r.area > 60] # sanity check\n\t\t\tprint(colored('Leaf detected (from Watershed) : ', 'cyan'),\n\t\t\t\t len(regions), '/', leafs[leafi])\n\t\t\tleaf_detected_store1.append(len(regions))\n\n\t\t\t# get threshold results\n\t\t\tresult = cv2.bitwise_and(imgHSV, imgHSV, mask=mask)\n\t\t\tresult = cv2.cvtColor(result, cv2.COLOR_HSV2RGB)\n\t\t\tresults_store.append(result)\n\n\t\t\t# <------ DEBUG SECTION\n\t\t\t# plt.imshow(result)\n\t\t\t# plt.axis('off')\n\t\t\t# plt.show()\n\n\t\t\tgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t\t\timg = result.copy() # apparently normal assignment just wont work\n\t\t\tmasked_image = kmeans(img, kmeans_store) # apply kmeans segmentation\n\t\t\tmasked_image = cv2.blur(masked_image, (6, 6)) # blur a bit\n\t\t\tgray = cv2.cvtColor(masked_image, cv2.COLOR_RGB2GRAY)\n\t\t\tgray_blurred = cv2.blur(gray, (6, 6)) # blur a bit\n\t\t\tgray_blurred = cv2.erode(gray_blurred,np.ones((1, 1)),iterations = 1)\n\t\t\t# gray_blurred = cv2.morphologyEx(gray_blurred, cv2.MORPH_CLOSE, np.ones((5, 5)))\n\t\t\talmost_final_store.append(gray_blurred)\n\t\t\t# apply Hough transform on the blurred image.\n\t\t\tdetected_circles = cv2.HoughCircles(gray_blurred,\n\t\t\t\t\t\t\tcv2.HOUGH_GRADIENT, 1, 10, param1 = 50,\n\t\t\t\t\t\tparam2 = 18, minRadius = 0, maxRadius = 
44)\n\t\t\t# araw circles that are detected.\n\t\t\tif detected_circles is not None:\n\t\t\t\t# convert the circle parameters a, b and r to integers.\n\t\t\t\tdetected_circles = np.uint16(np.around(detected_circles))\n\t\t\t\tcounter = 1\n\t\t\t\tprint(colored('Leaf detected (from K-means): ', 'magenta'),\n\t\t\t\t\t len(detected_circles[0, :]), '/', leafs[leafi])\n\t\t\t\tleaf_detected_store2.append(len(detected_circles[0, :]))\n\t\t\t\tfor pt in detected_circles[0, :]:\n\t\t\t\t\ta, b, r = pt[0], pt[1], pt[2]\n\t\t\t\t\t# draw the circumference of the circle.\n\t\t\t\t\tcv2.circle(img, (a, b), r, (255, 0, 0), 2)\n\t\t\t\t\t# draw a small circle (of radius 1) to show the center.\n\t\t\t\t\tcv2.circle(img, (a, b), 1, (0, 0, 255), 3)\n\t\t\t\t\tcv2.putText(img, \"{}\".format(counter), (a, b - 10),\n\t\t\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5,\n\t\t\t\t\t\t\t\t(0, 50, 200), 2)\n\t\t\t\t\tcounter+=1\n\t\t\t\tleafi+=1\n\t\t\telse: # in case no circles are detected\n\t\t\t\tleaf_detected_store2.append(0)\n\t\t\t\tprint('Leaf detected : ', '0', '/', leafs[leafi])\n\t\t\t\tleafi+=1\n\t\t\tcomplete_store.append(img)\n\n\t\t\tax1 = plt.subplot(1, 2, 2)\n\t\t\tplt.imshow(img)\n\t\t\tax1.set_title(\"Detected\", color=\"purple\")\n\t\t\tplt.axis('off')\n\t\t\tax2 = plt.subplot(1, 2, 1)\n\t\t\tax2.set_title(\"Watershed\", color=\"blue\")\n\t\t\tplt.imshow(labels, cmap=plt.cm.nipy_spectral)\n\t\t\tplt.axis('off')\n\t\t\tplt.show()\n\n\t\telif 'label' in img: # this is label image\n\t\t\timg = cv2.imread(img)\n\t\t\t# px = np.count_nonzero(img) # <------ DEBUG\n\t\t\timg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\t\t\timg_label_store.append(img)\n\t\t\t# print('px - ', px) # <------ DEBUG\n\t\telse:\n\t\t\tprint('THIS SHOULD NOT BE HERE')\n\t\tprint('-' * 10)\n\tget_size_stats()\n\tstep_plot(img_raw_store, img_label_store, segmented_store,\n\t\t\t complete_store, kmeans_store, almost_final_store,\n\t\t\t leafs, leaf_detected_store2, name_store, results_store,\n\t\t\t leaf_detected_store1, distance_store, watershed_store)\n\n\tmother_of_plots(img_raw_store, img_label_store, segmented_store,\n\t\t\t\t\tcomplete_store, kmeans_store, watershed_store)\n\nif __name__ == \"__main__\":\n main() # :)"
]
| [
[
"matplotlib.pyplot.imshow",
"pandas.read_csv",
"numpy.logical_and",
"numpy.asarray",
"numpy.uint8",
"numpy.around",
"scipy.ndimage.distance_transform_edt",
"pandas.DataFrame",
"numpy.ones",
"scipy.ndimage.label",
"numpy.copy",
"matplotlib.pyplot.subplot",
"numpy.float32",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"numpy.zeros"
]
]
|
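The `dice` helper above is the record's evaluation metric: twice the overlap divided by the total foreground of mask and ground truth. A tiny self-contained check on two hand-built masks whose score can be verified by hand:

```python
import numpy as np

mask = np.zeros((10, 10), dtype=bool)
gt = np.zeros((10, 10), dtype=bool)
mask[2:7, 2:7] = True                  # 25-pixel prediction
gt[4:9, 4:9] = True                    # 25-pixel ground truth; 3x3 = 9 px overlap

intersection = np.logical_and(mask, gt)
dice = 2.0 * intersection.sum() / (mask.sum() + gt.sum())
print(round(dice, 2))                  # 2*9 / (25+25) = 0.36
```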
LiFH/MySR | [
"f6075f8711853aba6f0aae9cef18c5da84abb78c"
]
| [
"utils.py"
]
| [
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : utils.py \n@Contact : [email protected]\n@Modify Time 2020/3/30 3:56 PM \n'''\nimport torch\nimport pytorch_ssim\nfrom math import log10\n\ndef get_network_params(net):\n return sum(param.numel() for param in net.parameters())\n\n\n\n\ndef RGB_normalization(out):\n a = torch.rand((1, 1))\n zero = torch.zeros_like(a)\n one = torch.ones_like(a)\n if torch.cuda.is_available():\n zero = zero.cuda()\n one = one.cuda()\n out = torch.where(out > 1, one, out)\n out = torch.where(out < 0, zero ,out)\n return out\n\n\n# def init_models(opt):\n#\n# #generator initialization:\n# netG = GeneratorConcatSkip2CleanAdd(opt)\n# netG.apply(weights_init)\n# if opt.netG != '':\n# netG.load_state_dict(torch.load(opt.netG, map_location='cpu'))\n# print(netG)\n# # neG = netG.module\n# #discriminator initialization:\n# netD = WDiscriminator(opt)\n# netD.apply(weights_init)\n# if opt.netD != '':\n# netD.load_state_dict(torch.load(opt.netD,map_location='cpu'))\n# print(netD)\n# # netD = netD.module\n#\n# return netD, netG\n\n\ndef psnr_ssim(sr, hr, batch_size=1):\n '''\n To calculate the psnr and ssim\n PSNR=10*log10((2^n-1)^2/MSE)\n '''\n valing_results = {'mse': 0, 'ssims': 0, 'psnr': 0, 'ssim': 0, 'batch_size': 0}\n valing_results['batch_size'] = batch_size\n batch_mse = ((sr - hr) ** 2).data.mean()\n valing_results['mse'] += batch_mse * batch_size\n batch_ssim = pytorch_ssim.ssim(sr, hr).item()\n valing_results['ssims'] += batch_ssim * batch_size\n valing_results['psnr'] = 10 * log10(1 / (valing_results['mse'] / valing_results['batch_size']))\n valing_results['ssim'] = valing_results['ssims'] / valing_results['batch_size']\n return valing_results['psnr'], valing_results['ssim']"
]
| [
[
"torch.zeros_like",
"torch.rand",
"torch.cuda.is_available",
"torch.where",
"torch.ones_like"
]
]
|
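`psnr_ssim` computes PSNR from the batch MSE as `10 * log10(1 / MSE)`, i.e. the general formula `10 * log10(MAX^2 / MSE)` from the docstring with images assumed scaled to [0, 1], so MAX = 1. A minimal torch sketch of just the PSNR half (random tensors stand in for real SR/HR images, so the number itself is meaningless):

```python
from math import log10

import torch

sr = torch.rand(1, 3, 32, 32)          # stand-ins for super-resolved / ground-truth images
hr = torch.rand(1, 3, 32, 32)

mse = ((sr - hr) ** 2).mean().item()
psnr = 10 * log10(1.0 / mse)           # MAX = 1 for [0, 1]-scaled images
print(psnr)                            # roughly 7-8 dB for unrelated random images
```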
settinghead/pytorch-CycleGAN-and-pix2pix | [
"1d31100d608cc7835fdd24c2279cb54ab3c82e0b"
]
| [
"options/base_options.py"
]
| [
"import argparse\nimport os\nfrom util import util\nimport torch\nimport models\nimport data\n\n\nclass BaseOptions():\n def __init__(self):\n self.initialized = False\n\n def initialize(self, parser):\n parser.add_argument('--dataroot', required=True,\n help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')\n parser.add_argument('--batch_size', type=int,\n default=1, help='input batch size')\n parser.add_argument('--loadSize', type=int,\n default=480, help='scale images to this size')\n parser.add_argument('--fineSize', type=int,\n default=384, help='then crop to this size')\n parser.add_argument('--display_winsize', type=int, default=384,\n help='display window size for both visdom and HTML')\n parser.add_argument('--input_nc', type=int, default=3,\n help='# of input image channels')\n parser.add_argument('--output_nc', type=int, default=3,\n help='# of output image channels')\n parser.add_argument('--ngf', type=int, default=64,\n help='# of gen filters in first conv layer')\n parser.add_argument('--ndf', type=int, default=64,\n help='# of discrim filters in first conv layer')\n parser.add_argument('--netD', type=str, default='basic',\n help='selects model to use for netD')\n parser.add_argument(\n '--netG', type=str, default='resnet_9blocks', help='selects model to use for netG')\n parser.add_argument('--n_layers_D', type=int,\n default=3, help='only used if netD==n_layers')\n parser.add_argument('--gpu_ids', type=str, default='0',\n help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n parser.add_argument('--name', type=str, default='experiment_name',\n help='name of the experiment. It decides where to store samples and models')\n parser.add_argument('--dataset_mode', type=str, default='unaligned',\n help='chooses how datasets are loaded. [unaligned | aligned | single]')\n parser.add_argument('--model', type=str, default='cycle_gan',\n help='chooses which model to use. cycle_gan, pix2pix, test')\n parser.add_argument('--direction', type=str,\n default='AtoB', help='AtoB or BtoA')\n parser.add_argument('--epoch', type=str, default='latest',\n help='which epoch to load? set to latest to use latest cached model')\n parser.add_argument('--num_threads', default=4,\n type=int, help='# threads for loading data')\n parser.add_argument('--checkpoints_dir', type=str,\n default='./checkpoints', help='models are saved here')\n parser.add_argument('--norm', type=str, default='instance',\n help='instance normalization or batch normalization')\n parser.add_argument('--serial_batches', action='store_true',\n help='if true, takes images in order to make batches, otherwise takes them randomly')\n parser.add_argument('--no_dropout', action='store_true',\n help='no dropout for the generator')\n parser.add_argument('--max_dataset_size', type=int, default=float(\"inf\"),\n help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.')\n parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop',\n help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop|none]')\n parser.add_argument('--no_flip', action='store_true',\n help='if specified, do not flip the images for data augmentation')\n parser.add_argument('--init_type', type=str, default='normal',\n help='network initialization [normal|xavier|kaiming|orthogonal]')\n parser.add_argument('--init_gain', type=float, default=0.02,\n help='scaling factor for normal, xavier and orthogonal.')\n parser.add_argument('--verbose', action='store_true',\n help='if specified, print more debugging information')\n parser.add_argument('--suffix', default='', type=str,\n help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}')\n self.initialized = True\n return parser\n\n def gather_options(self):\n # initialize parser with basic options\n if not self.initialized:\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser = self.initialize(parser)\n\n # get the basic options\n opt, _ = parser.parse_known_args()\n\n # modify model-related parser options\n model_name = opt.model\n model_option_setter = models.get_option_setter(model_name)\n parser = model_option_setter(parser, self.isTrain)\n opt, _ = parser.parse_known_args() # parse again with the new defaults\n\n # modify dataset-related parser options\n dataset_name = opt.dataset_mode\n dataset_option_setter = data.get_option_setter(dataset_name)\n parser = dataset_option_setter(parser, self.isTrain)\n\n self.parser = parser\n\n return parser.parse_args()\n\n def print_options(self, opt):\n message = ''\n message += '----------------- Options ---------------\\n'\n for k, v in sorted(vars(opt).items()):\n comment = ''\n default = self.parser.get_default(k)\n if v != default:\n comment = '\\t[default: %s]' % str(default)\n message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n message += '----------------- End -------------------'\n print(message)\n\n # save to the disk\n expr_dir = os.path.join(opt.checkpoints_dir, opt.name)\n util.mkdirs(expr_dir)\n file_name = os.path.join(expr_dir, 'opt.txt')\n with open(file_name, 'wt') as opt_file:\n opt_file.write(message)\n opt_file.write('\\n')\n\n def parse(self):\n\n opt = self.gather_options()\n opt.isTrain = self.isTrain # train or test\n\n # process opt.suffix\n if opt.suffix:\n suffix = ('_' + opt.suffix.format(**vars(opt))\n ) if opt.suffix != '' else ''\n opt.name = opt.name + suffix\n\n self.print_options(opt)\n\n # set gpu ids\n str_ids = opt.gpu_ids.split(',')\n opt.gpu_ids = []\n for str_id in str_ids:\n id = int(str_id)\n if id >= 0:\n opt.gpu_ids.append(id)\n if len(opt.gpu_ids) > 0:\n torch.cuda.set_device(opt.gpu_ids[0])\n\n self.opt = opt\n return self.opt\n"
]
| [
[
"torch.cuda.set_device"
]
]
|
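`gather_options` parses twice: a first `parse_known_args` pass discovers which model/dataset was requested, the matching option setters then extend the parser, and a final parse picks up the new defaults. The pattern is easy to reproduce in isolation (the `--lambda_L1` flag is an invented model-specific option, not necessarily the real pix2pix one):

```python
import argparse

argv = ["--model", "pix2pix", "--lambda_L1", "50"]

parser = argparse.ArgumentParser()
parser.add_argument("--model", default="cycle_gan")

# Pass 1: unknown flags are tolerated; only --model is needed at this point.
opt, _ = parser.parse_known_args(argv)

# Pass 2: the chosen model contributes its own options, then parse for real.
if opt.model == "pix2pix":
    parser.add_argument("--lambda_L1", type=float, default=100.0)
print(parser.parse_args(argv))         # Namespace(model='pix2pix', lambda_L1=50.0)
```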
Crazy-LittleBoy/MLTools | [
"813d3a29ac64b21e74d33f0470fb31fc372b27c4"
]
| [
"stacking.py"
]
| [
"# -*- coding:utf-8 -*-\r\nimport numpy as np\r\nimport json\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.model_selection import train_test_split, StratifiedShuffleSplit, KFold\r\nimport pandas as pd\r\nfrom lightgbm import LGBMClassifier\r\nfrom xgboost import XGBClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\n# from sklearn.metrics import classification_report, f1_score, precision_score, recall_score, accuracy_score, roc_auc_score, roc_curve, auc\r\n\r\n\r\nclass Stacking(object):\r\n    \"\"\"\r\n    Multi-model stacking ensemble.\r\n    Algorithms with an sklearn-style interface (implementing fit and predict) are recommended.\r\n    Args:\r\n        n_splits: number of k-fold cross-validation splits of the training set for the level-1 models\r\n        base_models: level-1 models (several), must implement fit and predict\r\n        stacker: level-2 model (a single one), must implement fit and predict\r\n    Returns:\r\n        ...\r\n    \"\"\"\r\n\r\n    def __init__(self, n_splits, dict_models, stacker, fit_params, random_state=2021):\r\n        self.n_splits = n_splits\r\n        self.stacker = stacker\r\n        self.dict_models = dict_models\r\n        # self.metric = metric\r\n        self.fit_params = fit_params\r\n        self.random_state = random_state\r\n\r\n    def fit_predict(self, input_trainset_x, input_trainset_y, input_validset_x, input_validset_y, input_testset_x):\r\n        trainset_x = np.array(input_trainset_x)\r\n        trainset_y = np.array(input_trainset_y)\r\n        validset_x = np.array(input_validset_x)\r\n        validset_y = np.array(input_validset_y)\r\n        testset_x = np.array(input_testset_x)\r\n\r\n        folds = list(KFold(n_splits=self.n_splits, shuffle=True, random_state=self.random_state).split(trainset_x, trainset_y))\r\n        stack_trainsets_x = []\r\n        stack_trainsets_y = []\r\n        stack_testsets_x = []\r\n        for i, model_name in enumerate(self.dict_models.keys()):\r\n            model = self.dict_models[model_name]\r\n            stack_trainset_x = []\r\n            stack_trainset_y = []\r\n            stack_testset_x = []\r\n            for j, (train_idx, test_idx) in enumerate(folds):\r\n                train_x = trainset_x[train_idx]\r\n                train_y = trainset_y[train_idx]\r\n                holdout_x = trainset_x[test_idx]\r\n                holdout_y = trainset_y[test_idx]\r\n                print(\"[Info]: fitting [%s]-->%d/%d [fold]-->%d/%d\" % (model_name, i + 1, len(self.dict_models.keys()), j + 1, self.n_splits))\r\n                if 'xgb' in model_name:\r\n                    model.fit(train_x, train_y, eval_set=[(validset_x, validset_y)], **self.fit_params[model_name])\r\n                    pred_holdout_x = model.predict_proba(holdout_x, ntree_limit=model.best_ntree_limit)\r\n                    pred_testset_x = model.predict_proba(testset_x, ntree_limit=model.best_ntree_limit)\r\n                elif 'lgb' in model_name:\r\n                    model.fit(train_x, train_y, eval_set=[(validset_x, validset_y)], **self.fit_params[model_name])\r\n                    pred_holdout_x = model.predict_proba(holdout_x, num_iteration=model.best_iteration_)\r\n                    pred_testset_x = model.predict_proba(testset_x, num_iteration=model.best_iteration_)\r\n                else:\r\n                    model.fit(train_x, train_y, **self.fit_params[model_name])\r\n                    pred_holdout_x = model.predict_proba(holdout_x)\r\n                    pred_testset_x = model.predict_proba(testset_x)\r\n                print('[Info]: pred_holdout_x.shape:%s, holdout_y.shape:%s, pred_testset_x.shape:%s' % (pred_holdout_x.shape, holdout_y.shape, pred_testset_x.shape))\r\n                if pred_holdout_x.shape[1] > 1:\r\n                    pred_holdout_x = pred_holdout_x[:, -1].ravel()\r\n                    pred_testset_x = pred_testset_x[:, -1].ravel()\r\n                stack_trainset_x.append(pred_holdout_x)\r\n                stack_trainset_y.append(holdout_y)\r\n                stack_testset_x.append(pred_testset_x)\r\n            stack_trainset_x = np.concatenate(stack_trainset_x)\r\n
stack_trainsets_x.append(stack_trainset_x)\r\n stack_trainset_y = np.concatenate(stack_trainset_y)\r\n stack_trainsets_y.append(stack_trainset_y)\r\n stack_testset_x = np.mean(np.array(stack_testset_x), axis=0)\r\n # stack_testset_x = np.mean(np.hstack(stack_testset_x), axis=1, keepdims=True)\r\n print('[Info]: stack_trainset_x.shape:%s, stack_testset_x.shape: %s' % (str(stack_trainset_x.shape), str(stack_testset_x.shape)))\r\n stack_testsets_x.append(stack_testset_x)\r\n stack_trainsets_x = np.array(stack_trainsets_x).T\r\n stack_trainsets_y = np.array(stack_trainsets_y).T\r\n stack_testsets_x = np.array(stack_testsets_x).T\r\n print(\"[Info]: stack_trainsets_x.shape: %s, stack_trainsets_y.shape: %s, stack_testsets_x.shape: %s\" % (stack_trainsets_x.shape,\r\n stack_trainsets_y.shape,\r\n stack_testsets_x.shape)\r\n )\r\n # pd.DataFrame(stack_trainsets_x).to_csv('./data/stack_trainsets_x.csv', index=False)\r\n # pd.DataFrame(stack_trainsets_y).to_csv('./data/stack_trainsets_y.csv', index=False)\r\n # pd.DataFrame(stack_testsets_x).to_csv('./data/stack_testsets_x.csv', index=False)\r\n\r\n self.stacker.fit(stack_trainsets_x, stack_trainsets_y[:, -1].ravel())\r\n predict = self.stacker.predict_proba(stack_testsets_x)\r\n\r\n return predict\r\n\r\n\r\nif __name__ == '__main__':\r\n # # stacking\r\n # data = pd.read_csv('./data/train_data_clean.csv',\r\n # # nrows=40000,\r\n # low_memory=False,\r\n # )\r\n # # data['label'] = (data['label'] < 8) * 1\r\n # print(data.shape)\r\n # print(data.head(3))\r\n # feature_names = data.columns.tolist()[1:-1]\r\n # # 划分数据集\r\n # data = data.sample(frac=1, random_state=2020).reset_index(drop=True).values\r\n # train_set_x, test_set_x, train_set_y, test_set_y = train_test_split(data[:, 1:-1], data[:, -1].ravel(), test_size=0.3)\r\n # valid_set_x, test_set_x, valid_set_y, test_set_y = train_test_split(test_set_x, test_set_y, test_size=0.5)\r\n # # print(train_set_x.shape, test_set_x.shape)\r\n # print(train_set_y.shape, valid_set_y.shape, test_set_y.shape)\r\n #\r\n # with open('./config/parameters.json', 'r') as jfile:\r\n # model_config = json.load(jfile)\r\n # print(model_config)\r\n #\r\n # fit_params = {'lgb_model': {'early_stopping_rounds': 15,\r\n # 'eval_metric': 'auc',\r\n # 'verbose': False,\r\n # 'feature_name': feature_names,\r\n # },\r\n # 'xgb_model': {'early_stopping_rounds': 15,\r\n # 'eval_metric': 'auc',\r\n # 'verbose': False,\r\n # },\r\n # 'gbdt_model': {},\r\n # 'rf_model': {},\r\n # 'adaboost_model': {},\r\n # }\r\n #\r\n # lgb_model = LGBMClassifier(**model_config['lgb_model']['model_params'])\r\n # xgb_model = XGBClassifier(**model_config['xgb_model']['model_params'])\r\n # gbdt_model = GradientBoostingClassifier(**model_config['gbdt_model']['model_params'])\r\n # rf_model = RandomForestClassifier(**model_config['rf_model']['model_params'])\r\n # adaboost_model = AdaBoostClassifier(**model_config['adaboost_model']['model_params'])\r\n #\r\n # stack = Stacking(n_splits=5,\r\n # dict_models={'lgb_model': lgb_model,\r\n # 'xgb_model': xgb_model,\r\n # 'gbdt_model': gbdt_model,\r\n # 'rf_model': rf_model,\r\n # 'adaboost_model': adaboost_model,\r\n # },\r\n # stacker=LogisticRegression(),\r\n # fit_params=fit_params,\r\n # )\r\n # stacking_result = stack.fit_predict(input_trainset_x=train_set_x,\r\n # input_trainset_y=train_set_y,\r\n # input_validset_x=valid_set_x,\r\n # input_validset_y=valid_set_y,\r\n # input_testset_x=test_set_x,\r\n # )\r\n # print(stacking_result[:10])\r\n # print('[Metric]: %s' % roc_auc_score(test_set_y, 
stacking_result[:, -1].ravel()))\r\n    # print(classification_report(y_true=test_set_y, y_pred=(stacking_result[:, -1] > 0.5).ravel() * 1))\r\n\r\n    ### Prediction\r\n    # load the training data\r\n    data = pd.read_csv('./data/train_data_clean.csv',\r\n                       # nrows=4000,\r\n                       low_memory=False,\r\n                       )\r\n    # data['label'] = (data['label'] < 8) * 1\r\n    print(data.shape)\r\n    print(data.head(3))\r\n    feature_names = data.columns.tolist()[1:-1]\r\n    # split the dataset\r\n    data = data.sample(frac=1, random_state=2020).reset_index(drop=True).values\r\n    # train_set_x, valid_set_x, train_set_y, valid_set_y = train_test_split(data[:, 1:-1], data[:, -1].ravel(), test_size=0.3)\r\n    # # valid_set_x, test_set_x, valid_set_y, test_set_y = train_test_split(valid_set_x, valid_set_y, test_size=0.5)\r\n    # print(train_set_x.shape, valid_set_x.shape)\r\n    # # print(train_set_y.shape, valid_set_y.shape, test_set_y.shape)\r\n\r\n    # load the test data\r\n    test = pd.read_csv('./data/test_data_clean.csv',\r\n                       low_memory=False,\r\n                       )\r\n    print(test.shape)\r\n    print(test.head(3))\r\n    test_result = test[['ID']]\r\n    test = test.values\r\n    test_x = test[:, 1:]\r\n    print('[Info]: test data shape: ', test_x.shape)\r\n\r\n    with open('./config/parameters.json', 'r') as jfile:\r\n        model_config = json.load(jfile)\r\n    print(model_config)\r\n\r\n    fit_params = {'lgb_model': {'early_stopping_rounds': 15,\r\n                                'eval_metric': 'auc',\r\n                                'verbose': False,\r\n                                'feature_name': feature_names,\r\n                                },\r\n                  'xgb_model': {'early_stopping_rounds': 15,\r\n                                'eval_metric': 'auc',\r\n                                'verbose': False,\r\n                                },\r\n                  'gbdt_model': {},\r\n                  'rf_model': {},\r\n                  'adaboost_model': {},\r\n                  }\r\n\r\n    lgb_model = LGBMClassifier(**model_config['lgb_model']['model_params'])\r\n    xgb_model = XGBClassifier(**model_config['xgb_model']['model_params'])\r\n    gbdt_model = GradientBoostingClassifier(**model_config['gbdt_model']['model_params'])\r\n    rf_model = RandomForestClassifier(**model_config['rf_model']['model_params'])\r\n    adaboost_model = AdaBoostClassifier(**model_config['adaboost_model']['model_params'])\r\n\r\n    stack = Stacking(n_splits=7,\r\n                     dict_models={'lgb_model': lgb_model,\r\n                                  'xgb_model': xgb_model,\r\n                                  'gbdt_model': gbdt_model,\r\n                                  'rf_model': rf_model,\r\n                                  'adaboost_model': adaboost_model,\r\n                                  },\r\n                     stacker=LogisticRegression(),\r\n                     fit_params=fit_params,\r\n                     )\r\n\r\n    stacking_results = []\r\n    trainset_x, trainset_y = data[:, 1:-1], data[:, -1]\r\n    n_splits = 7\r\n    splits = list(KFold(n_splits=n_splits, shuffle=True, random_state=2021).split(trainset_x, trainset_y))\r\n    for i, (train_idx, valid_idx) in enumerate(splits):\r\n        print('\\n')\r\n        print('-' * 50 + ' training for rounds: %s/%s ' % (i + 1, n_splits) + '-' * 50)\r\n        train_set_x, valid_set_x, train_set_y, valid_set_y = trainset_x[train_idx], trainset_x[valid_idx], trainset_y[train_idx], trainset_y[valid_idx],\r\n        stacking_result = stack.fit_predict(input_trainset_x=train_set_x,\r\n                                            input_trainset_y=train_set_y,\r\n                                            input_validset_x=valid_set_x,\r\n                                            input_validset_y=valid_set_y,\r\n                                            input_testset_x=test_x,\r\n                                            )\r\n        print(stacking_result[:10])\r\n        stacking_results.append(stacking_result[:, 1].ravel())\r\n    stacking_results = np.mean(stacking_results, axis=0)\r\n    test_result['TARGET'] = stacking_results\r\n    print(test_result.head())\r\n    test_result.to_csv('./output/submission_cv_stacking.cv7.csv', index=False)\r\n"
]
| [
[
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.KFold",
"numpy.concatenate",
"sklearn.ensemble.AdaBoostClassifier",
"numpy.mean",
"sklearn.ensemble.GradientBoostingClassifier",
"numpy.array"
]
]
|
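Editor's note on the record above: the `Stacking.fit_predict` cell implements out-of-fold (OOF) stacking. A minimal sketch of the same idea, not taken from the row itself — it assumes only scikit-learn, and the `base`/`stacker` names and synthetic data are illustrative:

```python
# Out-of-fold stacking, reduced to one base model and a logistic-regression stacker.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

X, y = make_classification(n_samples=500, random_state=0)
X_train, y_train, X_test = X[:400], y[:400], X[400:]

oof = np.zeros(len(X_train))   # out-of-fold predictions become the stacker's feature
test_preds = []                # per-fold test predictions, averaged afterwards
for tr_idx, va_idx in KFold(n_splits=5, shuffle=True, random_state=0).split(X_train):
    base = RandomForestClassifier(random_state=0).fit(X_train[tr_idx], y_train[tr_idx])
    oof[va_idx] = base.predict_proba(X_train[va_idx])[:, 1]
    test_preds.append(base.predict_proba(X_test)[:, 1])

stacker = LogisticRegression().fit(oof.reshape(-1, 1), y_train)
final = stacker.predict_proba(np.mean(test_preds, axis=0).reshape(-1, 1))[:, 1]
```

Because every training-set prediction fed to the stacker comes from a fold that did not see that row, the stacker is trained without leakage; the cell above does the same with five base models and a KFold loop on top.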
farizrahman4u/dl4j-test-resources | [
"ec81bfb53c77bbdfffef146a4bc11074dc7f3191"
]
| [
"src/main/resources/modelimport/keras/fullconfigs/cnn/predict.py"
]
| [
"from keras.models import load_model\nimport numpy as np\n\nnp.random.seed(1337)\n\nmodel = load_model(\"cnn_batch_norm.h5\")\ninput = np.random.random((5, 10, 10, 3))\n\noutput = model.predict(input)\n\nassert abs(-0.0520611 - output[0][0]) < 0.000001\nassert abs(0.04986075 - output[1][0]) < 0.000001\nassert abs(0.16297522 - output[2][0]) < 0.000001\nassert abs(0.15389983 - output[3][0]) < 0.000001\nassert abs(0.15537278 - output[4][0]) < 0.000001\n\nnp.save(arr=input, file=\"input.npy\")\nnp.save(arr=output, file=\"predictions.npy\")\n"
]
| [
[
"numpy.save",
"numpy.random.random",
"numpy.random.seed"
]
]
|
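Editor's note: the `predict.py` cell above pins the NumPy seed and saves the input/prediction pair so the Keras-import tests stay reproducible. A sketch of that fixture round-trip pattern (file names follow the cell; no model is assumed):

```python
import numpy as np

np.random.seed(1337)                          # fixed seed -> identical fixtures on every run
x = np.random.random((5, 10, 10, 3))
np.save("input.npy", x)                       # equivalent to np.save(arr=x, file="input.npy")
assert np.allclose(x, np.load("input.npy"))   # the .npy round-trip is lossless
```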
dejac001/MCFlow | [
"19d1ee21318b49102842d75493a2fb830ec116f0",
"19d1ee21318b49102842d75493a2fb830ec116f0"
]
| [
"quick/calc_kH.py",
"defineIntersection.py"
]
| [
"def kH(dG, MW):\n mean, stderr = dG\n b = -1/(0.008314*323.)\n a = MW/(8314*323.)\n return [\n math.exp(b*mean)*a,\n b*stderr*math.exp(b*mean)*a,\n ]\n\nimport math\n\n#dG = [-12.7,0.7]\nif __name__ == '__main__':\n# print(kH(dG, 102.162))\n# print(kH([-39.7,0.2], 104.15))\n import sys\n from plotting.read import readDat\n import numpy as np\n file, MW = sys.argv[-2:]\n MW = float(MW)\n data = readDat(file)\n dG = np.mean(data[1])\n dG_std = np.std(data[1], ddof=1)\n print(kH([dG,dG_std], MW))\n\n",
"def write_circles():\n r = rcut\n coords = []\n for ii, my_center in enumerate(sphere):\n h, k, l = pbc(my_center[0],a),pbc(my_center[1],b), pbc(my_center[2],c)\n for theta in np.linspace(0,2*np.pi):\n for phi in np.linspace(0,np.pi):\n x = h+r*np.cos(theta)*np.sin(phi)\n y = k+r*np.sin(theta)*np.sin(phi)\n z = l+r*np.cos(phi)\n coords.append( \n list(map(pbc,[x,y,z],[a,b,c]))\n )\n data = {'atoms':['F' for i in range(len(coords))],\n 'coords':coords}\n writer.xyz('spheres.xyz',data)\n# my_ax.plot(coords[:,0],coords[:,1],'-.',color='red')\n# if len(pbc_coords) > 0:\n# pbc_coords = [[x[i],y[i]] for i in pbc_coords]\n# coords = fold_coords(pbc_coords,[a,b])\n# my_ax.plot(coords[:,0],coords[:,1],'-.',color='red')\n\nfrom MCFlow.file_formatting import reader, writer\nfrom MCFlow.calc_tools import calculate_distance, pbc, fold_coords\nimport numpy as np\nimport pprint\n\nrcut = 5.0/2.\na, b, c = [20.022, 19.899, 13.383]\ncenter = [10.011,4.9748,0.]\nx, y, z = center\nsphere = [ \n [x,y,z],\n [a/2-x,b/2+y,c/2+z],\n [x,b/2-y,z],\n [a/2+x,y,c/2-z],\n [a-x,b-y,c-z],\n [a/2+x,b/2-y,c/2-z],\n [a-x,b/2+y,c-z],\n [a/2-x,b-y,c/2+z]\n ]\n\nif __name__ == '__main__': \n \n new_data = {'atoms':[],'coords':[]}\n old_data = reader.xyz('energy_grid_114_O_merged1.5.xyz')\n for atom, xyz in zip(old_data['atoms'], old_data['coords']):\n for my_center in sphere:\n if calculate_distance(xyz,my_center,[a,b,c]) < rcut:\n atom = 'H'\n new_data['atoms'].append(atom)\n new_data['coords'].append(xyz)\n \n \n writer.xyz('new_channels.xyz',new_data)\n"
]
| [
[
"numpy.std",
"numpy.mean"
],
[
"numpy.cos",
"numpy.linspace",
"numpy.sin"
]
]
|
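Editor's note: `kH` in the first cell propagates the free-energy uncertainty through k(dG) = a*exp(b*dG) to first order, i.e. sigma_k = |dk/ddG| * sigma_dG = |b|*a*exp(b*dG)*sigma_dG. A hedged sketch of that propagation (constants follow the cell; the `abs` is added here to make the returned uncertainty non-negative, whereas the original returns a signed value):

```python
import math

def kH(dG_mean, dG_stderr, MW, T=323.0):
    b = -1.0 / (0.008314 * T)          # -1/(R*T), with R in kJ/(mol*K)
    a = MW / (8314.0 * T)              # prefactor used in the cell above
    k = math.exp(b * dG_mean) * a
    return k, abs(b) * k * dG_stderr   # first-order error propagation

print(kH(-12.7, 0.7, 102.162))
```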
codemzs/onnxruntime | [
"c69194ec4c8c9674368113aa6044d0db708cd813"
]
| [
"onnxruntime/test/python/onnx_backend_test_series.py"
]
| [
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport argparse\nimport sys\nimport os\nimport platform\nimport unittest\nimport onnx\nimport onnx.backend.test\n\nimport numpy as np\nimport onnxruntime.backend as c2\n\npytest_plugins = 'onnx.backend.test.report',\n\nclass OrtBackendTest(onnx.backend.test.BackendTest):\n\n def __init__(self, backend, parent_module=None):\n super(OrtBackendTest, self).__init__(backend, parent_module)\n\n @classmethod\n def assert_similar_outputs(cls, ref_outputs, outputs, rtol, atol):\n np.testing.assert_equal(len(ref_outputs), len(outputs))\n for i in range(len(outputs)):\n np.testing.assert_equal(ref_outputs[i].dtype, outputs[i].dtype)\n if ref_outputs[i].dtype == np.object:\n np.testing.assert_array_equal(ref_outputs[i], outputs[i])\n else:\n np.testing.assert_allclose(\n ref_outputs[i],\n outputs[i],\n rtol=1e-3,\n atol=1e-5)\n\n\n# ORT first supported opset 7, so models with nodes that require versions prior to opset 7 are not supported\ndef tests_with_pre_opset7_dependencies_filters():\n filters = ['^test_AvgPool1d_cpu',\n '^test_AvgPool1d_stride_cpu',\n '^test_AvgPool2d_cpu',\n '^test_AvgPool2d_stride_cpu',\n '^test_AvgPool3d_cpu',\n '^test_AvgPool3d_stride1_pad0_gpu_input_cpu',\n '^test_AvgPool3d_stride_cpu',\n '^test_BatchNorm1d_3d_input_eval_cpu',\n '^test_BatchNorm2d_eval_cpu',\n '^test_BatchNorm2d_momentum_eval_cpu',\n '^test_BatchNorm3d_eval_cpu',\n '^test_BatchNorm3d_momentum_eval_cpu',\n '^test_GLU_cpu',\n '^test_GLU_dim_cpu',\n '^test_Linear_cpu',\n '^test_PReLU_1d_cpu',\n '^test_PReLU_1d_multiparam_cpu',\n '^test_PReLU_2d_cpu',\n '^test_PReLU_2d_multiparam_cpu',\n '^test_PReLU_3d_cpu',\n '^test_PReLU_3d_multiparam_cpu',\n '^test_PoissonNLLLLoss_no_reduce_cpu',\n '^test_Softsign_cpu',\n '^test_operator_add_broadcast_cpu',\n '^test_operator_add_size1_broadcast_cpu',\n '^test_operator_add_size1_right_broadcast_cpu',\n '^test_operator_add_size1_singleton_broadcast_cpu',\n '^test_operator_addconstant_cpu',\n '^test_operator_addmm_cpu',\n '^test_operator_basic_cpu',\n '^test_operator_mm_cpu',\n '^test_operator_non_float_params_cpu',\n '^test_operator_params_cpu',\n '^test_operator_pow_cpu']\n\n return filters\n\n\ndef unsupported_usages_filters():\n filters = ['^test_convtranspose_1d_cpu', # ConvTransponse supports 4-D only\n '^test_convtranspose_3d_cpu']\n\n return filters\n\n\ndef other_tests_failing_permanently_filters():\n # Numpy float to string has unexpected rounding for some results given numpy default precision is meant to be 8.\n # e.g. 0.296140194 -> '0.2961402' not '0.29614019'. ORT produces the latter with precision set to 8, which\n # doesn't match the expected output that was generated with numpy.\n filters = ['^test_cast_FLOAT_to_STRING_cpu']\n\n return filters\n\n\n\ndef test_with_types_disabled_due_to_binary_size_concerns_filters():\n filters = ['^test_bitshift_right_uint16_cpu',\n '^test_bitshift_left_uint16_cpu']\n\n return filters\n\n\ndef create_backend_test(testname=None):\n backend_test = OrtBackendTest(c2, __name__)\n\n # Type not supported\n backend_test.exclude(r'(FLOAT16)')\n\n if testname:\n backend_test.include(testname + '.*')\n else:\n # Tests that are failing temporarily and should be fixed\n current_failing_tests = [#'^test_cast_STRING_to_FLOAT_cpu', # old test data that is bad on Linux CI builds\n '^test_unique_not_sorted_without_axis_cpu', # bad expected data. 
enable after https://github.com/onnx/onnx/pull/2381 is picked up\n '^test_mod_float_mixed_sign_example_cpu', #onnxruntime::Mod::Compute fmod_ was false. fmod attribute must be true for float, float16 and double types\n '^test_resize_downsample_scales_cubic_align_corners_cpu', # results mismatch with onnx tests\n '^test_resize_downsample_scales_linear_align_corners_cpu', # results mismatch with onnx tests\n '^test_resize_tf_crop_and_resize_cpu', # bad expected data, needs test fix\n '^test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu', # bad expected data, needs test fix\n '^test_resize_upsample_sizes_nearest_floor_align_corners_cpu', # bad expected data, needs test fix\n '^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu', # bad expected data, needs test fix\n '^test_maxunpool_export_with_output_shape_cpu', # Invalid output in ONNX test. See https://github.com/onnx/onnx/issues/2398'\n ]\n\n # Example of how to disable tests for a specific provider.\n # if c2.supports_device('NGRAPH'):\n # current_failing_tests.append('^test_operator_repeat_dim_overflow_cpu')\n if c2.supports_device('NGRAPH'):\n current_failing_tests += ['^test_clip.*',\n '^test_qlinearconv_cpu',\n '^test_depthtospace_crd.*',\n '^test_argmax_negative_axis.*',\n '^test_argmin_negative_axis.*',\n '^test_hardmax_negative_axis.*',\n '^test_gemm_default_no_bias_cpu',\n '^test_flatten_negative_axis.*',\n '^test_reduce_[a-z1-9_]*_negative_axes_.*',\n 'test_squeeze_negative_axes_cpu',\n 'test_unsqueeze_negative_axes_cpu',\n 'test_constant_pad_cpu',\n 'test_edge_pad_cpu',\n 'test_reflect_pad_cpu']\n\n if c2.supports_device('DNNL'):\n current_failing_tests += ['^test_range_float_type_positive_delta_expanded_cpu',\n '^test_range_int32_type_negative_delta_expanded_cpu',\n\t\t\t\t\t\t\t\t\t '^test_averagepool_2d_ceil_cpu',\n '^test_maxpool_2d_ceil_cpu',\n\t\t\t\t\t\t\t\t\t '^test_maxpool_2d_dilations_cpu']\n\n if c2.supports_device('OPENVINO_GPU_FP32') or c2.supports_device('OPENVINO_GPU_FP16'):\n current_failing_tests.append('^test_div_cpu*')\n # temporarily exclude vgg19 test which comsumes too much memory, run out of memory on Upsquared device.\n # single test pass for vgg19, need furture investigation\n current_failing_tests.append('^test_vgg19_cpu*')\n\n if c2.supports_device('OPENVINO_CPU_FP32'):\n current_failing_tests += ['^test_scan9_sum_cpu',#sum_out output node not defined, temporarily disabling test\n '^test_scan_sum_cpu'] #sum_out output node not defined, temporarily disabling test\n\n filters = current_failing_tests + \\\n tests_with_pre_opset7_dependencies_filters() + \\\n unsupported_usages_filters() + \\\n other_tests_failing_permanently_filters() + \\\n test_with_types_disabled_due_to_binary_size_concerns_filters()\n\n backend_test.exclude('(' + '|'.join(filters) + ')')\n print('excluded tests:', filters)\n\n # import all test cases at global scope to make\n # them visible to python.unittest.\n globals().update(backend_test.enable_report().test_cases)\n\n return backend_test\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(os.path.basename(__file__),\n description='Run the ONNX backend tests using ONNXRuntime.')\n\n # Add an argument to match a single test name, by adding the name to the 'include' filter.\n # Using -k with python unittest (https://docs.python.org/3/library/unittest.html#command-line-options)\n # doesn't work as it filters on the test method name (Runner._add_model_test) rather than inidividual test case names.\n parser.add_argument('-t', '--test-name', dest='testname', 
type=str,\n help=\"Only run tests that match this value. Matching is regex based, and '.*' is automatically appended\")\n\n # parse just our args. python unittest has its own args and arg parsing, and that runs inside unittest.main()\n args, left = parser.parse_known_args()\n sys.argv = sys.argv[:1] + left\n\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n backend_test = create_backend_test(args.testname)\n unittest.main()\n"
]
| [
[
"numpy.testing.assert_array_equal",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose"
]
]
|
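Editor's note: `create_backend_test` above excludes tests by joining every filter list into a single regex alternation passed to `backend_test.exclude`. A self-contained sketch of that composition (filter lists shortened from the cell):

```python
import re

current_failing = ['^test_mod_float_mixed_sign_example_cpu']
pre_opset7 = ['^test_AvgPool1d_cpu', '^test_GLU_cpu']
pattern = '(' + '|'.join(current_failing + pre_opset7) + ')'  # same '(' + '|'.join(...) + ')' shape as the script
assert re.match(pattern, 'test_GLU_cpu')        # would be excluded
assert not re.match(pattern, 'test_relu_cpu')   # would still run
```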
hammcin/CarND-Behavioral-Cloning-P3 | [
"bbce8d03cdc5fdc9ff0b65dafb6adb926ce1c56a"
]
| [
"model.py"
]
| [
"import csv\nfrom sklearn.model_selection import train_test_split\nimport cv2\nimport random\nimport numpy as np\nimport sklearn\nfrom scipy import ndimage\nfrom keras.models import Sequential, Model\nfrom keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D\nfrom keras.layers import Cropping2D, Activation, BatchNormalization\nfrom keras.callbacks import ModelCheckpoint\nimport matplotlib.pyplot as plt\n\n# Load csv file with image paths and steering angles into different lists\n# for training set and validation set\ndef load_data(data_path = '/opt/Behavioral-Cloning/Data/', valid_split=0.2):\n lines = []\n with open(data_path + 'driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n\n train_samples, validation_samples = train_test_split(lines,\n test_size=valid_split)\n\n return train_samples, validation_samples\n\n# Data generator\n# Input arguments: list representing info in csv file, path to data\n# Returns batches of images and steering angles\ndef generator(samples, data_path = '/opt/Behavioral-Cloning/Data/',\n batch_size=32):\n num_samples = len(samples)\n while 1:\n random.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n if (offset+batch_size)<num_samples:\n batch_samples = samples[offset:(offset+batch_size)]\n else:\n batch_samples = samples[offset:]\n\n images = []\n measurements = []\n for batch_sample in batch_samples:\n source_path_center = batch_sample[0]\n filename_center = source_path_center.split('/')[-1]\n center_path = data_path + 'IMG/' + filename_center\n image_center = ndimage.imread(center_path)\n\n images.append(image_center)\n measurement_center = float(batch_sample[3])\n measurements.append(measurement_center)\n\n # Augment dataset by flipping images\n images.append(cv2.flip(image_center,1))\n measurements.append(measurement_center*-1.0)\n\n X_train = np.array(images)\n y_train = np.array(measurements)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# Define model\ndef net(loss='mse', optimizer='adam'):\n model = Sequential()\n\n model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\n\n model.add(Cropping2D(cropping=((70,25),(0,0))))\n\n model.add(Convolution2D(24,(5,5),strides=(2,2)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n\n model.add(Convolution2D(36,(5,5),strides=(2,2)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n\n model.add(Convolution2D(48,(5,5),strides=(2,2)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n\n model.add(Convolution2D(64,(3,3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n\n model.add(Convolution2D(64,(3,3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n\n model.add(Flatten())\n\n model.add(Dense(100))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n\n model.add(Dense(50))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n\n model.add(Dense(10))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n\n model.add(Dense(1))\n\n model.compile(loss=loss, optimizer=optimizer)\n\n return model\n\n# Save a figure with loss over epochs for training and validation sets\ndef loss_fig(history_object, file_name='model_mse_loss.jpg'):\n plt.plot(history_object.history['loss'])\n plt.plot(history_object.history['val_loss'])\n plt.title('model mean squared error loss')\n plt.ylabel('mean squared error loss')\n plt.xlabel('epoch')\n plt.legend(['training set', 'validation set'], 
loc='upper right')\n plt.savefig(file_name)\n\ndef main():\n\n # parameters for training\n n_batch = 32\n data_path = '/opt/Behavioral-Cloning/Data/'\n\n # instantiate training and validation data generators\n train_samples, validation_samples = load_data(data_path=data_path)\n train_generator = generator(train_samples, data_path=data_path,\n batch_size=n_batch)\n validation_generator = generator(validation_samples, data_path=data_path,\n batch_size=n_batch)\n\n # instatiate model\n model = net()\n\n # Train model\n checkpoint = ModelCheckpoint(\"model.h5\", save_best_only=True)\n callbacks_list = [checkpoint]\n history_object = model.fit_generator(train_generator,\n steps_per_epoch=(len(train_samples)//n_batch) + 1,\n callbacks=callbacks_list,\n validation_data=validation_generator,\n validation_steps=(len(validation_samples)//n_batch) + 1,\n epochs=30)\n\n # Generate figure of loss over epochs and save\n loss_fig(history_object)\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"matplotlib.pyplot.legend",
"scipy.ndimage.imread",
"matplotlib.pyplot.title",
"sklearn.utils.shuffle",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.ylabel"
]
]
|
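Editor's note: the `generator` above doubles each batch by mirroring camera frames, and the steering label must flip sign together with the image. A sketch of just that augmentation step (the frame shape matches the model's 160x320x3 input; the zero frame is a stand-in):

```python
import numpy as np
import cv2

image = np.zeros((160, 320, 3), dtype=np.uint8)   # one camera frame
angle = 0.25                                      # its steering measurement
flipped = cv2.flip(image, 1)                      # mirror around the vertical axis
flipped_angle = -angle                            # a left turn becomes a right turn
```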
lim0606/pytorch-ardae-vae | [
"52f460a90fa5822692031ab7dcca39fa9168988e"
]
| [
"models/vae/auxtoy.py"
]
| [
"import math\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import autograd\n\nfrom models.layers import Identity, MLP\nfrom models.reparam import NormalDistributionLinear\nfrom utils import loss_kld_gaussian, loss_kld_gaussian_vs_gaussian, loss_recon_gaussian, normal_energy_func\nfrom utils import logprob_gaussian\nfrom utils import get_nonlinear_func\nfrom models.vae.toy import Decoder\n\n\ndef sample_gaussian(mu, logvar, _std=1.):\n if _std is None:\n _std = 1.\n std = _std*torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + std * eps\n\nclass AuxEncoder(nn.Module):\n def __init__(self,\n input_dim=2,\n h_dim=8,\n noise_dim=2,\n nonlinearity='softplus',\n num_hidden_layers=1,\n clip_logvar=None,\n ):\n super().__init__()\n self.input_dim = input_dim\n self.h_dim = h_dim\n self.noise_dim = noise_dim\n self.nonlinearity = nonlinearity\n self.num_hidden_layers = num_hidden_layers\n self.clip_logvar = clip_logvar\n\n self.main = MLP(input_dim=input_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)\n self.reparam = NormalDistributionLinear(h_dim, noise_dim, nonlinearity=clip_logvar)\n\n def sample(self, mu, logvar, _std=1.):\n return sample_gaussian(mu, logvar, _std=_std)\n\n def forward(self, x, _std=1.):\n batch_size = x.size(0)\n x = x.view(batch_size, self.input_dim)\n\n # forward\n h = self.main(x)\n mu, logvar = self.reparam(h)\n\n # sample\n noise = self.sample(mu, logvar, _std=_std)\n\n return noise, mu, logvar, h\n\nclass Encoder(nn.Module):\n def __init__(self,\n input_dim=2,\n noise_dim=2,\n h_dim=8,\n z_dim=2,\n nonlinearity='softplus',\n num_hidden_layers=1,\n enc_input=False,\n enc_noise=False,\n clip_logvar=None,\n ):\n super().__init__()\n self.input_dim = input_dim\n self.noise_dim = noise_dim\n self.h_dim = h_dim\n self.z_dim = z_dim\n self.nonlinearity = nonlinearity\n self.num_hidden_layers = num_hidden_layers\n self.enc_input = enc_input\n self.enc_noise = enc_noise\n self.clip_logvar = clip_logvar\n #self.inp_dim = input_dim if not enc_input else h_dim\n #self.ctx_dim = noise_dim if not enc_noise else h_dim\n\n self.inp_encode = None\n self.nos_encode = None\n self.fc = None\n self.reparam = None\n\n #self.main = MLP(input_dim=input_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers, use_nonlinearity_output=True)\n #self.reparam = NormalDistributionLinear(h_dim, z_dim, nonlinearity=clip_logvar)\n\n def sample(self, mu_z, logvar_z):\n raise NotImplementedError\n #return self.reparam.sample_gaussian(mu_z, logvar_z)\n\n def _forward_inp(self, x):\n batch_size = x.size(0)\n x = x.view(batch_size, self.input_dim)\n\n # enc\n inp = self.inp_encode(x)\n\n return inp\n\n def _forward_nos(self, noise):\n # enc\n nos = self.nos_encode(noise)\n\n return nos\n\n def _forward_all(self, inp, nos):\n raise NotImplementedError\n #hid = self.fc(inp, nos)\n #mu_z, logvar_ = self.reparam(hid)\n #z = self.sample(mu_z, logvar_z)\n return z, mu_z, logvar_z, h\n\n def forward(self, x, noise, nz=1):\n batch_size = x.size(0)\n\n # enc\n nos = self._forward_nos(noise)\n inp = self._forward_inp(x)\n\n # view\n assert nos.size(0) == batch_size*nz\n inp = inp.unsqueeze(1).expand(-1, nz, -1).contiguous()\n inp = inp.view(batch_size*nz, -1)\n\n # forward\n z, mu_z, logvar_z, h = self._forward_all(inp, nos)\n #z, mu_z, logvar_z = self._forward_all(inp, nos)\n\n return z, mu_z, logvar_z, h\n 
#return z, mu_z, logvar_z\n\nclass SimpleEncoder(Encoder):\n def __init__(self,\n input_dim=2,\n noise_dim=2,\n h_dim=64,\n z_dim=2,\n nonlinearity='tanh',\n num_hidden_layers=1,\n enc_input=False,\n enc_noise=False,\n clip_logvar=None,\n ):\n super().__init__(\n input_dim = input_dim,\n noise_dim = noise_dim,\n h_dim = h_dim,\n z_dim = z_dim,\n nonlinearity = nonlinearity,\n num_hidden_layers = num_hidden_layers,\n enc_input = enc_input,\n enc_noise = enc_noise,\n clip_logvar = clip_logvar,\n )\n inp_dim = input_dim if not enc_input else h_dim\n ctx_dim = noise_dim if not enc_noise else h_dim\n\n self.inp_encode = Identity() if not enc_input \\\n else MLP(input_dim=input_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)\n self.nos_encode = Identity() if not enc_noise \\\n else MLP(input_dim=noise_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)\n self.fc = MLP(input_dim=inp_dim+ctx_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)\n self.reparam = NormalDistributionLinear(h_dim, z_dim, nonlinearity=clip_logvar)\n\n def sample(self, mu_z, logvar_z):\n return self.reparam.sample_gaussian(mu_z, logvar_z)\n\n def _forward_all(self, inp, nos):\n h1 = torch.cat([inp, nos], dim=1)\n h2 = self.fc(h1)\n mu_z, logvar_z = self.reparam(h2)\n z = self.sample(mu_z, logvar_z)\n return z, mu_z, logvar_z, h2\n\nclass AuxDecoder(nn.Module):\n def __init__(self,\n input_dim=2,\n z_dim=2,\n noise_dim=2,\n h_dim=64,\n nonlinearity='tanh',\n num_hidden_layers=1,\n enc_input=False,\n enc_latent=False,\n clip_logvar=None,\n ):\n super().__init__()\n self.input_dim = input_dim\n self.z_dim = z_dim\n self.noise_dim = noise_dim\n self.h_dim = h_dim\n self.nonlinearity = nonlinearity\n self.num_hidden_layers = num_hidden_layers\n self.enc_input = enc_input\n self.enc_latent = enc_latent\n inp_dim = input_dim if not enc_input else h_dim\n ltt_dim = z_dim if not enc_latent else h_dim\n\n self.inp_encode = Identity() if not enc_input \\\n else MLP(input_dim=input_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)\n self.ltt_encode = Identity() if not enc_latent \\\n else MLP(input_dim=z_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)\n self.fc = MLP(input_dim=inp_dim+ltt_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)\n self.reparam = NormalDistributionLinear(h_dim, noise_dim, nonlinearity=clip_logvar)\n\n def sample(self, mu, logvar):\n return self.reparam.sample_gaussian(mu, logvar)\n\n def _forward_inp(self, x):\n batch_size = x.size(0)\n x = x.view(batch_size, self.input_dim)\n\n # enc\n inp = self.inp_encode(x)\n\n return inp\n\n def _forward_ltt(self, z):\n # enc\n ltt = self.ltt_encode(z)\n\n return ltt\n\n def _forward_all(self, inp, ltt):\n h1 = torch.cat([inp, ltt], dim=1)\n h2 = self.fc(h1)\n mu_n, logvar_n = self.reparam(h2)\n noise = self.sample(mu_n, logvar_n)\n return noise, mu_n, logvar_n\n\n def forward(self, x, z, nz=1):\n batch_size = x.size(0)\n\n # enc\n ltt = self._forward_ltt(z)\n inp = self._forward_inp(x)\n\n # view\n assert ltt.size(0) == batch_size*nz\n inp = 
inp.unsqueeze(1).expand(-1, nz, -1).contiguous()\n inp = inp.view(batch_size*nz, -1)\n\n # forward\n noise, mu_n, logvar_n = self._forward_all(inp, ltt)\n\n return noise, mu_n, logvar_n\n\nclass VAE(nn.Module):\n def __init__(self,\n energy_func=normal_energy_func,\n input_dim=2,\n noise_dim=2,\n h_dim=64,\n z_dim=2,\n nonlinearity='tanh',\n num_hidden_layers=1,\n init='gaussian', #None,\n enc_type='simple',\n clip_logvar=None,\n ):\n super().__init__()\n self.energy_func = energy_func\n self.input_dim = input_dim\n self.noise_dim = noise_dim\n self.h_dim = h_dim\n self.z_dim = z_dim\n self.latent_dim = z_dim # for ais\n self.nonlinearity = nonlinearity\n self.num_hidden_layers = num_hidden_layers\n self.init = init\n clip_logvar = None if clip_logvar == 'none' else clip_logvar\n self.clip_logvar = clip_logvar\n self.enc_type = enc_type\n assert enc_type in ['simple']\n\n self.aux_encode = AuxEncoder(input_dim, h_dim, noise_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers, clip_logvar=clip_logvar)\n if enc_type == 'simple':\n self.encode = SimpleEncoder(input_dim, noise_dim, h_dim, z_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers, clip_logvar=None)\n else:\n raise NotImplementedError\n self.decode = Decoder(input_dim, h_dim, z_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers, init=init)\n self.aux_decode = AuxDecoder(input_dim, z_dim, noise_dim, h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers)\n\n def loss(self,\n mu_qz, logvar_qz,\n mu_qz0, logvar_qz0,\n mu_pz0, logvar_pz0,\n mu_x, logvar_x, target_x,\n beta=1.0,\n ):\n # kld loss: log q(z|z0, x) - log p(z)\n kld_loss = loss_kld_gaussian(mu_qz, logvar_qz, do_sum=False)\n\n # aux dec loss: -log r(z0|z,x)\n aux_kld_loss = loss_kld_gaussian_vs_gaussian(\n mu_qz0, logvar_qz0,\n mu_pz0, logvar_pz0,\n do_sum=False,\n )\n\n # recon loss (neg likelihood): -log p(x|z)\n recon_loss = loss_recon_gaussian(mu_x, logvar_x, target_x.view(-1, 2), do_sum=False)\n\n # add loss\n loss = recon_loss + beta*kld_loss + beta*aux_kld_loss\n return loss.mean(), recon_loss.mean(), kld_loss.mean(), aux_kld_loss.mean()\n\n def forward(self, input, beta=1.0):\n # init\n batch_size = input.size(0)\n input = input.view(batch_size, self.input_dim)\n\n # aux encode\n z0, mu_qz0, logvar_qz0, _ = self.aux_encode(input)\n\n # encode\n z, mu_qz, logvar_qz, _ = self.encode(input, z0)\n #z, mu_qz, logvar_qz = self.encode(input, z0)\n\n # aux decode\n _, mu_pz0, logvar_pz0 = self.aux_decode(input, z)\n\n # decode\n x, mu_px, logvar_px = self.decode(z)\n\n ''' get loss '''\n loss, recon_loss, kld_loss, aux_kld_loss = self.loss(\n mu_qz, logvar_qz,\n mu_qz0, logvar_qz0,\n mu_pz0, logvar_pz0,\n mu_px, logvar_px, input,\n beta=beta,\n )\n\n # return\n #return x, mu_px, z, loss, recon_loss.detach(), kld_loss.detach(), aux_kld_loss.detach()\n return x, mu_px, z, loss, recon_loss.detach(), kld_loss.detach()+aux_kld_loss.detach()\n\n def generate(self, batch_size=1):\n # init mu_z and logvar_z (as unit normal dist)\n weight = next(self.parameters())\n mu_z = weight.new_zeros(batch_size, self.z_dim)\n logvar_z = weight.new_zeros(batch_size, self.z_dim)\n\n # sample z (from unit normal dist)\n z = sample_gaussian(mu_z, logvar_z) # sample z\n\n # decode\n output, mu_x, logvar_x = self.decode(z)\n\n # return\n return output, mu_x, z\n\n def logprob(self, input, sample_size=128, z=None):\n #assert int(math.sqrt(sample_size))**2 == sample_size\n # init\n batch_size = input.size(0)\n sample_size1 = sample_size 
#int(math.sqrt(sample_size))\n sample_size2 = 1 #int(math.sqrt(sample_size))\n input = input.view(batch_size, self.input_dim)\n\n ''' get - (log q(z|z0,x) + log q(z0|z) - log p(z0|z,x) - log p(z)) '''\n ''' get log q(z0|x) '''\n _, mu_qz0, logvar_qz0, _ = self.aux_encode(input)\n mu_qz0 = mu_qz0.unsqueeze(1).expand(batch_size, sample_size1, self.noise_dim).contiguous().view(batch_size*sample_size1, self.noise_dim) # bsz*ssz1 x noise_dim\n logvar_qz0 = logvar_qz0.unsqueeze(1).expand(batch_size, sample_size1, self.noise_dim).contiguous().view(batch_size*sample_size1, self.noise_dim) # bsz*ssz1 x noise_dim\n z0 = self.aux_encode.sample(mu_qz0, logvar_qz0) # bsz*ssz1 x noise_dim\n log_qz0 = logprob_gaussian(mu_qz0, logvar_qz0, z0, do_unsqueeze=False, do_mean=False)\n log_qz0 = torch.sum(log_qz0.view(batch_size, sample_size1, self.noise_dim), dim=2) # bsz x ssz1\n log_qz0 = log_qz0.unsqueeze(2).expand(batch_size, sample_size1, sample_size2).contiguous().view(batch_size, sample_size1*sample_size2) # bsz x ssz1*ssz2\n\n ''' get log q(z|z0,x) '''\n # forward\n nos = self.encode._forward_nos(z0) # bsz*ssz1 x noise_dim\n inp = self.encode._forward_inp(input) # bsz x noise_dim\n inp = inp.unsqueeze(1).expand(batch_size, sample_size1, -1).contiguous().view(batch_size*sample_size1, -1)\n _, mu_qz, logvar_qz, _ = self.encode._forward_all(inp, nos) # bsz*ssz1 x z_dim\n mu_qz = mu_qz.detach().repeat(1, sample_size2).view(batch_size*sample_size1, sample_size2, self.z_dim)\n logvar_qz = logvar_qz.detach().repeat(1, sample_size2).view(batch_size*sample_size1, sample_size2, self.z_dim)\n z = self.encode.sample(mu_qz, logvar_qz) # bsz x ssz1 x ssz2 x z_dim\n log_qz = logprob_gaussian(mu_qz, logvar_qz, z, do_unsqueeze=False, do_mean=False)\n log_qz = torch.sum(log_qz.view(batch_size, sample_size1*sample_size2, self.z_dim), dim=2) # bsz x ssz1*ssz2\n\n ''' get log p(z0|z,x) '''\n # encode\n _z0 = z0.unsqueeze(1).expand(batch_size*sample_size1, sample_size2, self.noise_dim).contiguous().view(batch_size, sample_size1, sample_size2, self.noise_dim)\n ltt = self.aux_decode._forward_ltt(z.view(-1, self.z_dim)) # bsz*ssz1*ssz2 x z_dim\n inp = self.aux_decode._forward_inp(input) # bsz x inp_dim\n inp = inp.unsqueeze(1).unsqueeze(1).expand(batch_size, sample_size1, sample_size2, -1).contiguous().view(batch_size*sample_size1*sample_size2, -1) # bsz*ss1*ssz2 x inp_dim\n _, mu_pz0, logvar_pz0 = self.aux_decode._forward_all(inp, ltt) # bsz*ssz1 x z_dim\n mu_pz0 = mu_pz0.view(batch_size, sample_size1, sample_size2, self.noise_dim)\n logvar_pz0 = logvar_pz0.view(batch_size, sample_size1, sample_size2, self.noise_dim)\n log_pz0 = logprob_gaussian(mu_pz0, logvar_pz0, _z0, do_unsqueeze=False, do_mean=False) # bsz x ssz1 x ssz2 xnoise_dim\n log_pz0 = torch.sum(log_pz0.view(batch_size, sample_size1*sample_size2, self.noise_dim), dim=2) # bsz x ssz1*ssz2\n\n ''' get log p(z) '''\n # get prior (as unit normal dist)\n mu_pz = input.new_zeros(batch_size*sample_size1, sample_size2, self.z_dim)\n logvar_pz = input.new_zeros(batch_size*sample_size1, sample_size2, self.z_dim)\n log_pz = logprob_gaussian(mu_pz, logvar_pz, z, do_unsqueeze=False, do_mean=False)\n log_pz = torch.sum(log_pz.view(batch_size, sample_size1*sample_size2, self.z_dim), dim=2) # bsz x ssz1*ssz2\n\n ''' get log p(x|z) '''\n # decode\n _input = input.unsqueeze(1).unsqueeze(1).expand(\n batch_size, sample_size1, sample_size2, self.input_dim) # bsz x ssz1 x ssz2 x input_dim\n _z = z.view(-1, self.z_dim)\n _, mu_x, logvar_x = self.decode(_z) # bsz*ssz1*ssz2 x zdim\n mu_x 
= mu_x.view(batch_size, sample_size1, sample_size2, self.input_dim)\n logvar_x = logvar_x.view(batch_size, sample_size1, sample_size2, self.input_dim)\n loglikelihood = logprob_gaussian(mu_x, logvar_x, _input, do_unsqueeze=False, do_mean=False)\n #_, logit_x = self.decode(_z) # bsz*ssz1*ssz2 x zdim\n #logit_x = logit_x.view(batch_size, sample_size1, sample_size2, self.input_dim)\n #loglikelihood = -F.binary_cross_entropy_with_logits(logit_x, _input, reduction='none')\n loglikelihood = torch.sum(loglikelihood.view(batch_size, sample_size1*sample_size2, self.input_dim), dim=2) # bsz x ssz1*ssz2\n\n ''' get log p(x|z)p(z)/q(z|x) '''\n logprob = loglikelihood + log_pz + log_pz0 - log_qz - log_qz0 # bsz x ssz1*ssz2\n logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)\n rprob = (logprob - logprob_max).exp() # relative prob\n logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1\n\n # return\n return logprob.mean()\n"
]
| [
[
"torch.randn_like",
"torch.mean",
"torch.max",
"torch.cat",
"torch.exp"
]
]
|
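Editor's note: the tail of `logprob` above is a hand-rolled, numerically stable log-mean-exp over importance log-weights (subtract the row max, exponentiate, average, take the log, add the max back). A sketch showing it agrees with `torch.logsumexp` up to the `1e-10` guard:

```python
import math
import torch

logw = torch.randn(4, 128)                      # bsz x sample_size log-weights
m, _ = torch.max(logw, dim=1, keepdim=True)     # subtract the max before exponentiating
logp = torch.log(torch.mean((logw - m).exp(), dim=1, keepdim=True) + 1e-10) + m
ref = torch.logsumexp(logw, dim=1, keepdim=True) - math.log(logw.size(1))
assert torch.allclose(logp, ref, atol=1e-4)
```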
LeeDoYup/DeblurGAN-tf | [
"c147055e968006702718c98cfd3ad2c3abf33d31"
]
| [
"data/data_loader.py"
]
| [
"from __future__ import print_function\n\nimport os\nimport numpy as np\nimport cv2\nimport glob\nimport logging\n\ndef read_data_path_custom(data_path, image_type='png'):\n image_names = '*.'+image_type\n names = glob.glob(os.path.join(data_path, image_names))\n return names\n\ndef read_data_path(data_path, name='GOPRO', image_type='png'):\n dir_list = [dir for dir in glob.glob(data_path+'/*') if os.path.isdir(dir)]\n image_pair_path = []\n for i, dir in enumerate(dir_list):\n if not name in dir:\n dir_list.remove(dir)\n dir_image_pair(dir_list[0])\n\n for i, dir in enumerate(dir_list):\n image_pair_path.extend(dir_image_pair(dir))\n return image_pair_path\n\n\ndef dir_image_pair(dir_path, image_type='png'):\n blur_path = os.path.join(dir_path, 'blur')\n real_path = os.path.join(dir_path, 'sharp')\n blur_image_pathes = glob.glob(blur_path+'/*.'+image_type)\n real_image_pathes = glob.glob(real_path+'/*.'+image_type)\n assert len(blur_image_pathes) == len(real_image_pathes)\n pair_path = zip(blur_image_pathes, real_image_pathes)\n iter_pair_path = pair_path #for iteration\n \n result = list(pair_path)\n \n for blur, real in iter_pair_path:\n name1=blur.split('/')[-1]\n name2=real.split('/')[-1]\n if name1 != name2:\n result.remove((blur, real))\n print(\"blur: %s, real: %s pair was removed in training data\"%(name1, name2))\n return result \n\ndef read_image_pair(pair_path, resize_or_crop=None, image_size=(256,256)):\n image_blur = cv2.imread(pair_path[0], cv2.IMREAD_COLOR)\n image_blur = image_blur / 255.0 * 2.0 - 1.0\n image_real = cv2.imread(pair_path[1], cv2.IMREAD_COLOR)\n image_real = image_real / 255.0 * 2.0 - 1.0\n\n if resize_or_crop != None: \n assert image_size != None\n\n if resize_or_crop == 'resize':\n image_blur = cv2.resize(image_blur, image_size, interpolation=cv2.INTER_AREA)\n image_real = cv2.resize(image_real, image_size, interpolation=cv2.INTER_AREA)\n elif resize_or_crop == 'crop':\n image_blur = cv2.crop(image_blur, image_size)\n image_real = cv2.crop(image_real, image_size)\n else:\n raise\n\n if np.size(np.shape(image_blur)) == 3:\n image_blur = np.expand_dims(image_blur, axis=0)\n if np.size(np.shape(image_real)) == 3:\n image_real = np.expand_dims(image_real, axis=0)\n image_blur = np.array(image_blur, dtype=np.float32)\n image_real = np.array(image_real, dtype=np.float32)\n return image_blur, image_real\n\ndef read_image(path, resize_or_crop=None, image_size=(256,256)):\n image = cv2.imread(path, cv2.IMREAD_COLOR)\n image = image/255.0 * 2.0 - 1.0\n\n assert resize_or_crop != None\n assert image_size != None\n\n if resize_or_crop == 'resize':\n image = cv2.resize(image, image_size, interpolation=cv2.INTER_AREA)\n elif resize_or_crop == 'crop':\n image = cv2.crop(image, image_size)\n\n if np.size(np.shape(image)) == 3: \n image = np.expand_dims(image, axis=0)\n\n image = np.array(image, dtype=np.float32)\n return image\n\nif __name__ == '__main__':\n pair_path = read_data_path('/data/private/data//GOPRO_Large/train', name='GOPRO')\n image1, image2 = read_image_pair(pair_path[0], resize_or_crop='resize')\n\n \n cv2.imshow('image1',image1)\n cv2.imshow('image2',image2)\n cv2.waitKey(0)\n"
]
| [
[
"numpy.shape",
"numpy.array",
"numpy.expand_dims"
]
]
|
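Editor's note: the loader above maps pixels to [-1, 1], so anything handed to `cv2.imshow` (which displays float images as if they were in [0, 1]) needs the inverse map first. A sketch of both directions:

```python
import numpy as np

img = np.random.randint(0, 256, (8, 8, 3)).astype(np.float32)
norm = img / 255.0 * 2.0 - 1.0                   # [0, 255] -> [-1, 1], as in read_image()
disp = (norm + 1.0) / 2.0                        # [-1, 1] -> [0, 1], safe for cv2.imshow
back = np.rint(disp * 255.0).astype(np.uint8)    # and back to the original dtype
assert np.array_equal(back, img.astype(np.uint8))
```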
movingpictures83/Caffe | [
"bed251d065f1ba8f408b562f8c27b3239a45b477"
]
| [
"CaffePlugin.py"
]
| [
"import caffe\nimport sys\nimport pickle\nimport numpy as np\n#caffe_root = \"/home/User/caffe/\"\n#sys.path.insert(0, caffe_root + 'python')\ncaffe.set_mode_gpu()\n\n\nclass CaffePlugin():\n def input(self, file):\n with open(file) as config:\n self.model_def = config.readline().strip()\n self.model_weights = config.readline().strip()\n data_file = config.readline().strip()\n labels_file = config.readline().strip()\n npy_file = config.readline().strip()\n\n image_paths = []\n for line in config:\n image_paths.append(line.strip())\n #print(npy_file)\n #print(image_paths)\n #image_paths = ['plugins/DeepLearningClassification/example/images/cat.jpg', 'plugins/DeepLearningClassification/example/images/fish-bike.jpg']\n # load the mean ImageNet image (as distributed with Caffe) for subtraction\n #mu = np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy')\n #print(\"ENTER\")\n #x = input()\n mu = np.load(npy_file)\n #mu = np.load('plugins/DeepLearningClassification/example/models/ilsvrc_2012_mean.npy')\n #print(\"ENTER\")\n #x = input()\n mu = mu.mean(1).mean(1) # average over pixels to obtain the mean (BGR) pixel values\n\n batch_size = len(image_paths) # Number of images\n\n # create transformer for the input called 'data'\n transformer = caffe.io.Transformer({'data': (batch_size, 3, 227, 227)})\n transformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension\n transformer.set_mean('data', mu) # subtract the dataset-mean value in each channel\n transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]\n transformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR\n\n print(\"SETTING SELF DATA\")\n self.data = []#transformed_images = []\n for image_path in image_paths:\n image = caffe.io.load_image(image_path)\n transformed_image = transformer.preprocess('data', image)\n self.data.append(transformed_image) #transformed_images.append(transformed_image)\n\n\n\n\n #with open(data_file) as pickled_data:\n # self.data = pickle.load(pickled_data)\n self.labels = np.loadtxt(labels_file, str, delimiter='\\t')\n\n\n def run(self):\n net = caffe.Net(self.model_def, # defines the structure of the model\n self.model_weights, # contains the trained weights\n caffe.TEST) # use test mode (e.g., don't perform dropout)\n\n in_shape = list(net.blobs['data'].data.shape)\n in_shape[0] = len(self.data) # set new batch size\n net.blobs['data'].reshape(*tuple(in_shape))\n for i, data in enumerate(self.data):\n net.blobs['data'].data[i,:,:,:] = data \n output = net.forward()\n last_layer_name = net._layer_names[-1]\n output_prob = output[last_layer_name]\n predicted_value = (output_prob.argmax(axis=1))\n self.predicted_values = self.labels[predicted_value]\n\n\n def output(self, file):\n with open(file,\"w+\") as f:\n f.write(str(self.predicted_values))\n\n"
]
| [
[
"numpy.load",
"numpy.loadtxt"
]
]
|
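Editor's note: `input()` above collapses the ILSVRC mean image to a single BGR pixel before handing it to the Caffe transformer's `set_mean`. A sketch of just that reduction (the random array is a stand-in for `ilsvrc_2012_mean.npy`):

```python
import numpy as np

mu = np.random.rand(3, 256, 256)    # mean image, channels-first (C x H x W)
mean_pixel = mu.mean(1).mean(1)     # average over H, then over W -> one value per BGR channel
assert mean_pixel.shape == (3,)
```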
CheerfulUser/tessffi | [
"576c6baed6e2a5762da2a97e12f2e5a8e233b74c"
]
| [
"tess_ffi_mask.py"
]
| [
"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nfrom astropy.io import fits\nfrom astropy.nddata import Cutout2D\nfrom astropy.wcs import WCS\nfrom scipy.signal import fftconvolve\n\nimport argparse\n\n# turn off runtime warnings (lots from logic on nans)\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning) \n\n\n\ndef size_limit(x,y,image):\n yy,xx = image.shape\n ind = ((y > 0) & (y < yy-1) & (x > 0) & (x < xx-1))\n return ind\n\n\ndef region_cut(table,wcs):\n ra = table.ra.values\n dec = table.dec.values\n foot = wcs.calc_footprint()\n minra = min(foot[:,0])\n maxra = max(foot[:,0])\n mindec = min(foot[:,1])\n maxdec = max(foot[:,1])\n inddec = (dec < maxdec) & (dec> mindec)\n indra = (ra < maxra) & (ra> minra)\n ind = indra * inddec\n tab = table.iloc[ind]\n return tab\n\ndef circle_app(rad):\n \"\"\"\n makes a kinda circular aperture, probably not worth using.\n \"\"\"\n mask = np.zeros((int(rad*2+.5)+1,int(rad*2+.5)+1))\n c = rad\n x,y =np.where(mask==0)\n dist = np.sqrt((x-c)**2 + (y-c)**2)\n\n ind = (dist) < rad + .2\n mask[y[ind],x[ind]]= 1\n return mask\n\n\ndef ps1_auto_mask(table,wcs,scale=1):\n \"\"\"\n Make a source mask using the PS1 catalog\n \"\"\"\n image = np.zeros(wcs.array_shape)\n r = table.raMean.values\n d = table.decMean.values\n x,y = wcs.all_world2pix(r,d,0)\n x = (x+.5).astype(int)\n y = (y+.5).astype(int)\n m = table.iMeanPSFMag.values\n ind = size_limit(x,y,image)\n x = x[ind]; y = y[ind]; m = m[ind]\n \n maglim = np.zeros_like(image,dtype=float)\n magim = image.copy()\n magim[y,x] = m\n \n \n masks = {}\n \n mags = [[18,17],[17,16],[16,15],[15,14],[14,13.5],[13.5,12]]\n size = (np.array([3,4,5,6,7,8]) * scale).astype(int)\n for i in range(len(mags)):\n m = ((magim > mags[i][1]) & (magim <= mags[i][0])) * 1.\n k = np.ones((size[i],size[i]))\n conv = fftconvolve(m, k,mode='same')#.astype(int)\n masks[str(mags[i][0])] = (conv >.1) * 1.\n masks['all'] = np.zeros_like(image,dtype=float)\n for key in masks:\n masks['all'] += masks[key]\n masks['all'] = (masks['all'] > .1) * 1.\n return masks\n\ndef star_auto_mask(table,wcs,scale=1):\n \"\"\"\n Make a source mask from gaia source catalogue\n \"\"\"\n table = region_cut(table, wcs)\n image = np.zeros(wcs.array_shape)\n r = table.ra.values\n d = table.dec.values\n x,y = wcs.all_world2pix(r,d,0)\n x = (x+.5).astype(int)\n y = (y+.5).astype(int)\n try:\n m = table.gaia.values.copy()\n except:\n m = table.mag.values.copy()\n ind = size_limit(x,y,image)\n x = x[ind]; y = y[ind]; m = m[ind]\n \n maglim = np.zeros_like(image,dtype=float)\n magim = image.copy()\n magim[y,x] = m\n \n masks = {}\n \n mags = [[18,17],[17,16],[16,15],[15,14],[14,13.5],[13.5,12],[12,10],[10,9],[9,8],[8,7]]\n size = (np.array([3,4,5,6,7,8,10,14,16,18])*scale).astype(int)\n for i in range(len(mags)):\n m = ((magim > mags[i][1]) & (magim <= mags[i][0])) * 1.\n k = np.ones((size[i],size[i]))\n conv = fftconvolve(m, k,mode='same')#.astype(int)\n masks[str(mags[i][0])] = (conv >.1) * 1.\n masks['all'] = np.zeros_like(image,dtype=float)\n for key in masks:\n masks['all'] += masks[key]\n masks['all'] = (masks['all'] > .1) * 1.\n return masks\n\n \ndef Big_sat(table,wcs,scale=1):\n \"\"\"\n Make crude cross masks for the TESS saturated sources.\n The properties in the mask need some fine tuning.\n \"\"\"\n table = region_cut(table, wcs)\n image = np.zeros(wcs.array_shape)\n try:\n i = (table.gaia.values < 
7) #& (gaia.gaia.values > 2)\n except:\n i = (table.mag.values < 7) #& (gaia.gaia.values > 2)\n sat = table.iloc[i]\n r = sat.ra.values\n d = sat.dec.values\n x,y = wcs.all_world2pix(r,d,0)\n x = x.astype(int)\n y = y.astype(int)\n try:\n mags = sat.gaia.values\n except:\n mags = sat.mag.values\n ind = size_limit(x,y,image)\n \n x = x[ind]; y = y[ind]; mags = mags[ind]\n \n \n satmasks = []\n for i in range(len(x)):\n mag = mags[i]\n mask = np.zeros_like(image,dtype=float)\n if (mag <= 7) & (mag > 5):\n body = int(13 * scale)\n length = int(20 * scale)\n width = int(4 * scale)\n if (mag <= 5) & (mag > 4):\n body = 15 * scale\n length = int(60 * scale)\n width = int(10 * scale)\n if (mag <= 4):# & (mag > 4):\n body = int(25 * scale)\n length = int(115 * scale)\n width = int(10 * scale)\n body = int(body) # no idea why this is needed, but it apparently is.\n kernal = np.ones((body*2,body*2))\n mask[y[i],x[i]] = 1 \n conv = fftconvolve(mask, kernal,mode='same')#.astype(int)\n mask = (conv >.1) * 1.\n\n mask[y[i]-length:y[i]+length,x[i]-width:x[i]+width] = 1 \n mask[y[i]-width:y[i]+width,x[i]-length:x[i]+length] = 1 \n \n satmasks += [mask]\n satmasks = np.array(satmasks)\n return satmasks\n\ndef Strap_mask(data,size):\n strap_mask = np.zeros_like(data)\n path = '/user/rridden/feet/'\n straps = pd.read_csv(path+'tess_straps.csv')['Column'].values\n strap_mask[:,straps+43] = 1\n big_strap = fftconvolve(strap_mask,np.ones((size,size)),mode='same') > .5\n return big_strap\n\n\ndef Make_bad_pixel_mask(file,image):\n \n data = fits.open(image)[0].data\n header = fits.open(image)[0].header\n bad = np.loadtxt(file,skiprows=3,dtype=object)\n mask = np.zeros_like(data)\n for b in bad:\n b = b.split('(')[-1].split(',')\n\n x = int(float(b[0]))\n y = int(float(b[1]))\n dx = int(float(b[2]))\n dy = int(float(b[3]))\n\n mask[y:y+dy,x:x+dx] = 1\n \n #Make_fits(mask,name,header)\n mask = mask.astype(int)\n mask = mask * 8\n return mask\n\ndef Mask_xy(file,image):\n xymask = np.zeros_like(image,dtype=int)\n xy = pd.read_csv(file,delimiter=' ')\n \n for i in range(len(xy)):\n x = xy.x.values\n y = xy.y.values\n dim1 = xy.dim1.values\n dim2 = xy.dim2.values\n m = np.zeros_like(image)\n m[y,x] = 1\n if np.isfinite(dim2[i]):\n kern = np.ones((int(dim2[i]),int(dim1[i])))\n else:\n kern = circle_app(dim1[i])\n m = ((fftconvolve(m,kern,mode='same') > .5) * 1)\n xymask += m\n return xymask\n\n\ndef Make_fits(data, name, header):\n #print('makefits shape ',data.shape)\n newhdu = fits.PrimaryHDU(data, header = header)\n newhdu.scale('int16', bscale=1.0,bzero=32768.0)\n newhdu.writeto(name,overwrite=True)\n return \n\ndef Make_mask(path,file,sec,ext,badpix,user,xy_list,sn,scale,strapsize):\n path = path+str(sec) + '/'\n hdu = fits.open(file)[ext]\n image = hdu.data\n wcs = WCS(hdu)\n cam = str(hdu.header['CAMERA'])\n ccd = str(hdu.header['CCD'])\n ps1 = pd.read_csv(path+'ps1_s' + str(sec)+'_'+cam+ccd+'_footprint.csv')\n gaia = pd.read_csv(path+'gaia_s' + str(sec)+'_'+cam+ccd+'_footprint.csv')\n print(path+'gaia_s' + str(sec)+'_'+cam+ccd+'_footprint.csv')\n\n \n sat = Big_sat(gaia,wcs,scale)\n mg = star_auto_mask(gaia,wcs,scale)\n mp = ps1_auto_mask(ps1,wcs,scale)\n\n sat = (np.nansum(sat,axis=0) > 0).astype(int) * 2 # assign 2 bit \n mask = ((mg['all']+mp['all']) > 0).astype(int) * 1 # assign 1 bit \n strap = Strap_mask(image,strapsize).astype(int) * 4 # assign 4 bit \n if badpix is not None:\n bp = Make_bad_pixel_mask(badpix, file)\n totalmask = mask | sat | strap | bp\n else:\n totalmask = mask | sat | strap\n if user 
is not None:\n user_list = pd.read_csv(user)\n user_list = user_list.iloc[user_list.mag.values > 0]\n user_sat = Big_sat(user_list,wcs,scale)\n user_m = star_auto_mask(user_list,wcs,scale)\n sat = (np.nansum(user_sat,axis=0) > 0).astype(int)\n user_mask = ((user_m['all'] + sat) > 0).astype(int) * 16 # assign 16 bit\n\n totalmask = totalmask | user_mask\n if xy_list is not None:\n m = Mask_xy(file, image)\n m = m.astype(int) * 16 # assign 16 bit\n totalmask = totalmask | m\n if sn is not None:\n sn_list = pd.read_csv(sn)\n sn_list = sn_list.iloc[sn_list.mag.values > 0]\n sn_sat = Big_sat(sn_list,wcs,scale)\n sn_m = star_auto_mask(sn_list,wcs,scale)\n sat = (np.nansum(sn_sat,axis=0) > 0).astype(int)\n sn_mask = ((sn_m['all'] + sat) > 0).astype(int) * 32 # assign 16 bit\n totalmask = totalmask | sn_mask\n \n return totalmask\n\ndef Update_header(header):\n head = header\n head['STARBIT'] = (1, 'bit value for normal sources')\n head['SATBIT'] = (2, 'bit value for saturated sources')\n head['STRAPBIT'] = (4, 'bit value for straps')\n head['STRAPBIT'] = (8, 'bit value for bad pixels')\n head['USERBIT'] = (16, 'bit value for USER list')\n head['SNBIT'] = (32, 'bit value for SN list')\n return head\n\ndef TESS_source_mask(path,file,sec,ext, name, badpix, user, \n xy_list,sn, scale, strapsize, sub):\n \"\"\"\n Make and save a source mask for a TESS image using \n \"\"\"\n mask = Make_mask(path,file,sec,ext,badpix,user,xy_list,sn,scale,strapsize)\n \n hdu = fits.open(file)[ext]\n head = Update_header(hdu.header)\n \n\n Make_fits(mask,name,head)\n if sub:\n print('Making submasks for straps and bad pixels')\n # make strap submask\n strap = (mask & 4)\n n = name.split('.fits')[0] + '.strap.fits'\n Make_fits(strap, n, head)\n\n # make bad pixel submask\n bad = (mask & 2) | (mask & 8)\n n = name.split('.fits')[0] + '.badpix.fits'\n Make_fits(bad, n, head)\n\n if user is not None:\n u = (mask & 16)\n n = name.split('.fits')[0] + '.user.fits'\n Make_fits(u, n, head)\n\n\n\n\n\ndef define_options(parser=None, usage=None, conflict_handler='resolve'):\n if parser is None:\n parser = argparse.ArgumentParser(usage=usage, conflict_handler=conflict_handler)\n\n parser.add_argument('-f','--file', default = None, \n help=('Fits file to make the mask of.'))\n parser.add_argument('-cat','--cat_path', default = '/user/rridden/feet/',\n help=('Path to catalogue tree'))\n parser.add_argument('-sec','--sector', default = None,\n help=('Sector of data'))\n parser.add_argument('-ext','--extension', default = 0,\n help=('Fits extension of image'))\n parser.add_argument('-o','--output', default = 'default.mask.fits',\n help=('Full output path/name for the created mask'))\n parser.add_argument('-b','--badpix',default = None,\n help=('DS9 region file to mask bad pixels.'))\n parser.add_argument('--scale',default = 1,\n help=('scale factor for the mask size, applies to all masks'))\n parser.add_argument('--strapsize',default = 3,\n help=('size for the strap mask size.'))\n parser.add_argument('--save_submasks',default = False,\n help=('save bad pixel and strap submasks.'))\n parser.add_argument('--user_list',default = None,\n help=('user sources, file containing ra, dec and mag.'))\n parser.add_argument('--xy_list',default = None,\n help=('user sources, file containing xy position and box size/radius.'))\n parser.add_argument('--sn_list',default = None,\n help=('SN file containing ra, dec and mag.'))\n\n return parser\n\n \nif __name__ == '__main__':\n print('Making mask for TESS image')\n parser = define_options()\n args = 
parser.parse_args()\n print('got options: ',args)\n file = args.file\n save = args.output\n scale = float(args.scale)\n sub = args.save_submasks\n strapsize = int(args.strapsize)\n badpix = args.badpix\n ext = int(args.extension)\n sec = args.sector\n user = args.user_list\n xy_list = args.xy_list\n sn = args.sn_list\n path = args.cat_path\n\n TESS_source_mask(path,file,sec,ext, save, badpix, user, xy_list, sn, scale, strapsize, sub)\n print('Made mask for {}, saved as {}'.format(file,save))"
]
| [
[
"pandas.read_csv",
"numpy.sqrt",
"scipy.signal.fftconvolve",
"numpy.isfinite",
"numpy.ones",
"numpy.loadtxt",
"numpy.nansum",
"numpy.zeros_like",
"numpy.array",
"numpy.where",
"numpy.zeros"
]
]
|
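Editor's note: every mask builder above dilates point sources the same way — drop a 1 at each catalogue position, convolve with a box kernel sized by magnitude bin, and threshold the result. A sketch of one such pass:

```python
import numpy as np
from scipy.signal import fftconvolve

image = np.zeros((64, 64))
image[32, 32] = 1.0                      # one catalogue source
kernel = np.ones((8, 8))                 # box size chosen per magnitude bin
mask = (fftconvolve(image, kernel, mode='same') > 0.1) * 1.0
assert int(mask.sum()) == 64             # an 8x8 block is flagged around the source
```

The same convolve-then-threshold trick also builds the strap and saturation masks, and the resulting layers are combined with bitwise OR using the bit values recorded in `Update_header`.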
Sen-R/rl | [
"76c5660dbd21d9cce767afd8a416b6e744181b60"
]
| [
"tests/test_utils.py"
]
| [
"import pytest\nfrom rl.utils import *\n\nimport torch\nfrom torch import nn\n\nl_init = -2.\nt_init = 2.\n\ndef local_nn():\n l = nn.Linear(3, 2)\n nn.init.constant_(l.weight, l_init)\n nn.init.constant_(l.bias, l_init)\n return l\n\ndef target_nn():\n t = nn.Linear(3, 2)\n nn.init.constant_(t.weight, t_init)\n nn.init.constant_(t.bias, t_init)\n return t\n\ndef test_hard_update():\n local = local_nn()\n target = target_nn()\n hard_update(local, target)\n for l, t in zip(local.parameters(), target.parameters()):\n assert torch.allclose(l, t)\n\[email protected](\"tau\", [0., 0.25, 1.])\ndef test_soft_update(tau):\n local = local_nn()\n target = target_nn()\n print('Before')\n print('------')\n for l, t in zip(local.parameters(), target.parameters()):\n print (l, t, sep='\\n\\n')\n soft_update(local, target, tau)\n print('\\nAfter')\n print( '-----')\n for l, t in zip(local.parameters(), target.parameters()):\n e = torch.ones_like(t) * (tau * l_init + (1-tau) * t_init)\n print (l, t, e, sep='\\n\\n')\n assert torch.allclose(t, e)\n \n \n"
]
| [
[
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.allclose",
"torch.ones_like"
]
]
|
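Editor's note: the tests above pin down the convention target <- tau*local + (1-tau)*target, but the `rl.utils` implementation itself is not shown in this row. A minimal sketch consistent with `test_soft_update` and `test_hard_update` would be:

```python
import torch

def soft_update(local, target, tau):
    # target <- tau * local + (1 - tau) * target, parameter by parameter
    for lp, tp in zip(local.parameters(), target.parameters()):
        tp.data.copy_(tau * lp.data + (1.0 - tau) * tp.data)

def hard_update(local, target):
    soft_update(local, target, tau=1.0)   # full copy, as test_hard_update expects

# usage: pull a target network toward its online counterpart
local, target = torch.nn.Linear(3, 2), torch.nn.Linear(3, 2)
soft_update(local, target, tau=0.25)
```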
DrArryYao/PVE-MCC_for_unsignalized_intersection | [
"78a4e0c8d368738a1f959791e6c020e97de8862d"
]
| [
"main.py"
]
| [
"# 模型训练的主代码\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport scipy.io as scio\nimport argparse\nimport cv2\nfrom shutil import copyfile\nimport matplotlib.pyplot as plt\nfrom traffic_interaction_scene import TrafficInteraction\nfrom traffic_interaction_scene import Visible\nimport time\nfrom model_agent_maddpg import MADDPG\nfrom replay_buffer import ReplayBuffer\nimport io\nfrom PIL import Image\n\n\ndef create_init_update(oneline_name, target_name, tau=0.99):\n \"\"\"\n :param oneline_name: the online model name\n :param target_name: the target model name\n :param tau: The proportion of each transfer from the online model to the target model\n :return:\n \"\"\"\n online_var = [i for i in tf.trainable_variables() if oneline_name in i.name]\n target_var = [i for i in tf.trainable_variables() if target_name in i.name]\n\n target_init = [tf.assign(target, online) for online, target in zip(online_var, target_var)]\n target_update = [tf.assign(target, (1 - tau) * online + tau * target) for online, target in\n zip(online_var, target_var)] # 按照比例用online更新target\n\n return target_init, target_update\n\n\ndef get_agents_action(sta, sess, agent, noise_range=0.0):\n \"\"\"\n :param sta: the state of the agent\n :param sess: the session of tf\n :param agent: the model of the agent\n :param noise_range: the noise range added to the agent model output\n :return: the action of the agent in its current state\n \"\"\"\n agent1_action = agent.action(state=[sta], sess=sess) + np.random.randn(1) * noise_range\n return agent1_action\n\n\ndef train_agent_seq(agent_ddpg, agent_ddpg_target, agent_memory, agent_actor_target_update,\n agent_critic_target_update, sess, summary_writer, args):\n batch, w_id, eid = agent_memory.getBatch(\n args.batch_size)\n if not batch:\n return\n agent_num = args.o_agent_num + 1\n total_obs_batch = np.zeros((args.batch_size, agent_num, agent_num * 4))\n rew_batch = np.zeros((args.batch_size,))\n total_act_batch = np.zeros((args.batch_size, agent_num))\n total_next_obs_batch = np.zeros((args.batch_size, agent_num, agent_num * 4))\n next_state_mask = np.zeros((args.batch_size,))\n for k, (s0, a, r, s1, done) in enumerate(batch):\n total_obs_batch[k] = s0\n rew_batch[k] = r\n total_act_batch[k] = a\n if not done:\n total_next_obs_batch[k] = s1\n next_state_mask[k] = 1\n other_act = []\n act_batch = np.array(total_act_batch[:, 0]) # 获取本agent动作集\n act_batch = act_batch.reshape(act_batch.shape[0], 1)\n for n in range(1, agent_num):\n other_act.append(total_act_batch[:, n])\n other_act_batch = np.vstack(other_act).transpose()\n e_id = eid\n obs_batch = total_obs_batch[:, 0, :] # 获取本agent当前状态集\n target = rew_batch.reshape(-1, 1)\n td_error = abs(agent_ddpg_target.Q(\n state=obs_batch, action=act_batch, other_action=other_act_batch, sess=sess) - target)\n if e_id is not None:\n agent_memory.update_priority(e_id, td_error)\n agent_ddpg.train_critic(state=obs_batch, action=act_batch, other_action=other_act_batch, target=target, sess=sess,\n summary_writer=summary_writer, lr=args.critic_lr)\n agent_ddpg.train_actor(state=obs_batch, other_action=other_act_batch, sess=sess, summary_writer=summary_writer,\n lr=args.actor_lr)\n sess.run([agent_actor_target_update, agent_critic_target_update]) # 从online模型更新到target模型\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"MADDPG experiments for multiagent traffic interaction environments\")\n parser.add_argument(\"--num_episodes\", type=int, default=1000, help=\"number of episodes\") # episode次数\n 
parser.add_argument(\"--o_agent_num\", type=int, default=6, help=\"other agent numbers\")\n parser.add_argument(\"--seq_max_step\", type=int, default=12, help=\"the step of multi-step learning\")\n\n parser.add_argument(\"--actor_lr\", type=float, default=1e-4, help=\"learning rate for Adam optimizer\") # 学习率\n parser.add_argument(\"--critic_lr\", type=float, default=1e-3, help=\"learning rate for Adam optimizer\") # 学习率\n parser.add_argument(\"--gamma\", type=float, default=0.80, help=\"discount factor\") # 折扣率\n parser.add_argument(\"--trans_r\", type=float, default=0.998, help=\"transfer rate for online model to target model\")\n parser.add_argument(\"--batch_size\", type=int, default=128,\n help=\"number of episodes to optimize at the same time\") # 经验采样数目\n parser.add_argument(\"--learn_start\", type=int, default=20000,\n help=\"learn start step\") # 经验采样数目\n parser.add_argument(\"--lane_num\", type=int, default=12,\n help=\"the num of lane of intersection\") # 车道总数,12表示双向六车道交叉口\n parser.add_argument(\"--num_units\", type=int, default=64, help=\"number of units in the mlp\")\n parser.add_argument(\"--collision_thr\", type=float, default=2, help=\"the threshold for collision\")\n parser.add_argument(\"--actual_lane\", action=\"store_true\", default=False, help=\"\")\n parser.add_argument(\"--c_mode\", type=str, default=\"closer\",\n help=\"the way of choosing closer cars, front ,front-end or closer\")\n\n parser.add_argument(\"--model\", type=str, default=\"MADDPG\",\n help=\"the model for training, MADDPG or DDPG\")\n\n parser.add_argument(\"--exp_name\", type=str, default=\"test \", help=\"name of the experiment\") # 实验名\n parser.add_argument(\"--type\", type=str, default=\"test\", help=\"type of experiment train or test\")\n parser.add_argument(\"--mat_path\", type=str, default=\"./data/train/arvTimeNewVeh_for_train.mat\",\n help=\"the path of mat file\")\n parser.add_argument(\"--save_dir\", type=str, default=\"model_data\",\n help=\"directory in which training state and model should be saved\") # 模型存储\n parser.add_argument(\"--save_rate\", type=int, default=1,\n help=\"save model once every time this many episodes are completed\") # 存储模型的回合间隔\n parser.add_argument(\"--load_dir\", type=str, default=\"\",\n help=\"directory in which training state and model are loaded\") # 模型加载目录\n parser.add_argument(\"--video_name\", type=str, default=\"\",\n help=\"if it not empty, program will generate a result video (.mp4 format defaultly)with the result imgs\")\n parser.add_argument(\"--visible\", action=\"store_true\", default=False, help=\"visible or not\")\n # Evaluation\n parser.add_argument(\"--restore\", action=\"store_true\", default=False) # 恢复之前的模型,在 load-dir 或 save-dir\n parser.add_argument(\"--benchmark\", action=\"store_true\", default=False) # 用保存的模型跑测试\n parser.add_argument(\"--batch_test\", action=\"store_true\", default=False) # 是否批量测试\n parser.add_argument(\"--benchmark_iters\", type=int, default=6000, help=\"number of iterations run for benchmarking\")\n parser.add_argument(\"--benchmark-dir\", type=str, default=\"./benchmark_files/\",\n help=\"directory where benchmark data is saved\")\n parser.add_argument(\"--plots-dir\", type=str, default=\"./learning_curves/\",\n help=\"directory where plot data is saved\") # 训练曲线的目录\n return parser.parse_args()\n\n\ndef benchmark(model, arrive_time, sess):\n total_c = 0\n collisions_count = 0\n for mat_file in [\"arvTimeNewVeh_300.mat\", \"arvTimeNewVeh_600.mat\", \"arvTimeNewVeh_900.mat\"]:\n data = scio.loadmat(mat_file) # 加载.mat数据\n 
arrive_time = data[\"arvTimeNewVeh\"]\n env = TrafficInteraction(arrive_time, 150, args, vm=6, virtual_l=not args.actual_lane)\n # env = TrafficInteraction(arrive_time, 150, args, vm=6, vM=20, v0=12)\n for i in range(args.benchmark_iters):\n for lane in range(4):\n for ind, veh in enumerate(env.veh_info[lane]):\n o_n = veh[\"state\"]\n agent1_action = [[0]]\n if veh[\"control\"]:\n agent1_action = get_agents_action(o_n[0], sess, model, noise_range=0) # 模型根据当前状态进行预测\n env.step(lane, ind, agent1_action[0][0]) # 环境根据输入的动作返回下一时刻的状态和奖励\n # env.step(lane, ind, 0) # 环境根据输入的动作返回下一时刻的状态和奖励\n state_next, reward, actions, collisions, estm_collisions, collisions_per_veh = env.scene_update()\n for k in range(len(actions)):\n if collisions_per_veh[k][0] > 0:\n collisions_count += 1\n if i % 1000 == 0:\n print(\"i: %s collisions_rate: %s\" % (i, float(collisions_count) / (env.id_seq + total_c)))\n env.delete_vehicle()\n total_c += env.id_seq\n print(\"vehicle number: %s; collisions occurred number: %s; collisions rate: %s\" % (\n total_c, collisions_count, float(collisions_count) / total_c))\n return float(collisions_count) / total_c\n\n\ndef train():\n # 建立Agent,Agent对应两个DDPG结构,一个是eval-net,一个是target-net\n agent1_ddpg = MADDPG('agent1', actor_lr=args.actor_lr, critic_lr=args.critic_lr, nb_other_aciton=args.o_agent_num,\n num_units=args.num_units, model=args.model)\n agent1_ddpg_target = MADDPG('agent1_target', actor_lr=args.actor_lr, critic_lr=args.critic_lr,\n nb_other_aciton=args.o_agent_num, num_units=args.num_units, model=args.model)\n saver = tf.train.Saver() # 为存储模型预备\n agent1_actor_target_init, agent1_actor_target_update = create_init_update('agent1actor', 'agent1_targetactor',\n tau=args.trans_r)\n agent1_critic_target_init, agent1_critic_target_update = create_init_update('agent1_critic', 'agent1_target_critic',\n tau=args.trans_r)\n count_n = 0\n col = tf.Variable(0, dtype=tf.int8)\n collisions_op = tf.summary.scalar('collisions', col)\n etsm_col = tf.Variable(0, dtype=tf.int8)\n etsm_collisions_op = tf.summary.scalar('estimate_collisions', etsm_col)\n v_mean = tf.Variable(0, dtype=tf.float32)\n v_mean_op = tf.summary.scalar('v_mean', v_mean)\n collision_rate = tf.Variable(0, dtype=tf.float32)\n collision_rate_op = tf.summary.scalar('collision_rate', collision_rate)\n acc_mean = tf.Variable(0, dtype=tf.float32)\n acc_mean_op = tf.summary.scalar('acc_mean', acc_mean)\n reward_mean = tf.Variable(0, dtype=tf.float32)\n reward_mean_op = tf.summary.scalar('reward_mean', reward_mean)\n collisions_mean = tf.Variable(0, dtype=tf.float32)\n collisions_mean_op = tf.summary.scalar('collisions_mean', collisions_mean)\n estm_collisions_mean = tf.Variable(0, dtype=tf.float32)\n estm_collisions_mean_op = tf.summary.scalar('estm_collisions_mean', estm_collisions_mean)\n collisions_veh_numbers = tf.Variable(0, dtype=tf.int32)\n collisions_veh_numbers_op = tf.summary.scalar('collision_veh_numbers', collisions_veh_numbers)\n vehs_jerk = tf.Variable(0, dtype=tf.int32)\n vehs_jerk_op = tf.summary.scalar('jerk', vehs_jerk)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = False\n config.gpu_options.per_process_gpu_memory_fraction = 0.050\n sess = tf.Session(config=config)\n sess.run(tf.global_variables_initializer())\n sess.run([agent1_actor_target_init, agent1_critic_target_init])\n if args.restore:\n saver.restore(sess, tf.train.latest_checkpoint(os.path.join(args.save_dir, args.exp_name)))\n print(\"load cptk file from \" + tf.train.latest_checkpoint(os.path.join(args.save_dir, args.exp_name)))\n\n 
summary_writer = tf.summary.FileWriter(os.path.join(args.save_dir, args.exp_name), graph=tf.get_default_graph())\n\n # set the maximum size of the replay buffer\n agent1_memory_seq = ReplayBuffer(500000, args.batch_size, args.learn_start, 50000, rand_s=True)\n reward_list = []\n jerk_list = []\n collisions_list = []\n estm_collisions_list = []\n statistic_count = 0\n mean_window_length = 50\n state_now = []\n collisions_count = 0\n rate_latest = 1.0\n test_rate_latest = 1.0\n time_total = []\n seq_max_step = args.seq_max_step\n for epoch in range(args.num_episodes):\n collisions_count_last = collisions_count\n args.gamma = np.tanh(float(epoch + 6) / 12.0) * 0.90\n data = scio.loadmat(\"./data/train/arvTimeNewVeh_for_train.mat\") # load the training .mat data\n arrive_time = data[\"arvTimeNewVeh\"]\n env = TrafficInteraction(arrive_time, 150, args, vm=6, virtual_l=not args.actual_lane, lane_num=args.lane_num)\n for i in range(6000):\n state_now.clear()\n for lane in range(args.lane_num):\n for ind, veh in enumerate(env.veh_info[lane]):\n o_n = veh[\"state\"]\n agent1_action = [[0]]\n if veh[\"control\"]:\n count_n += 1\n agent1_action = get_agents_action(o_n[0], sess, agent1_ddpg, noise_range=0.2) # the model predicts an action from the current state\n state_now.append(o_n)\n env.step(lane, ind, agent1_action[0][0])\n ids, state_next, reward, actions, collisions, estm_collisions, collisions_per_veh, jerks, lock = env.scene_update()\n for seq, car_index in enumerate(ids):\n env.veh_info[car_index[0]][car_index[1]][\"buffer\"].append(\n [state_now[seq], actions[seq], reward[seq], state_next[seq],\n env.veh_info[car_index[0]][car_index[1]][\"Done\"]])\n if env.veh_info[car_index[0]][car_index[1]][\"Done\"] or env.veh_info[car_index[0]][car_index[1]][\n \"count\"] > seq_max_step:\n seq_data = env.veh_info[car_index[0]][car_index[1]][\"buffer\"]\n if env.veh_info[car_index[0]][car_index[1]][\"Done\"]:\n r_target = seq_data[-1][2]\n else:\n other_act_next = []\n for n in range(1, args.o_agent_num + 1):\n other_act_next.append(agent1_ddpg_target.action([seq_data[-1][3][n]], sess)[0][0])\n r_target = seq_data[-1][2] + args.gamma * agent1_ddpg_target.Q(state=[seq_data[-1][3][0]],\n action=agent1_ddpg_target.action(\n [seq_data[-1][3][0]],\n sess), other_action=[\n other_act_next], sess=sess)[0][0]\n for cur_data in reversed(seq_data[:-1]):\n r_target = cur_data[2] + args.gamma * r_target\n agent1_memory_seq.add(np.array(seq_data[0][0]), np.array(seq_data[0][1]), r_target,\n np.array(seq_data[0][3]), False)\n env.veh_info[car_index[0]][car_index[1]][\"buffer\"].pop(0)\n env.veh_info[car_index[0]][car_index[1]][\"count\"] -= 1\n reward_list += reward\n jerk_list += jerks\n if len(collisions_per_veh) > 0:\n collisions_list += list(np.array(collisions_per_veh)[:, 0])\n estm_collisions_list += list(np.array(collisions_per_veh)[:, 1])\n reward_list = reward_list[-mean_window_length:]\n jerk_list = jerk_list[-mean_window_length:]\n collisions_list = collisions_list[-mean_window_length:]\n estm_collisions_list = estm_collisions_list[-mean_window_length:]\n for k in range(len(actions)):\n if collisions_per_veh[k][0] > 0:\n collisions_count += 1\n if count_n > 10000:\n statistic_count += 1\n time_t = time.time()\n train_agent_seq(agent1_ddpg, agent1_ddpg_target, agent1_memory_seq,\n agent1_actor_target_update, agent1_critic_target_update, sess, summary_writer, args)\n time_total.append(time.time() - time_t)\n a = tf.trainable_variables\n if len(actions) > 0:\n summary_writer.add_summary(sess.run(collisions_op, {col: collisions}), statistic_count)\n 
summary_writer.add_summary(sess.run(etsm_collisions_op, {etsm_col: estm_collisions}),\n statistic_count)\n summary_writer.add_summary(sess.run(v_mean_op, {v_mean: np.mean(np.array(state_next)[:, 0, 1])}),\n statistic_count)\n summary_writer.add_summary(sess.run(vehs_jerk_op, {vehs_jerk: np.mean(jerk_list)}), statistic_count)\n summary_writer.add_summary(\n sess.run(acc_mean_op, {acc_mean: np.mean(np.array(state_next)[:, 0, 2])}),\n statistic_count)\n summary_writer.add_summary(sess.run(reward_mean_op, {reward_mean: np.mean(reward_list)}),\n statistic_count)\n summary_writer.add_summary(sess.run(collisions_mean_op, {collisions_mean: np.mean(collisions_list)}),\n statistic_count)\n summary_writer.add_summary(\n sess.run(estm_collisions_mean_op, {estm_collisions_mean: np.mean(estm_collisions_list)}),\n statistic_count)\n summary_writer.add_summary(\n sess.run(collisions_veh_numbers_op, {collisions_veh_numbers: collisions_count}), statistic_count)\n if i % 100 == 0:\n print(\n \"reward mean: %s;epoch: %s;i: %s;count: %s;collisions_count: %s latest_c_rate: %s;\"\n \"test best c_rate: %s;a-lr: %0.6f; c-lr: %0.6f; time_mean: %s\" % (\n np.mean(reward_list), epoch, i, count_n, collisions_count, rate_latest, test_rate_latest,\n args.actor_lr, args.critic_lr, np.mean(time_total)))\n env.delete_vehicle()\n if epoch % args.save_rate == 0:\n print('update model to ' + os.path.join(args.save_dir, args.exp_name, str(epoch) + '.cptk'))\n saver.save(sess, os.path.join(args.save_dir, args.exp_name, str(epoch) + '.cptk'))\n if rate_latest > (collisions_count - collisions_count_last) / float(env.id_seq):\n rate_latest = (collisions_count - collisions_count_last) / float(env.id_seq)\n copyfile(\n os.path.join(args.save_dir, args.exp_name, str(epoch) + '.cptk.data-00000-of-00001'),\n os.path.join(args.save_dir, args.exp_name, 'best.cptk.data-00000-of-00001'))\n copyfile(\n os.path.join(args.save_dir, args.exp_name, str(epoch) + '.cptk.index'),\n os.path.join(args.save_dir, args.exp_name, 'best.cptk.index'))\n copyfile(\n os.path.join(args.save_dir, args.exp_name, str(epoch) + '.cptk.meta'),\n os.path.join(args.save_dir, args.exp_name, 'best.cptk.meta'))\n summary_writer.add_summary(sess.run(collision_rate_op, {\n collision_rate: (collisions_count - collisions_count_last) / float(env.id_seq)}),\n epoch)\n if epoch % 2 == 0 and args.benchmark:\n c_rate = benchmark(agent1_ddpg, arrive_time, sess)\n if c_rate < test_rate_latest:\n test_rate_latest = c_rate\n copyfile(\n os.path.join(args.save_dir, args.exp_name, str(epoch) + '.cptk.data-00000-of-00001'),\n os.path.join(args.save_dir, args.exp_name, 'test_best.cptk.data-00000-of-00001'))\n copyfile(\n os.path.join(args.save_dir, args.exp_name, str(epoch) + '.cptk.index'),\n os.path.join(args.save_dir, args.exp_name, 'test_best.cptk.index'))\n copyfile(\n os.path.join(args.save_dir, args.exp_name, str(epoch) + '.cptk.meta'),\n os.path.join(args.save_dir, args.exp_name, 'test_best.cptk.meta'))\n if epoch % 5 == 4:\n args.actor_lr = args.actor_lr * 0.9\n args.critic_lr = args.critic_lr * 0.9\n sess.close()\n\n\n# Feature importance analysis tool\ndef actor_feature_importance_analyze(state, model, sess, idx=0):\n plt.figure(0)\n imps = np.zeros(state.shape[0])\n base = get_agents_action(state, sess, model)[0]\n for j in range(imps.shape[0]):\n fes = []\n for i in range(100):\n tmp = state.copy()\n tmp[j] += np.random.rand(1) * 10\n fes.append(tmp)\n imps[j] = np.mean(abs((model.action(state=fes, sess=sess).reshape(100) - base[0])))\n if sum(imps) > 1:\n print(state, imps)\n plt.bar([i for i 
in range(len(imps))], imps)\n plt.savefig(\"result_img/feature_importance_curve_%s.png\" % idx)\n plt.close()\n\n\ndef test():\n agent1_ddpg_test = MADDPG('agent1', actor_lr=args.actor_lr, critic_lr=args.critic_lr,\n nb_other_aciton=args.o_agent_num, num_units=args.num_units)\n saver = tf.train.Saver()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n sess.run(tf.global_variables_initializer())\n model_path = os.path.join(args.save_dir, args.exp_name, \"test_best.cptk\")\n if not os.path.exists(model_path + \".meta\"):\n model_path = tf.train.latest_checkpoint(os.path.join(args.save_dir, args.exp_name))\n saver.restore(sess, model_path)\n print(\"load cptk file from \" + model_path)\n visible = Visible(lane_w=2.5, control_dis=150, l_mode=\"actual\", c_mode=args.c_mode, lane_num=args.lane_num)\n size = (960, 960)\n fps = 20\n video_writer = cv2.VideoWriter()\n if args.video_name != \"\":\n video_writer = cv2.VideoWriter(os.path.join(\"result_imgs\", args.video_name + \".avi\"),\n cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, size)\n mat_path = os.path.join(\"./data/test\", args.mat_path)\n data = scio.loadmat(mat_path) # load the .mat data\n arrive_time = data[\"arvTimeNewVeh\"]\n print(\"mat_path: \", mat_path)\n lock_total = 0\n collisions_count = 0\n time_total = []\n env = TrafficInteraction(arrive_time, 150, args, show_col=False, virtual_l=not args.actual_lane,\n lane_num=args.lane_num)\n jerk_total = 0\n for i in range(1000):\n for lane in range(args.lane_num):\n for ind, veh in enumerate(env.veh_info[lane]):\n o_n = veh[\"state\"]\n agent1_action = [[0]]\n if veh[\"control\"]:\n temp_t = time.time()\n agent1_action = get_agents_action(o_n[0], sess, agent1_ddpg_test, noise_range=0) # the model predicts an action from the current state\n time_total.append(time.time() - temp_t)\n env.step(lane, ind, agent1_action[0][0]) # the environment returns the next state and reward for this action\n ids, state_next, reward, actions, collisions, estm_collisions, collisions_per_veh, jerks, lock = env.scene_update()\n jerk_total += sum(jerks)\n lock_total += lock\n for k in range(len(actions)):\n if collisions_per_veh[k][0] > 0:\n collisions_count += 1\n if i % 50 == 0:\n print(\"i: %s collisions_rate: %s reward std: %s reward mean: %s lock_num: %s\" % (\n i, float(collisions_count) / env.id_seq, np.std(reward), np.mean(reward), lock_total))\n if (args.visible or args.video_name != \"\"):\n visible.show(env, i)\n img = cv2.imread(\"result_imgs/%s.png\" % i)\n # cv2.putText(img, \"density: \" + str(args.mat_pa), (200, 160), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)\n cv2.putText(img, \"frame: \" + str(i), (200, 200), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)\n cv2.putText(img, \"veh: \" + str(env.id_seq), (200, 240), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)\n cv2.putText(img, \"c-veh: %s\" % collisions_count, (200, 280), cv2.FONT_HERSHEY_COMPLEX, 0.5,\n (0, 0, 255),\n 1)\n cv2.putText(img, \"c-r: %0.4f\" % (float(collisions_count) / env.id_seq), (200, 320),\n cv2.FONT_HERSHEY_COMPLEX,\n 0.5, (0, 0, 255), 1)\n cv2.putText(img, \"p_veh: \" + str(env.passed_veh), (200, 360), cv2.FONT_HERSHEY_COMPLEX, 0.5,\n (0, 0, 0),\n 1)\n cv2.putText(img,\n \"pT-m: %0.4f s\" % (\n float(env.passed_veh_step_total) / (env.passed_veh + 0.0001) * env.deltaT),\n (200, 400), cv2.FONT_HERSHEY_COMPLEX,\n 0.5, (0, 0, 0), 1)\n if args.visible:\n cv2.imshow(\"unsignalized intersection\", img)\n cv2.waitKey(1)\n if args.video_name != \"\":\n video_writer.write(img)\n env.delete_vehicle()\n # if i < 2000:\n # scio.savemat(\"test_mat.mat\", {\"veh_info\": 
env.veh_info_record})\n video_writer.release()\n cv2.destroyAllWindows()\n choose_veh_visible = False\n veh_route = False\n if veh_route:\n n = 0\n color = {\"0\": 'darksalmon', \"3\": 'orchid', \"7\": 'b', \"10\": 'mediumslateblue', \"9\": \"mediumseagreen\"}\n plt.figure(0, figsize=(6.4, 3.2))\n plt.rcParams['font.family'] = ['SimHei']\n plt.rcParams['xtick.direction'] = 'in'\n plt.rcParams['ytick.direction'] = 'in'\n # plot the trajectories\n t_l = 85\n leg = {\"0\": '目标车道-车辆', \"3\": '冲突车道1-车辆', \"7\": '冲突车道2-车辆', \"10\": '冲突车道3-车辆', \"9\": \"冲突车道4-车辆\"}\n idx = [\"0\", \"3\", \"7\", \"10\", \"9\"]\n for veh in env.virtual_data:\n n += 1\n x = [t[0] for t in env.virtual_data[veh] if t_l - 30 < t[0] < t_l]\n y = [t[1] for t in env.virtual_data[veh] if t_l - 30 < t[0] < t_l]\n if len(idx) > 0 and veh.split(\"_\")[0] == idx[0]:\n plt.plot(x, y, color[veh.split(\"_\")[0]], label=leg[veh.split(\"_\")[0]])\n plt.legend()\n leg.pop(idx[0])\n idx.pop(0)\n else:\n plt.plot(x, y, color[veh.split(\"_\")[0]])\n # plt.legend()\n plt.xlabel(\"时间/s\")\n plt.ylabel(\"车辆与冲突点的距离/m\")\n # plt.savefig(\"exp_result_imgs/route.png\")\n png1 = io.BytesIO()\n plt.savefig(png1, format=\"png\", dpi=500, pad_inches=.1, bbox_inches='tight')\n png2 = Image.open(png1)\n png2.save(\"exp_result_imgs/route.tiff\")\n png1.close()\n # plt.savefig(\"result_imgs/efficiency.png\")\n plt.close()\n plt.figure(1, figsize=(6.4, 3.2))\n plt.rcParams['font.family'] = ['SimHei']\n plt.rcParams['xtick.direction'] = 'in'\n plt.rcParams['ytick.direction'] = 'in'\n # plot the velocity curves\n t_l = 85\n leg = {\"0\": '目标车道-车辆', \"3\": '冲突车道1-车辆', \"7\": '冲突车道2-车辆', \"10\": '冲突车道3-车辆', \"9\": \"冲突车道4-车辆\"}\n idx = [\"0\", \"3\", \"7\", \"10\", \"9\"]\n for veh in env.virtual_data:\n n += 1\n x = [t[0] for t in env.virtual_data[veh] if t_l - 30 < t[0] < t_l]\n y = [t[2] for t in env.virtual_data[veh] if t_l - 30 < t[0] < t_l]\n if len(idx) > 0 and veh.split(\"_\")[0] == idx[0]:\n plt.plot(x, y, color[veh.split(\"_\")[0]], lw=2, label=leg[veh.split(\"_\")[0]])\n plt.legend()\n leg.pop(idx[0])\n idx.pop(0)\n else:\n plt.plot(x, y, color[veh.split(\"_\")[0]], lw=2)\n # plt.legend()\n plt.xlabel(\"时间 [s]\")\n plt.ylabel(\"速度 [m/s]\")\n plt.savefig(\"exp_result_imgs/velocity.png\")\n plt.close()\n if choose_veh_visible:\n choose_veh_info = [np.array(item) for item in env.choose_veh_info]\n plt.figure(0)\n color = ['r', 'g', 'b', 'y']\n y_units = ['distance [m]', 'velocity [m/s]', 'accelerate speed [m/s^2]']\n titles = [\"The distance of the vehicle varies with the time\",\n \"The velocity of the vehicle varies with the time\",\n \"The accelerate speed of the vehicle varies with the time\"]\n for m in range(len(y_units)):\n for n in range(4):\n plt.plot(choose_veh_info[n][:, 0], choose_veh_info[n][:, m + 1], color[n])\n plt.legend([\"lane-0\", \"lane-1\", \"lane-2\", \"lane-3\"])\n plt.xlabel(\"time [s]\")\n plt.ylabel(y_units[m])\n plt.title(titles[m], fontsize='small')\n plt.savefig(\"exp_result_imgs/%s.png\" % (y_units[m].split(\" \")[0]), dpi=600)\n plt.close()\n print(\n \"vehicle number: %s; collisions occurred number: %s; collisions rate: %s, time_mean: %s, pT-m: %0.4f s jerks: %s\" % (\n env.id_seq, collisions_count, float(collisions_count) / env.id_seq, np.mean(time_total),\n float(env.passed_veh_step_total) / (env.passed_veh + 0.0001) * env.deltaT, jerk_total / env.passed_veh))\n sess.close()\n\n\ndef batch_test():\n agent1_ddpg_test = MADDPG('agent1', actor_lr=args.actor_lr, critic_lr=args.critic_lr,\n nb_other_aciton=args.o_agent_num, 
num_units=args.num_units)\n saver = tf.train.Saver()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n sess.run(tf.global_variables_initializer())\n model_path = os.path.join(args.save_dir, args.exp_name, \"test_best.cptk\")\n if not os.path.exists(model_path + \".meta\"):\n model_path = tf.train.latest_checkpoint(os.path.join(args.save_dir, args.exp_name))\n saver.restore(sess, model_path)\n print(\"load cptk file from \" + model_path)\n dens = [1200, 1000, 900, 800, 600, 400, 200]\n tw = open(args.exp_name + \"_batch_test_result_12_v1.txt\", \"w\")\n for d in dens:\n dens_f = \"arvTimeNewVeh_new_%s_%s.mat\" % (d, args.lane_num)\n mat_path = os.path.join(\"./data/test\", dens_f)\n print(mat_path)\n tw.write(mat_path + \"\\n\")\n data = scio.loadmat(mat_path) # load the .mat data\n arrive_time = data[\"arvTimeNewVeh\"]\n env = TrafficInteraction(arrive_time, 150, args, show_col=False, virtual_l=not args.actual_lane,\n lane_num=args.lane_num)\n jerk_total = 0\n collisions_count = 0\n lock_total = 0\n for i in range(36000):\n for lane in range(args.lane_num):\n for ind, veh in enumerate(env.veh_info[lane]):\n o_n = veh[\"state\"]\n agent1_action = [[0]]\n if veh[\"control\"]:\n agent1_action = get_agents_action(o_n[0], sess, agent1_ddpg_test,\n noise_range=0) # the model predicts an action from the current state\n env.step(lane, ind, agent1_action[0][0]) # the environment returns the next state and reward for this action\n ids, state_next, reward, actions, collisions, estm_collisions, collisions_per_veh, jerks, lock = env.scene_update()\n jerk_total += sum(jerks)\n lock_total += lock\n for k in range(len(actions)):\n if collisions_per_veh[k][0] > 0:\n collisions_count += 1\n if i % 1000 == 0:\n print(\"i: %s collisions_rate: %s reward std: %s reward mean: %s lock_num: %s\" % (\n i, float(collisions_count) / env.id_seq, np.std(reward), np.mean(reward), lock_total))\n env.delete_vehicle()\n result_txt = \"vehicle number %s collisions occurred number %s collisions rate %s pT-m %0.4f s jerks %s \" \\\n \"lock_num %s\" % (\n env.id_seq, collisions_count, float(collisions_count) / env.id_seq,\n float(env.passed_veh_step_total) / (env.passed_veh + 0.0001) * env.deltaT,\n jerk_total / env.passed_veh,\n lock_total)\n print(result_txt)\n tw.write(result_txt + \"\\n\")\n tw.close()\n sess.close()\n\n\nif __name__ == '__main__':\n args = parse_args()\n if not os.path.exists(\"result_imgs\"):\n os.makedirs(\"result_imgs\")\n if not os.path.exists(\"exp_result_imgs\"):\n os.makedirs(\"exp_result_imgs\")\n if not os.path.exists(os.path.join(args.save_dir, args.exp_name)):\n os.makedirs(os.path.join(args.save_dir, args.exp_name))\n if args.type == \"train\":\n with open(os.path.join(args.save_dir, args.exp_name, \"args.txt\"), \"w\") as fw:\n fw.write(str(args))\n train()\n else:\n if args.batch_test:\n batch_test()\n else:\n test()\n"
]
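A note on the multi-step target built when a vehicle's buffer is flushed in `train()` above: the code bootstraps from the target critic's Q value (or uses only the final reward on terminal transitions) and folds the buffered rewards backwards through the discount factor. A minimal standalone sketch of that return; the names `rewards`, `bootstrap`, and `gamma` are illustrative and not from the source:

# Sketch of the n-step return used when flushing a vehicle's buffer.
# `rewards` holds the per-step rewards r_0..r_{n-1}; `bootstrap` is the
# target critic's Q estimate after r_{n-1} (use 0.0 when the episode ended);
# `gamma` is the discount factor.
def n_step_return(rewards, bootstrap, gamma):
    target = bootstrap
    for r in reversed(rewards):
        target = r + gamma * target
    return target

# Example: a 3-step return with gamma=0.8 and a bootstrap value of 10.0.
assert abs(n_step_return([1.0, 1.0, 1.0], 10.0, 0.8) - 7.56) < 1e-9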
| [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.mean",
"tensorflow.get_default_graph",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"scipy.io.loadmat",
"tensorflow.ConfigProto",
"numpy.std",
"tensorflow.Session",
"matplotlib.pyplot.close",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"tensorflow.global_variables_initializer",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.ylabel",
"tensorflow.assign",
"matplotlib.pyplot.xlabel",
"numpy.vstack"
]
]
|
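The training script above builds its target-network init and update ops with `create_init_update(..., tau=args.trans_r)`, whose body is not part of this file. For reference, a conventional TF1-style soft-update builder looks roughly like the sketch below; the scope-prefix matching and the `target <- tau*target + (1-tau)*online` convention are assumptions, not the project's confirmed implementation:

import tensorflow as tf

# Hypothetical sketch of a soft-update helper with the same call signature.
# `online_scope`/`target_scope` are variable-name prefixes; with tau close
# to 1 (e.g. trans_r=0.998) the target network tracks the online one slowly.
def create_init_update_sketch(online_scope, target_scope, tau=0.998):
    online_vars = [v for v in tf.trainable_variables() if v.name.startswith(online_scope)]
    target_vars = [v for v in tf.trainable_variables() if v.name.startswith(target_scope)]
    init_ops = [tf.assign(t, o) for o, t in zip(online_vars, target_vars)]
    update_ops = [tf.assign(t, tau * t + (1 - tau) * o) for o, t in zip(online_vars, target_vars)]
    return init_ops, update_ops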
hjurong/predprey | [
"82e4cb07c45ce37ce502498a1af16a2b465ff9e6"
]
| [
"pred_prey.py"
]
| [
"import random\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom timeit import default_timer\r\n\r\nrandom.seed(100000)\r\n\r\nclass Island (object):\r\n \"\"\"Island\r\n a 3D grid where zero value indicates position not occupied.\r\n \"\"\"\r\n ## Initialisation of an Island.\r\n def __init__(self, x,y,z, wolf_count=0, eagle_count=0,\r\n rabbit_count=0, pigeon_count=0, grass_count=0, fruit_count=0):\r\n ''' Initialize grid to all 0's, then fill with animals.\r\n '''\r\n # Making the 3D-grid and its dimensions as Island attributes.\r\n # Construct the grid as a list of lists of lists;\r\n # first specify the z-dimension,\r\n # then the y-dimension and finally the x-dimension.\r\n self.grid_size_x = x\r\n self.grid_size_y = y\r\n self.grid_size_z = z \r\n self.grid_3D = [[[0 for i in range(x)] for j in range(y)] for k in range(z)]\r\n\r\n # Place animals onto the Island using the init_animals method.\r\n self.init_envir(wolf_count, eagle_count, rabbit_count, pigeon_count,\r\n grass_count, fruit_count)\r\n self.wolf_count, self.eagle_count = wolf_count, eagle_count\r\n self.rabbit_count, self.pigeon_count = rabbit_count, pigeon_count\r\n\r\n # Add two attributes to Island for holding the data of the animals.\r\n # Dictionaries are used to make appending and extracting data simpler.\r\n self.life_dict = {\"W\":[], \"E\":[], \"r\":[], \"p\":[]} \r\n self.offspring_dict = {\"W\":[], \"E\":[], \"r\":[], \"p\":[]}\r\n \r\n def size(self):\r\n ''' Return size of the island: three dimension.\r\n '''\r\n return self.grid_size_x,self.grid_size_y,self.grid_size_z \r\n \r\n def __str__(self):\r\n ''' String representation for printing.\r\n (0,0,0) is at the lower left corner of the bottom layer.\r\n '''\r\n s = \"\" # The 3D grid will be printed as a string.\r\n for k in range(self.grid_size_z-1,-1,-1): # Print 3D grid as 2D layers.\r\n s+=\"z-level {}\\n\".format(k) # Print the z-level of the current layer.\r\n # Add a horizontal row to indicate the x-axis. \r\n for n in range(self.grid_size_x): s+=\"{:<2s}\".format(str(n)+\" \")\r\n s+=\"\\n\" # Build the 2D-levels in a new line. \r\n for j in range(self.grid_size_y-1,-1,-1): # Print the rows of the 2D layers.\r\n for i in range(self.grid_size_x+1): # Each row has x columns\r\n # Add a vertical column to indicate the y-axis level.\r\n if i==self.grid_size_x: s+= \"{:<2s}\".format(str(j)+\" \") \r\n else:\r\n # print a '.' for an empty space.\r\n # print the name of the animal for occupied position.\r\n if not self.grid_3D[k][j][i]: s+= \"{:<2s}\".format('.' + \" \")\r\n else: s+= \"{:<2s}\".format((str(self.grid_3D[k][j][i])) + \" \")\r\n s+=\"\\n\" # Start the next row in a new line.\r\n s+=\"\\n\" # Separate every 2D-layer with a new line.\r\n return(s)\r\n \r\n def register(self,obj):\r\n ''' Register animal or plant with island, i.e. 
put it at the\r\n animal/plant's coordinates.\r\n '''\r\n x = obj.x\r\n y = obj.y\r\n z = obj.z\r\n self.grid_3D[z][y][x] = obj\r\n\r\n def remove(self,obj):\r\n ''' Remove animal or plant from island by making the animal's \r\n position to be 0.\r\n '''\r\n x = obj.x\r\n y = obj.y\r\n z = obj.z\r\n self.grid_3D[z][y][x] = 0\r\n \r\n def occupant(self,x,y,z):\r\n '''Return the animal at location (x,y,z).\r\n '''\r\n if 0 <= x < self.grid_size_x and 0 <= y < self.grid_size_y \\\r\n and 0 <= z < self.grid_size_z:\r\n return self.grid_3D[z][y][x]\r\n else:\r\n return -1 # outside island boundary \r\n\r\n def init_envir(self,wolf_count, eagle_count, rabbit_count, pigeon_count,\r\n grass_count, fruit_count):\r\n ''' Place the initial animals on the island.\r\n '''\r\n # The while-loop continues until unoccupied positions equalling to\r\n # wolf_count, eagle_count are found.\r\n # If the dimensions of the Island are insufficient to hold all the animals,\r\n # do not enter the loop; and inform the user.\r\n # Same while loop for all four animals.\r\n if wolf_count + rabbit_count + grass_count <= \\\r\n self.grid_size_x * self.grid_size_y and \\\r\n wolf_count + rabbit_count + pigeon_count + eagle_count \\\r\n + grass_count + fruit_count <= \\\r\n self.grid_size_x * self.grid_size_y * self.grid_size_z:\r\n # The while-loop continues until unoccupied positions equalling to\r\n # wolf_count, eagle_count are found.\r\n # Same while loop for all four animals.\r\n # The while-loop continues until unoccupied positions equalling to\r\n # wolf_count, eagle_count are found.\r\n loc_set = set() # Used to avoid repeats.\r\n count = 0\r\n while count < wolf_count:\r\n # Wolves are restricted to layer_0.\r\n x = random.randint(0,self.grid_size_x-1)\r\n y = random.randint(0,self.grid_size_y-1)\r\n z = 0 \r\n if (x,y,z) not in loc_set:\r\n new_wolf=Wolf(self,x,y,z)\r\n loc_set.add((x,y,z))\r\n count += 1\r\n self.register(new_wolf)\r\n \r\n count = 0\r\n while count < rabbit_count:\r\n # Rabbits are restricted to layer_0.\r\n x = random.randint(0,self.grid_size_x-1)\r\n y = random.randint(0,self.grid_size_y-1)\r\n z = 0\r\n if (x,y,z) not in loc_set:\r\n new_rabbit=Rabbit(self,x,y,z)\r\n loc_set.add((x,y,z))\r\n count += 1\r\n self.register(new_rabbit)\r\n\r\n count = 0\r\n while count < eagle_count:\r\n # No position restrictions for eagles.\r\n x = random.randint(0,self.grid_size_x-1)\r\n y = random.randint(0,self.grid_size_y-1)\r\n z = random.randint(0,self.grid_size_z-1)\r\n if (x,y,z) not in loc_set:\r\n new_eagle=Eagle(self,x,y,z)\r\n loc_set.add((x,y,z))\r\n count += 1\r\n self.register(new_eagle)\r\n\r\n count = 0 \r\n while count < pigeon_count:\r\n # No position restrictions for pigeons.\r\n x = random.randint(0,self.grid_size_x-1)\r\n y = random.randint(0,self.grid_size_y-1)\r\n z = random.randint(0,self.grid_size_z-1)\r\n if not self.occupant(x,y,z):\r\n new_pigeon=Pigeon(self,x,y,z)\r\n loc_set.add((x,y,z))\r\n count += 1\r\n self.register(new_pigeon)\r\n \r\n count = 0\r\n while count < grass_count:\r\n # Grass are restricted to layer_0.\r\n x = random.randint(0,self.grid_size_x-1)\r\n y = random.randint(0,self.grid_size_y-1)\r\n z = 0 \r\n if (x,y,z) not in loc_set:\r\n new_grass=Grass(self,x,y,z)\r\n loc_set.add((x,y,z))\r\n count += 1\r\n self.register(new_grass)\r\n\r\n count = 0\r\n while count < fruit_count:\r\n # Fruits are restricted to layer_0 and 1.\r\n x = random.randint(0,self.grid_size_x-1)\r\n y = random.randint(0,self.grid_size_y-1)\r\n z = random.randint(0,1) \r\n if (x,y,z) not in 
loc_set:\r\n new_fruit=Fruit(self,x,y,z)\r\n loc_set.add((x,y,z))\r\n count += 1\r\n self.register(new_fruit) \r\n else: print(\"Insufficient Island Dimensions\")\r\n\r\n def clear_all_moved_flags(self):\r\n ''' Animals have a moved flag to indicated they moved this turn.\r\n Clear that so they can move at the next clock tick.\r\n '''\r\n # Loop through every cell in the 3D grid list.\r\n for z in range(self.grid_size_z):\r\n for y in range(self.grid_size_y):\r\n for x in range(self.grid_size_x):\r\n if isinstance(self.grid_3D[z][y][x], Animal): \r\n self.grid_3D[z][y][x].clear_moved_flag()\r\n \r\n def get_locations(self):\r\n scatter_dict = {\"W\":[[],[],[],[],[]],\"E\":[[],[],[],[],[]],\r\n \"p\":[[],[],[],[],[]],\"r\":[[],[],[],[],[]],\r\n \"g\":[[],[],[],[],[]],\"f\":[[],[],[],[],[]]}\r\n for x in range(self.grid_size_x):\r\n for y in range(self.grid_size_y):\r\n for z in range(self.grid_size_z):\r\n instance = self.occupant(x,y,z) \r\n # Add the location statistics to a dictionary for animation.\r\n # The dictionary is initialised such that the x,y,z locations\r\n # can be added to the dictionary, as well as the life-time\r\n # and number of offspring.\r\n if isinstance(instance, Animal):\r\n scatter_dict[instance.name][0].append(instance.x)\r\n scatter_dict[instance.name][1].append(instance.y)\r\n scatter_dict[instance.name][2].append(instance.z)\r\n scatter_dict[instance.name][3].append(instance.life_time)\r\n scatter_dict[instance.name][4].append(instance.offspring)\r\n elif isinstance(instance, Plant):\r\n scatter_dict[instance.name][0].append(instance.x)\r\n scatter_dict[instance.name][1].append(instance.y)\r\n scatter_dict[instance.name][2].append(instance.z)\r\n scatter_dict[instance.name][3].append(instance.life_time)\r\n scatter_dict[instance.name][4].append(instance.eaten)\r\n return scatter_dict\r\n \r\n def count_animal(self, add_stat=False):\r\n ''' Count the number of a specified type of animal on the island,\r\n and the statistics of the animals can be appended\r\n to the appropriate lists if add_stat=True.'''\r\n wolf_count, eagle_count, rabbit_count, pigeon_count = 0, 0, 0, 0\r\n # Loop through every position of the 3D_grid list. \r\n for x in range(self.grid_size_x):\r\n for y in range(self.grid_size_y):\r\n for z in range(self.grid_size_z):\r\n animal = self.occupant(x,y,z) \r\n # If there is an animal corresponding to the type looking for,\r\n # add count. \r\n if animal and not add_stat:\r\n if isinstance(animal, Wolf): wolf_count+=1\r\n if isinstance(animal, Eagle): eagle_count+=1\r\n if isinstance(animal, Rabbit): rabbit_count+=1\r\n if isinstance(animal, Pigeon): pigeon_count+=1\r\n # If the add_stat condition is true,\r\n # append the animal's data (i.e., life time and number of offspring)\r\n # to the Island attributes.\r\n # This will be used at the end of the simulation to\r\n # gather the statistics of animals that are still alive.\r\n elif animal and add_stat:\r\n self.life_dict[animal.name].append(animal.life_time)\r\n self.offspring_dict[animal.name].append(animal.offspring)\r\n # Update the Island attributes if add_stat=False.\r\n if not add_stat:\r\n self.wolf_count, self.eagle_count = wolf_count, eagle_count\r\n self.rabbit_count, self.pigeon_count = rabbit_count, pigeon_count \r\n \r\n def cal_stat(self, type): \r\n \"\"\" Calculates all the required statistics for a type of animal and\r\n return the result as a tuple.\"\"\"\r\n\r\n # Get the data (i.e., the life_time and offspring lists \r\n # of a subclass of Animal.) 
from the Island attributes.\r\n # Sort those lists for extracting maximum and minimum and median calculations.\r\n offspring_list = self.offspring_dict[type]\r\n offspring_list.sort() \r\n life_list = self.life_dict[type]\r\n life_list.sort()\r\n life_num = len(life_list)\r\n\r\n # If a type of animal is initialised to be zero (i.e., none in the simulation),\r\n # IndexError will arise when trying to calculated its statistics.\r\n # This is not desirable for testing.\r\n # Use try-except to prevent the program from crashing if this occurs.\r\n try:\r\n offspring_min, offspring_max = offspring_list[0], offspring_list[-1]\r\n life_min, life_max = life_list[0], life_list[-1]\r\n\r\n # Calculate the average for the number of offspring and life_time.\r\n offspring_avg = sum(offspring_list)/len(offspring_list)\r\n life_avg = sum(life_list)/len(life_list)\r\n\r\n # Calculate the median for the number of offspring and life_time.\r\n # Need to consider two cases:\r\n # 1. If the length of the list is an odd number,\r\n # the median is the middle number of that list\r\n # (Python indexing starts at 0).\r\n # 2. If the length of the list is an even number,\r\n # the median is the average between the two middle numbers.\r\n if len(offspring_list)%2==1:\r\n offspring_med = offspring_list[int((len(offspring_list)-1)/2)]\r\n else:\r\n offspring_med =(offspring_list[int(len(offspring_list)/2-1)] +\r\n offspring_list[int(len(offspring_list)/2)])/2\r\n\r\n if len(life_list)%2==1:\r\n life_med = life_list[int((len(life_list)-1)/2)]\r\n else:\r\n life_med =(life_list[int(len(life_list)/2-1)] +\r\n life_list[int(len(life_list)/2)])/2\r\n\r\n # If a type of animal is not initialised, make all its statistics 0.\r\n except IndexError:\r\n life_num = \"\\nThis type of animal has not been initialised.\"\r\n life_min, life_max, life_avg, life_med = 0, 0 ,0 ,0\r\n offspring_min, offspring_max, offspring_avg, offspring_med = 0, 0, 0, 0\r\n\r\n # Return the result as a tuple. 
\r\n return life_num, life_max, life_min, life_avg, life_med,\\\r\n offspring_min, offspring_max, offspring_avg, offspring_med\r\n\r\nclass Plant(object):\r\n def __init__(self, island, x=0, y=0, z=0, s=\"P\"):\r\n \"\"\" Initialise a plant class as food for Preys.\r\n \"\"\"\r\n self.island = island\r\n self.name = s\r\n self.x, self.y, self.z = x, y, z\r\n self.life_time = 0\r\n self.eaten_time = 0\r\n self.eaten = False\r\n \r\n def __str__(self):\r\n \"\"\" Prints the name of the Plant.\r\n \"\"\"\r\n return self.name\r\n \r\n def position(self):\r\n ''' Return coordinates of the animal's current position.\r\n '''\r\n return self.x, self.y, self.z\r\n \r\n def grow(self):\r\n \"\"\" When a Plant grows back, its eaten flage becomes False.\r\n \"\"\"\r\n if self.eaten_time >= self.growth_time:\r\n assert self.eaten==True\r\n self.eaten = False\r\n self.eaten_time = 0\r\n \r\n def clock_tick(self):\r\n ''' Grass updates life_time.\r\n '''\r\n self.life_time += 1\r\n if self.eaten: self.eaten_time += 1\r\n self.grow()\r\n #print('Tick {} {},{},{}; life:{}, eaten_time:{}'.format(self.name,\r\n # self.x,self.y,self.z, self.life_time, self.eaten_time)) # Debug\r\n if self.life_time % (24*self.growth_time) == 0 and \\\r\n random.randint(0,2)==1: self.eaten = True\r\n\r\nclass Grass(Plant):\r\n def __init__(self, island, x=0, y=0, z=0, s=\"g\"):\r\n \"\"\" Grass is a subclass of Plant; it can be eaten by Rabbit.\r\n \"\"\"\r\n Plant.__init__(self, island, x, y, z, s)\r\n self.growth_time = self.regrowth\r\n \r\nclass Fruit(Plant):\r\n def __init__(self, island, x=0, y=0, z=0, s=\"f\"):\r\n \"\"\" Fruit is a subclass of Plant; it can be eaten by Prey.\r\n \"\"\"\r\n Plant.__init__(self, island, x, y, z, s)\r\n self.growth_time = self.regrowth\r\n \r\nclass Animal (object):\r\n def __init__(self, island, x=0, y=0, z=0, s=\"A\", s_range=2):\r\n \"\"\" Initialize an Animal class, with their positions and names.\r\n \"\"\"\r\n # Storing the information as class attributes.\r\n self.island = island\r\n self.name = s\r\n self.x, self.y, self.z = x, y, z\r\n self.moved = False\r\n self.life_time, self.offspring = 0, 0\r\n self.check_grid_list = [(i,j,k) for i in range(-1,2)\r\n for j in range(-1,2) for k in range(-1,2) \r\n if (i,j,k)!=(0,0,0)]\r\n assert len(self.check_grid_list)==26, \"Error in chech grid list\"\r\n self.search_set = set((i,j,k) for i in range(-s_range, s_range+1)\r\n for j in range(-s_range, s_range+1)\r\n for k in range(-s_range, s_range+1) \r\n if (i,j,k)!=(0,0,0)).difference(\r\n set(self.check_grid_list))\r\n assert len(self.search_set)==98, \"Error in search_set\"\r\n \r\n def __str__(self):\r\n \"\"\" Prints the name of the animal.\r\n \"\"\"\r\n return self.name\r\n \r\n def position(self):\r\n ''' Return coordinates of the animal's current position.\r\n '''\r\n return self.x, self.y, self.z\r\n\r\n def check_grid(self, type_looking_for=int):\r\n ''' Look randomly at all possible locations from the animal's location\r\n and return a location that is presently occupied by an object\r\n of the specified type. 
Return 0 if no such location exists\r\n '''\r\n # Generate a set of neighbour offset tuples.\r\n # Animals can only search positions that are in this list.\r\n # An animal can inspect a maximum of 26 positions.\r\n #start = time.time()\r\n offset = self.check_grid_list\r\n assert len(offset)==26, \"Error in offset list.\"\r\n # Randomly look through positions with offset to current position included in offset\r\n # by shuffling the list.\r\n # Every time this loop is entered, the offset list would have a different order.\r\n result = 0\r\n random.shuffle(offset)\r\n for index in range(len(offset)): \r\n x = self.x + offset[index][0] # neighbouring x,y,z coordinates\r\n y = self.y + offset[index][1]\r\n if isinstance(self, Eagle) or isinstance(self, Pigeon): \r\n # Eagle or pigeon have no restrictions on their positions. \r\n z = self.z + offset[index][2] \r\n else:\r\n # Wolf and rabbit are restricted to layer_0.\r\n z = self.z\r\n # If a position is found, return its coordinates and exit loop.\r\n if 0 <= x < self.island.grid_size_x and \\\r\n 0 <= y < self.island.grid_size_y and \\\r\n 0 <= z < self.island.grid_size_z and \\\r\n isinstance(self.island.occupant(x,y,z), type_looking_for): \r\n result=(x,y,z)\r\n break\r\n # If an object of the specified type cannot be found, result=0.\r\n #print(\"check_grid took {}\".format(time.time()-start))\r\n return result\r\n\r\n\r\n ## searching for a particular animal two grids away-----------------------\r\n def search(self, type_search=int, s_range=2):\r\n if s_range==2:\r\n search_set = self.search_set # only search 2 grids away.\r\n for elem in search_set:\r\n x = self.x + elem[0] # neighbouring x,y,z coordinates\r\n y = self.y + elem[1]\r\n z = self.z + elem[2]\r\n # If the position is outside the 3D grid, choose another one.\r\n if not 0 <= x < self.island.grid_size_x or \\\r\n not 0 <= y < self.island.grid_size_y or \\\r\n not 0 <= z < self.island.grid_size_z: continue\r\n # If a position is found, return its coordinates and exit loop.\r\n if isinstance(self.island.occupant(x,y,z), type_search):\r\n return (x,y,z), self.island.occupant(x,y,z).name\r\n break\r\n \r\n elif s_range==1:\r\n search_list = self.check_grid_list\r\n return_dict = {} # To hold all valid positions.\r\n for elem in search_list:\r\n x = self.x + elem[0] # neighbouring x,y,z coordinates.\r\n y = self.y + elem[1]\r\n z = self.z + elem[2]\r\n # If the position is outside the 3D grid, choose another one.\r\n if not 0 <= x < self.island.grid_size_x or \\\r\n not 0 <= y < self.island.grid_size_y or \\\r\n not 0 <= z < self.island.grid_size_z: continue\r\n # If a position is found, append to return_list.\r\n if isinstance(self.island.occupant(x,y,z), type_search):\r\n return_dict[(x,y,z)]=self.island.occupant(x,y,z).name\r\n return return_dict\r\n \r\n def get_position(self,coordinate=(0,0,0),other_type=None):\r\n assert isinstance(coordinate, tuple), \"Error in get_position\"\r\n position_set = set()\r\n for elem in self.check_grid_list:\r\n x = elem[0] + coordinate[0]\r\n y = elem[1] + coordinate[1]\r\n z = elem[2] + coordinate[2]\r\n if 0 <= x < self.island.grid_size_x and \\\r\n 0 <= y < self.island.grid_size_y and \\\r\n 0 <= z < self.island.grid_size_z:\r\n if other_type is None:\r\n if isinstance(self, Wolf) or isinstance(self, Rabbit):\r\n assert coordinate[2]==0, \"Error in get_position\"\r\n position_set.add((x, y, coordinate[2]))\r\n elif isinstance(self, Eagle) or isinstance(self, Pigeon):\r\n position_set.add((x, y, z))\r\n else: \r\n if other_type==\"W\" or 
other_type==\"r\":\r\n assert coordinate[2]==0, \"Error in get_position\"\r\n position_set.add((x, y, coordinate[2]))\r\n else:\r\n position_set.add((x, y, z))\r\n return position_set\r\n \r\n ## Secondary and more strategic move method\r\n def move_towards(self, search_type=int):\r\n \"\"\" Search for prey or predator at two grids away.\r\n If one present, move according to type. \r\n \"\"\"\r\n # This is done using sets. In particular, the reachable positions\r\n # of an animal is stored in a set. The predator will move to one of the\r\n # common positions assessible by both animals.\r\n # Those positions are found using set intersections.\r\n if self.search(type_search=search_type, s_range=2) and not self.moved:\r\n position, name = self.search(type_search=search_type, s_range=2)\r\n move_range = self.get_position(coordinate=(self.x,self.y,self.z)) \\\r\n & self.get_position(coordinate=position, other_type=name)\r\n for val in move_range:\r\n assert abs(self.x-val[0])<=1 and abs(self.y-val[1])<=1 \\\r\n and abs(self.z-val[2])<=1, \"Error in move\"\r\n if isinstance(self.island.occupant(val[0],val[1],val[2]), int): \r\n #print('Move_towards, {} to {}; from {},{},{} to {},{},{}'.format(\r\n # self.name,name,self.x,self.y,self.z,\r\n # val[0],val[1],val[2])) #debug\r\n self.island.remove(self) # Remove instance from current spot.\r\n self.x = val[0] # New x,y,z coordinates.\r\n self.y = val[1]\r\n self.z = val[2]\r\n self.island.register(self) # Register instance at new coordinates.\r\n self.moved=True\r\n break\r\n \r\n def move_away(self, search_type=int):\r\n \"\"\" A Prey looks for nearby (i.e. a grid away) Predators and move\r\n away from them.\r\n \"\"\"\r\n # This is also done with sets, similar to the move_towards method.\r\n # However, the position choices are created by the differece of sets.\r\n location_dict = self.search(type_search=search_type, s_range=1)\r\n if location_dict and not self.moved:\r\n move_range = self.get_position(coordinate=(self.x, self.y, self.z))\r\n for location, name in location_dict.items():\r\n move_range = move_range.difference(self.get_position(\r\n coordinate=location, other_type=name))\r\n if move_range:\r\n for val in move_range:\r\n assert abs(self.x-val[0])<=1 and \\\r\n abs(self.y-val[1])<=1 and \\\r\n abs(self.z-val[2])<=1, \"Error in move\"\r\n if isinstance(self.island.occupant(val[0],val[1],val[2]), int): \r\n #print('Move_away, {} from {}; from {},{},{} to {},{},{}'.format(\r\n # self.name, name, self.x,self.y,self.z,\r\n # val[0],val[1],val[2])) #debug\r\n self.island.remove(self) # Remove instance from current spot.\r\n self.x = val[0] # New x,y,z coordinates.\r\n self.y = val[1]\r\n self.z = val[2]\r\n self.island.register(self) # Register instance at new coordinates.\r\n self.moved=True\r\n break\r\n \r\n def move(self):\r\n ''' Move to an open, neighbouring position\r\n '''\r\n # An animal is only allowed to move once per clock tick. 
\r\n # Look for an empty cell, i.e., cell occupied by 0.\r\n location = self.check_grid(int)\r\n if location and not self.moved:\r\n assert abs(self.x-location[0])<=1 and abs(self.y-location[1])<=1 \\\r\n and abs(self.z-location[2])<=1, \"Error in move\"\r\n #print('Move, {}, from {},{},{} to {},{},{}'.format(\r\n # self.name,self.x,self.y,self.z,\r\n # location[0],location[1],location[2])) #debug\r\n self.island.remove(self) # Remove instance from current spot.\r\n self.x = location[0] # New x,y,z coordinates.\r\n self.y = location[1]\r\n self.z = location[2]\r\n self.island.register(self) # Register instance at new coordinates.\r\n self.moved=True # Change the moved flag.\r\n\r\n def clear_moved_flag(self):\r\n \"\"\" Change the animal's moved attribute to False.\r\n \"\"\"\r\n self.moved=False\r\n\r\n def breed(self):\r\n ''' Breed a new Animal at an empty neighbouring location.\r\n If no empty position available, wait.\r\n '''\r\n if self.breed_clock <= 0:\r\n location = self.check_grid(int)\r\n if location:\r\n # Register the new animal at the empty position.\r\n # Increase the instance's offspring count.\r\n # Reset breed_clock.\r\n self.breed_clock = self.breed_time \r\n the_class = self.__class__\r\n new_animal = the_class(\r\n self.island,x=location[0],y=location[1],z=location[2])\r\n self.island.register(new_animal) \r\n self.offspring += 1\r\n #print('{} Breeding {},{},{}'.format(\r\n # str(self.name),self.x,self.y,self.z)) # debug\r\n\r\nclass Prey(Animal):\r\n def __init__(self, island, x=0,y=0,z=0,s=\"Prey\"):\r\n \"\"\"Initialise the Prey class as a subclass of Animal.\r\n Prey will inherit all methods from its parent class.\r\n \"\"\"\r\n Animal.__init__(self,island,x,y,z,s)\r\n \r\n def clock_tick(self):\r\n '''Prey updates its local breed clock and life_time\r\n '''\r\n self.breed_clock -= 1\r\n self.starve_clock-= 1\r\n self.life_time += 1\r\n #print('Tick Prey {},{},{} breed:{} life:{}'.format(\r\n # self.x,self.y,self.z,self.breed_clock,self.life_time)) # debug\r\n if self.starve_clock <= 0 or self.life_time >= self.life_max:\r\n self.island.remove(self)\r\n self.island.life_dict[self.name].append(self.life_time)\r\n self.island.offspring_dict[self.name].append(self.offspring) \r\n #print('Death, {} at {},{},{}, life={}'.format(\r\n # self.name,self.x,self.y,self.z,self.life_time)) # Debug\r\n \r\n def feed(self, plant_type):\r\n ''' Prey looks at the offset locations for Plant.\r\n If found, it moves to that location and updates its starve clock.\r\n The Plant's eaten attribute is updated.\r\n '''\r\n # Eating involves moving; it can only be performed once per clock tick.\r\n if not self.moved:\r\n plant = self.check_grid(plant_type)\r\n if plant and not self.island.occupant(plant[0],\r\n plant[1],plant[2]).eaten:\r\n assert self.island.occupant(plant[0],plant[1],plant[2]).eaten==False\r\n #print('Eating: {} at {},{},{} {} at {},{},{}'.format(\r\n # str(self.name),self.x,self.y,self.z,\r\n # self.island.occupant(location[0],location[1],location[2]),\r\n # location[0],location[1],location[2])) # debug\r\n self.island.occupant(plant[0],plant[1],plant[2]).eaten=True\r\n self.starve_clock=self.starve_time\r\n \r\n def reposition(self):\r\n \"\"\" Move away from predators that are nearby. 
\r\n \"\"\"\r\n self.move_away(search_type=Predator) or \\\r\n self.move_towards(search_type=Plant)\r\n\r\nclass Predator(Animal):\r\n def __init__(self, island, x=0,y=0,z=0,s=\"Pred\"):\r\n \"\"\"Initialise the Predator class as a subclass of Animal.\r\n \"\"\" \r\n Animal.__init__(self,island,x,y,z,s)\r\n\r\n def clock_tick(self):\r\n ''' Predator updates breeding, starving and life_time\r\n '''\r\n self.breed_clock -= 1\r\n self.starve_clock-= 1\r\n self.life_time += 1\r\n #print('Tick Predator {},{},{} breed:{} starve: {} life:{}'.format(\r\n # self.x,self.y,self.z,\r\n # self.breed_clock,self.starve_clock,self.life_time)) # Debug\r\n \r\n # If the predator's starve clock is 0, remove it from Island, \r\n # register its data to Island's offspring_dict and life_dict.\r\n if self.starve_clock <= 0 or self.life_time >= self.life_max:\r\n self.island.remove(self)\r\n self.island.life_dict[self.name].append(self.life_time)\r\n self.island.offspring_dict[self.name].append(self.offspring) \r\n #print('Death, {} at {},{},{}, life={}'.format(\r\n # self.name,self.x,self.y,self.z,self.life_time)) # Debug\r\n \r\n def hunt(self, prey_type):\r\n ''' Predator looks at the offset locations for Prey.\r\n If found, it moves to that location and updates its starve clock.\r\n The Prey is removed.\r\n Register the data of the eaten prey to offspring_dict and life_dict.\r\n '''\r\n # Eating involves moving; it can only be performed once per clock tick.\r\n if not self.moved:\r\n location = self.check_grid(prey_type)\r\n if location:\r\n #print('Eating: {} at {},{},{} {} at {},{},{}'.format(\r\n # str(self.name),self.x,self.y,self.z,\r\n # self.island.occupant(location[0],location[1],location[2]),\r\n # location[0],location[1],location[2])) # debug\r\n \r\n # Add the statistics of the Prey animal to appropriate Island attributes.\r\n # Life-time:\r\n self.island.life_dict[str(self.island.occupant(\r\n location[0],location[1],location[2]))].append(\r\n self.island.occupant(\r\n location[0],location[1],location[2]).life_time) \r\n # Number of offspring:\r\n self.island.offspring_dict[str(self.island.occupant(\r\n location[0],location[1],location[2]))].append(\r\n self.island.occupant(\r\n location[0],location[1],location[2]).offspring) \r\n\r\n # Remove the eaten animal and move the Predator instance to its position.\r\n self.island.remove(self.island.occupant(location[0],location[1],location[2]))\r\n self.island.remove(self)\r\n self.x=location[0]\r\n self.y=location[1]\r\n self.z=location[2]\r\n self.island.register(self)\r\n self.starve_clock=self.starve_time\r\n self.moved=True\r\n\r\nclass Eagle (Predator):\r\n def __init__(self, island, x=0, y=0, z=0, s=\"E\"):\r\n \"\"\"Initialise the Eagle class as a subclass of Predator.\r\n Methods of the Predator class will be inherited.\r\n Add life_time, offspring breed_clock and starve_clock \r\n attributes to every instance.\r\n \"\"\" \r\n Predator.__init__(self,island,x,y,z,s)\r\n self.breed_clock = self.breed_time\r\n self.starve_clock = self.starve_time\r\n self.life_max = self.max_life - random.randint(0,2)\r\n #self.move_clock, self.life_time, self.offspring = 0, 0, 0\r\n \r\n\r\n ## Eat based on eat----------------------------------------------------\r\n def eat(self):\r\n ''' Eagles can eat pigeons and rabbits.\r\n '''\r\n self.hunt(prey_type = Prey)\r\n\r\n def reposition(self):\r\n \"\"\" Move towards preys and away from wolves that are two grids away. 
\r\n \"\"\"\r\n self.move_towards(search_type=Prey) or self.move_away(search_type=Wolf)\r\n \r\nclass Wolf (Predator):\r\n def __init__(self, island, x=0, y=0, z=0, s=\"W\"):\r\n \"\"\"Initialise the Wolf class as a subclass of Predator.\r\n Wolf inherits methods from Predator.\r\n Add life_time, offspring, breed_clock and starve_clock \r\n attributes to every instance.\r\n \"\"\" \r\n Predator.__init__(self,island,x,y,z,s)\r\n self.breed_clock = self.breed_time\r\n self.starve_clock = self.starve_time\r\n self.life_max = self.max_life - random.randint(0,2)\r\n #self.move_clock, self.life_time, self.offspring = 0, 0, 0\r\n \r\n\r\n ## Eat method based on the eat method\r\n def eat(self):\r\n ''' Wolves can eat rabbits, eagles and pigeons.\r\n '''\r\n self.hunt(prey_type=Prey) or self.hunt(prey_type=Eagle)\r\n \r\n \r\n ## Secondary move method\r\n def reposition(self):\r\n \"\"\" Move towards preys that are two grids away. \r\n \"\"\"\r\n self.move_towards(search_type=Prey) or self.move_towards(search_type=Eagle)\r\n\r\nclass Rabbit (Prey):\r\n def __init__(self, island, x=0, y=0, z=0, s=\"r\"):\r\n \"\"\"Initialise the Rabbit class as a subclass of Prey.\r\n Rabbit inherits from Prey.\r\n Add life_time, offspring breed_clock attributes to every instance.\r\n \"\"\" \r\n Prey.__init__(self,island,x,y,z,s)\r\n self.breed_clock = self.breed_time\r\n self.starve_clock= self.starve_time\r\n self.life_max = self.max_life - random.randint(0,2)\r\n #self.move_clock, self.life_time, self.offspring = 0, 0, 0\r\n\r\n def eat(self):\r\n self.feed(plant_type=Plant)\r\n \r\nclass Pigeon (Prey):\r\n def __init__(self, island, x=0, y=0, z=0, s=\"p\"):\r\n \"\"\"Initialise the Rabbit class as a subclass of Prey.\r\n Pigeon inherits from Prey.\r\n Add life_time, offspring breed_clock attributes to every instance.\r\n \"\"\" \r\n Prey.__init__(self,island,x,y,z,s)\r\n self.breed_clock = self.breed_time\r\n self.starve_clock= self.starve_time\r\n self.life_max = self.max_life - random.randint(0,2)\r\n #self.move_clock, self.life_time, self.offspring = 0, 0, 0\r\n\r\n def eat(self):\r\n self.feed(plant_type=Fruit)\r\n \r\ndef animation(data, ax, plot=None,x=0,y=0,z=0):\r\n assert isinstance(data,dict)\r\n if plot:\r\n #plot.remove()\r\n ax.cla() # Clear the axis for re-drawing.\r\n ax.set_xlabel('X')\r\n ax.set_ylabel('Y')\r\n ax.set_zlabel('Z')\r\n ax.set_xlim((0, x))\r\n ax.set_ylim((0, y))\r\n ax.set_zlim((0, z))\r\n ax.set_autoscale_on(False)\r\n # Iterate through the stat-dictionary to update all animals.\r\n # The annotation for each animal shows its name, life-time and number of offspring.\r\n colour_dict = {\"W\":\"b\", \"E\":\"y\", \"p\":\"r\", \"r\":\"c\", \"g\":\"g\", \"f\":\"m\"}\r\n indicators = {\"g\":{True:\"k\", False:\"g\"}, \"f\":{True:\"k\", False:\"m\"}}\r\n size_dict = {\"W\":700, \"E\":650, \"p\":600, \"r\":600, \"g\":45, \"f\":45}\r\n for name, stats in data.items():\r\n if name in [\"W\",\"E\",\"p\",\"r\"]:\r\n for i in range(len(stats[0])):\r\n plot = ax.scatter(stats[0][i], stats[1][i], stats[2][i], \r\n marker=\"${} ({},{})$\".format(name,stats[3][i],\r\n stats[4][i]), color=colour_dict[name], s=size_dict[name])\r\n elif name in [\"g\",\"f\"]:\r\n for i in range(len(stats[0])):\r\n plot = ax.scatter(stats[0][i], stats[1][i], stats[2][i],\r\n marker=\".\", color=indicators[name][stats[4][i]], \r\n s=size_dict[name])\r\n try:\r\n plt.pause(0.01) # Pause the plot for it to update.\r\n except Exception as e: print(e) # do some logging \r\n return plot\r\n\r\ndef print_stat(island=None, 
animal_list=[]):\r\n \"\"\" Add the data of the animals that are still alive \r\n to appropriate Island attributes.\r\n Loop through every animal and print associated results.\"\"\"\r\n island.count_animal(add_stat=True)\r\n for elem in animal_list:\r\n stat_list = island.cal_stat(type=elem)\r\n print(\"\\nSimulation statistics for {}\".format(elem))\r\n print(\"Number of {} {:<25} {}\".format(\r\n elem,\"lived during simulation:\", stat_list[0]))\r\n print(\"{:<37} {:<5}\".format(\"Max life_time:\",stat_list[1]))\r\n print(\"{:<37} {:<5}\".format(\"Min life_time:\",stat_list[2]))\r\n print(\"{:<37} {:<5.3f}\".format(\"Average life_time:\",stat_list[3]))\r\n print(\"{:<37} {:<6.3f}\".format(\"Median life_time:\",stat_list[4]))\r\n print(\"{:<37} {:<5}\".format(\"Min number of offspring:\",stat_list[5]))\r\n print(\"{:<37} {:<5}\".format(\"Max number of offspring:\",stat_list[6]))\r\n print(\"{:<37} {:<5.3f}\".format(\"Average number of offspring:\",stat_list[7]))\r\n print(\"{:<37} {:<6.3f}\\n\".format(\"Median number of offspring:\",stat_list[8]))\r\n #print(\"\\nlife\", island.life_dict,\"\\noffspring\",island.offspring_dict) # Debug\r\n #print(island)\r\n\r\ndef stat_plot(W_list=[], E_list=[], p_list=[], r_list=[]):\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1,1,1)\r\n ax.plot(p_list,\"r\",label=\"Pigeon\",linewidth=2)\r\n ax.plot(r_list,\"g\",label=\"Rabbit\",linewidth=2)\r\n ax.plot(E_list,\"y\",label=\"Eagle\",linewidth=2)\r\n ax.plot(W_list,\"b\",label=\"Wolf\",linewidth=2)\r\n ax.legend(bbox_to_anchor=(0.75, 0.7), loc=3, borderaxespad=0.)\r\n ax.set_ylabel(r\"$Number$ $of$ $Alive$ $Animals$\", fontsize = 15)\r\n ax.set_xlabel(r\"$Clock$ $Tick$\", fontsize=15)\r\n ax.set_title(r\"$Population$ $Fluctuation$\", fontsize=20)\r\n plt.show()\r\n\r\n\r\ndef main(eagle_breed_time =13, eagle_starve_time=13, initial_eagles=13,\r\n wolf_breed_time =15, wolf_starve_time=12, initial_wolves=10,\r\n eagle_max_life =45, wolf_max_life =40,\r\n pigeon_breed_time=8, pigeon_max_life =28, initial_pigeons=30,\r\n rabbit_breed_time=6, rabbit_max_life =32, initial_rabbits=32,\r\n rabbit_starve_time=10, pigeon_starve_time=12,\r\n initial_grass=90, initial_fruit=100, grass_regrowth=8,\r\n fruit_regrowth=7,\r\n x=30, y=30, z=3, ticks=800, stop=True,\r\n print_statistics=False, plot_statistics=True, anim=True):\r\n ''' Main simulation. Sets defaults, runs event loop,\r\n plots data at the end, creates 3D-scatter-animation and\r\n print statistics if enabled.\r\n '''\r\n ## Initialization of class attributes.------------------------------------\r\n Eagle.breed_time, Eagle.starve_time= eagle_breed_time, eagle_starve_time\r\n Wolf.breed_time, Wolf.starve_time = wolf_breed_time, wolf_starve_time\r\n Eagle.max_life, Wolf.max_life = eagle_max_life, wolf_max_life \r\n Pigeon.breed_time, Pigeon.max_life = pigeon_breed_time, pigeon_max_life\r\n Rabbit.breed_time, Rabbit.max_life = rabbit_breed_time, rabbit_max_life\r\n Rabbit.starve_time, Pigeon.starve_time = rabbit_starve_time, pigeon_starve_time\r\n Grass.regrowth, Fruit.regrowth = grass_regrowth, fruit_regrowth\r\n \r\n ## Create lists to hold statistics for graphing---------------------------\r\n # If plotting is enabled. 
\r\n if plot_statistics: \r\n pigeon_list, rabbit_list, eagle_list, wolf_list = [], [], [], []\r\n \r\n ## Initialise an Island, called land--------------------------------------\r\n land = Island(x,y,z, initial_wolves, initial_eagles, initial_rabbits,\r\n initial_pigeons, initial_grass, initial_fruit)\r\n island_size = land.size()\r\n #print(land)\r\n\r\n ## Create a figure-object on which the animation will occur----------------\r\n if anim: \r\n fig1 = plt.figure()\r\n ax1 = fig1.add_subplot(111, projection='3d') # Animation in 3D axes\r\n ax1.set_xlabel('X')\r\n ax1.set_ylabel('Y')\r\n ax1.set_zlabel('Z')\r\n ax1.set_xlim((0, x))\r\n ax1.set_ylim((0, y))\r\n ax1.set_zlim((0, z))\r\n ax1.set_autoscale_on(False)\r\n plt.ion()\r\n plt.show()\r\n W = None # Create an artist on which the scatter points are drawn. \r\n\r\n # Simulate using a for loop. \r\n # Every clock tick, look at every position of the Island,\r\n # do tasks if there is an animal there.\r\n # Every time entering the for-loop, clear the moved flags of all animals.\r\n # If one of the populations become zero, exit the loop.\r\n for i in range(ticks):\r\n if anim:\r\n W = animation(land.get_locations(), ax1, plot=W, x=x, y=y, z=z)\r\n land.clear_all_moved_flags()\r\n #print(land) # debug\r\n for x in range(island_size[0]):\r\n for y in range(island_size[1]):\r\n for z in range(island_size[2]):\r\n instance = land.occupant(x,y,z)\r\n # Execute actions according to the type of instance.\r\n if isinstance(instance, Plant): instance.clock_tick()\r\n if isinstance(instance, Animal) and not instance.moved:\r\n instance.eat() or instance.reposition() or \\\r\n instance.move() or instance.breed() or \\\r\n instance.clock_tick()\r\n \r\n # Record population data for plotting.\r\n # Append the counts to a list only if plotting is enabled.\r\n land.count_animal() \r\n if plot_statistics:\r\n pigeon_list.append(land.pigeon_count)\r\n rabbit_list.append(land.rabbit_count)\r\n eagle_list.append(land.eagle_count)\r\n wolf_list.append(land.wolf_count)\r\n # Exit the simulation if one population becomes 0.\r\n if stop and (land.rabbit_count == 0 or land.pigeon_count == 0):\r\n print('Lost the Prey population. Quiting.')\r\n break\r\n if stop and (land.wolf_count == 0 or land.eagle_count == 0):\r\n print('Lost the Predator population. 
Quitting.')\r\n            break\r\n        #print(land)\r\n        \r\n        # Print out every 50th cycle, see what's going on.\r\n        # Print the island, hold at the end of each cycle to get a look.\r\n        if not i%50 and i!=0:\r\n            print(\"ticks: {:>11}\".format(i))\r\n            print(\"pigeon_count: {:>4} \\nrabbit_count: {:>4} \"\r\n                  \"\\nwolf_count: {:>6} \\neagle_count {:>6}\\n\".format(\r\n                  land.pigeon_count, land.rabbit_count, \r\n                  land.wolf_count, land.eagle_count))\r\n\r\n    \r\n    ## Statistics analysis and printing\r\n    if print_statistics:\r\n        print_stat(island=land,animal_list=[\"W\",\"E\",\"p\",\"r\"])\r\n    \r\n    \r\n    ## Closing the 3D animation after exiting the loop.--------------------\r\n    if anim:\r\n        plt.ioff()\r\n        plt.pause(3) # Pause the plot at the end for inspection.\r\n        plt.close() # Close the plot.\r\n    \r\n    ## Plotting population fluctuation over the simulation.-----------------\r\n    if plot_statistics:\r\n        stat_plot(W_list=wolf_list, E_list=eagle_list, \r\n                  p_list=pigeon_list, r_list=rabbit_list)\r\n\r\nclass Timer(object):\r\n    def __init__(self, ctx=\"\"):\r\n        self.ctx = ctx\r\n        self.timer = default_timer\r\n    \r\n    def __enter__(self):\r\n        self.start = self.timer()\r\n        logging.info(\"start %s\", self.ctx)\r\n        return self\r\n    \r\n    def __exit__(self, *args):\r\n        end = self.timer()\r\n        self.elapsed = end - self.start # seconds\r\n        logging.info(\"%s took %d seconds\", self.ctx, self.elapsed)\r\n\r\nif __name__ == \"__main__\":\r\n    with Timer(\"Pred-Prey Simulation\"):\r\n        main()\r\n"
]
| [
[
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.figure"
]
]
|
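The event loop in the row above dispatches animal actions with short-circuit `or` chaining: each action returns a truthy value on success, so the chain tries actions in priority order and stops at the first one that succeeds. A minimal sketch of the idiom; the `Critter` class and its methods are hypothetical illustrations, not code from the repo.

class Critter:
    def __init__(self):
        self.fed = False

    def eat(self):
        # Succeeds only while the critter is hungry.
        if not self.fed:
            self.fed = True
            print("eating")
            return True
        return False

    def move(self):
        print("moving")
        return True

c = Critter()
c.eat() or c.move()  # prints "eating"; move() is never called
c.eat() or c.move()  # eat() fails now, so it prints "moving"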
RedisDL/RedisDL | [
"eca43e13617989a0cdd5cbcaff88dd7e1474daaf"
]
| [
"tests/flow/includes.py"
]
| [
"import json\nimport os\nimport random\nimport sys\nimport time\nfrom multiprocessing import Process, Pipe\nimport threading\n\nimport redis\nfrom numpy.random import default_rng\nimport numpy as np\nfrom skimage.io import imread\nfrom skimage.transform import resize\n\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"../../opt/readies\"))\nimport paella\n\nROOT = os.environ.get(\"ROOT\", None)\nTESTMOD_PATH = os.environ.get(\"TESTMOD\", None)\nMAX_ITERATIONS = 2 if os.environ.get(\"MAX_ITERATIONS\") == None else os.environ.get(\"MAX_ITERATIONS\")\nTEST_TF = os.environ.get(\"TEST_TF\") != \"0\" and os.environ.get(\"WITH_TF\") != \"0\"\nTEST_TFLITE = os.environ.get(\"TEST_TFLITE\") != \"0\" and os.environ.get(\"WITH_TFLITE\") != \"0\"\nTEST_PT = os.environ.get(\"TEST_PT\") != \"0\" and os.environ.get(\"WITH_PT\") != \"0\"\nTEST_ONNX = os.environ.get(\"TEST_ONNX\") != \"0\" and os.environ.get(\"WITH_ORT\") != \"0\"\nCOV = os.environ.get(\"COV\") != \"0\" and os.environ.get(\"COV\") != \"0\"\nDEVICE = os.environ.get('DEVICE', 'CPU').upper().encode('utf-8', 'ignore').decode('utf-8')\nprint(f'\\nRunning inference sessions on {DEVICE}\\n')\nVALGRIND = os.environ.get(\"VALGRIND\") == \"1\"\n# change this to make inference tests longer\nMAX_TRANSACTIONS=100\n\n\ndef get_connection(env, routing_hint):\n return env.getConnectionByKey(routing_hint, 'SET')\n\n# returns the test name and line number from which a helper function within this file was called.\n# For example, if an assertion fails in check_error_message function, and the caller function to check_error_message\n# is in tests_onnx.py line 25, this should return: \"tests_onnx:py:25\"\ndef get_caller_pos():\n return f'{sys._getframe(2).f_code.co_filename.split(\"/\")[-1]}:{sys._getframe(2).f_lineno}'\n\ndef ensureSlaveSynced(con, env, timeout_ms=0):\n if env.useSlaves:\n # When WAIT returns, all the previous write commands\n # sent in the context of the current connection are\n # guaranteed to be received by the number of replicas returned by WAIT.\n wait_reply = con.execute_command('WAIT', '1', timeout_ms)\n try:\n number_replicas = int(wait_reply)\n except Exception as ex:\n # Error in converting to int\n env.debugPring(str(ex), force=True)\n env.assertFalse(True, message=get_caller_pos())\n return\n env.assertEqual(number_replicas, 1)\n\n\n# Ensures command is sent and forced disconnect\n# after without waiting for the reply to be parsed\n# Usefull for checking behaviour of commands\n# that are run with background threads\ndef send_and_disconnect(cmd, red):\n pool = red.connection_pool\n con = pool.get_connection(cmd[0])\n ret = con.send_command(*cmd)\n con.disconnect()\n # For making sure that Redis will have the time to exit cleanly.\n time.sleep(1)\n return ret\n\n\ndef check_cuda():\n return os.system('which nvcc')\n\n\ndef info_to_dict(info):\n info = [el.decode('utf-8') if type(el) is bytes else el for el in info]\n return dict(zip(info[::2], info[1::2]))\n\n\ndef load_resnet_test_data():\n test_data_path = os.path.join(os.path.dirname(__file__), 'test_data/imagenet')\n labels_filename = os.path.join(test_data_path, 'imagenet_class_index.json')\n image_filename = os.path.join(test_data_path, 'dog.jpg')\n model_filename = os.path.join(test_data_path, 'resnet50.pb')\n script_filename = os.path.join(test_data_path, 'data_processing_script.txt')\n\n with open(script_filename, 'rb') as f:\n script = f.read()\n\n with open(model_filename, 'rb') as f:\n model_pb = f.read()\n\n with open(labels_filename, 'r') as f:\n labels = 
json.load(f)\n\n img_height, img_width = 224, 224\n\n img = imread(image_filename)\n img = resize(img, (img_height, img_width), mode='constant', anti_aliasing=True)\n img = img.astype(np.uint8)\n\n return model_pb, script, labels, img\n\ndef load_resnet_test_data_old():\n test_data_path = os.path.join(os.path.dirname(__file__), 'test_data/imagenet')\n labels_filename = os.path.join(test_data_path, 'imagenet_class_index.json')\n image_filename = os.path.join(test_data_path, 'dog.jpg')\n model_filename = os.path.join(test_data_path, 'resnet50.pb')\n script_filename = os.path.join(test_data_path, 'data_processing_script_old.txt')\n\n with open(script_filename, 'rb') as f:\n script = f.read()\n\n with open(model_filename, 'rb') as f:\n model_pb = f.read()\n\n with open(labels_filename, 'r') as f:\n labels = json.load(f)\n\n img_height, img_width = 224, 224\n\n img = imread(image_filename)\n img = resize(img, (img_height, img_width), mode='constant', anti_aliasing=True)\n img = img.astype(np.uint8)\n\n return model_pb, script, labels, img\n\n\ndef load_mobilenet_v1_test_data():\n test_data_path = os.path.join(os.path.dirname(__file__), 'test_data')\n labels_filename = os.path.join(test_data_path, 'imagenet_class_index.json')\n image_filename = os.path.join(test_data_path, 'panda.jpg')\n model_filename = os.path.join(test_data_path, 'mobilenet/mobilenet_v1_100_224_cpu_NxHxWxC.pb')\n input_var = 'input'\n output_var = 'MobilenetV1/Predictions/Reshape_1'\n\n with open(model_filename, 'rb') as f:\n model_pb = f.read()\n\n with open(labels_filename, 'r') as f:\n labels = json.load(f)\n\n img_height, img_width = 224, 224\n\n img = imread(image_filename)\n img = resize(img, (img_height, img_width), mode='constant', anti_aliasing=True)\n img = img.astype(np.float32)\n\n return model_pb, input_var, output_var, labels, img\n\n\ndef load_mobilenet_v2_test_data():\n test_data_path = os.path.join(os.path.dirname(__file__), 'test_data')\n labels_filename = os.path.join(test_data_path, 'imagenet_class_index.json')\n image_filename = os.path.join(test_data_path, 'panda.jpg')\n model_filename = os.path.join(test_data_path, 'mobilenet/mobilenet_v2_1.4_224_frozen.pb')\n input_var = 'input'\n output_var = 'MobilenetV2/Predictions/Reshape_1'\n\n with open(model_filename, 'rb') as f:\n model_pb = f.read()\n\n with open(labels_filename, 'r') as f:\n labels = json.load(f)\n\n img_height, img_width = 224, 224\n\n img = imread(image_filename)\n img = resize(img, (img_height, img_width), mode='constant', anti_aliasing=True)\n img = img.astype(np.float32)\n\n return model_pb, input_var, output_var, labels, img\n\ndef load_creditcardfraud_data(env,max_tensors=10000):\n test_data_path = os.path.join(os.path.dirname(__file__), 'test_data')\n model_filename = os.path.join(test_data_path, 'creditcardfraud.pb')\n creditcard_transaction_filename = os.path.join(test_data_path, 'creditcard_10K.csv')\n rg = default_rng()\n\n creditcard_transactions = np.genfromtxt(creditcard_transaction_filename, delimiter=',', dtype='float32', skip_header=1, usecols=range(0,30))\n\n creditcard_referencedata = []\n for tr in range(0,max_tensors):\n creditcard_referencedata.append(rg.random((1,256), dtype='float32'))\n\n with open(model_filename, 'rb') as f:\n model_pb = f.read()\n\n return model_pb, creditcard_transactions, creditcard_referencedata\n\n\ndef run_mobilenet(con, i, img, input_var, output_var):\n time.sleep(0.5 * random.randint(0, 10))\n con.execute_command('AI.TENSORSET', 'input{1}',\n 'FLOAT', 1, img.shape[1], img.shape[0], 
img.shape[2],\n 'BLOB', img.tobytes())\n\n con.execute_command('AI.MODELEXECUTE', 'mobilenet{1}',\n 'INPUTS', 1, 'input{1}', 'OUTPUTS', 1, 'output{1}')\n\n\ndef run_test_multiproc(env, routing_hint, n_procs, fn, args=tuple()):\n procs = []\n\n def tmpfn(i):\n con = get_connection(env, routing_hint)\n fn(con, i, *args)\n return 1\n\n for i in range(n_procs):\n p = Process(target=tmpfn, args=(i, ))\n p.start()\n procs.append(p)\n\n [p.join() for p in procs]\n\n\ndef get_parent_children_pipes(num_children):\n parent_end_pipes = []\n children_end_pipes = []\n\n # Create a pipe for every child process, so it can report number of successful runs.\n for i in range(num_children):\n parent_pipe, child_pipe = Pipe()\n parent_end_pipes.append(parent_pipe)\n children_end_pipes.append(child_pipe)\n\n return parent_end_pipes, children_end_pipes\n\n# Load a model/script from a file located in test_data dir.\ndef load_file_content(file_name):\n test_data_path = os.path.join(os.path.dirname(__file__), 'test_data')\n filename = os.path.join(test_data_path, file_name)\n with open(filename, 'rb') as f:\n return f.read()\n\n\ndef check_error_message(env, con, error_msg, *command, error_msg_is_substr=False, error_type=redis.exceptions.ResponseError):\n try:\n con.execute_command(*command)\n env.assertFalse(True, message=get_caller_pos())\n except Exception as exception:\n env.assertEqual(type(exception), error_type, message=get_caller_pos())\n if error_msg_is_substr:\n # We only verify that the given error_msg is a substring of the entire error message.\n env.assertTrue(str(exception).find(error_msg) >= 0, message=get_caller_pos())\n else:\n env.assertEqual(error_msg, str(exception), message=get_caller_pos())\n\n\ndef check_error(env, con, *command, error_type=redis.exceptions.ResponseError):\n try:\n con.execute_command(*command)\n env.assertFalse(True, message=get_caller_pos())\n except Exception as e:\n exception = e\n env.assertTrue(issubclass(type(exception), error_type), message=get_caller_pos())\n\n\n# Returns a dict with all the fields of a certain section from INFO MODULES command\ndef get_info_section(con, section):\n sections = ['ai_versions', 'ai_git', 'ai_load_time_configs', 'ai_backends_info', 'ai_cpu']\n section_ind = [i for i in range(len(sections)) if sections[i] == 'ai_'+section][0]\n return {k.split(\":\")[0]: k.split(\":\")[1]\n for k in con.execute_command(\"INFO MODULES\").decode().split(\"#\")[section_ind+2].split()[1:]}\n"
]
| [
[
"numpy.random.default_rng"
]
]
|
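The `info_to_dict` helper in the row above turns a flat Redis reply of alternating keys and values into a dict by slicing even and odd positions. A self-contained sketch; the sample reply below is made up for illustration.

def info_to_dict(info):
    # Decode any bytes elements, then pair info[0] with info[1], info[2] with info[3], ...
    info = [el.decode("utf-8") if isinstance(el, bytes) else el for el in info]
    return dict(zip(info[::2], info[1::2]))

reply = [b"ai_version", b"1.2.5", b"device", b"CPU"]  # hypothetical reply
assert info_to_dict(reply) == {"ai_version": "1.2.5", "device": "CPU"}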
oya163/nepali-ner | [
"9e8a853fd6eb912cdcd1195c6b3a5ee8a460b9e8"
]
| [
"train.py"
]
| [
"#!/usr/bin/env python3\n\n'''\n Trainer\n Author: Oyesh Mann Singh\n'''\n\nimport os\nfrom utils.eval import Evaluator\nfrom tqdm import tqdm, tqdm_notebook, tnrange\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom sklearn.metrics import accuracy_score\n\ntorch.manual_seed(163)\ntqdm.pandas(desc='Progress')\n\n# Decay functions to be used with lr_scheduler\ndef lr_decay_noam(config):\n return lambda t: (\n 10.0 * config.hidden_dim ** -0.5 * min(\n (t + 1) * config.learning_rate_warmup_steps ** -1.5, (t + 1) ** -0.5))\n\n\ndef lr_decay_exp(config):\n return lambda t: config.learning_rate_falloff ** t\n\n\n# Map names to lr decay functions\nlr_decay_map = {\n 'noam': lr_decay_noam,\n 'exp': lr_decay_exp\n}\n\n\nclass Trainer:\n def __init__(self, config, logger, dataloader, model, k):\n self.config = config\n self.logger = logger\n self.dataloader = dataloader\n self.verbose = config.verbose\n self.use_pos = config.use_pos\n\n self.train_dl, self.val_dl, self.test_dl = dataloader.load_data(batch_size=config.batch_size)\n\n ### DO NOT DELETE\n ### DEBUGGING PURPOSE\n # sample = next(iter(self.train_dl))\n # print(sample.TEXT)\n # print(sample.LABEL)\n # print(sample.POS)\n\n self.train_dlen = len(self.train_dl)\n self.val_dlen = len(self.val_dl)\n self.test_dlen = len(self.test_dl)\n\n self.model = model\n self.epochs = config.epochs\n\n self.loss_fn = nn.NLLLoss()\n\n self.opt = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),\n lr=config.learning_rate,\n weight_decay=config.weight_decay)\n\n self.lr_scheduler_step = self.lr_scheduler_epoch = None\n\n # Set up learing rate decay scheme\n if config.use_lr_decay:\n if '_' not in config.lr_rate_decay:\n raise ValueError(\"Malformed learning_rate_decay\")\n lrd_scheme, lrd_range = config.lr_rate_decay.split('_')\n\n if lrd_scheme not in lr_decay_map:\n raise ValueError(\"Unknown lr decay scheme {}\".format(lrd_scheme))\n\n lrd_func = lr_decay_map[lrd_scheme]\n lr_scheduler = optim.lr_scheduler.LambdaLR(\n self.opt,\n lrd_func(config),\n last_epoch=-1\n )\n # For each scheme, decay can happen every step or every epoch\n if lrd_range == 'epoch':\n self.lr_scheduler_epoch = lr_scheduler\n elif lrd_range == 'step':\n self.lr_scheduler_step = lr_scheduler\n else:\n raise ValueError(\"Unknown lr decay range {}\".format(lrd_range))\n\n self.k = k\n self.model_name = config.model_name + self.k\n self.file_name = self.model_name + '.pth'\n self.model_file = os.path.join(config.output_dir, self.file_name)\n\n self.total_train_loss = []\n self.total_train_acc = []\n self.total_val_loss = []\n self.total_val_acc = []\n\n self.early_max_patience = config.early_max_patience\n\n def load_checkpoint(self):\n checkpoint = torch.load(self.model_file)\n self.model.load_state_dict(checkpoint['state_dict'])\n self.opt = checkpoint['opt']\n self.opt.load_state_dict(checkpoint['opt_state'])\n self.total_train_loss = checkpoint['train_loss']\n self.total_train_acc = checkpoint['train_acc']\n self.total_val_loss = checkpoint['val_loss']\n self.total_val_acc = checkpoint['val_acc']\n self.epochs = checkpoint['epochs']\n\n def save_checkpoint(self):\n save_parameters = {'state_dict': self.model.state_dict(),\n 'opt': self.opt,\n 'opt_state': self.opt.state_dict(),\n 'train_loss': self.total_train_loss,\n 'train_acc': self.total_train_acc,\n 'val_loss': self.total_val_loss,\n 'val_acc': self.total_val_acc,\n 'epochs': self.epochs}\n torch.save(save_parameters, self.model_file)\n\n def fit(self):\n prev_lstm_val_acc = 0.0\n 
prev_val_loss = 100.0\n counter = 0\n patience_limit = 10\n\n for epoch in tnrange(0, self.epochs):\n y_true_train = list()\n y_pred_train = list()\n total_loss_train = 0\n\n t = tqdm(iter(self.train_dl), leave=False, total=self.train_dlen)\n for (k, v) in t:\n t.set_description(f'Epoch {epoch + 1}')\n self.model.train()\n\n self.opt.zero_grad()\n\n if self.use_pos:\n (X, p, y) = k\n pred = self.model(X, p)\n else:\n (X, y) = k\n pred = self.model(X, None)\n\n y = y.view(-1)\n loss = self.loss_fn(pred, y)\n loss.backward()\n self.opt.step()\n\n if self.lr_scheduler_step:\n self.lr_scheduler_step.step()\n\n t.set_postfix(loss=loss.item())\n pred_idx = torch.max(pred, dim=1)[1]\n\n y_true_train += list(y.cpu().data.numpy())\n y_pred_train += list(pred_idx.cpu().data.numpy())\n total_loss_train += loss.item()\n\n train_acc = accuracy_score(y_true_train, y_pred_train)\n train_loss = total_loss_train / self.train_dlen\n self.total_train_loss.append(train_loss)\n self.total_train_acc.append(train_acc)\n\n if self.val_dl:\n y_true_val = list()\n y_pred_val = list()\n total_loss_val = 0\n v = tqdm(iter(self.val_dl), leave=False)\n for (k, v) in v:\n if self.use_pos:\n (X, p, y) = k\n pred = self.model(X, p)\n else:\n (X, y) = k\n pred = self.model(X, None)\n y = y.view(-1)\n loss = self.loss_fn(pred, y)\n pred_idx = torch.max(pred, 1)[1]\n y_true_val += list(y.cpu().data.numpy())\n y_pred_val += list(pred_idx.cpu().data.numpy())\n total_loss_val += loss.item()\n\n valacc = accuracy_score(y_true_val, y_pred_val)\n valloss = total_loss_val / self.val_dlen\n self.logger.info(\n f'Epoch {epoch + 1}: train_loss: {train_loss:.4f} train_acc: {train_acc:.4f} | val_loss: {valloss:.4f} val_acc: {valacc:.4f}')\n else:\n self.logger.info(f'Epoch {epoch + 1}: train_loss: {train_loss:.4f} train_acc: {train_acc:.4f}')\n self.total_val_loss.append(valloss)\n self.total_val_acc.append(valacc)\n\n if self.lr_scheduler_epoch:\n self.lr_scheduler_epoch.step()\n\n if valloss < prev_val_loss:\n self.save_checkpoint()\n prev_val_loss = valloss\n counter = 0\n self.logger.info(\"Best model saved!!!\")\n else:\n counter += 1\n\n if counter >= self.early_max_patience:\n self.logger.info(\"Training stopped because maximum tolerance reached!!!\")\n break\n\n # Predict\n def predict(self):\n self.model.eval()\n evaluate = Evaluator(self.config, self.logger, self.model, self.dataloader, self.model_name)\n self.logger.info(\"Writing results\")\n evaluate.write_results()\n self.logger.info(\"Evaluate results\")\n acc, prec, rec, f1 = evaluate.conll_eval()\n return (acc, prec, rec, f1)\n\n # Infer\n def infer(self, sent):\n \"\"\"\n Prints the result\n \"\"\"\n evaluate = Evaluator(self.config, self.logger, self.model, self.dataloader, self.model_name)\n return evaluate.infer(sent)\n"
]
| [
[
"torch.nn.NLLLoss",
"torch.max",
"torch.load",
"torch.manual_seed",
"torch.save",
"sklearn.metrics.accuracy_score"
]
]
|
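The Trainer in the row above wires its decay lambdas into `torch.optim.lr_scheduler.LambdaLR`, stepping either per batch or per epoch. A minimal sketch of that wiring with the 'exp' scheme; the linear model and the falloff value are placeholders standing in for the real model and `config.learning_rate_falloff`.

import torch
import torch.optim as optim

model = torch.nn.Linear(4, 2)                # placeholder model
opt = optim.Adam(model.parameters(), lr=0.1)
falloff = 0.9                                # stands in for config.learning_rate_falloff
scheduler = optim.lr_scheduler.LambdaLR(opt, lambda t: falloff ** t)

for epoch in range(3):
    opt.step()        # placeholder for a real training step
    scheduler.step()  # lr becomes 0.1 * falloff ** (epoch + 1)
    print(opt.param_groups[0]["lr"])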
Carlor87/naima | [
"1728b0ac18fab9e709816c868625e5ffbaab83b7"
]
| [
"docs/_static/RXJ1713_IC.py"
]
| [
"#!/usr/bin/env python\nimport numpy as np\nimport naima\nimport os\nimport sys\nimport astropy.units as u\nfrom astropy.io import ascii\n\n# Model definition\n\nfrom naima.models import InverseCompton, ExponentialCutoffPowerLaw\n\n\ndef ElectronIC(pars, data):\n\n # Match parameters to ECPL properties, and give them the appropriate units\n amplitude = pars[0] / u.eV\n alpha = pars[1]\n e_cutoff = (10 ** pars[2]) * u.TeV\n\n # Initialize instances of the particle distribution and radiative model\n ECPL = ExponentialCutoffPowerLaw(amplitude, 10.0 * u.TeV, alpha, e_cutoff)\n IC = InverseCompton(\n ECPL,\n seed_photon_fields=[\n \"CMB\",\n [\"FIR\", 26.5 * u.K, 0.415 * u.eV / u.cm ** 3],\n ],\n )\n\n # compute flux at the energies given in data['energy'], and convert to\n # units of flux data\n model = IC.flux(data, distance=1.0 * u.kpc)\n\n # Save this realization of the particle distribution function\n elec_energy = np.logspace(11, 15, 100) * u.eV\n nelec = ECPL(elec_energy)\n\n # Compute and save total energy in electrons above 1 TeV\n We = IC.compute_We(Eemin=1 * u.TeV)\n\n # The first array returned will be compared to the observed spectrum for\n # fitting. All subsequent objects will be stores in the sampler metadata\n # blobs.\n return model, (elec_energy, nelec), We\n\n\n# Prior definition\n\n\ndef lnprior(pars):\n \"\"\"\n Return probability of parameter values according to prior knowledge.\n Parameter limits should be done here through uniform prior ditributions\n \"\"\"\n\n logprob = naima.uniform_prior(pars[0], 0.0, np.inf) + naima.uniform_prior(\n pars[1], -1, 5\n )\n\n return logprob\n\n\nif __name__ == \"__main__\":\n\n # Set initial parameters and labels\n\n p0 = np.array((1e30, 3.0, np.log10(30)))\n labels = [\"norm\", \"index\", \"log10(cutoff)\"]\n\n samplerf = \"RXJ1713_IC_sampler.hdf5\"\n if os.path.exists(samplerf) and \"onlyplot\" in sys.argv:\n sampler = naima.read_run(samplerf, modelfn=ElectronIC)\n else:\n # Read data\n data = ascii.read(\"../../examples/RXJ1713_HESS_2007.dat\")\n # Run sampler\n sampler, pos = naima.run_sampler(\n data_table=data,\n p0=p0,\n labels=labels,\n model=ElectronIC,\n prior=lnprior,\n nwalkers=128,\n nburn=100,\n nrun=100,\n threads=4,\n prefit=True,\n interactive=True,\n )\n # Save sampler\n naima.save_run(\"RXJ1713_IC_sampler.hdf5\", sampler)\n\n # Diagnostic plots\n\n naima.save_results_table(\"RXJ1713_IC\", sampler)\n from astropy.io import ascii\n\n results = ascii.read(\"RXJ1713_IC_results.ecsv\")\n results.remove_row(-1) # remove blob2\n for col in [\"median\", \"unc_lo\", \"unc_hi\"]:\n results[col].format = \".3g\"\n\n with open(\"RXJ1713_IC_results_table.txt\", \"w\") as f:\n info = []\n for key in [\"n_walkers\", \"n_run\", \"p0\", \"ML_pars\", \"MaxLogLikelihood\"]:\n info.append(\"{0:<18}: {1}\\n\".format(key, str(results.meta[key])))\n f.writelines(info)\n f.write(\"\\n\")\n f.write(\"------------- ------- ------- --------\\n\")\n results.write(f, format=\"ascii.fixed_width_two_line\")\n\n alabaster_width = 660\n alabaster_dpi = 125 * alabaster_width / 800\n\n print(\"Plotting chains...\")\n f = naima.plot_chain(sampler, 1)\n f.savefig(\"RXJ1713_IC_chain_index.png\", dpi=alabaster_dpi)\n f = naima.plot_chain(sampler, 2)\n f.savefig(\"RXJ1713_IC_chain_cutoff.png\", dpi=alabaster_dpi)\n\n e_range = [100 * u.GeV, 500 * u.TeV]\n\n # with samples\n print(\"Plotting samples...\")\n f = naima.plot_fit(sampler, 0, ML_info=False)\n f.axes[0].set_ylim(1e-13, 2e-10)\n f.axes[0].set_xlim(left=1e-1)\n f.tight_layout()\n 
f.subplots_adjust(hspace=0)\n f.savefig(\"RXJ1713_IC_model_samples.png\", dpi=alabaster_dpi)\n print(\"Plotting samples with e_range...\")\n f = naima.plot_fit(\n sampler, 0, e_range=e_range, ML_info=False, n_samples=500\n )\n f.axes[0].set_ylim(1e-13, 2e-10)\n f.axes[0].set_xlim(left=1e-1)\n f.tight_layout()\n f.subplots_adjust(hspace=0)\n f.savefig(\"RXJ1713_IC_model_samples_erange.png\", dpi=alabaster_dpi)\n\n # with confs\n print(\"Plotting confs...\")\n f = naima.plot_fit(\n sampler, 0, ML_info=False, confs=[3, 1], last_step=False\n )\n f.axes[0].set_ylim(1e-13, 2e-10)\n f.axes[0].set_xlim(left=1e-1)\n f.tight_layout()\n f.subplots_adjust(hspace=0)\n f.savefig(\"RXJ1713_IC_model_confs.png\", dpi=alabaster_dpi)\n print(\"Plotting confs with e_range...\")\n f = naima.plot_fit(\n sampler, 0, e_range=e_range, ML_info=False, confs=[3, 1]\n )\n f.axes[0].set_ylim(1e-13, 2e-10)\n f.axes[0].set_xlim(left=1e-1)\n f.tight_layout()\n f.subplots_adjust(hspace=0)\n f.savefig(\"RXJ1713_IC_model_confs_erange.png\", dpi=alabaster_dpi)\n\n print(\"Plotting corner...\")\n f = naima.plot_corner(sampler, bins=40)\n w = f.get_size_inches()[0]\n f.savefig(\"RXJ1713_IC_corner.png\", dpi=alabaster_width / w)\n\n print(\"Plotting blobs...\")\n f = naima.plot_blob(\n sampler,\n 1,\n ML_info=False,\n label=\"Electron energy distribution\",\n xlabel=r\"Electron energy [$\\mathrm{TeV}$]\",\n )\n f.axes[0].set_xlim(left=1e-1)\n f.tight_layout()\n f.savefig(\"RXJ1713_IC_pdist.png\", dpi=alabaster_dpi)\n f = naima.plot_blob(sampler, 2, label=r\"$W_e(E_e>1\\,\\mathrm{TeV})$\")\n f.savefig(\"RXJ1713_IC_We.png\", dpi=alabaster_dpi)\n"
]
| [
[
"numpy.logspace",
"numpy.log10"
]
]
|
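The `lnprior` in the row above composes independent uniform log-priors by adding them in log space: each term is 0 inside its bounds and -inf outside, so any out-of-bounds parameter rejects the whole sample. A dependency-free sketch; `uniform_prior` here is a stand-in for `naima.uniform_prior`, not the library implementation.

import numpy as np

def uniform_prior(value, umin, umax):
    # Log of a (unnormalised) uniform density: 0 inside the bounds, -inf outside.
    return 0.0 if umin <= value <= umax else -np.inf

def lnprior(pars):
    # Independent priors add in log space.
    return uniform_prior(pars[0], 0.0, np.inf) + uniform_prior(pars[1], -1, 5)

print(lnprior([1e30, 3.0]))  # 0.0  -> parameters allowed
print(lnprior([-1.0, 3.0]))  # -inf -> rejected by the sampler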
dragan-avramovski/tensorflow-som | [
"043f5a04c5b3a3662a76b99cc4e941603793ec62"
]
| [
"tf_som.py"
]
| [
"# MIT License\r\n#\r\n# Copyright (c) 2018 Chris Gorman\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n# =================================================================================\r\nimport tensorflow as tf\r\nimport tensorflow_transform as tft\r\nimport tensorflow_probability as tfp\r\nimport numpy as np\r\nfrom pathlib import Path\r\nimport logging\r\nfrom hilbert import decode, encode\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n__author__ = \"Chris Gorman\"\r\n__email__ = \"[email protected]\"\r\n\r\n\"\"\"\r\nAdapted from code by Sachin Joglekar\r\nhttps://codesachin.wordpress.com/2015/11/28/self-organizing-maps-with-googles-tensorflow/\r\n\"\"\"\r\n\r\n\r\nclass SelfOrganizingMap:\r\n \"\"\"\r\n 2-D rectangular grid planar Self-Organizing Map with Gaussian neighbourhood function\r\n \"\"\"\r\n\r\n def __init__(self, m, n, dim, max_epochs=100, initial_radius=None, batch_size=128, initial_learning_rate=0.1,\r\n graph=None, std_coeff=0.5, model_name='Self-Organizing-Map', softmax_activity=False, gpus=0,\r\n output_sensitivity=-1.0, input_tensor=None, input_dataset=None, session=None, checkpoint_dir=None, restore_path=None, weights_init=None):\r\n \"\"\"\r\n Initialize a self-organizing map on the tensorflow graph\r\n :param m: Number of rows of neurons\r\n :param n: Number of columns of neurons\r\n :param dim: Dimensionality of the input data\r\n :param max_epochs: Number of epochs to train for\r\n :param initial_radius: Starting value of the neighborhood radius - defaults to max(m, n) / 2.0\r\n :param batch_size: Number of input vectors to train on at a time\r\n :param initial_learning_rate: The starting learning rate of the SOM. Decreases linearly w/r/t `max_epochs`\r\n :param graph: The tensorflow graph to build the network on\r\n :param std_coeff: Coefficient of the standard deviation of the neighborhood function\r\n :param model_name: The name that will be given to the checkpoint files\r\n :param softmax_activity: If `True` the activity will be softmaxed to form a probability distribution\r\n :param gpus: The number of GPUs to train the SOM on\r\n :param output_sensitivity The constant controlling the width of the activity gaussian. 
Numbers further from zero\r\n elicit activity when distance is low, effectively introducing a threshold on the distance w/r/t activity.\r\n See the plot in the readme file for a little introduction.\r\n :param session: A `tf.Session()` for executing the graph\r\n :param input_dataset: holds the complete dataset, often used for PCA\r\n :param weights_init: HCV = Hilber Curve init, PCA = Principal Component Analysis\r\n \"\"\"\r\n self._m = abs(int(m))\r\n self._n = abs(int(n))\r\n self._dim = abs(int(dim))\r\n if initial_radius is None:\r\n self._initial_radius = max(m, n) / 2.0\r\n else:\r\n self._initial_radius = float(initial_radius)\r\n self._max_epochs = abs(int(max_epochs))\r\n self._batch_size = abs(int(batch_size))\r\n self._std_coeff = abs(float(std_coeff))\r\n self._softmax_activity = bool(softmax_activity)\r\n self._model_name = str(model_name)\r\n if output_sensitivity > 0:\r\n output_sensitivity *= -1\r\n elif output_sensitivity == 0:\r\n output_sensitivity = -1\r\n # The activity equation is kind of long so I'm naming this c for brevity\r\n self._c = float(output_sensitivity)\r\n self._sess = session\r\n self._checkpoint_dir = checkpoint_dir\r\n self._restore_path = restore_path\r\n self._gpus = int(abs(gpus))\r\n self._trained = False\r\n\r\n # Initialized later, just declaring up here for neatness and to avoid warnings\r\n self._weights = None\r\n self._location_vects = None\r\n self._input = None\r\n self._epoch = None\r\n self._training_op = None\r\n self._centroid_grid = None\r\n self._locations = None\r\n self._activity_op = None\r\n self._saver = None\r\n self._merged = None\r\n self._activity_merged = None\r\n # This will be the collection of summaries for this subgraph. Add new summaries to it and pass it to merge()\r\n self._summary_list = list()\r\n self._input_tensor = input_tensor\r\n\r\n self._input_dataset = input_dataset\r\n self._weights_init = str(weights_init)\r\n\r\n if graph is None:\r\n self._graph = tf.Graph()\r\n elif type(graph) is not tf.Graph:\r\n raise AttributeError('SOM graph input is not of type tf.Graph')\r\n else:\r\n self._graph = graph\r\n self._initial_learning_rate = initial_learning_rate\r\n # Create the ops and put them on the graph\r\n self._initialize_tf_graph()\r\n # If we want to reload from a save this will do that\r\n self._maybe_reload_from_checkpoint()\r\n\r\n def _save_checkpoint(self, global_step):\r\n \"\"\" Save a checkpoint file\r\n :param global_step: The current step of the network.\r\n \"\"\"\r\n if self._saver is None:\r\n # Create the saver object\r\n self._saver = tf.compat.v1.train.Saver()\r\n if self._checkpoint_dir is not None:\r\n output_name = Path(self._checkpoint_dir) / self._model_name\r\n self._saver.save(self._sess, output_name, global_step=global_step)\r\n\r\n def _maybe_reload_from_checkpoint(self):\r\n \"\"\" If the program was called with a checkpoint argument, load the variables from that.\r\n\r\n We are assuming that if it's loaded then it's already trained.\r\n \"\"\"\r\n if self._saver is None:\r\n self._saver = tf.compat.v1.train.Saver()\r\n\r\n if self._restore_path is not None:\r\n logging.info(\"Restoring variables from checkpoint file {}\".format(\r\n self._restore_path))\r\n self._saver.restore(self._sess, Path(self._restore_path))\r\n self._trained = True\r\n logging.info(\"Checkpoint loaded\")\r\n\r\n def _neuron_locations(self):\r\n \"\"\" Maps an absolute neuron index to a 2d vector for calculating the neighborhood function \"\"\"\r\n for i in range(self._m):\r\n for j in range(self._n):\r\n 
yield np.array([i, j])\r\n\r\n def _initialize_tf_graph(self):\r\n \"\"\" Initialize the SOM on the TensorFlow graph\r\n\r\n In multi-gpu mode it will duplicate the model across the GPUs and use the CPU to calculate the final\r\n weight updates.\r\n \"\"\"\r\n with self._graph.as_default(), tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope()), tf.device('/cpu:0'):\r\n # This list will contain the handles to the numerator and denominator tensors for each of the towers\r\n tower_updates = list()\r\n # This is used by all of the towers and needs to be fed to the graph, so let's put it here\r\n with tf.compat.v1.name_scope('Epoch'):\r\n self._epoch = tf.compat.v1.placeholder(\"float\", [], name=\"iter\")\r\n if self._gpus > 0:\r\n for i in range(self._gpus):\r\n # We only want the summaries of the last tower, so wipe it out each time\r\n self._summary_list = list()\r\n with tf.device('/gpu:{}'.format(i)):\r\n with tf.compat.v1.name_scope('Tower_{}'.format(i)) as scope:\r\n # Create the model on this tower and add the (numerator, denominator) tensors to the list\r\n tower_updates.append(self._tower_som())\r\n tf.compat.v1.get_variable_scope().reuse_variables()\r\n\r\n with tf.device('/gpu:{}'.format(self._gpus - 1)):\r\n # Put the activity op on the last GPU\r\n self._activity_op = self._make_activity_op(\r\n self._input_tensor)\r\n else:\r\n # Running CPU only\r\n with tf.compat.v1.name_scope(\"Tower_0\") as scope:\r\n tower_updates.append(self._tower_som())\r\n tf.compat.v1.get_variable_scope().reuse_variables()\r\n self._activity_op = self._make_activity_op(\r\n self._input_tensor)\r\n\r\n with tf.compat.v1.name_scope(\"Weight_Update\"):\r\n # Get the outputs\r\n numerators, denominators = zip(*tower_updates)\r\n # Add them up\r\n numerators = tf.reduce_sum(input_tensor=tf.stack(numerators), axis=0)\r\n denominators = tf.reduce_sum(input_tensor=tf.stack(denominators), axis=0)\r\n # Divide them\r\n new_weights = tf.divide(numerators, denominators)\r\n # Assign them\r\n self._training_op = tf.compat.v1.assign(self._weights, new_weights)\r\n\r\n def _tower_som(self):\r\n \"\"\" Build a single SOM tower on the TensorFlow graph \"\"\"\r\n # Randomly initialized weights for all neurons, stored together\r\n # as a matrix Variable of shape [num_neurons, input_dims]\r\n with tf.compat.v1.name_scope('Weights'):\r\n # Each tower will get its own copy of the weights variable. 
Since the towers are constructed sequentially,\r\n # the handle to the Tensors will be different for each tower even if we reference \"self\"\r\n \r\n #PCA INIT\r\n if self._weights_init == \"PCA\":\r\n self._weights = self._pca_weights_init()\r\n #HILBERT INIT\r\n elif self._weights_init == \"HCV\":\r\n self._weights = self._hcv_weight_init(2)\r\n #RANDOM INIT\r\n else:\r\n self._weights = tf.compat.v1.get_variable(name='weights',\r\n shape=[\r\n self._m * self._n, self._dim],\r\n initializer=tf.compat.v1.random_uniform_initializer(minval=0, maxval=1))\r\n\r\n with tf.compat.v1.name_scope('summaries'):\r\n # All summary ops are added to a list and then the merge() function is called at the end of\r\n # this method\r\n mean = tf.reduce_mean(input_tensor=self._weights)\r\n self._summary_list.append(tf.compat.v1.summary.scalar('mean', mean))\r\n with tf.compat.v1.name_scope('stdev'):\r\n stdev = tf.sqrt(tf.reduce_mean(\r\n input_tensor=tf.math.squared_difference(self._weights, mean)))\r\n self._summary_list.append(tf.compat.v1.summary.scalar('stdev', stdev))\r\n self._summary_list.append(tf.compat.v1.summary.scalar(\r\n 'max', tf.reduce_max(input_tensor=self._weights)))\r\n self._summary_list.append(tf.compat.v1.summary.scalar(\r\n 'min', tf.reduce_min(input_tensor=self._weights)))\r\n self._summary_list.append(\r\n tf.compat.v1.summary.histogram('histogram', self._weights))\r\n\r\n # Matrix of size [m*n, 2] for SOM grid locations of neurons.\r\n # Maps an index to an (x,y) coordinate of a neuron in the map for calculating the neighborhood distance\r\n self._location_vects = tf.constant(np.array(\r\n list(self._neuron_locations())), name='Location_Vectors')\r\n\r\n with tf.compat.v1.name_scope('Input'):\r\n self._input = tf.identity(self._input_tensor)\r\n\r\n # Start by computing the best matching units / winning units for each input vector in the batch.\r\n # Basically calculates the Euclidean distance between every\r\n # neuron's weight vector and the inputs, and returns the index of the neurons which give the least value\r\n # Since we are doing batch processing of the input, we need to calculate a BMU for each of the individual\r\n # inputs in the batch. 
Will have the shape [batch_size]\r\n\r\n # Oh also any time we call expand_dims it's almost always so we can make TF broadcast stuff properly\r\n with tf.compat.v1.name_scope('BMU_Indices'):\r\n # Distance between weights and the input vector\r\n # Note we are reducing along 2nd axis so we end up with a tensor of [batch_size, num_neurons]\r\n # corresponding to the distance between a particular input and each neuron in the map\r\n # Also note we are getting the squared distance because there's no point calling sqrt or tf.norm\r\n # if we're just doing a strict comparison\r\n squared_distance = tf.reduce_sum(\r\n input_tensor=tf.pow(tf.subtract(tf.expand_dims(self._weights, axis=0),\r\n tf.expand_dims(self._input, axis=1)), 2), axis=2)\r\n\r\n # Get the index of the minimum distance for each input item, shape will be [batch_size],\r\n bmu_indices = tf.argmin(input=squared_distance, axis=1)\r\n\r\n # This will extract the location of the BMU in the map for each input based on the BMU's indices\r\n with tf.compat.v1.name_scope('BMU_Locations'):\r\n # Using tf.gather we can use `bmu_indices` to index the location vectors directly\r\n bmu_locs = tf.reshape(\r\n tf.gather(self._location_vects, bmu_indices), [-1, 2])\r\n\r\n with tf.compat.v1.name_scope('Learning_Rate'):\r\n # With each epoch, the initial sigma value decreases linearly\r\n radius = tf.subtract(self._initial_radius,\r\n tf.multiply(self._epoch,\r\n tf.divide(tf.cast(tf.subtract(self._initial_radius, 1),\r\n tf.float32),\r\n tf.cast(tf.subtract(self._max_epochs, 1),\r\n tf.float32))))\r\n\r\n alpha = tf.multiply(self._initial_learning_rate,\r\n tf.subtract(1.0, tf.divide(tf.cast(self._epoch, tf.float32),\r\n tf.cast(self._max_epochs, tf.float32))))\r\n \r\n # Construct the op that will generate a matrix with learning rates for all neurons and all inputs,\r\n # based on iteration number and location to BMU\r\n\r\n # Start by getting the squared difference between each BMU location and every other unit in the map\r\n # bmu_locs is [batch_size, 2], i.e. the coordinates of the BMU for each input vector.\r\n # location vects shape should be [1, num_neurons, 2]\r\n # bmu_locs should be [batch_size, 1, 2]\r\n # Output needs to be [batch_size, num_neurons], i.e. 
a row vector of distances for each input item\r\n bmu_distance_squares = tf.reduce_sum(input_tensor=tf.pow(tf.subtract(\r\n tf.expand_dims(self._location_vects, axis=0),\r\n tf.expand_dims(bmu_locs, axis=1)), 2), axis=2)\r\n\r\n # Using the distances between each BMU, construct the Gaussian neighborhood function.\r\n # Basically, neurons which are close to the winner will move more than those further away.\r\n # The radius tensor decreases the width of the Gaussian over time, so early in training more\r\n # neurons will be affected by the winner and by the end of training only the winner will move.\r\n # This tensor will be of shape [batch_size, num_neurons] as well and will be the value multiplied to\r\n # each neuron based on its distance from the BMU for each input vector\r\n neighbourhood_func = tf.exp(tf.divide(tf.negative(tf.cast(\r\n bmu_distance_squares, \"float32\")), tf.multiply(\r\n tf.square(tf.multiply(radius, self._std_coeff)), 2)))\r\n\r\n # Finally multiply by the learning rate to decrease overall neuron movement over time\r\n learning_rate_op = tf.multiply(neighbourhood_func, alpha)\r\n\r\n # The batch formula for SOMs multiplies a neuron's neighborhood by all of the input vectors in the batch,\r\n # then divides that by just the sum of the neighborhood function for each of the inputs.\r\n # We are writing this in a way that performs that operation for each of the neurons in the map.\r\n with tf.compat.v1.name_scope('Update_Weights'):\r\n # The numerator needs to be shaped [num_neurons, dimensions] to represent the new weights\r\n # for each of the neurons. At this point, the learning rate tensor will be\r\n # shaped [batch_size, neurons].\r\n # The end result is that, for each neuron in the network, we use the learning\r\n # rate between it and each of the input vectors, to calculate a new set of weights.\r\n numerator = tf.reduce_sum(input_tensor=tf.multiply(tf.expand_dims(learning_rate_op, axis=-1),\r\n tf.expand_dims(self._input, axis=1)), axis=0)\r\n\r\n # The denominator is just the sum of the neighborhood functions for each neuron, so we get the sum\r\n # along axis 1 giving us an output shape of [num_neurons]. We then expand the dims so we can\r\n # broadcast for the division op. Again we transpose the learning rate tensor so it's\r\n # [num_neurons, batch_size] representing the learning rate of each neuron for each input vector\r\n denominator = tf.expand_dims(tf.reduce_sum(input_tensor=learning_rate_op,\r\n axis=0) + float(1e-12), axis=-1)\r\n\r\n # We on;y really care about summaries from one of the tower SOMs, so assign the merge op to\r\n # the last tower we make. Otherwise there's way too many on Tensorboard.\r\n self._merged = tf.compat.v1.summary.merge(self._summary_list)\r\n\r\n # With multi-gpu training we collect the results and do the weight assignment on the CPU\r\n return numerator, denominator\r\n\r\n def _make_activity_op(self, input_tensor):\r\n \"\"\" Creates the op for calculating the activity of a SOM\r\n :param input_tensor: A tensor to calculate the activity of. 
Must be of shape `[batch_size, dim]` where `dim` is\r\n the dimensionality of the SOM's weights.\r\n :return A handle to the newly created activity op:\r\n \"\"\"\r\n with self._graph.as_default():\r\n with tf.compat.v1.name_scope(\"Activity\"):\r\n # This constant controls the width of the gaussian.\r\n # The closer to 0 it is, the wider it is.\r\n c = tf.constant(self._c, dtype=\"float32\")\r\n # Get the euclidean distance between each neuron and the input vectors\r\n dist = tf.norm(tensor=tf.subtract(\r\n tf.expand_dims(self._weights, axis=0),\r\n tf.expand_dims(input_tensor, axis=1)),\r\n name=\"Distance\", axis=2) # [batch_size, neurons]\r\n\r\n # Calculate the Gaussian of the activity. Units with distances closer to 0 will have activities\r\n # closer to 1.\r\n activity = tf.exp(tf.multiply(\r\n tf.pow(dist, 2), c), name=\"Gaussian\")\r\n\r\n # Convert the activity into a softmax probability distribution\r\n if self._softmax_activity:\r\n activity = tf.divide(tf.exp(activity),\r\n tf.expand_dims(tf.reduce_sum(\r\n input_tensor=tf.exp(activity), axis=1), axis=-1),\r\n name=\"Softmax\")\r\n\r\n return tf.identity(activity, name=\"Output\")\r\n\r\n def get_activity_op(self):\r\n return self._activity_op\r\n\r\n def train(self, num_inputs, writer=None, step_offset=0):\r\n \"\"\" Train the network on the data provided by the input tensor.\r\n :param num_inputs: The total number of inputs in the data-set. Used to determine batches per epoch\r\n :param writer: The summary writer to add summaries to. This is created by the caller so when we stack layers\r\n we don't end up with duplicate outputs. If `None` then no summaries will be written.\r\n :param step_offset: The offset for the global step variable so I don't accidentally overwrite my summaries\r\n \"\"\"\r\n # Divide by num_gpus to avoid accidentally training on the same data a bunch of times\r\n if self._gpus > 0:\r\n batches_per_epoch = num_inputs // self._batch_size // self._gpus\r\n else:\r\n batches_per_epoch = num_inputs // self._batch_size\r\n total_batches = batches_per_epoch * self._max_epochs\r\n # Get how many batches constitute roughly 10 percent of the total for recording summaries\r\n summary_mod = int(0.1 * total_batches)\r\n global_step = step_offset\r\n\r\n logging.info(\"Training self-organizing Map\")\r\n for epoch in range(self._max_epochs):\r\n logging.info(\"Epoch: {}/{}\".format(epoch, self._max_epochs))\r\n for batch in range(batches_per_epoch):\r\n current_batch = batch + (batches_per_epoch * epoch)\r\n global_step = current_batch + step_offset\r\n percent_complete = current_batch / total_batches\r\n logging.debug(\"\\tBatch {}/{} - {:.2%} complete\".format(batch,\r\n batches_per_epoch, percent_complete))\r\n # Only do summaries when a SummaryWriter has been provided\r\n if writer:\r\n if current_batch > 0 and current_batch % summary_mod == 0:\r\n run_options = tf.compat.v1.RunOptions(\r\n trace_level=tf.compat.v1.RunOptions.FULL_TRACE)\r\n run_metadata = tf.compat.v1.RunMetadata()\r\n summary, _, _, = self._sess.run([self._merged, self._training_op,\r\n self._activity_op],\r\n feed_dict={\r\n self._epoch: epoch},\r\n options=run_options,\r\n run_metadata=run_metadata)\r\n writer.add_run_metadata(\r\n run_metadata, \"step_{}\".format(global_step))\r\n writer.add_summary(summary, global_step)\r\n self._save_checkpoint(global_step)\r\n else:\r\n summary, _ = self._sess.run([self._merged, self._training_op],\r\n feed_dict={self._epoch: epoch})\r\n writer.add_summary(summary, global_step)\r\n else:\r\n 
self._sess.run(self._training_op, feed_dict={\r\n                        self._epoch: epoch})\r\n\r\n        self._trained = True\r\n        return global_step\r\n\r\n    @property\r\n    def output_weights(self):\r\n        \"\"\" :return: The weights of the trained SOM as a NumPy array, or `None` if the SOM hasn't been trained \"\"\"\r\n        if self._trained:\r\n            return np.array(self._sess.run(self._weights))\r\n        else:\r\n            return None\r\n\r\n    def bmu_indices(self, dataset):\r\n        with tf.compat.v1.name_scope('BMU_Indices_Dataset'):\r\n            # This is the same code from _tower_som adapted to calculate all Best Matching Units for each item in the dataset\r\n            squared_distance = tf.reduce_sum(\r\n                input_tensor=tf.pow(tf.subtract(tf.expand_dims(self._weights, axis=0),\r\n                                                tf.expand_dims(dataset, axis=1)), 2), axis=2)\r\n\r\n            bmu_indices = tf.argmin(input=squared_distance, axis=1)\r\n            bmu_locs = tf.reshape(tf.gather(self._location_vects, bmu_indices), [-1, 2])\r\n            # The number of BMUs is the same as the number of items in the dataset\r\n            return np.array(self._sess.run(bmu_locs))\r\n\r\n    def _pca_weights_init(self):\r\n        \"\"\" Initializes the weights of the map to span the first two principal components.\r\n        Training a SOM with initial weight values based on the principal components makes the training process converge faster.\r\n        The data should be normalized prior to PCA initialization.\r\n        \"\"\"\r\n        # Uses the full dataset handed to the constructor, since the method is called without arguments.\r\n        dataset = self._input_dataset\r\n        if dataset.shape[1] == 1:\r\n            msg = 'At least 2 features are required for pca initialization'\r\n            raise ValueError(msg)\r\n\r\n        if self._m == 1 or self._n == 1:\r\n            msg = 'PCA requires the SOM map to have dimensions > 1 '\r\n            raise ValueError(msg)\r\n\r\n        # Calculate the covariance matrix for the dataset\r\n        tfcov = tfp.stats.covariance(dataset)\r\n        # Calculate the Eigen vectors and the Eigen values\r\n        eigen_values, eigen_vectors = tf.linalg.eigh(tfcov)\r\n        # Order them in descending order of eigenvalue\r\n        ev_order = tf.argsort(-eigen_values)\r\n\r\n        # Create evenly spaced values for the interval of -1 to 1\r\n        mspace = tf.Variable(tf.linspace(-1, 1, self._m), dtype=tf.float64)\r\n        nspace = tf.Variable(tf.linspace(-1, 1, self._n), dtype=tf.float64)\r\n\r\n        weights = list()\r\n        # Calculate the principal components by using the first two eigen vectors\r\n        for i in range(self._m):\r\n            for j in range(self._n):\r\n                weights.append(tf.add(tf.multiply(mspace[i], eigen_vectors[ev_order[0]]), tf.multiply(nspace[j], eigen_vectors[ev_order[1]])))\r\n\r\n        weights_tensor = tf.convert_to_tensor(weights)\r\n\r\n        weights_tensor = tf.cast(weights_tensor, tf.float32)\r\n        # Finally, return the new weights so the caller can assign them\r\n        return tf.Variable(weights_tensor)\r\n\r\n    def _hcv_weight_init(self, num_dims):\r\n        max_hilberts = np.arange(self._m * self._n)\r\n        hilbert_vectors = decode(max_hilberts, self._dim, num_dims)\r\n        scaler = MinMaxScaler()\r\n        hilbert_vectors = scaler.fit_transform(hilbert_vectors)\r\n        weights_tensor = tf.Variable(hilbert_vectors, dtype=tf.float32)\r\n\r\n        return weights_tensor\r\n\r\n    def quantization(self, dataset):\r\n        with tf.compat.v1.name_scope('QUAN'):\r\n            squared_distance = tf.reduce_sum(\r\n                input_tensor=tf.pow(tf.subtract(tf.expand_dims(self._weights, axis=0),\r\n                                                tf.expand_dims(dataset, axis=1)), 2), axis=2)\r\n\r\n            # Get the index of the minimum distance for each input item, shape will be [batch_size],\r\n            bmu_indices = tf.argmin(input=squared_distance, axis=1)\r\n            # Get the Weights vectors for the BMUs\r\n            bmu_weights = tf.gather(self._weights, bmu_indices)\r\n            return bmu_weights\r\n\r\n    def quantization_error(self, dataset):\r\n        norm_values 
= tf.norm(dataset - self.quantization(dataset), axis=1)\r\n\r\n q_error = tf.reduce_mean(input_tensor=norm_values)\r\n return self._sess.run(q_error)\r\n\r\n\r\n def topographic_error(self, dataset):\r\n with tf.compat.v1.name_scope('TE'):\r\n t = tf.constant(1.42, dtype=tf.float32)\r\n squared_distance = tf.reduce_sum(\r\n input_tensor=tf.pow(tf.subtract(tf.expand_dims(self._weights, axis=0),\r\n tf.expand_dims(dataset, axis=1)), 2), axis=2)\r\n \r\n b2mu_inds = tf.argsort(squared_distance, axis=1)[:, :2] \r\n\r\n bmu_locs = tf.gather(self._location_vects, b2mu_inds)\r\n diff = tf.cast(bmu_locs[:,1:]-bmu_locs[:,:-1], dtype=tf.float32) \r\n distance = tf.norm(diff, axis=1) \r\n distance = distance[distance > t]\r\n te = tf.math.reduce_mean(distance)\r\n return self._sess.run(te)"
]
| [
[
"tensorflow.convert_to_tensor",
"tensorflow.device",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.argsort",
"tensorflow.linspace",
"tensorflow.compat.v1.train.Saver",
"tensorflow.linalg.eigh",
"tensorflow.argmin",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.Graph",
"tensorflow.Variable",
"tensorflow.compat.v1.summary.merge",
"numpy.arange",
"tensorflow.divide",
"tensorflow.subtract",
"tensorflow.gather",
"tensorflow.compat.v1.get_variable_scope",
"tensorflow.compat.v1.name_scope",
"tensorflow.norm",
"tensorflow.pow",
"tensorflow.compat.v1.assign",
"tensorflow.identity",
"tensorflow.exp",
"tensorflow.compat.v1.RunOptions",
"tensorflow.compat.v1.summary.scalar",
"numpy.array",
"tensorflow.compat.v1.RunMetadata",
"tensorflow.reduce_max",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.math.squared_difference",
"tensorflow.expand_dims",
"tensorflow.math.reduce_mean",
"tensorflow.compat.v1.random_uniform_initializer",
"tensorflow.compat.v1.placeholder",
"tensorflow.reduce_min",
"tensorflow.compat.v1.summary.histogram"
]
]
|
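The PCA initialisation described in `_pca_weights_init` above spreads the map's grid coordinates along the two leading eigenvectors of the data covariance, so training starts roughly aligned with the data. A NumPy-only sketch of the same idea; the random dataset and map size are placeholders.

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(200, 5))   # placeholder dataset, 200 samples x 5 features
m, n = 4, 6                        # placeholder map dimensions

cov = np.cov(data, rowvar=False)
eigvals, eigvecs = np.linalg.eigh(cov)      # eigenvalues in ascending order
pc1, pc2 = eigvecs[:, -1], eigvecs[:, -2]   # two leading principal components

mspace = np.linspace(-1, 1, m)
nspace = np.linspace(-1, 1, n)
weights = np.array([mspace[i] * pc1 + nspace[j] * pc2
                    for i in range(m) for j in range(n)])
print(weights.shape)  # (24, 5) == (m * n, dim)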
ankushaggarwal/scipy | [
"eadc876441aa81bcc37d2e4b3d815502ae415004"
]
| [
"scipy/optimize/__init__.py"
]
| [
"\"\"\"\n=====================================================\nOptimization and root finding (:mod:`scipy.optimize`)\n=====================================================\n\n.. currentmodule:: scipy.optimize\n\nSciPy ``optimize`` provides functions for minimizing (or maximizing)\nobjective functions, possibly subject to constraints. It includes\nsolvers for nonlinear problems (with support for both local and global\noptimization algorithms), linear programing, constrained\nand nonlinear least-squares, root finding, and curve fitting.\n\nCommon functions and objects, shared across different solvers, are:\n\n.. autosummary::\n :toctree: generated/\n\n show_options - Show specific options optimization solvers.\n OptimizeResult - The optimization result returned by some optimizers.\n OptimizeWarning - The optimization encountered problems.\n\n\nOptimization\n============\n\nScalar functions optimization\n-----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n minimize_scalar - Interface for minimizers of univariate functions\n\nThe `minimize_scalar` function supports the following methods:\n\n.. toctree::\n\n optimize.minimize_scalar-brent\n optimize.minimize_scalar-bounded\n optimize.minimize_scalar-golden\n\nLocal (multivariate) optimization\n---------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n minimize - Interface for minimizers of multivariate functions.\n\nThe `minimize` function supports the following methods:\n\n.. toctree::\n\n optimize.minimize-neldermead\n optimize.minimize-powell\n optimize.minimize-cg\n optimize.minimize-bfgs\n optimize.minimize-newtoncg\n optimize.minimize-lbfgsb\n optimize.minimize-tnc\n optimize.minimize-cobyla\n optimize.minimize-slsqp\n optimize.minimize-trustconstr\n optimize.minimize-dogleg\n optimize.minimize-trustncg\n optimize.minimize-trustkrylov\n optimize.minimize-trustexact\n\nConstraints are passed to `minimize` function as a single object or\nas a list of objects from the following classes:\n\n.. autosummary::\n :toctree: generated/\n\n NonlinearConstraint - Class defining general nonlinear constraints.\n LinearConstraint - Class defining general linear constraints.\n\nSimple bound constraints are handled separately and there is a special class\nfor them:\n\n.. autosummary::\n :toctree: generated/\n\n Bounds - Bound constraints.\n\nQuasi-Newton strategies implementing `HessianUpdateStrategy`\ninterface can be used to approximate the Hessian in `minimize`\nfunction (available only for the 'trust-constr' method). Available\nquasi-Newton methods implementing this interface are:\n\n.. autosummary::\n :toctree: generated/\n\n BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.\n SR1 - Symmetric-rank-1 Hessian update strategy.\n\nGlobal optimization\n-------------------\n\n.. autosummary::\n :toctree: generated/\n\n basinhopping - Basinhopping stochastic optimizer.\n brute - Brute force searching optimizer.\n differential_evolution - stochastic minimization using differential evolution.\n\n shgo - simplicial homology global optimisation\n dual_annealing - Dual annealing stochastic optimizer.\n\n\nLeast-squares and curve fitting\n===============================\n\nNonlinear least-squares\n-----------------------\n\n.. autosummary::\n :toctree: generated/\n\n least_squares - Solve a nonlinear least-squares problem with bounds on the variables.\n\nLinear least-squares\n--------------------\n\n.. 
autosummary::\n :toctree: generated/\n\n nnls - Linear least-squares problem with non-negativity constraint.\n lsq_linear - Linear least-squares problem with bound constraints.\n\nCurve fitting\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n curve_fit -- Fit curve to a set of points.\n\nRoot finding\n============\n\nScalar functions\n----------------\n.. autosummary::\n :toctree: generated/\n\n root_scalar - Unified interface for nonlinear solvers of scalar functions.\n brentq - quadratic interpolation Brent method.\n brenth - Brent method, modified by Harris with hyperbolic extrapolation.\n ridder - Ridder's method.\n bisect - Bisection method.\n newton - Newton's method (also Secant, extended-Newton, and Halley's methods).\n toms748 - Alefeld, Potra & Shi Algorithm 748.\n RootResults - The root finding result returned by some root finders.\n\nThe `root_scalar` function supports the following methods:\n\n.. toctree::\n\n optimize.root_scalar-brentq\n optimize.root_scalar-brenth\n optimize.root_scalar-bisect\n optimize.root_scalar-ridder\n optimize.root_scalar-newton\n optimize.root_scalar-exnewton\n optimize.root_scalar-toms748\n optimize.root_scalar-secant\n optimize.root_scalar-halley\n\n\n\nThe table below lists situations and appropriate methods, along with\n*asymptotic* convergence rates per iteration (and per function evaluation)\nfor successful convergence to a simple root(*).\nBisection is the slowest of them all, adding one bit of accuracy for each\nfunction evaluation, but is guaranteed to converge.\nThe other bracketing methods all (eventually) increase the number of accurate\nbits by about 50% for every function evaluation.\nThe derivative-based methods, all built on `newton`, can converge quite quickly\nif the initial value is close to the root. They can also be applied to\nfunctions defined on (a subset of) the complex plane.\n\n+-------------+----------+----------+-----------+-------------+-------------+----------------+\n| Domain of f | Bracket? | Derivatives? | Solvers | Convergence |\n+ + +----------+-----------+ +-------------+----------------+\n| | | `fprime` | `fprime2` | | Guaranteed? | Rate(s)(*) |\n+=============+==========+==========+===========+=============+=============+================+\n| `R` | Yes | N/A | N/A | - bisection | - Yes | - 1 \"Linear\" |\n| | | | | - brentq | - Yes | - >=1, <= 1.62 |\n| | | | | - brenth | - Yes | - >=1, <= 1.62 |\n| | | | | - ridder | - Yes | - 2.0 (1.41) |\n| | | | | - toms748 | - Yes | - 2.7 (1.65) |\n+-------------+----------+----------+-----------+-------------+-------------+----------------+\n| `R` or `C` | No | No | No | secant | No | 1.62 (1.62) |\n+-------------+----------+----------+-----------+-------------+-------------+----------------+\n| `R` or `C` | No | Yes | No | newton | No | 2.00 (1.41) |\n+-------------+----------+----------+-----------+-------------+-------------+----------------+\n| `R` or `C` | No | Yes | No | exnewton | No\t | >2.00 (1.41) |\n+-------------+----------+----------+-----------+-------------+-------------+----------------+\n| `R` or `C` | No | Yes | Yes | halley | No | 3.00 (1.44) |\n+-------------+----------+----------+-----------+-------------+-------------+----------------+\n\n.. seealso::\n\n `scipy.optimize.cython_optimize` -- Typed Cython versions of zeros functions\n\nFixed point finding:\n\n.. autosummary::\n :toctree: generated/\n\n fixed_point - Single-variable fixed-point solver.\n\nMultidimensional\n----------------\n\n.. 
autosummary::\n :toctree: generated/\n\n root - Unified interface for nonlinear solvers of multivariate functions.\n\nThe `root` function supports the following methods:\n\n.. toctree::\n\n optimize.root-hybr\n optimize.root-lm\n optimize.root-broyden1\n optimize.root-broyden2\n optimize.root-anderson\n optimize.root-linearmixing\n optimize.root-diagbroyden\n optimize.root-excitingmixing\n optimize.root-krylov\n optimize.root-dfsane\n\nLinear programming\n==================\n\n.. autosummary::\n :toctree: generated/\n\n linprog -- Unified interface for minimizers of linear programming problems.\n\nThe `linprog` function supports the following methods:\n\n.. toctree::\n\n optimize.linprog-simplex\n optimize.linprog-interior-point\n optimize.linprog-revised_simplex\n\nThe simplex method supports callback functions, such as:\n\n.. autosummary::\n :toctree: generated/\n\n linprog_verbose_callback -- Sample callback function for linprog (simplex).\n\nAssignment problems:\n\n.. autosummary::\n :toctree: generated/\n\n linear_sum_assignment -- Solves the linear-sum assignment problem.\n\nUtilities\n=========\n\nFinite-difference approximation\n-------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n approx_fprime - Approximate the gradient of a scalar function.\n check_grad - Check the supplied derivative using finite differences.\n\n\nLine search\n-----------\n\n.. autosummary::\n :toctree: generated/\n\n bracket - Bracket a minimum, given two starting points.\n line_search - Return a step that satisfies the strong Wolfe conditions.\n\nHessian approximation\n---------------------\n\n.. autosummary::\n :toctree: generated/\n\n LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian.\n HessianUpdateStrategy - Interface for implementing Hessian update strategies\n\nBenchmark problems\n------------------\n\n.. autosummary::\n :toctree: generated/\n\n rosen - The Rosenbrock function.\n rosen_der - The derivative of the Rosenbrock function.\n rosen_hess - The Hessian matrix of the Rosenbrock function.\n rosen_hess_prod - Product of the Rosenbrock Hessian with a vector.\n\nLegacy functions\n================\n\nThe functions below are not recommended for use in new scripts;\nall of these methods are accessible via a newer, more consistent\ninterfaces, provided by the interfaces above.\n\nOptimization\n------------\n\nGeneral-purpose multivariate methods:\n\n.. autosummary::\n :toctree: generated/\n\n fmin - Nelder-Mead Simplex algorithm.\n fmin_powell - Powell's (modified) level set method.\n fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm.\n fmin_bfgs - Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno).\n fmin_ncg - Line-search Newton Conjugate Gradient.\n\nConstrained multivariate methods:\n\n.. autosummary::\n :toctree: generated/\n\n fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer.\n fmin_tnc - Truncated Newton code.\n fmin_cobyla - Constrained optimization by linear approximation.\n fmin_slsqp - Minimization using sequential least-squares programming.\n\nUnivariate (scalar) minimization methods:\n\n.. autosummary::\n :toctree: generated/\n\n fminbound - Bounded minimization of a scalar function.\n brent - 1-D function minimization using Brent method.\n golden - 1-D function minimization using Golden Section method.\n\nLeast-squares\n-------------\n\n.. 
autosummary::\n :toctree: generated/\n\n leastsq - Minimize the sum of squares of M equations in N unknowns.\n\nRoot finding\n------------\n\nGeneral nonlinear solvers:\n\n.. autosummary::\n :toctree: generated/\n\n fsolve - Non-linear multivariable equation solver.\n broyden1 - Broyden's first method.\n broyden2 - Broyden's second method.\n\nLarge-scale nonlinear solvers:\n\n.. autosummary::\n :toctree: generated/\n\n newton_krylov\n anderson\n\nSimple iteration solvers:\n\n.. autosummary::\n :toctree: generated/\n\n excitingmixing\n linearmixing\n diagbroyden\n\n:mod:`Additional information on the nonlinear solvers <scipy.optimize.nonlin>`\n\"\"\"\n\nfrom .optimize import *\nfrom ._minimize import *\nfrom ._root import *\nfrom ._root_scalar import *\nfrom .minpack import *\nfrom .zeros import *\nfrom .lbfgsb import fmin_l_bfgs_b, LbfgsInvHessProduct\nfrom .tnc import fmin_tnc\nfrom .cobyla import fmin_cobyla\nfrom .nonlin import *\nfrom .slsqp import fmin_slsqp\nfrom .nnls import nnls\nfrom ._basinhopping import basinhopping\nfrom ._linprog import linprog, linprog_verbose_callback\nfrom ._lsap import linear_sum_assignment\nfrom ._differentialevolution import differential_evolution\nfrom ._lsq import least_squares, lsq_linear\nfrom ._constraints import (NonlinearConstraint,\n LinearConstraint,\n Bounds)\nfrom ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1\nfrom ._shgo import shgo\nfrom ._dual_annealing import dual_annealing\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n\nfrom scipy._lib._testutils import PytestTester\ntest = PytestTester(__name__)\ndel PytestTester\n"
]
| [
[
"scipy._lib._testutils.PytestTester"
]
]
|
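The `root_scalar` interface catalogued in the row above unifies the bracketing and derivative-based solvers compared in its convergence table. Below is a minimal sketch against the standard SciPy API (the fork-specific `exnewton` method listed in this docstring is omitted, since it may not exist in mainline SciPy):

```python
# Minimal sketch of root_scalar, contrasting a guaranteed bracketing method
# (brentq) with a derivative-based one (newton). Assumes standard SciPy.
from scipy.optimize import root_scalar

def f(x):
    return x**3 - 2*x - 5      # one simple real root near x = 2.0946

def fprime(x):
    return 3*x**2 - 2

# Bracketing: f(2) < 0 < f(3), so convergence is guaranteed.
brent = root_scalar(f, bracket=[2, 3], method="brentq")

# Newton: quadratic convergence (rate 2.00 in the table) near a good start.
newt = root_scalar(f, x0=2.0, fprime=fprime, method="newton")

print(brent.root, brent.iterations)   # ~2.0946
print(newt.root, newt.iterations)     # same root, typically fewer iterations
```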
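The same file documents the unified `linprog` interface. A small sketch of a bounded LP, assuming the SciPy versions that ship the methods listed there (`"interior-point"` has since been removed from newer SciPy releases):

```python
# Sketch of linprog: minimize c @ x subject to A_ub @ x <= b_ub, x >= 0
# (non-negativity is linprog's default bound).
import numpy as np
from scipy.optimize import linprog

c = np.array([-1.0, -2.0])            # maximize x0 + 2*x1 via negation
A_ub = np.array([[1.0, 1.0],
                 [1.0, -1.0]])
b_ub = np.array([4.0, 1.0])

res = linprog(c, A_ub=A_ub, b_ub=b_ub, method="interior-point")
print(res.x, res.fun)                  # optimum near x = [0, 4], fun = -8
```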
rraminen/pytorch | [
"f7d5d02a1d873ef61b10186225704b2b283a989a"
]
| [
"torch/utils/data/datapipes/dataframe/dataframe_wrapper.py"
]
| [
"try:\n import pandas # type: ignore[import]\n\n # pandas used only for prototyping, will be shortly replaced with TorchArrow\n WITH_PANDAS = True\nexcept ImportError:\n WITH_PANDAS = False\n\n\nclass PandasWrapper:\n @classmethod\n def create_dataframe(cls, data, columns):\n if not WITH_PANDAS:\n raise Exception(\"DataFrames prototype requires pandas to function\")\n return pandas.DataFrame(data, columns=columns)\n\n @classmethod\n def is_dataframe(cls, data):\n if not WITH_PANDAS:\n return False\n return isinstance(data, pandas.core.frame.DataFrame)\n\n @classmethod\n def is_column(cls, data):\n if not WITH_PANDAS:\n return False\n return isinstance(data, pandas.core.series.Series)\n\n @classmethod\n def iterate(cls, data):\n if not WITH_PANDAS:\n raise Exception(\"DataFrames prototype requires pandas to function\")\n for d in data:\n yield d\n\n @classmethod\n def concat(cls, buffer):\n if not WITH_PANDAS:\n raise Exception(\"DataFrames prototype requires pandas to function\")\n return pandas.concat(buffer)\n\n @classmethod\n def get_item(cls, data, idx):\n if not WITH_PANDAS:\n raise Exception(\"DataFrames prototype requires pandas to function\")\n return data[idx : idx + 1]\n\n @classmethod\n def get_len(cls, df):\n if not WITH_PANDAS:\n raise Exception(\"DataFrames prototype requires pandas to function\")\n return len(df.index)\n\n\n# When you build own implementation just override it with dataframe_wrapper.set_df_wrapper(new_wrapper_class)\ndefault_wrapper = PandasWrapper\n\n\ndef get_df_wrapper():\n return default_wrapper\n\n\ndef set_df_wrapper(wrapper):\n default_wrapper = wrapper\n\n\ndef create_dataframe(data, columns=None):\n wrapper = get_df_wrapper()\n wrapper.create_dataframe(data, columns)\n\n\ndef is_dataframe(data):\n wrapper = get_df_wrapper()\n wrapper.is_dataframe(data)\n\n\ndef is_column(data):\n wrapper = get_df_wrapper()\n wrapper.is_column(data)\n\n\ndef concat(buffer):\n wrapper = get_df_wrapper()\n wrapper.concat(buffer)\n\n\ndef iterate(data):\n wrapper = get_df_wrapper()\n wrapper.iterate(data)\n\n\ndef get_item(data, idx):\n wrapper = get_df_wrapper()\n wrapper.get_item(data, idx)\n\n\ndef get_len(df):\n wrapper = get_df_wrapper()\n wrapper.get_len(df)\n"
]
| [
[
"pandas.concat",
"pandas.DataFrame"
]
]
|
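The wrapper registry in the row above dispatches every module-level helper through whichever class was last registered. A hypothetical usage sketch follows; the `ListWrapper` class is invented for illustration, the import path is inferred from the file_path column, and the example relies on the `global` fix to `set_df_wrapper` applied above:

```python
# Hypothetical sketch: swapping in a custom wrapper. ListWrapper implements
# only the methods exercised below and is not part of the original file.
from torch.utils.data.datapipes.dataframe import dataframe_wrapper

class ListWrapper:
    @classmethod
    def create_dataframe(cls, data, columns):
        # Represent each row as a plain dict instead of a pandas DataFrame.
        return [dict(zip(columns, row)) for row in data]

    @classmethod
    def get_len(cls, df):
        return len(df)

dataframe_wrapper.set_df_wrapper(ListWrapper)   # rebinds default_wrapper
frame = dataframe_wrapper.create_dataframe([(1, "a"), (2, "b")],
                                           columns=["id", "name"])
print(dataframe_wrapper.get_len(frame))         # 2
```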